You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucenenet.apache.org by ar...@apache.org on 2007/08/11 18:56:44 UTC
svn commit: r564939 [3/8] - in /incubator/lucene.net/trunk/C#/src: ./ Demo/
Demo/DeleteFiles/ Demo/DemoLib/ Demo/IndexFiles/ Demo/IndexHtml/
Demo/SearchFiles/ Lucene.Net/ Lucene.Net/Analysis/Standard/
Lucene.Net/Document/ Lucene.Net/Index/ Lucene.Net/Q...
Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestIndexReader.cs?view=diff&rev=564939&r1=564938&r2=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexReader.cs Sat Aug 11 09:56:37 2007
@@ -16,14 +16,21 @@
*/
using System;
+
using NUnit.Framework;
-using WhitespaceAnalyzer = Lucene.Net.Analysis.WhitespaceAnalyzer;
+
+using Directory = Lucene.Net.Store.Directory;
+using RAMDirectory = Lucene.Net.Store.RAMDirectory;
+using FSDirectory = Lucene.Net.Store.FSDirectory;
using StandardAnalyzer = Lucene.Net.Analysis.Standard.StandardAnalyzer;
+using WhitespaceAnalyzer = Lucene.Net.Analysis.WhitespaceAnalyzer;
using Document = Lucene.Net.Documents.Document;
using Field = Lucene.Net.Documents.Field;
-using Directory = Lucene.Net.Store.Directory;
-using FSDirectory = Lucene.Net.Store.FSDirectory;
-using RAMDirectory = Lucene.Net.Store.RAMDirectory;
+using IndexSearcher = Lucene.Net.Search.IndexSearcher;
+using Hits = Lucene.Net.Search.Hits;
+using TermQuery = Lucene.Net.Search.TermQuery;
+using _TestUtil = Lucene.Net.Util._TestUtil;
+using MockRAMDirectory = Lucene.Net.Store.MockRAMDirectory;
namespace Lucene.Net.Index
{
@@ -41,7 +48,11 @@
// TestRunner.run (new TestIndexReader("testFilesOpenClose"));
}
- public virtual void TestIsCurrent()
+ // public TestIndexReader(System.String name)
+ // {
+ // }
+
+ public virtual void TestIsCurrent()
{
RAMDirectory d = new RAMDirectory();
IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(), true);
@@ -220,22 +231,105 @@
Assert.AreEqual(100, deleted, "deleted count");
Assert.AreEqual(100, reader.DocFreq(searchTerm), "deleted docFreq");
AssertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
- reader.Close();
- // CREATE A NEW READER and re-test
+ // open a 2nd reader to make sure first reader can
+ // commit its changes (.del) while second reader
+ // is open:
+ IndexReader reader2 = IndexReader.Open(dir);
+ reader.Close();
+
+ // CREATE A NEW READER and re-test
reader = IndexReader.Open(dir);
Assert.AreEqual(100, reader.DocFreq(searchTerm), "deleted docFreq");
AssertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
reader.Close();
}
- [Test]
+ // Make sure you can set norms & commit even if a reader
+ // is open against the index:
+ [Test]
+ public virtual void TestWritingNorms()
+ {
+ //UPGRADE_ISSUE: Method 'java.lang.System.getProperty' was not converted. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1000_javalangSystem'"
+ System.String tempDir = SupportClass.AppSettings.Get("tempDir", "");
+ if (tempDir == null)
+ throw new System.IO.IOException("tempDir undefined, cannot run test");
+
+ System.IO.FileInfo indexDir = new System.IO.FileInfo(System.IO.Path.Combine(tempDir, "lucenetestnormwriter"));
+ Directory dir = FSDirectory.GetDirectory(indexDir);
+ IndexWriter writer = null;
+ IndexReader reader = null;
+ Term searchTerm = new Term("content", "aaa");
+
+ // add 1 documents with term : aaa
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ AddDoc(writer, searchTerm.Text());
+ writer.Close();
+
+ // now open reader & set norm for doc 0
+ reader = IndexReader.Open(dir);
+ reader.SetNorm(0, "content", (float) 2.0);
+
+ // we should be holding the write lock now:
+ Assert.IsTrue(IndexReader.IsLocked(dir), "locked");
+
+ reader.Commit();
+
+ // we should not be holding the write lock now:
+ Assert.IsTrue(!IndexReader.IsLocked(dir), "not locked");
+
+ // open a 2nd reader:
+ IndexReader reader2 = IndexReader.Open(dir);
+
+ // set norm again for doc 0
+ reader.SetNorm(0, "content", (float) 3.0);
+ Assert.IsTrue(IndexReader.IsLocked(dir), "locked");
+
+ reader.Close();
+
+ // we should not be holding the write lock now:
+ Assert.IsTrue(!IndexReader.IsLocked(dir), "not locked");
+
+ reader2.Close();
+ dir.Close();
+
+ RmDir(indexDir);
+ }
+
+
+ [Test]
public virtual void TestDeleteReaderWriterConflictUnoptimized()
{
DeleteReaderWriterConflict(false);
}
- [Test]
+ [Test]
+ public virtual void TestOpenEmptyDirectory()
+ {
+ System.String dirName = "test.empty";
+ System.IO.FileInfo fileDirName = new System.IO.FileInfo(dirName);
+ bool tmpBool;
+ if (System.IO.File.Exists(fileDirName.FullName))
+ tmpBool = true;
+ else
+ tmpBool = System.IO.Directory.Exists(fileDirName.FullName);
+ if (!tmpBool)
+ {
+ System.IO.Directory.CreateDirectory(fileDirName.FullName);
+ }
+ try
+ {
+ IndexReader reader = IndexReader.Open(fileDirName);
+ Assert.Fail("opening IndexReader on empty directory failed to produce FileNotFoundException");
+ }
+ catch (System.IO.FileNotFoundException e)
+ {
+ // GOOD
+ }
+ RmDir(fileDirName);
+ }
+
+ [Test]
public virtual void TestDeleteReaderWriterConflictOptimized()
{
DeleteReaderWriterConflict(true);
@@ -244,7 +338,7 @@
private void DeleteReaderWriterConflict(bool optimize)
{
//Directory dir = new RAMDirectory();
- Directory dir = GetDirectory(true);
+ Directory dir = GetDirectory();
Term searchTerm = new Term("content", "aaa");
Term searchTerm2 = new Term("content", "bbb");
@@ -327,38 +421,65 @@
reader.Close();
}
- private Directory GetDirectory(bool create)
- {
- return FSDirectory.GetDirectory(System.IO.Path.Combine(SupportClass.AppSettings.Get("tempDir", ""), "testIndex"), create);
- }
-
- [Test]
+ [Test]
public virtual void TestFilesOpenClose()
+ {
+ // Create initial data set
+ System.IO.FileInfo dirFile = new System.IO.FileInfo(System.IO.Path.Combine("tempDir", "testIndex"));
+ Directory dir = GetDirectory();
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ AddDoc(writer, "test");
+ writer.Close();
+ dir.Close();
+
+ // Try to erase the data - this ensures that the writer closed all files
+ _TestUtil.RmDir(dirFile);
+ dir = GetDirectory();
+
+ // Now create the data set again, just as before
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ AddDoc(writer, "test");
+ writer.Close();
+ dir.Close();
+
+ // Now open existing directory and test that reader closes all files
+ dir = GetDirectory();
+ IndexReader reader1 = IndexReader.Open(dir);
+ reader1.Close();
+ dir.Close();
+
+ // The following will fail if reader did not Close
+ // all files
+ _TestUtil.RmDir(dirFile);
+ }
+
+ public virtual void testLastModified()
{
- // Create initial data set
- Directory dir = GetDirectory(true);
+ Assert.IsFalse(IndexReader.IndexExists("there_is_no_such_index"));
+ Directory dir = new RAMDirectory();
+ Assert.IsFalse(IndexReader.IndexExists(dir));
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
- AddDoc(writer, "test");
+ AddDocumentWithFields(writer);
+ Assert.IsTrue(IndexReader.IsLocked(dir)); // writer open, so dir is locked
writer.Close();
- dir.Close();
-
- // Try to erase the data - this ensures that the writer closed all files
- dir = GetDirectory(true);
-
- // Now create the data set again, just as before
+ Assert.IsTrue(IndexReader.IndexExists(dir));
+ IndexReader reader = IndexReader.Open(dir);
+ Assert.IsFalse(IndexReader.IsLocked(dir)); // reader only, no lock
+ long version = IndexReader.LastModified(dir);
+ reader.Close();
+ // modify index and check version has been
+ // incremented:
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
- AddDoc(writer, "test");
+ AddDocumentWithFields(writer);
writer.Close();
- dir.Close();
-
- // Now open existing directory and test that reader closes all files
- dir = GetDirectory(false);
- IndexReader reader1 = IndexReader.Open(dir);
- reader1.Close();
- dir.Close();
-
- // The following will fail if reader did not close all files
- dir = GetDirectory(true);
+ reader = IndexReader.Open(dir);
+ Assert.IsTrue(version <= IndexReader.LastModified(dir), "old lastModified is " + version + "; new lastModified is " + IndexReader.LastModified(dir));
+ reader.Close();
+ }
+
+ private Directory GetDirectory()
+ {
+ return FSDirectory.GetDirectory(new System.IO.FileInfo(System.IO.Path.Combine(SupportClass.AppSettings.Get("tempDir", ""), "testIndex")));
}
[Test]
@@ -377,15 +498,41 @@
long version = IndexReader.LastModified(dir);
reader.Close();
// modify index and check version has been incremented:
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ // incremented:
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
AddDocumentWithFields(writer);
writer.Close();
reader = IndexReader.Open(dir);
- Assert.IsTrue(version < IndexReader.GetCurrentVersion(dir));
- reader.Close();
+ Assert.IsTrue(version <= IndexReader.LastModified(dir), "old lastModified is " + version + "; new lastModified is " + IndexReader.LastModified(dir));
+ reader.Close();
}
- [Test]
+ [Test]
+ public virtual void TestVersion()
+ {
+ Assert.IsFalse(IndexReader.IndexExists("there_is_no_such_index"));
+ Directory dir = new RAMDirectory();
+ Assert.IsFalse(IndexReader.IndexExists(dir));
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ AddDocumentWithFields(writer);
+ Assert.IsTrue(IndexReader.IsLocked(dir)); // writer open, so dir is locked
+ writer.Close();
+ Assert.IsTrue(IndexReader.IndexExists(dir));
+ IndexReader reader = IndexReader.Open(dir);
+ Assert.IsFalse(IndexReader.IsLocked(dir)); // reader only, no lock
+ long version = IndexReader.GetCurrentVersion(dir);
+ reader.Close();
+ // modify index and check version has been
+ // incremented:
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ AddDocumentWithFields(writer);
+ writer.Close();
+ reader = IndexReader.Open(dir);
+ Assert.IsTrue(version < IndexReader.GetCurrentVersion(dir), "old version is " + version + "; new version is " + IndexReader.GetCurrentVersion(dir));
+ reader.Close();
+ }
+
+ [Test]
public virtual void TestLock()
{
Directory dir = new RAMDirectory();
@@ -427,7 +574,45 @@
reader.Close();
}
- [Test]
+ [Test]
+ public virtual void TestUndeleteAllAfterClose()
+ {
+ Directory dir = new RAMDirectory();
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ AddDocumentWithFields(writer);
+ AddDocumentWithFields(writer);
+ writer.Close();
+ IndexReader reader = IndexReader.Open(dir);
+ reader.DeleteDocument(0);
+ reader.DeleteDocument(1);
+ reader.Close();
+ reader = IndexReader.Open(dir);
+ reader.UndeleteAll();
+ Assert.AreEqual(2, reader.NumDocs()); // nothing has really been deleted thanks to undeleteAll()
+ reader.Close();
+ }
+
+ [Test]
+ public virtual void TestUndeleteAllAfterCloseThenReopen()
+ {
+ Directory dir = new RAMDirectory();
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ AddDocumentWithFields(writer);
+ AddDocumentWithFields(writer);
+ writer.Close();
+ IndexReader reader = IndexReader.Open(dir);
+ reader.DeleteDocument(0);
+ reader.DeleteDocument(1);
+ reader.Close();
+ reader = IndexReader.Open(dir);
+ reader.UndeleteAll();
+ reader.Close();
+ reader = IndexReader.Open(dir);
+ Assert.AreEqual(2, reader.NumDocs()); // nothing has really been deleted thanks to undeleteAll()
+ reader.Close();
+ }
+
+ [Test]
public virtual void TestDeleteReaderReaderConflictUnoptimized()
{
DeleteReaderReaderConflict(false);
@@ -439,9 +624,342 @@
DeleteReaderReaderConflict(true);
}
- private void DeleteReaderReaderConflict(bool optimize)
+ /// <summary> Make sure if reader tries to commit but hits disk
+ /// full that reader remains consistent and usable.
+ /// </summary>
+ [Test]
+ public virtual void TestDiskFull()
+ {
+
+ bool debug = false;
+ Term searchTerm = new Term("content", "aaa");
+ int START_COUNT = 157;
+ int END_COUNT = 144;
+
+ // First build up a starting index:
+ RAMDirectory startDir = new RAMDirectory();
+ IndexWriter writer = new IndexWriter(startDir, new WhitespaceAnalyzer(), true);
+ for (int i = 0; i < 157; i++)
+ {
+ Lucene.Net.Documents.Document d = new Lucene.Net.Documents.Document();
+ d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.UN_TOKENIZED));
+ d.Add(new Field("content", "aaa " + i, Field.Store.NO, Field.Index.TOKENIZED));
+ writer.AddDocument(d);
+ }
+ writer.Close();
+
+ long diskUsage = startDir.SizeInBytes();
+ long diskFree = diskUsage + 100;
+
+ System.IO.IOException err = null;
+
+ bool done = false;
+
+ // Iterate w/ ever increasing free disk space:
+ while (!done)
+ {
+ MockRAMDirectory dir = new MockRAMDirectory(startDir);
+ IndexReader reader = IndexReader.Open(dir);
+
+ // For each disk size, first try to commit against
+ // dir that will hit random IOExceptions & disk
+ // full; after, give it infinite disk space & turn
+ // off random IOExceptions & retry w/ same reader:
+ bool success = false;
+
+ for (int x = 0; x < 2; x++)
+ {
+
+ double rate = 0.05;
+ double diskRatio = ((double) diskFree) / diskUsage;
+ long thisDiskFree;
+ System.String testName;
+
+ if (0 == x)
+ {
+ thisDiskFree = diskFree;
+ if (diskRatio >= 2.0)
+ {
+ rate /= 2;
+ }
+ if (diskRatio >= 4.0)
+ {
+ rate /= 2;
+ }
+ if (diskRatio >= 6.0)
+ {
+ rate = 0.0;
+ }
+ if (debug)
+ {
+ System.Console.Out.WriteLine("\ncycle: " + diskFree + " bytes");
+ }
+ testName = "disk full during reader.Close() @ " + thisDiskFree + " bytes";
+ }
+ else
+ {
+ thisDiskFree = 0;
+ rate = 0.0;
+ if (debug)
+ {
+ System.Console.Out.WriteLine("\ncycle: same writer: unlimited disk space");
+ }
+ testName = "reader re-use after disk full";
+ }
+
+ dir.SetMaxSizeInBytes(thisDiskFree);
+ dir.SetRandomIOExceptionRate(rate, diskFree);
+
+ try
+ {
+ if (0 == x)
+ {
+ int docId = 12;
+ for (int i = 0; i < 13; i++)
+ {
+ reader.DeleteDocument(docId);
+ reader.SetNorm(docId, "contents", (float) 2.0);
+ docId += 12;
+ }
+ }
+ reader.Close();
+ success = true;
+ if (0 == x)
+ {
+ done = true;
+ }
+ }
+ catch (System.IO.IOException e)
+ {
+ if (debug)
+ {
+ System.Console.Out.WriteLine(" hit IOException: " + e);
+ }
+ err = e;
+ if (1 == x)
+ {
+ System.Console.Error.WriteLine(e.StackTrace);
+ Assert.Fail(testName + " hit IOException after disk space was freed up");
+ }
+ }
+
+ // Whether we succeeded or failed, check that all
+ // un-referenced files were in fact deleted (ie,
+ // we did not create garbage). Just create a
+ // new IndexFileDeleter, have it delete
+ // unreferenced files, then verify that in fact
+ // no files were deleted:
+ System.String[] startFiles = dir.List();
+ SegmentInfos infos = new SegmentInfos();
+ infos.Read(dir);
+ IndexFileDeleter d = new IndexFileDeleter(infos, dir);
+ d.FindDeletableFiles();
+ d.DeleteFiles();
+ System.String[] endFiles = dir.List();
+
+ System.Array.Sort(startFiles);
+ System.Array.Sort(endFiles);
+
+ //for(int i=0;i<startFiles.length;i++) {
+ // System.out.println(" startFiles: " + i + ": " + startFiles[i]);
+ //}
+
+ if (!startFiles.Equals(endFiles))
+ {
+ System.String successStr;
+ if (success)
+ {
+ successStr = "success";
+ }
+ else
+ {
+ successStr = "IOException";
+ System.Console.Error.WriteLine(err.StackTrace);
+ }
+ Assert.Fail("reader.Close() failed to delete unreferenced files after " + successStr + " (" + diskFree + " bytes): before delete:\n " + ArrayToString(startFiles) + "\n after delete:\n " + ArrayToString(endFiles));
+ }
+
+ // Finally, verify index is not corrupt, and, if
+ // we succeeded, we see all docs changed, and if
+ // we failed, we see either all docs or no docs
+ // changed (transactional semantics):
+ IndexReader newReader = null;
+ try
+ {
+ newReader = IndexReader.Open(dir);
+ }
+ catch (System.IO.IOException e)
+ {
+ System.Console.Error.WriteLine(e.StackTrace);
+ Assert.Fail(testName + ":exception when creating IndexReader after disk full during Close: " + e);
+ }
+ /*
+ int result = newReader.docFreq(searchTerm);
+ if (success) {
+ if (result != END_COUNT) {
+ fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT);
+ }
+ } else {
+ // On hitting exception we still may have added
+ // all docs:
+ if (result != START_COUNT && result != END_COUNT) {
+ err.printStackTrace();
+ fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
+ }
+ }
+ */
+
+ IndexSearcher searcher = new IndexSearcher(newReader);
+ Hits hits = null;
+ try
+ {
+ hits = searcher.Search(new TermQuery(searchTerm));
+ }
+ catch (System.IO.IOException e)
+ {
+ System.Console.Error.WriteLine(e.StackTrace);
+ Assert.Fail(testName + ": exception when searching: " + e);
+ }
+ int result2 = hits.Length();
+ if (success)
+ {
+ if (result2 != END_COUNT)
+ {
+ Assert.Fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
+ }
+ }
+ else
+ {
+ // On hitting exception we still may have added
+ // all docs:
+ if (result2 != START_COUNT && result2 != END_COUNT)
+ {
+ System.Console.Error.WriteLine(err.StackTrace);
+ Assert.Fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT);
+ }
+ }
+
+ searcher.Close();
+ newReader.Close();
+
+ if (result2 == END_COUNT)
+ {
+ break;
+ }
+ }
+
+ dir.Close();
+
+ // Try again with 10 more bytes of free space:
+ diskFree += 10;
+ }
+ }
+
+ [Test]
+ public virtual void TestDocsOutOfOrderJIRA140()
+ {
+ Directory dir = new RAMDirectory();
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ for (int i = 0; i < 11; i++)
+ {
+ AddDoc(writer, "aaa");
+ }
+ writer.Close();
+ IndexReader reader = IndexReader.Open(dir);
+
+ // Try to delete an invalid docId, yet, within range
+ // of the final bits of the BitVector:
+
+ bool gotException = false;
+ try
+ {
+ reader.DeleteDocument(11);
+ }
+ catch (System.IndexOutOfRangeException e)
+ {
+ gotException = true;
+ }
+ reader.Close();
+
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
+
+ // We must add more docs to get a new segment written
+ for (int i = 0; i < 11; i++)
+ {
+ AddDoc(writer, "aaa");
+ }
+
+ // Without the fix for LUCENE-140 this call will
+ // [incorrectly] hit a "docs out of order"
+ // IllegalStateException because above out-of-bounds
+ // deleteDocument corrupted the index:
+ writer.Optimize();
+
+ if (!gotException)
+ {
+ Assert.Fail("delete of out-of-bounds doc number failed to hit exception");
+ }
+ }
+
+ [Test]
+ public virtual void TestExceptionReleaseWriteLockJIRA768()
+ {
+
+ Directory dir = new RAMDirectory();
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ AddDoc(writer, "aaa");
+ writer.Close();
+
+ IndexReader reader = IndexReader.Open(dir);
+ try
+ {
+ reader.DeleteDocument(1);
+ Assert.Fail("did not hit exception when deleting an invalid doc number");
+ }
+ catch (System.IndexOutOfRangeException e)
+ {
+ // expected
+ }
+ reader.Close();
+ if (IndexReader.IsLocked(dir))
+ {
+ Assert.Fail("write lock is still held after Close");
+ }
+
+ reader = IndexReader.Open(dir);
+ try
+ {
+ reader.SetNorm(1, "content", (float) 2.0);
+ Assert.Fail("did not hit exception when calling setNorm on an invalid doc number");
+ }
+ catch (System.IndexOutOfRangeException e)
+ {
+ // expected
+ }
+ reader.Close();
+ if (IndexReader.IsLocked(dir))
+ {
+ Assert.Fail("write lock is still held after Close");
+ }
+ }
+
+ private System.String ArrayToString(System.String[] l)
+ {
+ System.String s = "";
+ for (int i = 0; i < l.Length; i++)
+ {
+ if (i > 0)
+ {
+ s += "\n ";
+ }
+ s += l[i];
+ }
+ return s;
+ }
+
+ private void DeleteReaderReaderConflict(bool optimize)
{
- Directory dir = GetDirectory(true);
+ Directory dir = GetDirectory();
Term searchTerm1 = new Term("content", "aaa");
Term searchTerm2 = new Term("content", "bbb");
@@ -584,5 +1102,41 @@
doc.Add(new Field("content", value_Renamed, Field.Store.NO, Field.Index.TOKENIZED));
writer.AddDocument(doc);
}
- }
+
+ private void RmDir(System.IO.FileInfo dir)
+ {
+ System.IO.FileInfo[] files = SupportClass.FileSupport.GetFiles(dir);
+ for (int i = 0; i < files.Length; i++)
+ {
+ bool tmpBool;
+ if (System.IO.File.Exists(files[i].FullName))
+ {
+ System.IO.File.Delete(files[i].FullName);
+ tmpBool = true;
+ }
+ else if (System.IO.Directory.Exists(files[i].FullName))
+ {
+ System.IO.Directory.Delete(files[i].FullName);
+ tmpBool = true;
+ }
+ else
+ tmpBool = false;
+ bool generatedAux = tmpBool;
+ }
+ bool tmpBool2;
+ if (System.IO.File.Exists(dir.FullName))
+ {
+ System.IO.File.Delete(dir.FullName);
+ tmpBool2 = true;
+ }
+ else if (System.IO.Directory.Exists(dir.FullName))
+ {
+ System.IO.Directory.Delete(dir.FullName);
+ tmpBool2 = true;
+ }
+ else
+ tmpBool2 = false;
+ bool generatedAux2 = tmpBool2;
+ }
+ }
}
Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestIndexWriter.cs?view=diff&rev=564939&r1=564938&r2=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriter.cs Sat Aug 11 09:56:37 2007
@@ -16,12 +16,24 @@
*/
using System;
+
using NUnit.Framework;
+
using WhitespaceAnalyzer = Lucene.Net.Analysis.WhitespaceAnalyzer;
using Document = Lucene.Net.Documents.Document;
using Field = Lucene.Net.Documents.Field;
+using IndexSearcher = Lucene.Net.Search.IndexSearcher;
+using Hits = Lucene.Net.Search.Hits;
+using TermQuery = Lucene.Net.Search.TermQuery;
using Directory = Lucene.Net.Store.Directory;
+using FSDirectory = Lucene.Net.Store.FSDirectory;
using RAMDirectory = Lucene.Net.Store.RAMDirectory;
+using IndexInput = Lucene.Net.Store.IndexInput;
+using IndexOutput = Lucene.Net.Store.IndexOutput;
+using MockRAMDirectory = Lucene.Net.Store.MockRAMDirectory;
+using LockFactory = Lucene.Net.Store.LockFactory;
+using Lock = Lucene.Net.Store.Lock;
+using SingleInstanceLockFactory = Lucene.Net.Store.SingleInstanceLockFactory;
namespace Lucene.Net.Index
{
@@ -34,6 +46,35 @@
[TestFixture]
public class TestIndexWriter
{
+ [Serializable]
+ public class MyRAMDirectory : RAMDirectory
+ {
+ private void InitBlock(TestIndexWriter enclosingInstance)
+ {
+ this.enclosingInstance = enclosingInstance;
+ }
+ private TestIndexWriter enclosingInstance;
+ public TestIndexWriter Enclosing_Instance
+ {
+ get
+ {
+ return enclosingInstance;
+ }
+
+ }
+ private LockFactory myLockFactory;
+ internal MyRAMDirectory(TestIndexWriter enclosingInstance)
+ {
+ InitBlock(enclosingInstance);
+ lockFactory = null;
+ myLockFactory = new SingleInstanceLockFactory();
+ }
+ public override Lock MakeLock(System.String name)
+ {
+ return myLockFactory.MakeLock(name);
+ }
+ }
+
[Test]
public virtual void TestDocCount()
{
@@ -43,9 +84,14 @@
IndexReader reader = null;
int i;
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ IndexWriter.SetDefaultWriteLockTimeout(2000);
+ Assert.AreEqual(2000, IndexWriter.GetDefaultWriteLockTimeout());
+
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer());
+
+ IndexWriter.SetDefaultWriteLockTimeout(1000);
- // add 100 documents
+ // add 100 documents
for (i = 0; i < 100; i++)
{
AddDoc(writer);
@@ -62,7 +108,7 @@
reader.Close();
// test doc count before segments are merged/index is optimized
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer());
Assert.AreEqual(100, writer.DocCount());
writer.Close();
@@ -72,7 +118,7 @@
reader.Close();
// optimize the index and check that the new doc count is correct
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer());
writer.Optimize();
Assert.AreEqual(60, writer.DocCount());
writer.Close();
@@ -82,7 +128,13 @@
Assert.AreEqual(60, reader.MaxDoc());
Assert.AreEqual(60, reader.NumDocs());
reader.Close();
- }
+
+ // make sure opening a new index for create over
+ // this existing one works correctly:
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ Assert.AreEqual(0, writer.DocCount());
+ writer.Close();
+ }
private void AddDoc(IndexWriter writer)
{
@@ -90,5 +142,773 @@
doc.Add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
writer.AddDocument(doc);
}
- }
+
+ private void AddDocWithIndex(IndexWriter writer, int index)
+ {
+ Lucene.Net.Documents.Document doc = new Lucene.Net.Documents.Document();
+ doc.Add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.TOKENIZED));
+ doc.Add(new Field("id", "" + index, Field.Store.YES, Field.Index.TOKENIZED));
+ writer.AddDocument(doc);
+ }
+
+ /*
+ Test: make sure when we run out of disk space or hit
+ random IOExceptions in any of the addIndexes(*) calls
+ that 1) index is not corrupt (searcher can open/search
+ it) and 2) transactional semantics are followed:
+ either all or none of the incoming documents were in
+ fact added.
+ */
+ [Test]
+ public virtual void TestAddIndexOnDiskFull()
+ {
+
+ int START_COUNT = 57;
+ int NUM_DIR = 50;
+ int END_COUNT = START_COUNT + NUM_DIR * 25;
+
+ bool debug = false;
+
+ // Build up a bunch of dirs that have indexes which we
+ // will then merge together by calling addIndexes(*):
+ Directory[] dirs = new Directory[NUM_DIR];
+ long inputDiskUsage = 0;
+ for (int i = 0; i < NUM_DIR; i++)
+ {
+ dirs[i] = new RAMDirectory();
+ IndexWriter writer = new IndexWriter(dirs[i], new WhitespaceAnalyzer(), true);
+ for (int j = 0; j < 25; j++)
+ {
+ AddDocWithIndex(writer, 25 * i + j);
+ }
+ writer.Close();
+ System.String[] files = dirs[i].List();
+ for (int j = 0; j < files.Length; j++)
+ {
+ inputDiskUsage += dirs[i].FileLength(files[j]);
+ }
+ }
+
+ // Now, build a starting index that has START_COUNT docs. We
+ // will then try to addIndexes into a copy of this:
+ RAMDirectory startDir = new RAMDirectory();
+ IndexWriter writer2 = new IndexWriter(startDir, new WhitespaceAnalyzer(), true);
+ for (int j = 0; j < START_COUNT; j++)
+ {
+ AddDocWithIndex(writer2, j);
+ }
+ writer2.Close();
+
+ // Make sure starting index seems to be working properly:
+ Term searchTerm = new Term("content", "aaa");
+ IndexReader reader = IndexReader.Open(startDir);
+ Assert.AreEqual(57, reader.DocFreq(searchTerm), "first docFreq");
+
+ IndexSearcher searcher = new IndexSearcher(reader);
+ Hits hits = searcher.Search(new TermQuery(searchTerm));
+ Assert.AreEqual(57, hits.Length(), "first number of hits");
+ searcher.Close();
+ reader.Close();
+
+ // Iterate with larger and larger amounts of free
+ // disk space. With little free disk space,
+ // addIndexes will certainly run out of space &
+ // fail. Verify that when this happens, index is
+ // not corrupt and index in fact has added no
+ // documents. Then, we increase disk space by 1000
+ // bytes each iteration. At some point there is
+ // enough free disk space and addIndexes should
+ // succeed and index should show all documents were
+ // added.
+
+ // String[] files = startDir.list();
+ long diskUsage = startDir.SizeInBytes();
+
+ long startDiskUsage = 0;
+ System.String[] files2 = startDir.List();
+ for (int i = 0; i < files2.Length; i++)
+ {
+ startDiskUsage += startDir.FileLength(files2[i]);
+ }
+
+ for (int method = 0; method < 3; method++)
+ {
+
+ // Start with 100 bytes more than we are currently using:
+ long diskFree = diskUsage + 100;
+
+ bool success = false;
+ bool done = false;
+
+ System.String methodName;
+ if (0 == method)
+ {
+ methodName = "addIndexes(Directory[])";
+ }
+ else if (1 == method)
+ {
+ methodName = "addIndexes(IndexReader[])";
+ }
+ else
+ {
+ methodName = "addIndexesNoOptimize(Directory[])";
+ }
+
+ System.String testName = "disk full test for method " + methodName + " with disk full at " + diskFree + " bytes";
+
+ int cycleCount = 0;
+
+ while (!done)
+ {
+
+ cycleCount++;
+
+ // Make a new dir that will enforce disk usage:
+ MockRAMDirectory dir = new MockRAMDirectory(startDir);
+ writer2 = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
+ System.IO.IOException err = null;
+
+ for (int x = 0; x < 2; x++)
+ {
+
+ // Two loops: first time, limit disk space &
+ // throw random IOExceptions; second time, no
+ // disk space limit:
+
+ double rate = 0.05;
+ double diskRatio = ((double) diskFree) / diskUsage;
+ long thisDiskFree;
+
+ if (0 == x)
+ {
+ thisDiskFree = diskFree;
+ if (diskRatio >= 2.0)
+ {
+ rate /= 2;
+ }
+ if (diskRatio >= 4.0)
+ {
+ rate /= 2;
+ }
+ if (diskRatio >= 6.0)
+ {
+ rate = 0.0;
+ }
+ if (debug)
+ {
+ System.Console.Out.WriteLine("\ncycle: " + methodName + ": " + diskFree + " bytes");
+ }
+ }
+ else
+ {
+ thisDiskFree = 0;
+ rate = 0.0;
+ if (debug)
+ {
+ System.Console.Out.WriteLine("\ncycle: " + methodName + ", same writer: unlimited disk space");
+ }
+ }
+
+ dir.SetMaxSizeInBytes(thisDiskFree);
+ dir.SetRandomIOExceptionRate(rate, diskFree);
+
+ try
+ {
+
+ if (0 == method)
+ {
+ writer2.AddIndexes(dirs);
+ }
+ else if (1 == method)
+ {
+ IndexReader[] readers = new IndexReader[dirs.Length];
+ for (int i = 0; i < dirs.Length; i++)
+ {
+ readers[i] = IndexReader.Open(dirs[i]);
+ }
+ try
+ {
+ writer2.AddIndexes(readers);
+ }
+ finally
+ {
+ for (int i = 0; i < dirs.Length; i++)
+ {
+ readers[i].Close();
+ }
+ }
+ }
+ else
+ {
+ writer2.AddIndexesNoOptimize(dirs);
+ }
+
+ success = true;
+ if (debug)
+ {
+ System.Console.Out.WriteLine(" success!");
+ }
+
+ if (0 == x)
+ {
+ done = true;
+ }
+ }
+ catch (System.IO.IOException e)
+ {
+ success = false;
+ err = e;
+ if (debug)
+ {
+ System.Console.Out.WriteLine(" hit IOException: " + e);
+ }
+
+ if (1 == x)
+ {
+ System.Console.Error.WriteLine(e.StackTrace);
+ Assert.Fail(methodName + " hit IOException after disk space was freed up");
+ }
+ }
+
+ // Whether we succeeded or failed, check that all
+ // un-referenced files were in fact deleted (ie,
+ // we did not create garbage). Just create a
+ // new IndexFileDeleter, have it delete
+ // unreferenced files, then verify that in fact
+ // no files were deleted:
+ System.String[] startFiles = dir.List();
+ SegmentInfos infos = new SegmentInfos();
+ infos.Read(dir);
+ IndexFileDeleter d = new IndexFileDeleter(infos, dir);
+ d.FindDeletableFiles();
+ d.DeleteFiles();
+ System.String[] endFiles = dir.List();
+
+ System.Array.Sort(startFiles);
+ System.Array.Sort(endFiles);
+
+ /*
+ for(int i=0;i<startFiles.length;i++) {
+ System.out.println(" " + i + ": " + startFiles[i]);
+ }
+ */
+
+ if (!startFiles.Equals(endFiles))
+ {
+ System.String successStr;
+ if (success)
+ {
+ successStr = "success";
+ }
+ else
+ {
+ successStr = "IOException";
+ System.Console.Error.WriteLine(err.StackTrace);
+ }
+ Assert.Fail(methodName + " failed to delete unreferenced files after " + successStr + " (" + diskFree + " bytes): before delete:\n " + ArrayToString(startFiles) + "\n after delete:\n " + ArrayToString(endFiles));
+ }
+
+ if (debug)
+ {
+ System.Console.Out.WriteLine(" now test readers");
+ }
+
+ // Finally, verify index is not corrupt, and, if
+ // we succeeded, we see all docs added, and if we
+ // failed, we see either all docs or no docs added
+ // (transactional semantics):
+ try
+ {
+ reader = IndexReader.Open(dir);
+ }
+ catch (System.IO.IOException e)
+ {
+ System.Console.Error.WriteLine(e.StackTrace);
+ Assert.Fail(testName + ": exception when creating IndexReader: " + e);
+ }
+ int result = reader.DocFreq(searchTerm);
+ if (success)
+ {
+ if (result != END_COUNT)
+ {
+ Assert.Fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT);
+ }
+ }
+ else
+ {
+ // On hitting exception we still may have added
+ // all docs:
+ if (result != START_COUNT && result != END_COUNT)
+ {
+ System.Console.Error.WriteLine(err.StackTrace);
+ Assert.Fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
+ }
+ }
+
+ searcher = new IndexSearcher(reader);
+ try
+ {
+ hits = searcher.Search(new TermQuery(searchTerm));
+ }
+ catch (System.IO.IOException e)
+ {
+ System.Console.Error.WriteLine(e.StackTrace);
+ Assert.Fail(testName + ": exception when searching: " + e);
+ }
+ int result2 = hits.Length();
+ if (success)
+ {
+ if (result2 != result)
+ {
+ Assert.Fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
+ }
+ }
+ else
+ {
+ // On hitting exception we still may have added
+ // all docs:
+ if (result2 != result)
+ {
+ System.Console.Error.WriteLine(err.StackTrace);
+ Assert.Fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
+ }
+ }
+
+ searcher.Close();
+ reader.Close();
+ if (debug)
+ {
+ System.Console.Out.WriteLine(" count is " + result);
+ }
+
+ if (result == END_COUNT)
+ {
+ break;
+ }
+ }
+
+ // Javadocs state that temp free Directory space
+ // required is at most 2X total input size of
+ // indices so let's make sure:
+ Assert.IsTrue((dir.GetMaxUsedSizeInBytes() - startDiskUsage) < 2 * (startDiskUsage + inputDiskUsage), "max free Directory space required exceeded 1X the total input index sizes during " + methodName + ": max temp usage = " + (dir.GetMaxUsedSizeInBytes() - startDiskUsage) + " bytes; " + "starting disk usage = " + startDiskUsage + " bytes; " + "input index disk usage = " + inputDiskUsage + " bytes");
+
+ writer2.Close();
+ dir.Close();
+
+ // Try again with 1000 more bytes of free space:
+ diskFree += 1000;
+ }
+ }
+
+ startDir.Close();
+ }
+
+ /// <summary> Verify that Optimize() needs at most 1X the starting index
+ /// size as temporary free space, i.e. peak disk usage stays within 2X
+ /// of the starting usage (starting index + temp space).
+ /// </summary>
+ [Test]
+ public virtual void TestOptimizeTempSpaceUsage()
+ {
+
+ MockRAMDirectory dir = new MockRAMDirectory();
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ for (int docIndex = 0; docIndex < 500; docIndex++)
+ {
+ AddDocWithIndex(writer, docIndex);
+ }
+ writer.Close();
+
+ // Measure how much space the starting index occupies.
+ long startDiskUsage = 0;
+ foreach (System.String fileName in dir.List())
+ {
+ startDiskUsage += dir.FileLength(fileName);
+ }
+
+ // Track the peak usage across the optimize.
+ dir.ResetMaxUsedSizeInBytes();
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
+ writer.Optimize();
+ writer.Close();
+ long maxDiskUsage = dir.GetMaxUsedSizeInBytes();
+
+ Assert.IsTrue(maxDiskUsage <= 2 * startDiskUsage, "optimized used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (2 * startDiskUsage) + " (= 2X starting usage)");
+ }
+
+ // Renders a file-name array one name per line (indented) for failure
+ // messages. NOTE: the stray [Test] attribute was removed -- this is a
+ // private, string-returning helper, which NUnit cannot run as a test
+ // (the sibling copy in TestIndexWriterDelete carries no attribute).
+ private System.String ArrayToString(System.String[] l)
+ {
+ System.Text.StringBuilder buf = new System.Text.StringBuilder();
+ for (int i = 0; i < l.Length; i++)
+ {
+ if (i > 0)
+ {
+ buf.Append("\n ");
+ }
+ buf.Append(l[i]);
+ }
+ return buf.ToString();
+ }
+
+ // Make sure we can open an index for create even when a
+ // reader holds it open (this fails pre lock-less
+ // commits on windows):
+ [Test]
+ public virtual void TestCreateWithReader()
+ {
+ System.String tempDir = System.IO.Path.GetTempPath();
+ if (tempDir == null)
+ throw new System.IO.IOException("java.io.tmpdir undefined, cannot run test");
+ // Use Path.Combine rather than a hard-coded "\\" so the test is not
+ // tied to the Windows directory separator (matches TestCreateWithReader2).
+ System.IO.FileInfo indexDir = new System.IO.FileInfo(System.IO.Path.Combine(tempDir, "lucenetestindexwriter"));
+
+ try
+ {
+ Directory dir = FSDirectory.GetDirectory(indexDir);
+
+ // add one document & Close writer
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ AddDoc(writer);
+ writer.Close();
+
+ // now open reader:
+ IndexReader reader = IndexReader.Open(dir);
+ Assert.AreEqual(reader.NumDocs(), 1, "should be one document");
+
+ // now open index for create:
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ Assert.AreEqual(writer.DocCount(), 0, "should be zero documents");
+ AddDoc(writer);
+ writer.Close();
+
+ // The already-open reader keeps seeing its old one-doc snapshot,
+ // while a freshly opened reader sees the recreated index:
+ Assert.AreEqual(reader.NumDocs(), 1, "should be one document");
+ IndexReader reader2 = IndexReader.Open(dir);
+ Assert.AreEqual(reader2.NumDocs(), 1, "should be one document");
+ reader.Close();
+ reader2.Close();
+ }
+ finally
+ {
+ RmDir(indexDir);
+ }
+ }
+
+
+ // Same test as above, but use IndexWriter constructor
+ // that takes File:
+ [Test]
+ public virtual void TestCreateWithReader2()
+ {
+ System.String tempDir = System.IO.Path.GetTempPath();
+ if (tempDir == null)
+ throw new System.IO.IOException("java.io.tmpdir undefined, cannot run test");
+ System.IO.FileInfo indexDir = new System.IO.FileInfo(System.IO.Path.Combine(tempDir, "lucenetestindexwriter"));
+ try
+ {
+ // Build a one-document index and close the writer.
+ IndexWriter writer = new IndexWriter(indexDir, new WhitespaceAnalyzer(), true);
+ AddDoc(writer);
+ writer.Close();
+
+ // A reader opened now pins the one-document snapshot.
+ IndexReader firstReader = IndexReader.Open(indexDir);
+ Assert.AreEqual(firstReader.NumDocs(), 1, "should be one document");
+
+ // Re-create the index while that reader is still open.
+ writer = new IndexWriter(indexDir, new WhitespaceAnalyzer(), true);
+ Assert.AreEqual(writer.DocCount(), 0, "should be zero documents");
+ AddDoc(writer);
+ writer.Close();
+
+ // The old reader still sees its snapshot; a new one sees the new index.
+ Assert.AreEqual(firstReader.NumDocs(), 1, "should be one document");
+ IndexReader secondReader = IndexReader.Open(indexDir);
+ Assert.AreEqual(secondReader.NumDocs(), 1, "should be one document");
+ firstReader.Close();
+ secondReader.Close();
+ }
+ finally
+ {
+ RmDir(indexDir);
+ }
+ }
+
+ // Same test as above, but use IndexWriter constructor
+ // that takes String:
+ [Test]
+ public virtual void TestCreateWithReader3()
+ {
+ System.String tempDir = SupportClass.AppSettings.Get("tempDir", "");
+ // AppSettings.Get with a "" default never returns null, so the original
+ // null check was dead code; treat an empty result as "undefined" too
+ // (otherwise dirName would point at the filesystem root).
+ if (tempDir == null || tempDir.Length == 0)
+ throw new System.IO.IOException("java.io.tmpdir undefined, cannot run test");
+
+ // Portable join instead of hard-coded "/":
+ System.String dirName = System.IO.Path.Combine(tempDir, "lucenetestindexwriter");
+ try
+ {
+
+ // add one document & Close writer
+ IndexWriter writer = new IndexWriter(dirName, new WhitespaceAnalyzer(), true);
+ AddDoc(writer);
+ writer.Close();
+
+ // now open reader:
+ IndexReader reader = IndexReader.Open(dirName);
+ Assert.AreEqual(reader.NumDocs(), 1, "should be one document");
+
+ // now open index for create:
+ writer = new IndexWriter(dirName, new WhitespaceAnalyzer(), true);
+ Assert.AreEqual(writer.DocCount(), 0, "should be zero documents");
+ AddDoc(writer);
+ writer.Close();
+
+ // Old reader still sees one doc; a fresh reader sees the new index:
+ Assert.AreEqual(reader.NumDocs(), 1, "should be one document");
+ IndexReader reader2 = IndexReader.Open(dirName);
+ Assert.AreEqual(reader2.NumDocs(), 1, "should be one document");
+ reader.Close();
+ reader2.Close();
+ }
+ finally
+ {
+ RmDir(new System.IO.FileInfo(dirName));
+ }
+ }
+
+ // Simulate a writer that crashed while writing segments
+ // file: make sure we can still open the index (ie,
+ // gracefully fallback to the previous segments file),
+ // and that we can add to the index:
+ [Test]
+ public virtual void TestSimulatedCrashedWriter()
+ {
+ Directory dir = new RAMDirectory();
+
+ IndexWriter writer = null;
+
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+
+ // add 100 documents
+ for (int i = 0; i < 100; i++)
+ {
+ AddDoc(writer);
+ }
+
+ // Close
+ writer.Close();
+
+ long gen = SegmentInfos.GetCurrentSegmentGeneration(dir);
+ Assert.IsTrue(gen > 1, "segment generation should be > 1 but got " + gen);
+
+ // Make the next segments file, with last byte
+ // missing, to simulate a writer that crashed while
+ // writing segments file:
+ System.String fileNameIn = SegmentInfos.GetCurrentSegmentFileName(dir);
+ System.String fileNameOut = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1 + gen);
+ IndexInput in_Renamed = dir.OpenInput(fileNameIn);
+ IndexOutput out_Renamed = dir.CreateOutput(fileNameOut);
+ long length = in_Renamed.Length();
+ // Copy all but the final byte of the current segments file:
+ for (int i = 0; i < length - 1; i++)
+ {
+ out_Renamed.WriteByte(in_Renamed.ReadByte());
+ }
+ in_Renamed.Close();
+ out_Renamed.Close();
+
+ IndexReader reader = null;
+ try
+ {
+ reader = IndexReader.Open(dir);
+ }
+ catch (System.Exception e)
+ {
+ // Include the exception detail (the original discarded it):
+ Assert.Fail("reader failed to open on a crashed index: " + e);
+ }
+ reader.Close();
+
+ try
+ {
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ }
+ catch (System.Exception e)
+ {
+ Assert.Fail("writer failed to open on a crashed index: " + e);
+ }
+
+ // add 100 documents
+ for (int i = 0; i < 100; i++)
+ {
+ AddDoc(writer);
+ }
+
+ // Close
+ writer.Close();
+ }
+
+ // Simulate a corrupt index by removing last byte of
+ // latest segments file and make sure we get an
+ // IOException trying to open the index:
+ [Test]
+ public virtual void TestSimulatedCorruptIndex1()
+ {
+ Directory dir = new RAMDirectory();
+
+ IndexWriter writer = null;
+
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+
+ // add 100 documents
+ for (int i = 0; i < 100; i++)
+ {
+ AddDoc(writer);
+ }
+
+ // Close
+ writer.Close();
+
+ long gen = SegmentInfos.GetCurrentSegmentGeneration(dir);
+ Assert.IsTrue(gen > 1, "segment generation should be > 1 but got " + gen);
+
+ // Write a truncated copy of the segments file and delete the original:
+ System.String fileNameIn = SegmentInfos.GetCurrentSegmentFileName(dir);
+ System.String fileNameOut = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1 + gen);
+ IndexInput in_Renamed = dir.OpenInput(fileNameIn);
+ IndexOutput out_Renamed = dir.CreateOutput(fileNameOut);
+ long length = in_Renamed.Length();
+ for (int i = 0; i < length - 1; i++)
+ {
+ out_Renamed.WriteByte(in_Renamed.ReadByte());
+ }
+ in_Renamed.Close();
+ out_Renamed.Close();
+ dir.DeleteFile(fileNameIn);
+
+ IndexReader reader = null;
+ try
+ {
+ reader = IndexReader.Open(dir);
+ Assert.Fail("reader did not hit IOException on opening a corrupt index");
+ }
+ catch (System.IO.IOException)
+ {
+ // Expected. NOTE: fixed -- the original caught System.Exception,
+ // which also swallowed the AssertionException thrown by
+ // Assert.Fail above, so this test could never actually fail.
+ }
+ if (reader != null)
+ {
+ reader.Close();
+ }
+ }
+
+ // Simulate a corrupt index by removing one of the cfs
+ // files and make sure we get an IOException trying to
+ // open the index:
+ [Test]
+ public virtual void TestSimulatedCorruptIndex2()
+ {
+ Directory dir = new RAMDirectory();
+
+ IndexWriter writer = null;
+
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+
+ // add 100 documents
+ for (int i = 0; i < 100; i++)
+ {
+ AddDoc(writer);
+ }
+
+ // Close
+ writer.Close();
+
+ long gen = SegmentInfos.GetCurrentSegmentGeneration(dir);
+ Assert.IsTrue(gen > 1, "segment generation should be > 1 but got " + gen);
+
+ // Remove the first compound-segment file we find:
+ System.String[] files = dir.List();
+ for (int i = 0; i < files.Length; i++)
+ {
+ if (files[i].EndsWith(".cfs"))
+ {
+ dir.DeleteFile(files[i]);
+ break;
+ }
+ }
+
+ IndexReader reader = null;
+ try
+ {
+ reader = IndexReader.Open(dir);
+ Assert.Fail("reader did not hit IOException on opening a corrupt index");
+ }
+ catch (System.IO.IOException)
+ {
+ // Expected. NOTE: fixed -- the original caught System.Exception,
+ // which also swallowed the AssertionException thrown by
+ // Assert.Fail above, so this test could never actually fail.
+ }
+ if (reader != null)
+ {
+ reader.Close();
+ }
+ }
+
+ // Make sure that a Directory implementation that does
+ // not use LockFactory at all (ie overrides makeLock and
+ // implements its own private locking) works OK. This
+ // was raised on java-dev as loss of backwards
+ // compatibility.
+ [Test]
+ public virtual void TestNullLockFactory()
+ {
+
+
+ Directory dir = new MyRAMDirectory(this);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ for (int i = 0; i < 100; i++)
+ {
+ AddDoc(writer);
+ }
+ writer.Close();
+ IndexReader reader = IndexReader.Open(dir);
+ Term searchTerm = new Term("content", "aaa");
+ IndexSearcher searcher = new IndexSearcher(dir);
+ Hits hits = searcher.Search(new TermQuery(searchTerm));
+ Assert.AreEqual(100, hits.Length(), "did not get right number of hits");
+ // Close the searcher; the original instead called writer.Close() a
+ // second time here and leaked both the searcher and the reader.
+ searcher.Close();
+
+ // Re-create the index (while the reader is still open), then clean up:
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ writer.Close();
+ reader.Close();
+
+ dir.Close();
+ }
+
+ // Best-effort removal of a test directory: delete each entry, then the
+ // directory itself; entries that no longer exist are skipped. The dead
+ // tmpBool/generatedAux variables from the original machine translation
+ // were removed (they were unused and only generated compiler warnings).
+ // NOTE(review): child directories are deleted non-recursively, exactly
+ // as before, so a non-empty subdirectory would still throw.
+ private void RmDir(System.IO.FileInfo dir)
+ {
+ System.IO.FileInfo[] files = SupportClass.FileSupport.GetFiles(dir);
+ if (files != null)
+ {
+ for (int i = 0; i < files.Length; i++)
+ {
+ if (System.IO.File.Exists(files[i].FullName))
+ {
+ System.IO.File.Delete(files[i].FullName);
+ }
+ else if (System.IO.Directory.Exists(files[i].FullName))
+ {
+ System.IO.Directory.Delete(files[i].FullName);
+ }
+ }
+ }
+ if (System.IO.File.Exists(dir.FullName))
+ {
+ System.IO.File.Delete(dir.FullName);
+ }
+ else if (System.IO.Directory.Exists(dir.FullName))
+ {
+ System.IO.Directory.Delete(dir.FullName);
+ }
+ }
+ }
}
Added: incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriterDelete.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestIndexWriterDelete.cs?view=auto&rev=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriterDelete.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriterDelete.cs Sat Aug 11 09:56:37 2007
@@ -0,0 +1,496 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NUnit.Framework;
+
+using WhitespaceAnalyzer = Lucene.Net.Analysis.WhitespaceAnalyzer;
+using Document = Lucene.Net.Documents.Document;
+using Field = Lucene.Net.Documents.Field;
+using Hits = Lucene.Net.Search.Hits;
+using IndexSearcher = Lucene.Net.Search.IndexSearcher;
+using TermQuery = Lucene.Net.Search.TermQuery;
+using Directory = Lucene.Net.Store.Directory;
+using MockRAMDirectory = Lucene.Net.Store.MockRAMDirectory;
+using RAMDirectory = Lucene.Net.Store.RAMDirectory;
+
+namespace Lucene.Net.Index
+{
+
+ [TestFixture]
+ public class TestIndexWriterDelete
+ {
+
+ // Basic add-then-delete cycle against a compound-file index.
+ [Test]
+ public virtual void TestSimpleCase()
+ {
+ System.String[] keywords = new System.String[]{"1", "2"};
+ System.String[] unindexed = new System.String[]{"Netherlands", "Italy"};
+ System.String[] unstored = new System.String[]{"Amsterdam has lots of bridges", "Venice has lots of canals"};
+ System.String[] text = new System.String[]{"Amsterdam", "Venice"};
+
+ Directory dir = new RAMDirectory();
+ IndexWriter modifier = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ modifier.SetUseCompoundFile(true);
+ modifier.SetMaxBufferedDeleteTerms(1);
+
+ // One document per row of the parallel arrays above.
+ for (int docIndex = 0; docIndex < keywords.Length; docIndex++)
+ {
+ Lucene.Net.Documents.Document doc = new Lucene.Net.Documents.Document();
+ doc.Add(new Field("id", keywords[docIndex], Field.Store.YES, Field.Index.UN_TOKENIZED));
+ doc.Add(new Field("country", unindexed[docIndex], Field.Store.YES, Field.Index.NO));
+ doc.Add(new Field("contents", unstored[docIndex], Field.Store.NO, Field.Index.TOKENIZED));
+ doc.Add(new Field("city", text[docIndex], Field.Store.YES, Field.Index.TOKENIZED));
+ modifier.AddDocument(doc);
+ }
+ modifier.Optimize();
+
+ // Exactly one hit before the delete, none after it.
+ Term term = new Term("city", "Amsterdam");
+ Assert.AreEqual(1, GetHitCount(dir, term));
+ modifier.DeleteDocuments(term);
+ Assert.AreEqual(0, GetHitCount(dir, term));
+
+ modifier.Close();
+ }
+
+ // Deletes here should hit only already-flushed (on-disk) segments.
+ [Test]
+ public virtual void TestNonRAMDelete()
+ {
+ Directory dir = new RAMDirectory();
+ IndexWriter modifier = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ modifier.SetMaxBufferedDocs(2);
+ modifier.SetMaxBufferedDeleteTerms(2);
+
+ int docId = 0;
+ int fieldValue = 100;
+
+ for (int i = 0; i < 7; i++)
+ {
+ AddDoc(modifier, ++docId, fieldValue);
+ }
+ modifier.Flush();
+
+ // After Flush() everything is on disk, nothing buffered in RAM.
+ Assert.AreEqual(0, modifier.GetRamSegmentCount());
+ Assert.IsTrue(0 < modifier.GetSegmentCount());
+
+ IndexReader reader = IndexReader.Open(dir);
+ Assert.AreEqual(7, reader.NumDocs());
+ reader.Close();
+
+ // Two delete calls with the same term -- presumably the pair reaches
+ // the SetMaxBufferedDeleteTerms(2) limit and triggers the deletes to
+ // be applied without an explicit Flush() (TODO confirm against
+ // IndexWriter), since the reader below already sees zero docs:
+ modifier.DeleteDocuments(new Term("value", System.Convert.ToString(fieldValue)));
+ modifier.DeleteDocuments(new Term("value", System.Convert.ToString(fieldValue)));
+
+ reader = IndexReader.Open(dir);
+ Assert.AreEqual(0, reader.NumDocs());
+ reader.Close();
+
+ modifier.Close();
+ }
+
+ // Deletes here target documents still buffered in RAM segments.
+ [Test]
+ public virtual void TestRAMDeletes()
+ {
+ Directory dir = new RAMDirectory();
+ IndexWriter modifier = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ modifier.SetMaxBufferedDocs(4);
+ modifier.SetMaxBufferedDeleteTerms(4);
+
+ int docId = 0;
+ int fieldValue = 100;
+
+ AddDoc(modifier, ++docId, fieldValue);
+ modifier.DeleteDocuments(new Term("value", System.Convert.ToString(fieldValue)));
+ AddDoc(modifier, ++docId, fieldValue);
+ modifier.DeleteDocuments(new Term("value", System.Convert.ToString(fieldValue)));
+
+ // Two delete calls were made, but both used the very same term:
+ Assert.AreEqual(2, modifier.GetNumBufferedDeleteTerms());
+ Assert.AreEqual(1, modifier.GetBufferedDeleteTermsSize());
+
+ AddDoc(modifier, ++docId, fieldValue);
+ Assert.AreEqual(0, modifier.GetSegmentCount());
+ modifier.Flush();
+
+ // Only the last document (added after the deletes) survives.
+ IndexReader reader = IndexReader.Open(dir);
+ Assert.AreEqual(1, reader.NumDocs());
+
+ Assert.AreEqual(1, GetHitCount(dir, new Term("id", System.Convert.ToString(docId))));
+ reader.Close();
+
+ modifier.Close();
+ }
+
+ // test when delete terms apply to both disk and ram segments
+ [Test]
+ public virtual void TestBothDeletes()
+ {
+ Directory dir = new RAMDirectory();
+ IndexWriter modifier = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ modifier.SetMaxBufferedDocs(100);
+ modifier.SetMaxBufferedDeleteTerms(100);
+
+ int id = 0;
+ int value_Renamed = 100;
+
+ // 5 docs with value=100, then 5 with value=200, flushed to disk:
+ for (int i = 0; i < 5; i++)
+ {
+ AddDoc(modifier, ++id, value_Renamed);
+ }
+
+ value_Renamed = 200;
+ for (int i = 0; i < 5; i++)
+ {
+ AddDoc(modifier, ++id, value_Renamed);
+ }
+ modifier.Flush();
+
+ // 5 more value=200 docs buffered in RAM; deleting value=200 must
+ // remove both the flushed and the buffered ones:
+ for (int i = 0; i < 5; i++)
+ {
+ AddDoc(modifier, ++id, value_Renamed);
+ }
+ modifier.DeleteDocuments(new Term("value", System.Convert.ToString(value_Renamed)));
+ modifier.Flush();
+
+ IndexReader reader = IndexReader.Open(dir);
+ Assert.AreEqual(5, reader.NumDocs());
+ // Close the reader -- the original leaked it:
+ reader.Close();
+
+ modifier.Close();
+ }
+
+ // Batched delete terms should be flushed together once the buffer
+ // limit is reached, and an explicit Term[] batch should apply at once.
+ [Test]
+ public virtual void TestBatchDeletes()
+ {
+ Directory dir = new RAMDirectory();
+ IndexWriter modifier = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ modifier.SetMaxBufferedDocs(2);
+ modifier.SetMaxBufferedDeleteTerms(2);
+
+ int docId = 0;
+ int fieldValue = 100;
+
+ for (int i = 0; i < 7; i++)
+ {
+ AddDoc(modifier, ++docId, fieldValue);
+ }
+ modifier.Flush();
+
+ IndexReader reader = IndexReader.Open(dir);
+ Assert.AreEqual(7, reader.NumDocs());
+ reader.Close();
+
+ // Delete the first two documents by id (two buffered terms):
+ docId = 0;
+ modifier.DeleteDocuments(new Term("id", System.Convert.ToString(++docId)));
+ modifier.DeleteDocuments(new Term("id", System.Convert.ToString(++docId)));
+
+ reader = IndexReader.Open(dir);
+ Assert.AreEqual(5, reader.NumDocs());
+ reader.Close();
+
+ // Now delete the next three ids in a single batched call:
+ Term[] terms = new Term[3];
+ for (int i = 0; i < terms.Length; i++)
+ {
+ terms[i] = new Term("id", System.Convert.ToString(++docId));
+ }
+ modifier.DeleteDocuments(terms);
+
+ reader = IndexReader.Open(dir);
+ Assert.AreEqual(2, reader.NumDocs());
+ reader.Close();
+
+ modifier.Close();
+ }
+
+ // Adds one small document: tokenized constant "content", a stored
+ // keyword "id", and an unstored keyword "value" used by the deletes.
+ private void AddDoc(IndexWriter modifier, int id, int value_Renamed)
+ {
+ Lucene.Net.Documents.Document d = new Lucene.Net.Documents.Document();
+ Field contentField = new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED);
+ Field idField = new Field("id", System.Convert.ToString(id), Field.Store.YES, Field.Index.UN_TOKENIZED);
+ Field valueField = new Field("value", System.Convert.ToString(value_Renamed), Field.Store.NO, Field.Index.UN_TOKENIZED);
+ d.Add(contentField);
+ d.Add(idField);
+ d.Add(valueField);
+ modifier.AddDocument(d);
+ }
+
+ // Runs a TermQuery over a fresh searcher and returns the hit count.
+ private int GetHitCount(Directory dir, Term term)
+ {
+ IndexSearcher searcher = new IndexSearcher(dir);
+ Hits hits = searcher.Search(new TermQuery(term));
+ int hitCount = hits.Length();
+ searcher.Close();
+ return hitCount;
+ }
+
+ // Runs the disk-full simulation exercising deletes only.
+ [Test]
+ public virtual void TestDeletesOnDiskFull()
+ {
+ TestOperationsOnDiskFull(false);
+ }
+
+ // Runs the disk-full simulation exercising updates (delete + re-add).
+ [Test]
+ public virtual void TestUpdatesOnDiskFull()
+ {
+ TestOperationsOnDiskFull(true);
+ }
+
+ /// <summary> Make sure if modifier tries to commit but hits disk full that modifier
+ /// remains consistent and usable. Similar to TestIndexReader.testDiskFull().
+ /// Iterates with ever-increasing simulated free disk space until the
+ /// operations succeed, verifying transactional semantics at each step.
+ /// </summary>
+ private void TestOperationsOnDiskFull(bool updates)
+ {
+
+ bool debug = false;
+ Term searchTerm = new Term("content", "aaa");
+ int START_COUNT = 157;
+ int END_COUNT = 144;
+
+ // First build up a starting index:
+ RAMDirectory startDir = new RAMDirectory();
+ IndexWriter writer = new IndexWriter(startDir, new WhitespaceAnalyzer(), true);
+ for (int i = 0; i < 157; i++)
+ {
+ Lucene.Net.Documents.Document d = new Lucene.Net.Documents.Document();
+ d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.UN_TOKENIZED));
+ d.Add(new Field("content", "aaa " + i, Field.Store.NO, Field.Index.TOKENIZED));
+ writer.AddDocument(d);
+ }
+ writer.Close();
+
+ long diskUsage = startDir.SizeInBytes();
+ long diskFree = diskUsage + 10;
+
+ System.IO.IOException err = null;
+
+ bool done = false;
+
+ // Iterate w/ ever increasing free disk space:
+ while (!done)
+ {
+ MockRAMDirectory dir = new MockRAMDirectory(startDir);
+ IndexWriter modifier = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
+
+ modifier.SetMaxBufferedDocs(1000); // use flush or Close
+ modifier.SetMaxBufferedDeleteTerms(1000); // use flush or Close
+
+ // For each disk size, first try to commit against
+ // dir that will hit random IOExceptions & disk
+ // full; after, give it infinite disk space & turn
+ // off random IOExceptions & retry w/ same reader:
+ bool success = false;
+
+ for (int x = 0; x < 2; x++)
+ {
+
+ double rate = 0.1;
+ double diskRatio = ((double) diskFree) / diskUsage;
+ long thisDiskFree;
+ System.String testName;
+
+ if (0 == x)
+ {
+ thisDiskFree = diskFree;
+ // The more free space, the lower the random-IOException rate:
+ if (diskRatio >= 2.0)
+ {
+ rate /= 2;
+ }
+ if (diskRatio >= 4.0)
+ {
+ rate /= 2;
+ }
+ if (diskRatio >= 6.0)
+ {
+ rate = 0.0;
+ }
+ if (debug)
+ {
+ System.Console.Out.WriteLine("\ncycle: " + diskFree + " bytes");
+ }
+ testName = "disk full during reader.Close() @ " + thisDiskFree + " bytes";
+ }
+ else
+ {
+ thisDiskFree = 0;
+ rate = 0.0;
+ if (debug)
+ {
+ System.Console.Out.WriteLine("\ncycle: same writer: unlimited disk space");
+ }
+ testName = "reader re-use after disk full";
+ }
+
+ dir.SetMaxSizeInBytes(thisDiskFree);
+ dir.SetRandomIOExceptionRate(rate, diskFree);
+
+ try
+ {
+ if (0 == x)
+ {
+ // Touch every 12th document (13 of them):
+ int docId = 12;
+ for (int i = 0; i < 13; i++)
+ {
+ if (updates)
+ {
+ Lucene.Net.Documents.Document d = new Lucene.Net.Documents.Document();
+ d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.UN_TOKENIZED));
+ d.Add(new Field("content", "bbb " + i, Field.Store.NO, Field.Index.TOKENIZED));
+ modifier.UpdateDocument(new Term("id", System.Convert.ToString(docId)), d);
+ }
+ else
+ {
+ // deletes
+ modifier.DeleteDocuments(new Term("id", System.Convert.ToString(docId)));
+ // modifier.setNorm(docId, "contents", (float)2.0);
+ }
+ docId += 12;
+ }
+ }
+ modifier.Close();
+ success = true;
+ if (0 == x)
+ {
+ done = true;
+ }
+ }
+ catch (System.IO.IOException e)
+ {
+ if (debug)
+ {
+ System.Console.Out.WriteLine(" hit IOException: " + e);
+ }
+ err = e;
+ if (1 == x)
+ {
+ System.Console.Error.WriteLine(e.StackTrace);
+ Assert.Fail(testName + " hit IOException after disk space was freed up");
+ }
+ }
+
+ // Whether we succeeded or failed, check that all
+ // un-referenced files were in fact deleted (ie,
+ // we did not create garbage). Just create a
+ // new IndexFileDeleter, have it delete
+ // unreferenced files, then verify that in fact
+ // no files were deleted:
+ System.String[] startFiles = dir.List();
+ SegmentInfos infos = new SegmentInfos();
+ infos.Read(dir);
+ IndexFileDeleter d2 = new IndexFileDeleter(infos, dir);
+ d2.FindDeletableFiles();
+ d2.DeleteFiles();
+ System.String[] endFiles = dir.List();
+
+ System.Array.Sort(startFiles);
+ System.Array.Sort(endFiles);
+
+ // NOTE: fixed -- the original used startFiles.Equals(endFiles),
+ // which is reference equality for arrays and therefore always
+ // false for two distinct List() results; compare element-wise:
+ bool sameFiles = startFiles.Length == endFiles.Length;
+ for (int i = 0; sameFiles && i < startFiles.Length; i++)
+ {
+ sameFiles = startFiles[i].Equals(endFiles[i]);
+ }
+
+ if (!sameFiles)
+ {
+ System.String successStr;
+ if (success)
+ {
+ successStr = "success";
+ }
+ else
+ {
+ successStr = "IOException";
+ System.Console.Error.WriteLine(err.StackTrace);
+ }
+ Assert.Fail("reader.Close() failed to delete unreferenced files after " + successStr + " (" + diskFree + " bytes): before delete:\n " + ArrayToString(startFiles) + "\n after delete:\n " + ArrayToString(endFiles));
+ }
+
+ // Finally, verify index is not corrupt, and, if
+ // we succeeded, we see all docs changed, and if
+ // we failed, we see either all docs or no docs
+ // changed (transactional semantics):
+ IndexReader newReader = null;
+ try
+ {
+ newReader = IndexReader.Open(dir);
+ }
+ catch (System.IO.IOException e)
+ {
+ System.Console.Error.WriteLine(e.StackTrace);
+ Assert.Fail(testName + ":exception when creating IndexReader after disk full during Close: " + e);
+ }
+
+ IndexSearcher searcher = new IndexSearcher(newReader);
+ Hits hits = null;
+ try
+ {
+ hits = searcher.Search(new TermQuery(searchTerm));
+ }
+ catch (System.IO.IOException e)
+ {
+ System.Console.Error.WriteLine(e.StackTrace);
+ Assert.Fail(testName + ": exception when searching: " + e);
+ }
+ int result2 = hits.Length();
+ if (success)
+ {
+ if (result2 != END_COUNT)
+ {
+ Assert.Fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
+ }
+ }
+ else
+ {
+ // On hitting exception we still may have added
+ // all docs:
+ if (result2 != START_COUNT && result2 != END_COUNT)
+ {
+ System.Console.Error.WriteLine(err.StackTrace);
+ // NOTE: fixed -- the message now lists both acceptable counts:
+ Assert.Fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
+ }
+ }
+
+ searcher.Close();
+ newReader.Close();
+
+ if (result2 == END_COUNT)
+ {
+ break;
+ }
+ }
+
+ dir.Close();
+
+ // Try again with 10 more bytes of free space:
+ diskFree += 10;
+ }
+ }
+
+ // Joins the file names with newline+indent for use in failure messages.
+ private System.String ArrayToString(System.String[] l)
+ {
+ System.Text.StringBuilder sb = new System.Text.StringBuilder();
+ for (int i = 0; i < l.Length; i++)
+ {
+ if (i > 0)
+ {
+ sb.Append("\n ");
+ }
+ sb.Append(l[i]);
+ }
+ return sb.ToString();
+ }
+ }
+}
\ No newline at end of file
Added: incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriterLockRelease.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestIndexWriterLockRelease.cs?view=auto&rev=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriterLockRelease.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriterLockRelease.cs Sat Aug 11 09:56:37 2007
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NUnit.Framework;
+
+namespace Lucene.Net.Index
+{
+
+ /// <summary> This tests the patch for issue #LUCENE-715 (IndexWriter does not
+ /// release its write lock when trying to open an index which does not yet
+ /// exist).
+ ///
+ /// </summary>
+ /// <author> mbogosian
+ /// </author>
+ /// <version> $Id$
+ /// </version>
+
+ [TestFixture]
+ public class TestIndexWriterLockRelease
+ {
+ private System.IO.FileInfo __test_dir;
+
+ // Lazily creates the on-disk test directory; refuses to run over a
+ // leftover directory (or file) of the same name.
+ [SetUp]
+ public virtual void SetUp()
+ {
+ if (this.__test_dir == null)
+ {
+ System.String tmp_dir = SupportClass.AppSettings.Get("java.io.tmpdir", "tmp");
+ this.__test_dir = new System.IO.FileInfo(System.IO.Path.Combine(tmp_dir, "testIndexWriter"));
+
+ if (System.IO.File.Exists(this.__test_dir.FullName) || System.IO.Directory.Exists(this.__test_dir.FullName))
+ {
+ throw new System.IO.IOException("test directory \"" + this.__test_dir.FullName + "\" already exists (please remove by hand)");
+ }
+
+ bool created = false;
+ try
+ {
+ System.IO.Directory.CreateDirectory(this.__test_dir.FullName);
+ created = System.IO.Directory.Exists(this.__test_dir.FullName);
+ }
+ catch
+ {
+ created = false;
+ }
+
+ if (!created)
+ throw new System.IO.IOException("unable to create test directory \"" + this.__test_dir.FullName + "\"");
+ }
+ }
+
+ // Removes every entry in the test directory, then the directory
+ // itself, throwing if anything could not be deleted.
+ [TearDown]
+ public virtual void TearDown()
+ {
+ if (this.__test_dir != null)
+ {
+ System.IO.FileInfo[] files = SupportClass.FileSupport.GetFiles(this.__test_dir);
+
+ for (int i = 0; i < files.Length; ++i)
+ {
+ bool removed = false;
+ if (System.IO.File.Exists(files[i].FullName))
+ {
+ System.IO.File.Delete(files[i].FullName);
+ removed = true;
+ }
+ else if (System.IO.Directory.Exists(files[i].FullName))
+ {
+ System.IO.Directory.Delete(files[i].FullName);
+ removed = true;
+ }
+ if (!removed)
+ {
+ throw new System.IO.IOException("unable to remove file in test directory \"" + this.__test_dir.FullName + "\" (please remove by hand)");
+ }
+ }
+
+ bool dirRemoved = false;
+ if (System.IO.File.Exists(this.__test_dir.FullName))
+ {
+ System.IO.File.Delete(this.__test_dir.FullName);
+ dirRemoved = true;
+ }
+ else if (System.IO.Directory.Exists(this.__test_dir.FullName))
+ {
+ System.IO.Directory.Delete(this.__test_dir.FullName);
+ dirRemoved = true;
+ }
+ if (!dirRemoved)
+ {
+ throw new System.IO.IOException("unable to remove test directory \"" + this.__test_dir.FullName + "\" (please remove by hand)");
+ }
+ }
+ }
+
+ /// <summary> LUCENE-715: opening an IndexModifier on a missing index (create=false)
+ /// must release the write lock. The first open is expected to throw
+ /// FileNotFoundException; the second attempt must then also reach
+ /// FileNotFoundException -- if the lock had leaked it would fail on the
+ /// lock instead. The unused locals/exception variables from the original
+ /// were removed (they only generated compiler warnings).
+ /// </summary>
+ [Test]
+ public virtual void _TestIndexWriterLockRelease()
+ {
+ try
+ {
+ new IndexModifier(this.__test_dir, new Lucene.Net.Analysis.Standard.StandardAnalyzer(), false);
+ }
+ catch (System.IO.FileNotFoundException)
+ {
+ // Expected: the index does not exist. Retry; this must get the
+ // same exception, proving the write lock was released.
+ try
+ {
+ new IndexModifier(this.__test_dir, new Lucene.Net.Analysis.Standard.StandardAnalyzer(), false);
+ }
+ catch (System.IO.FileNotFoundException)
+ {
+ // Expected again.
+ }
+ }
+ }
+ }
+}
\ No newline at end of file