Posted to commits@lucenenet.apache.org by ni...@apache.org on 2017/02/26 23:37:16 UTC

[28/72] [abbrv] [partial] lucenenet git commit: Lucene.Net.Tests: Removed \core directory and put its contents in root directory

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/TestIndexWriterForceMerge.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/TestIndexWriterForceMerge.cs b/src/Lucene.Net.Tests/Index/TestIndexWriterForceMerge.cs
new file mode 100644
index 0000000..62de270
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/TestIndexWriterForceMerge.cs
@@ -0,0 +1,260 @@
+using System;
+using Lucene.Net.Documents;
+
+namespace Lucene.Net.Index
+{
+    using NUnit.Framework;
+    using Directory = Lucene.Net.Store.Directory;
+    using Document = Documents.Document;
+    using Field = Field;
+    using LuceneTestCase = Lucene.Net.Util.LuceneTestCase;
+
+    /*
+         * Licensed to the Apache Software Foundation (ASF) under one or more
+         * contributor license agreements.  See the NOTICE file distributed with
+         * this work for additional information regarding copyright ownership.
+         * The ASF licenses this file to You under the Apache License, Version 2.0
+         * (the "License"); you may not use this file except in compliance with
+         * the License.  You may obtain a copy of the License at
+         *
+         *     http://www.apache.org/licenses/LICENSE-2.0
+         *
+         * Unless required by applicable law or agreed to in writing, software
+         * distributed under the License is distributed on an "AS IS" BASIS,
+         * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+         * See the License for the specific language governing permissions and
+         * limitations under the License.
+         */
+
+    using MockAnalyzer = Lucene.Net.Analysis.MockAnalyzer;
+    using MockDirectoryWrapper = Lucene.Net.Store.MockDirectoryWrapper;
+    using TestUtil = Lucene.Net.Util.TestUtil;
+
+    [TestFixture]
+    public class TestIndexWriterForceMerge : LuceneTestCase
+    {
+        private static readonly FieldType StoredTextType = new FieldType(TextField.TYPE_NOT_STORED);
+
+        [Test]
+        public virtual void TestPartialMerge()
+        {
+            Directory dir = NewDirectory();
+
+            Document doc = new Document();
+            doc.Add(NewStringField("content", "aaa", Field.Store.NO));
+            int incrMin = TEST_NIGHTLY ? 15 : 40;
+            for (int numDocs = 10; numDocs < 500; numDocs += TestUtil.NextInt(Random(), incrMin, 5 * incrMin))
+            {
+                LogDocMergePolicy ldmp = new LogDocMergePolicy();
+                ldmp.MinMergeDocs = 1;
+                ldmp.MergeFactor = 5;
+                IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.CREATE).SetMaxBufferedDocs(2).SetMergePolicy(ldmp));
+                for (int j = 0; j < numDocs; j++)
+                {
+                    writer.AddDocument(doc);
+                }
+                writer.Dispose();
+
+                SegmentInfos sis = new SegmentInfos();
+                sis.Read(dir);
+                int segCount = sis.Count;
+
+                ldmp = new LogDocMergePolicy();
+                ldmp.MergeFactor = 5;
+                writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(ldmp));
+                writer.ForceMerge(3);
+                writer.Dispose();
+
+                sis = new SegmentInfos();
+                sis.Read(dir);
+                int optSegCount = sis.Count;
+
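+                // ForceMerge(3) must leave at most 3 segments: exactly 3 if the
+                // index had more, or the original count if it already had fewer.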
+                if (segCount < 3)
+                {
+                    Assert.AreEqual(segCount, optSegCount);
+                }
+                else
+                {
+                    Assert.AreEqual(3, optSegCount);
+                }
+            }
+            dir.Dispose();
+        }
+
+        [Test]
+        public virtual void TestMaxNumSegments2([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")]IConcurrentMergeScheduler scheduler)
+        {
+            Directory dir = NewDirectory();
+
+            Document doc = new Document();
+            doc.Add(NewStringField("content", "aaa", Field.Store.NO));
+
+            LogDocMergePolicy ldmp = new LogDocMergePolicy();
+            ldmp.MinMergeDocs = 1;
+            ldmp.MergeFactor = 4;
+            var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
+                            .SetMaxBufferedDocs(2)
+                            .SetMergePolicy(ldmp)
+                            .SetMergeScheduler(scheduler);
+            IndexWriter writer = new IndexWriter(dir, config);
+
+            for (int iter = 0; iter < 10; iter++)
+            {
+                for (int i = 0; i < 19; i++)
+                {
+                    writer.AddDocument(doc);
+                }
+
+                writer.Commit();
+                writer.WaitForMerges();
+                writer.Commit();
+
+                SegmentInfos sis = new SegmentInfos();
+                sis.Read(dir);
+
+                int segCount = sis.Count;
+                writer.ForceMerge(7);
+                writer.Commit();
+                writer.WaitForMerges();
+
+                sis = new SegmentInfos();
+                sis.Read(dir);
+                int optSegCount = sis.Count;
+
+                if (segCount < 7)
+                {
+                    Assert.AreEqual(segCount, optSegCount);
+                }
+                else
+                {
+                    Assert.AreEqual(7, optSegCount, "seg: " + segCount);
+                }
+            }
+            writer.Dispose();
+            dir.Dispose();
+        }
+
+        /// <summary>
+        /// Make sure forceMerge doesn't use more temporary free space
+        /// than expected; the assertion below allows at most 4X the
+        /// starting index size.
+        /// </summary>
+        [Test]
+        public virtual void TestForceMergeTempSpaceUsage()
+        {
+            MockDirectoryWrapper dir = NewMockDirectory();
+            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(10).SetMergePolicy(NewLogMergePolicy()));
+            if (VERBOSE)
+            {
+                Console.WriteLine("TEST: config1=" + writer.Config);
+            }
+
+            for (int j = 0; j < 500; j++)
+            {
+                AddDocWithIndex(writer, j);
+            }
+            int termIndexInterval = writer.Config.TermIndexInterval;
+            // force one extra segment w/ different doc store so
+            // we see the doc stores get merged
+            writer.Commit();
+            AddDocWithIndex(writer, 500);
+            writer.Dispose();
+
+            if (VERBOSE)
+            {
+                Console.WriteLine("TEST: start disk usage");
+            }
+            long startDiskUsage = 0;
+            string[] files = dir.ListAll();
+            for (int i = 0; i < files.Length; i++)
+            {
+                startDiskUsage += dir.FileLength(files[i]);
+                if (VERBOSE)
+                {
+                    Console.WriteLine(files[i] + ": " + dir.FileLength(files[i]));
+                }
+            }
+
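+            // Track peak disk usage from this point on, so MaxUsedSizeInBytes
+            // below measures the temporary space the ForceMerge pass needs.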
+            dir.ResetMaxUsedSizeInBytes();
+            dir.TrackDiskUsage = true;
+
+            // Important to use the same term index interval, else a
+            // smaller one here could increase the disk usage and
+            // cause a false failure:
+            writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND).SetTermIndexInterval(termIndexInterval).SetMergePolicy(NewLogMergePolicy()));
+            writer.ForceMerge(1);
+            writer.Dispose();
+            long maxDiskUsage = dir.MaxUsedSizeInBytes;
+            Assert.IsTrue(maxDiskUsage <= 4 * startDiskUsage, "forceMerge used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4 * startDiskUsage) + " (= 4X starting usage)");
+            dir.Dispose();
+        }
+
+        // Test calling forceMerge(1, false): the merge is kicked off
+        // but we don't wait for it to finish (writer.Dispose() does
+        // wait for it)
+        [Test]
+        public virtual void TestBackgroundForceMerge()
+        {
+            Directory dir = NewDirectory();
+            for (int pass = 0; pass < 2; pass++)
+            {
+                IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.CREATE).SetMaxBufferedDocs(2).SetMergePolicy(NewLogMergePolicy(51)));
+                Document doc = new Document();
+                doc.Add(NewStringField("field", "aaa", Field.Store.NO));
+                for (int i = 0; i < 100; i++)
+                {
+                    writer.AddDocument(doc);
+                }
+                writer.ForceMerge(1, false);
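+                // doWait=false: the merge runs in the background; only
+                // writer.Dispose() below blocks until it completes.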
+
+                if (0 == pass)
+                {
+                    writer.Dispose();
+                    DirectoryReader reader = DirectoryReader.Open(dir);
+                    Assert.AreEqual(1, reader.Leaves.Count);
+                    reader.Dispose();
+                }
+                else
+                {
+                    // Get another segment to flush so we can verify it is
+                    // NOT included in the merging
+                    writer.AddDocument(doc);
+                    writer.AddDocument(doc);
+                    writer.Dispose();
+
+                    DirectoryReader reader = DirectoryReader.Open(dir);
+                    Assert.IsTrue(reader.Leaves.Count > 1);
+                    reader.Dispose();
+
+                    SegmentInfos infos = new SegmentInfos();
+                    infos.Read(dir);
+                    Assert.AreEqual(2, infos.Count);
+                }
+            }
+
+            dir.Dispose();
+        }
+
+        /// <summary>
+        /// LUCENENET specific
+        ///
+        /// Copied from <seealso cref="TestIndexWriter.AddDoc(IndexWriter)"/>
+        /// to remove inter-class dependency on TestIndexWriter.
+        /// </summary>
+        private void AddDoc(IndexWriter writer)
+        {
+            Document doc = new Document();
+            doc.Add(NewTextField("content", "aaa", Field.Store.NO));
+            writer.AddDocument(doc);
+        }
+
+        private void AddDocWithIndex(IndexWriter writer, int index)
+        {
+            Document doc = new Document();
+            doc.Add(NewField("content", "aaa " + index, StoredTextType));
+            doc.Add(NewField("id", "" + index, StoredTextType));
+            writer.AddDocument(doc);
+        }
+
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/TestIndexWriterLockRelease.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/TestIndexWriterLockRelease.cs b/src/Lucene.Net.Tests/Index/TestIndexWriterLockRelease.cs
new file mode 100644
index 0000000..c0705f0
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/TestIndexWriterLockRelease.cs
@@ -0,0 +1,64 @@
+using NUnit.Framework;
+using System.IO;
+
+namespace Lucene.Net.Index
+{
+    using Directory = Lucene.Net.Store.Directory;
+    using LuceneTestCase = Lucene.Net.Util.LuceneTestCase;
+
+    /*
+         * Licensed to the Apache Software Foundation (ASF) under one or more
+         * contributor license agreements.  See the NOTICE file distributed with
+         * this work for additional information regarding copyright ownership.
+         * The ASF licenses this file to You under the Apache License, Version 2.0
+         * (the "License"); you may not use this file except in compliance with
+         * the License.  You may obtain a copy of the License at
+         *
+         *     http://www.apache.org/licenses/LICENSE-2.0
+         *
+         * Unless required by applicable law or agreed to in writing, software
+         * distributed under the License is distributed on an "AS IS" BASIS,
+         * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+         * See the License for the specific language governing permissions and
+         * limitations under the License.
+         */
+
+    using MockAnalyzer = Lucene.Net.Analysis.MockAnalyzer;
+
+    /// <summary>
+    /// This tests the patch for issue LUCENE-715 (IndexWriter does not
+    /// release its write lock when trying to open an index which does not yet
+    /// exist).
+    /// </summary>
+    [TestFixture]
+    public class TestIndexWriterLockRelease : LuceneTestCase
+    {
+        [Test]
+        public virtual void TestIndexWriterLockRelease_Mem()
+        {
+            Directory dir = NewFSDirectory(CreateTempDir("testLockRelease"));
+            try
+            {
+                new IndexWriter(dir, (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))).SetOpenMode(OpenMode.APPEND));
+            }
+#pragma warning disable 168
+            catch (FileNotFoundException /*| NoSuchFileException*/ e)
+#pragma warning restore 168
+            {
+                try
+                {
+                    new IndexWriter(dir, (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))).SetOpenMode(OpenMode.APPEND));
+                }
+#pragma warning disable 168
+                catch (FileNotFoundException /*| NoSuchFileException*/ e1)
+#pragma warning restore 168
+                {
+                }
+            }
+            finally
+            {
+                dir.Dispose();
+            }
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/TestIndexWriterMergePolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/TestIndexWriterMergePolicy.cs b/src/Lucene.Net.Tests/Index/TestIndexWriterMergePolicy.cs
new file mode 100644
index 0000000..a6b95d7
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/TestIndexWriterMergePolicy.cs
@@ -0,0 +1,311 @@
+using Lucene.Net.Documents;
+
+namespace Lucene.Net.Index
+{
+    using NUnit.Framework;
+    using Directory = Lucene.Net.Store.Directory;
+    using Document = Documents.Document;
+    using Field = Field;
+    using LuceneTestCase = Lucene.Net.Util.LuceneTestCase;
+
+    /*
+         * Licensed to the Apache Software Foundation (ASF) under one or more
+         * contributor license agreements.  See the NOTICE file distributed with
+         * this work for additional information regarding copyright ownership.
+         * The ASF licenses this file to You under the Apache License, Version 2.0
+         * (the "License"); you may not use this file except in compliance with
+         * the License.  You may obtain a copy of the License at
+         *
+         *     http://www.apache.org/licenses/LICENSE-2.0
+         *
+         * Unless required by applicable law or agreed to in writing, software
+         * distributed under the License is distributed on an "AS IS" BASIS,
+         * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+         * See the License for the specific language governing permissions and
+         * limitations under the License.
+         */
+
+    using MockAnalyzer = Lucene.Net.Analysis.MockAnalyzer;
+
+    [TestFixture]
+    public class TestIndexWriterMergePolicy : LuceneTestCase
+    {
+        // Test the normal case
+        [Test]
+        public virtual void TestNormalCase()
+        {
+            Directory dir = NewDirectory();
+
+            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(10).SetMergePolicy(new LogDocMergePolicy()));
+
+            for (int i = 0; i < 100; i++)
+            {
+                AddDoc(writer);
+                CheckInvariants(writer);
+            }
+
+            writer.Dispose();
+            dir.Dispose();
+        }
+
+        // Test that no over-merging occurs
+        [Test]
+        public virtual void TestNoOverMerge()
+        {
+            Directory dir = NewDirectory();
+
+            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(10).SetMergePolicy(new LogDocMergePolicy()));
+
+            bool noOverMerge = false;
+            for (int i = 0; i < 100; i++)
+            {
+                AddDoc(writer);
+                CheckInvariants(writer);
+                if (writer.NumBufferedDocuments + writer.SegmentCount >= 18)
+                {
+                    noOverMerge = true;
+                }
+            }
+            Assert.IsTrue(noOverMerge);
+
+            writer.Dispose();
+            dir.Dispose();
+        }
+
+        // Test the case where flush is forced after every addDoc
+        [Test]
+        public virtual void TestForceFlush()
+        {
+            Directory dir = NewDirectory();
+
+            LogDocMergePolicy mp = new LogDocMergePolicy();
+            mp.MinMergeDocs = 100;
+            mp.MergeFactor = 10;
+            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(10).SetMergePolicy(mp));
+
+            for (int i = 0; i < 100; i++)
+            {
+                AddDoc(writer);
+                writer.Dispose();
+
+                mp = new LogDocMergePolicy();
+                mp.MergeFactor = 10;
+                writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND).SetMaxBufferedDocs(10).SetMergePolicy(mp));
+                mp.MinMergeDocs = 100;
+                CheckInvariants(writer);
+            }
+
+            writer.Dispose();
+            dir.Dispose();
+        }
+
+        // Test the case where mergeFactor changes
+        [Test]
+        public virtual void TestMergeFactorChange()
+        {
+            Directory dir = NewDirectory();
+
+            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(10).SetMergePolicy(NewLogMergePolicy()).SetMergeScheduler(new SerialMergeScheduler()));
+
+            for (int i = 0; i < 250; i++)
+            {
+                AddDoc(writer);
+                CheckInvariants(writer);
+            }
+
+            ((LogMergePolicy)writer.Config.MergePolicy).MergeFactor = 5;
+
+            // merge policy only fixes segments on levels where merges
+            // have been triggered, so check invariants after all adds
+            for (int i = 0; i < 10; i++)
+            {
+                AddDoc(writer);
+            }
+            CheckInvariants(writer);
+
+            writer.Dispose();
+            dir.Dispose();
+        }
+
+        // Test the case where both mergeFactor and maxBufferedDocs change
+        [Test]
+        public virtual void TestMaxBufferedDocsChange()
+        {
+            Directory dir = NewDirectory();
+
+            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(101).SetMergePolicy(new LogDocMergePolicy()).SetMergeScheduler(new SerialMergeScheduler()));
+
+            // leftmost (oldest) segment has 1 doc
+            // rightmost (newest) segment has 100 docs
+            for (int i = 1; i <= 100; i++)
+            {
+                for (int j = 0; j < i; j++)
+                {
+                    AddDoc(writer);
+                    CheckInvariants(writer);
+                }
+                writer.Dispose();
+
+                writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND).SetMaxBufferedDocs(101).SetMergePolicy(new LogDocMergePolicy()).SetMergeScheduler(new SerialMergeScheduler()));
+            }
+
+            writer.Dispose();
+            LogDocMergePolicy ldmp = new LogDocMergePolicy();
+            ldmp.MergeFactor = 10;
+            writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND).SetMaxBufferedDocs(10).SetMergePolicy(ldmp).SetMergeScheduler(new SerialMergeScheduler()));
+
+            // merge policy only fixes segments on levels where merges
+            // have been triggered, so check invariants after all adds
+            for (int i = 0; i < 100; i++)
+            {
+                AddDoc(writer);
+            }
+            CheckInvariants(writer);
+
+            for (int i = 100; i < 1000; i++)
+            {
+                AddDoc(writer);
+            }
+            writer.Commit();
+            writer.WaitForMerges();
+            writer.Commit();
+            CheckInvariants(writer);
+
+            writer.Dispose();
+            dir.Dispose();
+        }
+
+        // Test the case where a merge results in no docs at all
+        [Test]
+        public virtual void TestMergeDocCount0([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")]IConcurrentMergeScheduler scheduler)
+        {
+            Directory dir = NewDirectory();
+
+            LogDocMergePolicy ldmp = new LogDocMergePolicy();
+            ldmp.MergeFactor = 100;
+            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(10).SetMergePolicy(ldmp));
+
+            for (int i = 0; i < 250; i++)
+            {
+                AddDoc(writer);
+                CheckInvariants(writer);
+            }
+            writer.Dispose();
+
+            // delete some docs without merging
+            writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.NO_COMPOUND_FILES));
+            writer.DeleteDocuments(new Term("content", "aaa"));
+            writer.Dispose();
+
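+            // Every doc contained "aaa", so all 250 docs are now deleted;
+            // the merges triggered below drop fully-deleted segments
+            // entirely, which is why MaxDoc ends up at exactly 10.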
+            ldmp = new LogDocMergePolicy();
+            ldmp.MergeFactor = 5;
+            var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
+                .SetOpenMode(OpenMode.APPEND)
+                .SetMaxBufferedDocs(10)
+                .SetMergePolicy(ldmp)
+                .SetMergeScheduler(scheduler);
+            writer = new IndexWriter(dir, config);
+
+            // merge factor is changed, so check invariants after all adds
+            for (int i = 0; i < 10; i++)
+            {
+                AddDoc(writer);
+            }
+            writer.Commit();
+            writer.WaitForMerges();
+            writer.Commit();
+            CheckInvariants(writer);
+            Assert.AreEqual(10, writer.MaxDoc);
+
+            writer.Dispose();
+            dir.Dispose();
+        }
+
+        private void AddDoc(IndexWriter writer)
+        {
+            Document doc = new Document();
+            doc.Add(NewTextField("content", "aaa", Field.Store.NO));
+            writer.AddDocument(doc);
+        }
+
+        private void CheckInvariants(IndexWriter writer)
+        {
+            writer.WaitForMerges();
+            int maxBufferedDocs = writer.Config.MaxBufferedDocs;
+            int mergeFactor = ((LogMergePolicy)writer.Config.MergePolicy).MergeFactor;
+            int maxMergeDocs = ((LogMergePolicy)writer.Config.MergePolicy).MaxMergeDocs;
+
+            int ramSegmentCount = writer.NumBufferedDocuments;
+            Assert.IsTrue(ramSegmentCount < maxBufferedDocs);
+
+            int lowerBound = -1;
+            int upperBound = maxBufferedDocs;
+            int numSegments = 0;
+
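+            // Walk segments from newest to oldest: each level holds segments
+            // with doc counts in (lowerBound, upperBound], and may contain at
+            // most mergeFactor - 1 segments before a merge becomes due.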
+            int segmentCount = writer.SegmentCount;
+            for (int i = segmentCount - 1; i >= 0; i--)
+            {
+                int docCount = writer.GetDocCount(i);
+                Assert.IsTrue(docCount > lowerBound, "docCount=" + docCount + " lowerBound=" + lowerBound + " upperBound=" + upperBound + " i=" + i + " segmentCount=" + segmentCount + " index=" + writer.SegString() + " config=" + writer.Config);
+
+                if (docCount <= upperBound)
+                {
+                    numSegments++;
+                }
+                else
+                {
+                    if (upperBound * mergeFactor <= maxMergeDocs)
+                    {
+                        Assert.IsTrue(numSegments < mergeFactor, "maxMergeDocs=" + maxMergeDocs + "; numSegments=" + numSegments + "; upperBound=" + upperBound + "; mergeFactor=" + mergeFactor + "; segs=" + writer.SegString() + " config=" + writer.Config);
+                    }
+
+                    do
+                    {
+                        lowerBound = upperBound;
+                        upperBound *= mergeFactor;
+                    } while (docCount > upperBound);
+                    numSegments = 1;
+                }
+            }
+            if (upperBound * mergeFactor <= maxMergeDocs)
+            {
+                Assert.IsTrue(numSegments < mergeFactor);
+            }
+        }
+
+        private const double EPSILON = 1E-14;
+
+        [Test]
+        public virtual void TestSetters()
+        {
+            AssertSetters(new LogByteSizeMergePolicy());
+            AssertSetters(new LogDocMergePolicy());
+        }
+
+        private void AssertSetters(MergePolicy lmp)
+        {
+            lmp.MaxCFSSegmentSizeMB = 2.0;
+            Assert.AreEqual(2.0, lmp.MaxCFSSegmentSizeMB, EPSILON);
+
+            lmp.MaxCFSSegmentSizeMB = double.PositiveInfinity;
+            Assert.AreEqual(long.MaxValue / 1024 / 1024.0, lmp.MaxCFSSegmentSizeMB, EPSILON * long.MaxValue);
+
+            lmp.MaxCFSSegmentSizeMB = long.MaxValue / 1024 / 1024.0;
+            Assert.AreEqual(long.MaxValue / 1024 / 1024.0, lmp.MaxCFSSegmentSizeMB, EPSILON * long.MaxValue);
+
+            try
+            {
+                lmp.MaxCFSSegmentSizeMB = -2.0;
+                Assert.Fail("Didn't throw IllegalArgumentException");
+            }
+#pragma warning disable 168
+            catch (System.ArgumentException iae)
+#pragma warning restore 168
+            {
+                // pass
+            }
+
+            // TODO: Add more checks for other non-double setters!
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/TestIndexWriterMerging.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/TestIndexWriterMerging.cs b/src/Lucene.Net.Tests/Index/TestIndexWriterMerging.cs
new file mode 100644
index 0000000..37fc3cd
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/TestIndexWriterMerging.cs
@@ -0,0 +1,488 @@
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Threading;
+using Lucene.Net.Documents;
+
+namespace Lucene.Net.Index
+{
+    using Attributes;
+    using Lucene.Net.Support;
+    using NUnit.Framework;
+
+    /*
+         * Copyright 2006 The Apache Software Foundation
+         *
+         * Licensed under the Apache License, Version 2.0 (the "License");
+         * you may not use this file except in compliance with the License.
+         * You may obtain a copy of the License at
+         *
+         *     http://www.apache.org/licenses/LICENSE-2.0
+         *
+         * Unless required by applicable law or agreed to in writing, software
+         * distributed under the License is distributed on an "AS IS" BASIS,
+         * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+         * See the License for the specific language governing permissions and
+         * limitations under the License.
+         */
+
+    using AlreadyClosedException = Lucene.Net.Store.AlreadyClosedException;
+    using Directory = Lucene.Net.Store.Directory;
+    using Document = Documents.Document;
+    using Field = Field;
+    using FieldType = FieldType;
+    using LuceneTestCase = Lucene.Net.Util.LuceneTestCase;
+    using MockAnalyzer = Lucene.Net.Analysis.MockAnalyzer;
+    using TextField = TextField;
+
+    [TestFixture]
+    public class TestIndexWriterMerging : LuceneTestCase
+    {
+        /// <summary>
+        /// Tests that index merging (specifically addIndexes(Directory...)) doesn't
+        /// change the index order of documents.
+        /// </summary>
+        [Test]
+        public virtual void TestLucene()
+        {
+            int num = 100;
+
+            Directory indexA = NewDirectory();
+            Directory indexB = NewDirectory();
+
+            FillIndex(Random(), indexA, 0, num);
+            bool fail = VerifyIndex(indexA, 0);
+            if (fail)
+            {
+                Assert.Fail("Index a is invalid");
+            }
+
+            FillIndex(Random(), indexB, num, num);
+            fail = VerifyIndex(indexB, num);
+            if (fail)
+            {
+                Assert.Fail("Index b is invalid");
+            }
+
+            Directory merged = NewDirectory();
+
+            IndexWriter writer = new IndexWriter(merged, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NewLogMergePolicy(2)));
+            writer.AddIndexes(indexA, indexB);
+            writer.ForceMerge(1);
+            writer.Dispose();
+
+            fail = VerifyIndex(merged, 0);
+
+            Assert.IsFalse(fail, "The merged index is invalid");
+            indexA.Dispose();
+            indexB.Dispose();
+            merged.Dispose();
+        }
+
+        private bool VerifyIndex(Directory directory, int startAt)
+        {
+            bool fail = false;
+            IndexReader reader = DirectoryReader.Open(directory);
+
+            int max = reader.MaxDoc;
+            for (int i = 0; i < max; i++)
+            {
+                Document temp = reader.Document(i);
+                //System.out.println("doc "+i+"="+temp.GetField("count").StringValue);
+                //compare the index doc number to the value that it should be
+                if (!temp.GetField("count").GetStringValue().Equals((i + startAt) + ""))
+                {
+                    fail = true;
+                    Console.WriteLine("Document " + (i + startAt) + " is returning document " + temp.GetField("count").GetStringValue());
+                }
+            }
+            reader.Dispose();
+            return fail;
+        }
+
+        private void FillIndex(Random random, Directory dir, int start, int numDocs)
+        {
+            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).SetOpenMode(OpenMode.CREATE).SetMaxBufferedDocs(2).SetMergePolicy(NewLogMergePolicy(2)));
+
+            for (int i = start; i < (start + numDocs); i++)
+            {
+                Document temp = new Document();
+                temp.Add(NewStringField("count", ("" + i), Field.Store.YES));
+
+                writer.AddDocument(temp);
+            }
+            writer.Dispose();
+        }
+
+        // LUCENE-325: test forceMergeDeletes, when 2 singular merges
+        // are required
+        [Test]
+        public virtual void TestForceMergeDeletes()
+        {
+            Directory dir = NewDirectory();
+            IndexWriter writer = new IndexWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH));
+            Document document = new Document();
+
+            FieldType customType = new FieldType();
+            customType.IsStored = true;
+
+            FieldType customType1 = new FieldType(TextField.TYPE_NOT_STORED);
+            customType1.IsTokenized = false;
+            customType1.StoreTermVectors = true;
+            customType1.StoreTermVectorPositions = true;
+            customType1.StoreTermVectorOffsets = true;
+
+            Field idField = NewStringField("id", "", Field.Store.NO);
+            document.Add(idField);
+            Field storedField = NewField("stored", "stored", customType);
+            document.Add(storedField);
+            Field termVectorField = NewField("termVector", "termVector", customType1);
+            document.Add(termVectorField);
+            for (int i = 0; i < 10; i++)
+            {
+                idField.SetStringValue("" + i);
+                writer.AddDocument(document);
+            }
+            writer.Dispose();
+
+            IndexReader ir = DirectoryReader.Open(dir);
+            Assert.AreEqual(10, ir.MaxDoc);
+            Assert.AreEqual(10, ir.NumDocs);
+            ir.Dispose();
+
+            IndexWriterConfig dontMergeConfig = (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))).SetMergePolicy(NoMergePolicy.COMPOUND_FILES);
+            writer = new IndexWriter(dir, dontMergeConfig);
+            writer.DeleteDocuments(new Term("id", "0"));
+            writer.DeleteDocuments(new Term("id", "7"));
+            writer.Dispose();
+
+            ir = DirectoryReader.Open(dir);
+            Assert.AreEqual(8, ir.NumDocs);
+            ir.Dispose();
+
+            writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NewLogMergePolicy()));
+            Assert.AreEqual(8, writer.NumDocs);
+            Assert.AreEqual(10, writer.MaxDoc);
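+            // Expunging the two deleted docs rewrites their segments, so
+            // MaxDoc drops from 10 to 8 on the next reader open.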
+            writer.ForceMergeDeletes();
+            Assert.AreEqual(8, writer.NumDocs);
+            writer.Dispose();
+            ir = DirectoryReader.Open(dir);
+            Assert.AreEqual(8, ir.MaxDoc);
+            Assert.AreEqual(8, ir.NumDocs);
+            ir.Dispose();
+            dir.Dispose();
+        }
+
+        // LUCENE-325: test forceMergeDeletes, when many adjacent merges are required
+        [Test]
+        public virtual void TestForceMergeDeletes2()
+        {
+            Directory dir = NewDirectory();
+            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).SetMergePolicy(NewLogMergePolicy(50)));
+
+            Document document = new Document();
+
+            FieldType customType = new FieldType();
+            customType.IsStored = true;
+
+            FieldType customType1 = new FieldType(TextField.TYPE_NOT_STORED);
+            customType1.IsTokenized = false;
+            customType1.StoreTermVectors = true;
+            customType1.StoreTermVectorPositions = true;
+            customType1.StoreTermVectorOffsets = true;
+
+            Field storedField = NewField("stored", "stored", customType);
+            document.Add(storedField);
+            Field termVectorField = NewField("termVector", "termVector", customType1);
+            document.Add(termVectorField);
+            Field idField = NewStringField("id", "", Field.Store.NO);
+            document.Add(idField);
+            for (int i = 0; i < 98; i++)
+            {
+                idField.SetStringValue("" + i);
+                writer.AddDocument(document);
+            }
+            writer.Dispose();
+
+            IndexReader ir = DirectoryReader.Open(dir);
+            Assert.AreEqual(98, ir.MaxDoc);
+            Assert.AreEqual(98, ir.NumDocs);
+            ir.Dispose();
+
+            IndexWriterConfig dontMergeConfig = (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))).SetMergePolicy(NoMergePolicy.COMPOUND_FILES);
+            writer = new IndexWriter(dir, dontMergeConfig);
+            for (int i = 0; i < 98; i += 2)
+            {
+                writer.DeleteDocuments(new Term("id", "" + i));
+            }
+            writer.Dispose();
+
+            ir = DirectoryReader.Open(dir);
+            Assert.AreEqual(49, ir.NumDocs);
+            ir.Dispose();
+
+            writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NewLogMergePolicy(3)));
+            Assert.AreEqual(49, writer.NumDocs);
+            writer.ForceMergeDeletes();
+            writer.Dispose();
+            ir = DirectoryReader.Open(dir);
+            Assert.AreEqual(49, ir.MaxDoc);
+            Assert.AreEqual(49, ir.NumDocs);
+            ir.Dispose();
+            dir.Dispose();
+        }
+
+        // LUCENE-325: test forceMergeDeletes without waiting, when
+        // many adjacent merges are required
+        [Test]
+        public virtual void TestForceMergeDeletes3()
+        {
+            Directory dir = NewDirectory();
+            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).SetMergePolicy(NewLogMergePolicy(50)));
+
+            FieldType customType = new FieldType();
+            customType.IsStored = true;
+
+            FieldType customType1 = new FieldType(TextField.TYPE_NOT_STORED);
+            customType1.IsTokenized = false;
+            customType1.StoreTermVectors = true;
+            customType1.StoreTermVectorPositions = true;
+            customType1.StoreTermVectorOffsets = true;
+
+            Document document = new Document();
+            Field storedField = NewField("stored", "stored", customType);
+            document.Add(storedField);
+            Field termVectorField = NewField("termVector", "termVector", customType1);
+            document.Add(termVectorField);
+            Field idField = NewStringField("id", "", Field.Store.NO);
+            document.Add(idField);
+            for (int i = 0; i < 98; i++)
+            {
+                idField.SetStringValue("" + i);
+                writer.AddDocument(document);
+            }
+            writer.Dispose();
+
+            IndexReader ir = DirectoryReader.Open(dir);
+            Assert.AreEqual(98, ir.MaxDoc);
+            Assert.AreEqual(98, ir.NumDocs);
+            ir.Dispose();
+
+            IndexWriterConfig dontMergeConfig = (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))).SetMergePolicy(NoMergePolicy.COMPOUND_FILES);
+            writer = new IndexWriter(dir, dontMergeConfig);
+            for (int i = 0; i < 98; i += 2)
+            {
+                writer.DeleteDocuments(new Term("id", "" + i));
+            }
+            writer.Dispose();
+            ir = DirectoryReader.Open(dir);
+            Assert.AreEqual(49, ir.NumDocs);
+            ir.Dispose();
+
+            writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NewLogMergePolicy(3)));
+            writer.ForceMergeDeletes(false);
+            writer.Dispose();
+            ir = DirectoryReader.Open(dir);
+            Assert.AreEqual(49, ir.MaxDoc);
+            Assert.AreEqual(49, ir.NumDocs);
+            ir.Dispose();
+            dir.Dispose();
+        }
+
+        // Just intercepts all merges & verifies that we are never
+        // merging a segment with >= 20 (maxMergeDocs) docs
+        private class MyMergeScheduler : MergeScheduler
+        {
+            private readonly TestIndexWriterMerging OuterInstance;
+
+            public MyMergeScheduler(TestIndexWriterMerging outerInstance)
+            {
+                this.OuterInstance = outerInstance;
+            }
+
+            public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
+            {
+                lock (this)
+                {
+                    while (true)
+                    {
+                        MergePolicy.OneMerge merge = writer.NextMerge();
+                        if (merge == null)
+                        {
+                            break;
+                        }
+                        for (int i = 0; i < merge.Segments.Count; i++)
+                        {
+                            Debug.Assert(merge.Segments[i].Info.DocCount < 20);
+                        }
+                        writer.Merge(merge);
+                    }
+                }
+            }
+
+            protected override void Dispose(bool disposing)
+            {
+            }
+        }
+
+        // LUCENE-1013
+        [Test]
+        public virtual void TestSetMaxMergeDocs()
+        {
+            Directory dir = NewDirectory();
+            IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergeScheduler(new MyMergeScheduler(this)).SetMaxBufferedDocs(2).SetMergePolicy(NewLogMergePolicy());
+            LogMergePolicy lmp = (LogMergePolicy)conf.MergePolicy;
+            lmp.MaxMergeDocs = 20;
+            lmp.MergeFactor = 2;
+            IndexWriter iw = new IndexWriter(dir, conf);
+            Document document = new Document();
+
+            FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
+            customType.StoreTermVectors = true;
+
+            document.Add(NewField("tvtest", "a b c", customType));
+            for (int i = 0; i < 177; i++)
+            {
+                iw.AddDocument(document);
+            }
+            iw.Dispose();
+            dir.Dispose();
+        }
+
+#if !NETSTANDARD
+        // LUCENENET: There is no Timeout attribute in NUnit for .NET Core.
+        [Timeout(80000)]
+#endif
+        [Test, HasTimeout]
+        public virtual void TestNoWaitClose()
+        {
+            Directory directory = NewDirectory();
+
+            Document doc = new Document();
+            FieldType customType = new FieldType(TextField.TYPE_STORED);
+            customType.IsTokenized = false;
+
+            Field idField = NewField("id", "", customType);
+            doc.Add(idField);
+
+            for (int pass = 0; pass < 2; pass++)
+            {
+                if (VERBOSE)
+                {
+                    Console.WriteLine("TEST: pass=" + pass);
+                }
+
+                IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.CREATE).SetMaxBufferedDocs(2).SetMergePolicy(NewLogMergePolicy());
+                if (pass == 2)
+                {
+                    conf.SetMergeScheduler(new SerialMergeScheduler());
+                }
+
+                IndexWriter writer = new IndexWriter(directory, conf);
+                ((LogMergePolicy)writer.Config.MergePolicy).MergeFactor = 100;
+
+                for (int iter = 0; iter < 10; iter++)
+                {
+                    if (VERBOSE)
+                    {
+                        Console.WriteLine("TEST: iter=" + iter);
+                    }
+                    for (int j = 0; j < 199; j++)
+                    {
+                        idField.SetStringValue(Convert.ToString(iter * 201 + j));
+                        writer.AddDocument(doc);
+                    }
+
+                    int delID = iter * 199;
+                    for (int j = 0; j < 20; j++)
+                    {
+                        writer.DeleteDocuments(new Term("id", Convert.ToString(delID)));
+                        delID += 5;
+                    }
+
+                    // Force a bunch of merge threads to kick off so we
+                    // stress out aborting them on close:
+                    ((LogMergePolicy)writer.Config.MergePolicy).MergeFactor = 2;
+
+                    IndexWriter finalWriter = writer;
+                    List<Exception> failure = new List<Exception>();
+                    ThreadClass t1 = new ThreadAnonymousInnerClassHelper(this, doc, finalWriter, failure);
+
+                    if (failure.Count > 0)
+                    {
+                        throw failure[0];
+                    }
+
+                    t1.Start();
+
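+                    // Dispose(false): don't wait for background merges to
+                    // finish; running merges are aborted as part of the close.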
+                    writer.Dispose(false);
+                    t1.Join();
+
+                    // Make sure reader can read
+                    IndexReader reader = DirectoryReader.Open(directory);
+                    reader.Dispose();
+
+                    // Reopen
+                    writer = new IndexWriter(directory, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND).SetMergePolicy(NewLogMergePolicy()));
+                }
+                writer.Dispose();
+            }
+
+            directory.Dispose();
+        }
+
+        private class ThreadAnonymousInnerClassHelper : ThreadClass
+        {
+            private readonly TestIndexWriterMerging OuterInstance;
+
+            private Document Doc;
+            private IndexWriter FinalWriter;
+            private List<Exception> Failure;
+
+            public ThreadAnonymousInnerClassHelper(TestIndexWriterMerging outerInstance, Document doc, IndexWriter finalWriter, List<Exception> failure)
+            {
+                this.OuterInstance = outerInstance;
+                this.Doc = doc;
+                this.FinalWriter = finalWriter;
+                this.Failure = failure;
+            }
+
+            public override void Run()
+            {
+                bool done = false;
+                while (!done)
+                {
+                    for (int i = 0; i < 100; i++)
+                    {
+                        try
+                        {
+                            FinalWriter.AddDocument(Doc);
+                        }
+#pragma warning disable 168
+                        catch (AlreadyClosedException e)
+#pragma warning restore 168
+                        {
+                            done = true;
+                            break;
+                        }
+#pragma warning disable 168
+                        catch (System.NullReferenceException e)
+#pragma warning restore 168
+                        {
+                            done = true;
+                            break;
+                        }
+                        catch (Exception e)
+                        {
+                            Console.WriteLine(e.StackTrace);
+                            Failure.Add(e);
+                            done = true;
+                            break;
+                        }
+                    }
+                    Thread.Sleep(0);
+                }
+            }
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/TestIndexWriterNRTIsCurrent.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/TestIndexWriterNRTIsCurrent.cs b/src/Lucene.Net.Tests/Index/TestIndexWriterNRTIsCurrent.cs
new file mode 100644
index 0000000..3866d4d
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/TestIndexWriterNRTIsCurrent.cs
@@ -0,0 +1,260 @@
+using System;
+using System.Threading;
+using Lucene.Net.Documents;
+
+namespace Lucene.Net.Index
+{
+    using Lucene.Net.Randomized.Generators;
+    using Lucene.Net.Support;
+    using NUnit.Framework;
+    using System.IO;
+    using BytesRef = Lucene.Net.Util.BytesRef;
+    using Directory = Lucene.Net.Store.Directory;
+    using Document = Documents.Document;
+    using Field = Field;
+    using LuceneTestCase = Lucene.Net.Util.LuceneTestCase;
+
+    /*
+         * Licensed to the Apache Software Foundation (ASF) under one or more
+         * contributor license agreements. See the NOTICE file distributed with this
+         * work for additional information regarding copyright ownership. The ASF
+         * licenses this file to You under the Apache License, Version 2.0 (the
+         * "License"); you may not use this file except in compliance with the License.
+         * You may obtain a copy of the License at
+         *
+         * http://www.apache.org/licenses/LICENSE-2.0
+         *
+         * Unless required by applicable law or agreed to in writing, software
+         * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+         * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+         * License for the specific language governing permissions and limitations under
+         * the License.
+         */
+
+    using MockAnalyzer = Lucene.Net.Analysis.MockAnalyzer;
+    using TextField = TextField;
+
+    [TestFixture]
+    public class TestIndexWriterNRTIsCurrent : LuceneTestCase
+    {
+        public class ReaderHolder
+        {
+            internal volatile DirectoryReader Reader;
+            internal volatile bool Stop = false;
+        }
+
+        [Test]
+        public virtual void TestIsCurrentWithThreads()
+        {
+            Directory dir = NewDirectory();
+            IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
+            IndexWriter writer = new IndexWriter(dir, conf);
+            ReaderHolder holder = new ReaderHolder();
+            ReaderThread[] threads = new ReaderThread[AtLeast(3)];
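+            // The writer thread releases this latch after publishing its first
+            // refreshed NRT reader; the reader threads block on it until then.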
+            CountdownEvent latch = new CountdownEvent(1);
+            WriterThread writerThread = new WriterThread(holder, writer, AtLeast(500), Random(), latch);
+            for (int i = 0; i < threads.Length; i++)
+            {
+                threads[i] = new ReaderThread(holder, latch);
+                threads[i].Start();
+            }
+            writerThread.Start();
+
+            writerThread.Join();
+            bool failed = writerThread.Failed != null;
+            if (failed)
+            {
+                Console.WriteLine(writerThread.Failed.ToString());
+                Console.Write(writerThread.Failed.StackTrace);
+            }
+            for (int i = 0; i < threads.Length; i++)
+            {
+                threads[i].Join();
+                if (threads[i].Failed != null)
+                {
+                    Console.WriteLine(threads[i].Failed.ToString());
+                    Console.Write(threads[i].Failed.StackTrace);
+                    failed = true;
+                }
+            }
+            Assert.IsFalse(failed);
+            writer.Dispose();
+            dir.Dispose();
+        }
+
+        public class WriterThread : ThreadClass
+        {
+            internal readonly ReaderHolder Holder;
+            internal readonly IndexWriter Writer;
+            internal readonly int NumOps;
+            internal bool Countdown = true;
+            internal readonly CountdownEvent Latch;
+            internal Exception Failed;
+
+            internal WriterThread(ReaderHolder holder, IndexWriter writer, int numOps, Random random, CountdownEvent latch)
+                : base()
+            {
+                this.Holder = holder;
+                this.Writer = writer;
+                this.NumOps = numOps;
+                this.Latch = latch;
+            }
+
+            public override void Run()
+            {
+                DirectoryReader currentReader = null;
+                Random random = LuceneTestCase.Random();
+                try
+                {
+                    Document doc = new Document();
+                    doc.Add(new TextField("id", "1", Field.Store.NO));
+                    Writer.AddDocument(doc);
+                    Holder.Reader = currentReader = Writer.GetReader(true);
+                    Term term = new Term("id");
+                    for (int i = 0; i < NumOps && !Holder.Stop; i++)
+                    {
+                        float nextOp = (float)random.NextDouble();
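+                        // ~30% of ops update the doc, ~20% add a new doc,
+                        // and the remaining ~50% delete by term.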
+                        if (nextOp < 0.3)
+                        {
+                            term.Set("id", new BytesRef("1"));
+                            Writer.UpdateDocument(term, doc);
+                        }
+                        else if (nextOp < 0.5)
+                        {
+                            Writer.AddDocument(doc);
+                        }
+                        else
+                        {
+                            term.Set("id", new BytesRef("1"));
+                            Writer.DeleteDocuments(term);
+                        }
+                        if (Holder.Reader != currentReader)
+                        {
+                            Holder.Reader = currentReader;
+                            if (Countdown)
+                            {
+                                Countdown = false;
+                                Latch.Signal();
+                            }
+                        }
+                        if (random.NextBoolean())
+                        {
+                            Writer.Commit();
+                            DirectoryReader newReader = DirectoryReader.OpenIfChanged(currentReader);
+                            if (newReader != null)
+                            {
+                                currentReader.DecRef();
+                                currentReader = newReader;
+                            }
+                            if (currentReader.NumDocs == 0)
+                            {
+                                Writer.AddDocument(doc);
+                            }
+                        }
+                    }
+                }
+                catch (Exception e)
+                {
+                    Failed = e;
+                }
+                finally
+                {
+                    Holder.Reader = null;
+                    if (Countdown)
+                    {
+                        Latch.Signal();
+                    }
+                    if (currentReader != null)
+                    {
+                        try
+                        {
+                            currentReader.DecRef();
+                        }
+#pragma warning disable 168
+                        catch (IOException e)
+#pragma warning restore 168
+                        {
+                        }
+                    }
+                }
+                if (VERBOSE)
+                {
+                    Console.WriteLine("writer stopped - forced by reader: " + Holder.Stop);
+                }
+            }
+        }
+
+        public sealed class ReaderThread : ThreadClass
+        {
+            internal readonly ReaderHolder Holder;
+            internal readonly CountdownEvent Latch;
+            internal Exception Failed;
+
+            internal ReaderThread(ReaderHolder holder, CountdownEvent latch)
+                : base()
+            {
+                this.Holder = holder;
+                this.Latch = latch;
+            }
+
+            public override void Run()
+            {
+#if !NETSTANDARD
+                try
+                {
+#endif
+                    Latch.Wait();
+#if !NETSTANDARD
+                }
+                catch (ThreadInterruptedException e)
+                {
+                    Failed = e;
+                    return;
+                }
+#endif
+                DirectoryReader reader;
+                while ((reader = Holder.Reader) != null)
+                {
+                    if (reader.TryIncRef())
+                    {
+                        try
+                        {
+                            bool current = reader.IsCurrent;
+                            if (VERBOSE)
+                            {
+                                Console.WriteLine("Thread: " + Thread.CurrentThread + " Reader: " + reader + " isCurrent:" + current);
+                            }
+
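+                            // By the time a reader is published, the writer
+                            // has already made at least one further change,
+                            // so the reader must never be current.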
+                            Assert.IsFalse(current);
+                        }
+                        catch (Exception e)
+                        {
+                            if (VERBOSE)
+                            {
+                                Console.WriteLine("FAILED Thread: " + Thread.CurrentThread + " Reader: " + reader + " isCurrent: false");
+                            }
+                            Failed = e;
+                            Holder.Stop = true;
+                            return;
+                        }
+                        finally
+                        {
+                            try
+                            {
+                                reader.DecRef();
+                            }
+                            catch (IOException e)
+                            {
+                                if (Failed == null)
+                                {
+                                    Failed = e;
+                                }
+                            }
+                        }
+                        return;
+                    }
+                }
+            }
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/TestIndexWriterOnDiskFull.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/TestIndexWriterOnDiskFull.cs b/src/Lucene.Net.Tests/Index/TestIndexWriterOnDiskFull.cs
new file mode 100644
index 0000000..eae0626
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/TestIndexWriterOnDiskFull.cs
@@ -0,0 +1,703 @@
+using System;
+using System.Diagnostics;
+using Lucene.Net.Documents;
+
+namespace Lucene.Net.Index
+{
+    using NUnit.Framework;
+    using System.IO;
+    using Util;
+    using Directory = Lucene.Net.Store.Directory;
+    using Document = Documents.Document;
+    using Field = Field;
+    using FieldType = FieldType;
+    using IndexSearcher = Lucene.Net.Search.IndexSearcher;
+    using LuceneTestCase = Lucene.Net.Util.LuceneTestCase;
+
+    /*
+         * Licensed to the Apache Software Foundation (ASF) under one or more
+         * contributor license agreements.  See the NOTICE file distributed with
+         * this work for additional information regarding copyright ownership.
+         * The ASF licenses this file to You under the Apache License, Version 2.0
+         * (the "License"); you may not use this file except in compliance with
+         * the License.  You may obtain a copy of the License at
+         *
+         *     http://www.apache.org/licenses/LICENSE-2.0
+         *
+         * Unless required by applicable law or agreed to in writing, software
+         * distributed under the License is distributed on an "AS IS" BASIS,
+         * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+         * See the License for the specific language governing permissions and
+         * limitations under the License.
+         */
+
+    using MockAnalyzer = Lucene.Net.Analysis.MockAnalyzer;
+    using MockDirectoryWrapper = Lucene.Net.Store.MockDirectoryWrapper;
+    using NumericDocValuesField = NumericDocValuesField;
+    using RAMDirectory = Lucene.Net.Store.RAMDirectory;
+    using ScoreDoc = Lucene.Net.Search.ScoreDoc;
+    using TermQuery = Lucene.Net.Search.TermQuery;
+    using TestUtil = Lucene.Net.Util.TestUtil;
+    using TextField = TextField;
+
+    /// <summary>
+    /// Tests for IndexWriter when the disk runs out of space
+    /// </summary>
+    [TestFixture]
+    public class TestIndexWriterOnDiskFull : LuceneTestCase
+    {
+        /*
+         * Make sure IndexWriter cleans up on hitting a disk-full
+         * exception in addDocument.
+         * TODO: how to do this on Windows with FSDirectory?
+         */
+
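+        // A minimal sketch (not part of the original test) of how the disk-full
+        // condition is simulated below: cap MaxSizeInBytes on a
+        // MockDirectoryWrapper, and writes that push the directory past the cap
+        // throw IOException. The helper name is illustrative only.
+        private static MockDirectoryWrapper NewSizeCappedDir(Random random, long maxBytes)
+        {
+            var dir = new MockDirectoryWrapper(random, new RAMDirectory());
+            dir.MaxSizeInBytes = maxBytes; // writes beyond this limit fail
+            return dir;
+        }
+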
+        [Test]
+        public virtual void TestAddDocumentOnDiskFull()
+        {
+            for (int pass = 0; pass < 2; pass++)
+            {
+                if (VERBOSE)
+                {
+                    Console.WriteLine("TEST: pass=" + pass);
+                }
+                bool doAbort = pass == 1;
+                long diskFree = TestUtil.NextInt(Random(), 100, 300);
+                while (true)
+                {
+                    if (VERBOSE)
+                    {
+                        Console.WriteLine("TEST: cycle: diskFree=" + diskFree);
+                    }
+                    MockDirectoryWrapper dir = new MockDirectoryWrapper(Random(), new RAMDirectory());
+                    dir.MaxSizeInBytes = diskFree;
+                    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
+                    IMergeScheduler ms = writer.Config.MergeScheduler;
+                    if (ms is IConcurrentMergeScheduler)
+                    {
+                        // this test intentionally produces exceptions
+                        // in the threads that CMS launches; we don't
+                        // want to pollute test output with these.
+                        ((IConcurrentMergeScheduler)ms).SetSuppressExceptions();
+                    }
+
+                    bool hitError = false;
+                    try
+                    {
+                        for (int i = 0; i < 200; i++)
+                        {
+                            AddDoc(writer);
+                        }
+                        if (VERBOSE)
+                        {
+                            Console.WriteLine("TEST: done adding docs; now commit");
+                        }
+                        writer.Commit();
+                    }
+                    catch (IOException e)
+                    {
+                        if (VERBOSE)
+                        {
+                            Console.WriteLine("TEST: exception on addDoc");
+                            Console.WriteLine(e.StackTrace);
+                        }
+                        hitError = true;
+                    }
+
+                    if (hitError)
+                    {
+                        if (doAbort)
+                        {
+                            if (VERBOSE)
+                            {
+                                Console.WriteLine("TEST: now rollback");
+                            }
+                            writer.Rollback();
+                        }
+                        else
+                        {
+                            try
+                            {
+                                if (VERBOSE)
+                                {
+                                    Console.WriteLine("TEST: now close");
+                                }
+                                writer.Dispose();
+                            }
+                            catch (IOException e)
+                            {
+                                if (VERBOSE)
+                                {
+                                    Console.WriteLine("TEST: exception on close; retry w/ no disk space limit");
+                                    Console.WriteLine(e.StackTrace);
+                                }
+                                dir.MaxSizeInBytes = 0;
+                                writer.Dispose();
+                            }
+                        }
+
+                        //TestUtil.SyncConcurrentMerges(ms);
+
+                        if (TestUtil.AnyFilesExceptWriteLock(dir))
+                        {
+                            TestIndexWriter.AssertNoUnreferencedFiles(dir, "after disk full during addDocument");
+
+                            // Make sure reader can open the index:
+                            DirectoryReader.Open(dir).Dispose();
+                        }
+
+                        dir.Dispose();
+                        // Now try again w/ more space:
+
+                        diskFree += TEST_NIGHTLY ? TestUtil.NextInt(Random(), 400, 600) : TestUtil.NextInt(Random(), 3000, 5000);
+                    }
+                    else
+                    {
+                        //TestUtil.SyncConcurrentMerges(writer);
+                        dir.MaxSizeInBytes = 0;
+                        writer.Dispose();
+                        dir.Dispose();
+                        break;
+                    }
+                }
+            }
+        }
+
+        // TODO: make @Nightly variant that provokes more disk
+        // fulls
+
+        // TODO: have the test fail if on any given top-level
+        // iteration there was not a single IOException hit
+
+        /*
+        Test: make sure that when we run out of disk space or hit
+        random IOExceptions in any of the addIndexes(*) calls,
+        1) the index is not corrupt (a searcher can open/search
+        it) and 2) transactional semantics are followed:
+        either all or none of the incoming documents were in
+        fact added.
+         */
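+
+        // A minimal sketch (not part of the original test) of the all-or-nothing
+        // check performed below: after AddIndexes fails, docFreq must equal either
+        // the starting count (nothing applied) or the final count (everything
+        // applied), never a value in between. The helper name is illustrative.
+        private static void AssertAllOrNothing(IndexReader reader, Term term, int startCount, int endCount)
+        {
+            int freq = reader.DocFreq(term);
+            Assert.IsTrue(freq == startCount || freq == endCount,
+                "docFreq=" + freq + " is neither " + startCount + " nor " + endCount);
+        }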
+
+        [Test]
+        public virtual void TestAddIndexOnDiskFull()
+        {
+            // MemoryCodec, since it uses FST, is not necessarily
+            // "additive", ie if you add up N small FSTs, then merge
+            // them, the merged result can easily be larger than the
+            // sum because the merged FST may use array encoding for
+            // some arcs (which uses more space):
+
+            string idFormat = TestUtil.GetPostingsFormat("id");
+            string contentFormat = TestUtil.GetPostingsFormat("content");
+            AssumeFalse("this test cannot run with Memory codec", idFormat.Equals("Memory") || contentFormat.Equals("Memory"));
+
+            int START_COUNT = 57;
+            int NUM_DIR = TEST_NIGHTLY ? 50 : 5;
+            int END_COUNT = START_COUNT + NUM_DIR * (TEST_NIGHTLY ? 25 : 5);
+
+            // Build up a bunch of dirs that have indexes which we
+            // will then merge together by calling addIndexes(*):
+            Directory[] dirs = new Directory[NUM_DIR];
+            long inputDiskUsage = 0;
+            for (int i = 0; i < NUM_DIR; i++)
+            {
+                dirs[i] = NewDirectory();
+                IndexWriter writer = new IndexWriter(dirs[i], NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
+                for (int j = 0; j < 25; j++)
+                {
+                    AddDocWithIndex(writer, 25 * i + j);
+                }
+                writer.Dispose();
+                string[] files = dirs[i].ListAll();
+                for (int j = 0; j < files.Length; j++)
+                {
+                    inputDiskUsage += dirs[i].FileLength(files[j]);
+                }
+            }
+
+            // Now, build a starting index that has START_COUNT docs.  We
+            // will then try to addIndexes into a copy of this:
+            MockDirectoryWrapper startDir = NewMockDirectory();
+            IndexWriter indWriter = new IndexWriter(startDir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
+            for (int j = 0; j < START_COUNT; j++)
+            {
+                AddDocWithIndex(indWriter, j);
+            }
+            indWriter.Dispose();
+
+            // Make sure starting index seems to be working properly:
+            Term searchTerm = new Term("content", "aaa");
+            IndexReader reader = DirectoryReader.Open(startDir);
+            Assert.AreEqual(57, reader.DocFreq(searchTerm), "first docFreq");
+
+            IndexSearcher searcher = NewSearcher(reader);
+            ScoreDoc[] hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
+            Assert.AreEqual(57, hits.Length, "first number of hits");
+            reader.Dispose();
+
+            // Iterate with larger and larger amounts of free
+            // disk space.  With little free disk space,
+            // addIndexes will certainly run out of space &
+            // fail.  Verify that when this happens, index is
+            // not corrupt and index in fact has added no
+            // documents.  Then, we increase disk space by a random
+            // amount each iteration.  At some point there is
+            // enough free disk space and addIndexes should
+            // succeed and index should show all documents were
+            // added.
+
+            long diskUsage = startDir.SizeInBytes();
+
+            long startDiskUsage = 0;
+            string[] startFiles = startDir.ListAll();
+            for (int i = 0; i < startFiles.Length; i++)
+            {
+                startDiskUsage += startDir.FileLength(startFiles[i]);
+            }
+
+            for (int iter = 0; iter < 3; iter++)
+            {
+                if (VERBOSE)
+                {
+                    Console.WriteLine("TEST: iter=" + iter);
+                }
+
+                // Start with 50-200 bytes more than we are currently using:
+                long diskFree = diskUsage + TestUtil.NextInt(Random(), 50, 200);
+
+                int method = iter;
+
+                bool success = false;
+                bool done = false;
+
+                string methodName;
+                if (0 == method)
+                {
+                    methodName = "addIndexes(Directory[]) + forceMerge(1)";
+                }
+                else if (1 == method)
+                {
+                    methodName = "addIndexes(IndexReader[])";
+                }
+                else
+                {
+                    methodName = "addIndexes(Directory[])";
+                }
+
+                while (!done)
+                {
+                    if (VERBOSE)
+                    {
+                        Console.WriteLine("TEST: cycle...");
+                    }
+
+                    // Make a new dir that will enforce disk usage:
+                    MockDirectoryWrapper dir = new MockDirectoryWrapper(Random(), new RAMDirectory(startDir, NewIOContext(Random())));
+                    indWriter = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND).SetMergePolicy(NewLogMergePolicy(false)));
+                    IOException err = null;
+
+                    IMergeScheduler ms = indWriter.Config.MergeScheduler;
+                    for (int x = 0; x < 2; x++)
+                    {
+                        if (ms is IConcurrentMergeScheduler)
+                        // this test intentionally produces exceptions
+                        // in the threads that CMS launches; we don't
+                        // want to pollute test output with these.
+                        {
+                            if (0 == x)
+                            {
+                                ((IConcurrentMergeScheduler)ms).SetSuppressExceptions();
+                            }
+                            else
+                            {
+                                ((IConcurrentMergeScheduler)ms).ClearSuppressExceptions();
+                            }
+                        }
+
+                        // Two loops: first time, limit disk space &
+                        // throw random IOExceptions; second time, no
+                        // disk space limit:
+
+                        double rate = 0.05;
+                        double diskRatio = ((double)diskFree) / diskUsage;
+                        long thisDiskFree;
+
+                        string testName = null;
+
+                        if (0 == x)
+                        {
+                            dir.RandomIOExceptionRateOnOpen = Random().NextDouble() * 0.01;
+                            thisDiskFree = diskFree;
+                            if (diskRatio >= 2.0)
+                            {
+                                rate /= 2;
+                            }
+                            if (diskRatio >= 4.0)
+                            {
+                                rate /= 2;
+                            }
+                            if (diskRatio >= 6.0)
+                            {
+                                rate = 0.0;
+                            }
+                            if (VERBOSE)
+                            {
+                                testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
+                            }
+                        }
+                        else
+                        {
+                            dir.RandomIOExceptionRateOnOpen = 0.0;
+                            thisDiskFree = 0;
+                            rate = 0.0;
+                            if (VERBOSE)
+                            {
+                                testName = "disk full test " + methodName + " with unlimited disk space";
+                            }
+                        }
+
+                        if (VERBOSE)
+                        {
+                            Console.WriteLine("\ncycle: " + testName);
+                        }
+
+                        dir.TrackDiskUsage = true;
+                        dir.MaxSizeInBytes = thisDiskFree;
+                        dir.RandomIOExceptionRate = rate;
+
+                        try
+                        {
+                            if (0 == method)
+                            {
+                                if (VERBOSE)
+                                {
+                                    Console.WriteLine("TEST: now addIndexes count=" + dirs.Length);
+                                }
+                                indWriter.AddIndexes(dirs);
+                                if (VERBOSE)
+                                {
+                                    Console.WriteLine("TEST: now forceMerge");
+                                }
+                                indWriter.ForceMerge(1);
+                            }
+                            else if (1 == method)
+                            {
+                                IndexReader[] readers = new IndexReader[dirs.Length];
+                                for (int i = 0; i < dirs.Length; i++)
+                                {
+                                    readers[i] = DirectoryReader.Open(dirs[i]);
+                                }
+                                try
+                                {
+                                    indWriter.AddIndexes(readers);
+                                }
+                                finally
+                                {
+                                    for (int i = 0; i < dirs.Length; i++)
+                                    {
+                                        readers[i].Dispose();
+                                    }
+                                }
+                            }
+                            else
+                            {
+                                indWriter.AddIndexes(dirs);
+                            }
+
+                            success = true;
+                            if (VERBOSE)
+                            {
+                                Console.WriteLine("  success!");
+                            }
+
+                            if (0 == x)
+                            {
+                                done = true;
+                            }
+                        }
+                        catch (IOException e)
+                        {
+                            success = false;
+                            err = e;
+                            if (VERBOSE)
+                            {
+                                Console.WriteLine("  hit IOException: " + e);
+                                Console.WriteLine(e.StackTrace);
+                            }
+
+                            if (1 == x)
+                            {
+                                Console.WriteLine(e.StackTrace);
+                                Assert.Fail(methodName + " hit IOException after disk space was freed up");
+                            }
+                        }
+
+                        // Make sure all threads from
+                        // ConcurrentMergeScheduler are done
+                        TestUtil.SyncConcurrentMerges(indWriter);
+
+                        if (VERBOSE)
+                        {
+                            Console.WriteLine("  now test readers");
+                        }
+
+                        // Finally, verify index is not corrupt, and, if
+                        // we succeeded, we see all docs added, and if we
+                        // failed, we see either all docs or no docs added
+                        // (transactional semantics):
+                        dir.RandomIOExceptionRateOnOpen = 0.0;
+                        try
+                        {
+                            reader = DirectoryReader.Open(dir);
+                        }
+                        catch (IOException e)
+                        {
+                            Console.WriteLine(e.StackTrace);
+                            Assert.Fail(testName + ": exception when creating IndexReader: " + e);
+                        }
+                        int result = reader.DocFreq(searchTerm);
+                        if (success)
+                        {
+                            if (result != START_COUNT)
+                            {
+                                Assert.Fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT);
+                            }
+                        }
+                        else
+                        {
+                            // On hitting exception we still may have added
+                            // all docs:
+                            if (result != START_COUNT && result != END_COUNT)
+                            {
+                                Console.WriteLine(err.StackTrace);
+                                Assert.Fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
+                            }
+                        }
+
+                        searcher = NewSearcher(reader);
+                        try
+                        {
+                            hits = searcher.Search(new TermQuery(searchTerm), null, END_COUNT).ScoreDocs;
+                        }
+                        catch (IOException e)
+                        {
+                            Console.WriteLine(e.StackTrace);
+                            Assert.Fail(testName + ": exception when searching: " + e);
+                        }
+                        int result2 = hits.Length;
+                        if (success)
+                        {
+                            if (result2 != result)
+                            {
+                                Assert.Fail(testName + ": method did not throw exception but hits.Length for search on term 'aaa' is " + result2 + " instead of expected " + result);
+                            }
+                        }
+                        else
+                        {
+                            // On hitting exception we still may have added
+                            // all docs:
+                            if (result2 != result)
+                            {
+                                Console.WriteLine(err.StackTrace);
+                                Assert.Fail(testName + ": method did throw exception but hits.Length for search on term 'aaa' is " + result2 + " instead of expected " + result);
+                            }
+                        }
+
+                        reader.Dispose();
+                        if (VERBOSE)
+                        {
+                            Console.WriteLine("  count is " + result);
+                        }
+
+                        if (done || result == END_COUNT)
+                        {
+                            break;
+                        }
+                    }
+
+                    if (VERBOSE)
+                    {
+                        Console.WriteLine("  start disk = " + startDiskUsage + "; input disk = " + inputDiskUsage + "; max used = " + dir.MaxUsedSizeInBytes);
+                    }
+
+                    if (done)
+                    {
+                        // Javadocs state that temp free Directory space
+                        // required is at most 2X total input size of
+                        // indices so let's make sure:
+                        Assert.IsTrue((dir.MaxUsedSizeInBytes - startDiskUsage) < 2 * (startDiskUsage + inputDiskUsage), "max free Directory space required exceeded 2X the total input index sizes during " + methodName + ": max temp usage = " + (dir.MaxUsedSizeInBytes - startDiskUsage) + " bytes vs limit=" + (2 * (startDiskUsage + inputDiskUsage)) + "; starting disk usage = " + startDiskUsage + " bytes; " + "input index disk usage = " + inputDiskUsage + " bytes");
+                    }
+
+                    // Make sure we don't hit disk full during close below:
+                    dir.MaxSizeInBytes = 0;
+                    dir.RandomIOExceptionRate = 0.0;
+                    dir.RandomIOExceptionRateOnOpen = 0.0;
+
+                    indWriter.Dispose();
+
+                    // Wait for all BG threads to finish else
+                    // dir.Dispose() will throw IOException because
+                    // there are still open files
+                    TestUtil.SyncConcurrentMerges(ms);
+
+                    dir.Dispose();
+
+                    // Try again with more free space:
+                    diskFree += TEST_NIGHTLY ? TestUtil.NextInt(Random(), 4000, 8000) : TestUtil.NextInt(Random(), 40000, 80000);
+                }
+            }
+
+            startDir.Dispose();
+            foreach (Directory dir in dirs)
+            {
+                dir.Dispose();
+            }
+        }
+
+        private class FailTwiceDuringMerge : MockDirectoryWrapper.Failure
+        {
+            public bool DidFail1;
+            public bool DidFail2;
+
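+            // Eval is invoked by MockDirectoryWrapper as the test performs file
+            // operations; the first time each of the two merge phases below
+            // (merging terms, writing live docs) appears on the current stack,
+            // we throw a fake disk-full IOException, then let later calls pass.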
+            public override void Eval(MockDirectoryWrapper dir)
+            {
+                if (!DoFail)
+                {
+                    return;
+                }
+
+                /*typeof(SegmentMerger).Name.Equals(frame.GetType().Name) && */
+                if (StackTraceHelper.DoesStackTraceContainMethod("MergeTerms") && !DidFail1)
+                {
+                    DidFail1 = true;
+                    throw new IOException("fake disk full during mergeTerms");
+                }
+
+                /*typeof(LiveDocsFormat).Name.Equals(frame.GetType().Name) && */
+                if (StackTraceHelper.DoesStackTraceContainMethod("WriteLiveDocs") && !DidFail2)
+                {
+                    DidFail2 = true;
+                    throw new IOException("fake disk full while writing LiveDocs");
+                }
+            }
+        }
+
+        // LUCENE-2593
+        [Test]
+        public virtual void TestCorruptionAfterDiskFullDuringMerge()
+        {
+            MockDirectoryWrapper dir = NewMockDirectory();
+            //IndexWriter w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setReaderPooling(true));
+            IndexWriter w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergeScheduler(new SerialMergeScheduler()).SetReaderPooling(true).SetMergePolicy(NewLogMergePolicy(2)));
+            // we can do this because we add/delete/add (and don't merge to "nothing")
+            w.KeepFullyDeletedSegments = true;
+
+            Document doc = new Document();
+
+            doc.Add(NewTextField("f", "doctor who", Field.Store.NO));
+            w.AddDocument(doc);
+            w.Commit();
+
+            w.DeleteDocuments(new Term("f", "who"));
+            w.AddDocument(doc);
+
+            // disk fills up!
+            FailTwiceDuringMerge ftdm = new FailTwiceDuringMerge();
+            ftdm.SetDoFail();
+            dir.FailOn(ftdm);
+
+            try
+            {
+                w.Commit();
+                Assert.Fail("fake disk full IOExceptions not hit");
+            }
+#pragma warning disable 168
+            catch (IOException ioe)
+#pragma warning restore 168
+            {
+                // expected
+                Assert.IsTrue(ftdm.DidFail1 || ftdm.DidFail2);
+            }
+            TestUtil.CheckIndex(dir);
+            ftdm.ClearDoFail();
+            w.AddDocument(doc);
+            w.Dispose();
+
+            dir.Dispose();
+        }
+
+        // LUCENE-1130: make sure immediate disk full on creating
+        // an IndexWriter (hit during DW.ThreadState.Init()) is
+        // OK:
+        [Test]
+        public virtual void TestImmediateDiskFull([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")]IConcurrentMergeScheduler scheduler)
+        {
+            MockDirectoryWrapper dir = NewMockDirectory();
+            var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
+                            .SetMaxBufferedDocs(2)
+                            .SetMergeScheduler(scheduler);
+            IndexWriter writer = new IndexWriter(dir, config);
+            dir.MaxSizeInBytes = Math.Max(1, dir.RecomputedActualSizeInBytes);
+            Document doc = new Document();
+            FieldType customType = new FieldType(TextField.TYPE_STORED);
+            doc.Add(NewField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", customType));
+            try
+            {
+                writer.AddDocument(doc);
+                Assert.Fail("did not hit disk full");
+            }
+            catch (IOException)
+            {
+            }
+            // Without fix for LUCENE-1130: this call will hang:
+            try
+            {
+                writer.AddDocument(doc);
+                Assert.Fail("did not hit disk full");
+            }
+            catch (IOException)
+            {
+            }
+            try
+            {
+                writer.Dispose(false);
+                Assert.Fail("did not hit disk full");
+            }
+            catch (IOException)
+            {
+            }
+
+            // Make sure once disk space is avail again, we can
+            // cleanly close:
+            dir.MaxSizeInBytes = 0;
+            writer.Dispose(false);
+            dir.Dispose();
+        }
+
+        // TODO: these are also in TestIndexWriter... add a simple doc-writing method
+        // like this to LuceneTestCase?
+        private void AddDoc(IndexWriter writer)
+        {
+            Document doc = new Document();
+            doc.Add(NewTextField("content", "aaa", Field.Store.NO));
+            if (DefaultCodecSupportsDocValues())
+            {
+                doc.Add(new NumericDocValuesField("numericdv", 1));
+            }
+            writer.AddDocument(doc);
+        }
+
+        private void AddDocWithIndex(IndexWriter writer, int index)
+        {
+            Document doc = new Document();
+            doc.Add(NewTextField("content", "aaa " + index, Field.Store.NO));
+            doc.Add(NewTextField("id", "" + index, Field.Store.NO));
+            if (DefaultCodecSupportsDocValues())
+            {
+                doc.Add(new NumericDocValuesField("numericdv", 1));
+            }
+            writer.AddDocument(doc);
+        }
+    }
+}
\ No newline at end of file