Posted to commits@lucenenet.apache.org by ni...@apache.org on 2017/02/26 23:37:07 UTC

[19/72] [abbrv] [partial] lucenenet git commit: Lucene.Net.Tests: Removed \core directory and put its contents in root directory

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/TestSegmentTermEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/TestSegmentTermEnum.cs b/src/Lucene.Net.Tests/Index/TestSegmentTermEnum.cs
new file mode 100644
index 0000000..3ed504b
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/TestSegmentTermEnum.cs
@@ -0,0 +1,152 @@
+using Lucene.Net.Documents;
+
+namespace Lucene.Net.Index
+{
+    using NUnit.Framework;
+    using BytesRef = Lucene.Net.Util.BytesRef;
+    using Directory = Lucene.Net.Store.Directory;
+    using Document = Documents.Document;
+
+    /*
+     * Licensed to the Apache Software Foundation (ASF) under one or more
+     * contributor license agreements.  See the NOTICE file distributed with
+     * this work for additional information regarding copyright ownership.
+     * The ASF licenses this file to You under the Apache License, Version 2.0
+     * (the "License"); you may not use this file except in compliance with
+     * the License.  You may obtain a copy of the License at
+     *
+     *     http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     * See the License for the specific language governing permissions and
+     * limitations under the License.
+     */
+
+    using Field = Field;
+    using Lucene41PostingsFormat = Lucene.Net.Codecs.Lucene41.Lucene41PostingsFormat;
+    using LuceneTestCase = Lucene.Net.Util.LuceneTestCase;
+    using MockAnalyzer = Lucene.Net.Analysis.MockAnalyzer;
+    using TestUtil = Lucene.Net.Util.TestUtil;
+
+    [TestFixture]
+    public class TestSegmentTermEnum : LuceneTestCase
+    {
+        internal Directory Dir;
+
+        [SetUp]
+        public override void SetUp()
+        {
+            base.SetUp();
+            Dir = NewDirectory();
+        }
+
+        [TearDown]
+        public override void TearDown()
+        {
+            Dir.Dispose();
+            base.TearDown();
+        }
+
+        [Test]
+        public virtual void TestTermEnum()
+        {
+            IndexWriter writer = null;
+
+            writer = new IndexWriter(Dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
+
+            // Add 100 documents with term: aaa
+            // Add 100 documents with terms: aaa bbb
+            // Therefore, term 'aaa' has a document frequency of 200 and term 'bbb' of 100
+            for (int i = 0; i < 100; i++)
+            {
+                AddDoc(writer, "aaa");
+                AddDoc(writer, "aaa bbb");
+            }
+
+            writer.Dispose();
+
+            // verify the document frequency of terms in a multi-segment index
+            VerifyDocFreq();
+
+            // merge segments
+            writer = new IndexWriter(Dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND));
+            writer.ForceMerge(1);
+            writer.Dispose();
+
+            // verify the document frequency of terms in a single-segment index
+            VerifyDocFreq();
+        }
+
+        [Test]
+        public virtual void TestPrevTermAtEnd()
+        {
+            IndexWriter writer = new IndexWriter(Dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetCodec(TestUtil.AlwaysPostingsFormat(new Lucene41PostingsFormat())));
+            AddDoc(writer, "aaa bbb");
+            writer.Dispose();
+            SegmentReader reader = GetOnlySegmentReader(DirectoryReader.Open(Dir));
+            TermsEnum terms = reader.Fields.GetTerms("content").GetIterator(null);
+            Assert.IsNotNull(terms.Next());
+            Assert.AreEqual("aaa", terms.Term.Utf8ToString());
+            Assert.IsNotNull(terms.Next());
+            long ordB;
+            try
+            {
+                ordB = terms.Ord;
+            }
+#pragma warning disable 168
+            catch (System.NotSupportedException uoe)
+#pragma warning restore 168
+            {
+                // ok -- codec is not required to support ord
+                reader.Dispose();
+                return;
+            }
+            Assert.AreEqual("bbb", terms.Term.Utf8ToString());
+            Assert.IsNull(terms.Next());
+
+            terms.SeekExact(ordB);
+            Assert.AreEqual("bbb", terms.Term.Utf8ToString());
+            reader.Dispose();
+        }
+
+        private void VerifyDocFreq()
+        {
+            IndexReader reader = DirectoryReader.Open(Dir);
+            TermsEnum termEnum = MultiFields.GetTerms(reader, "content").GetIterator(null);
+
+            // create enumeration of all terms
+            // go to the first term (aaa)
+            termEnum.Next();
+            // assert that term is 'aaa'
+            Assert.AreEqual("aaa", termEnum.Term.Utf8ToString());
+            Assert.AreEqual(200, termEnum.DocFreq);
+            // go to the second term (bbb)
+            termEnum.Next();
+            // assert that term is 'bbb'
+            Assert.AreEqual("bbb", termEnum.Term.Utf8ToString());
+            Assert.AreEqual(100, termEnum.DocFreq);
+
+            // create enumeration of terms after term 'aaa',
+            // including 'aaa'
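+            // (SeekCeil positions the enum on the smallest term >= the given text,
+            // so seeking to 'aaa' lands on 'aaa' itself)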
+            termEnum.SeekCeil(new BytesRef("aaa"));
+            // assert that term is 'aaa'
+            Assert.AreEqual("aaa", termEnum.Term.Utf8ToString());
+            Assert.AreEqual(200, termEnum.DocFreq);
+            // go to term 'bbb'
+            termEnum.Next();
+            // assert that term is 'bbb'
+            Assert.AreEqual("bbb", termEnum.Term.Utf8ToString());
+            Assert.AreEqual(100, termEnum.DocFreq);
+            reader.Dispose();
+        }
+
+        private void AddDoc(IndexWriter writer, string value)
+        {
+            Document doc = new Document();
+            doc.Add(NewTextField("content", value, Field.Store.NO));
+            writer.AddDocument(doc);
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/TestSizeBoundedForceMerge.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/TestSizeBoundedForceMerge.cs b/src/Lucene.Net.Tests/Index/TestSizeBoundedForceMerge.cs
new file mode 100644
index 0000000..812e759
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/TestSizeBoundedForceMerge.cs
@@ -0,0 +1,403 @@
+using Lucene.Net.Documents;
+
+namespace Lucene.Net.Index
+{
+    using NUnit.Framework;
+    using Directory = Lucene.Net.Store.Directory;
+
+    /*
+     * Licensed to the Apache Software Foundation (ASF) under one or more
+     * contributor license agreements.  See the NOTICE file distributed with
+     * this work for additional information regarding copyright ownership.
+     * The ASF licenses this file to You under the Apache License, Version 2.0
+     * (the "License"); you may not use this file except in compliance with
+     * the License.  You may obtain a copy of the License at
+     *
+     *     http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     * See the License for the specific language governing permissions and
+     * limitations under the License.
+     */
+
+    using Document = Documents.Document;
+    using Field = Field;
+    using LuceneTestCase = Lucene.Net.Util.LuceneTestCase;
+    using RAMDirectory = Lucene.Net.Store.RAMDirectory;
+    using StringField = StringField;
+
+    [TestFixture]
+    public class TestSizeBoundedForceMerge : LuceneTestCase
+    {
+        private void AddDocs(IndexWriter writer, int numDocs)
+        {
+            AddDocs(writer, numDocs, false);
+        }
+
+        private void AddDocs(IndexWriter writer, int numDocs, bool withID)
+        {
+            for (int i = 0; i < numDocs; i++)
+            {
+                Document doc = new Document();
+                if (withID)
+                {
+                    doc.Add(new StringField("id", "" + i, Field.Store.NO));
+                }
+                writer.AddDocument(doc);
+            }
+            writer.Commit();
+        }
+
+        private IndexWriterConfig NewWriterConfig()
+        {
+            IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, null);
+            conf.SetMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+            conf.SetRAMBufferSizeMB(IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB);
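+            // with auto-flush by doc count disabled (and documents far too small to
+            // trip the RAM buffer), each AddDocs call, which ends in Commit(),
+            // produces exactly one new segment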
+            // prevent any merges by default.
+            conf.SetMergePolicy(NoMergePolicy.COMPOUND_FILES);
+            return conf;
+        }
+
+        [Test]
+        public virtual void TestByteSizeLimit()
+        {
+            // tests that the max merge size constraint is applied during forceMerge.
+            Directory dir = new RAMDirectory();
+
+            // Prepare an index w/ several small segments and a large one.
+            IndexWriterConfig conf = NewWriterConfig();
+            IndexWriter writer = new IndexWriter(dir, conf);
+            const int numSegments = 15;
+            for (int i = 0; i < numSegments; i++)
+            {
+                int numDocs = i == 7 ? 30 : 1;
+                AddDocs(writer, numDocs);
+            }
+            writer.Dispose();
+
+            SegmentInfos sis = new SegmentInfos();
+            sis.Read(dir);
+            double min = sis.Info(0).SizeInBytes();
+
+            conf = NewWriterConfig();
+            LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
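+            // MaxMergeMBForForcedMerge is expressed in megabytes, so the smallest
+            // segment's byte size (plus 1) is divided by 2^20; only segments at or
+            // below this size stay eligible for the forced merge, which excludes
+            // the single oversized segment.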
+            lmp.MaxMergeMBForForcedMerge = (min + 1) / (1 << 20);
+            conf.SetMergePolicy(lmp);
+
+            writer = new IndexWriter(dir, conf);
+            writer.ForceMerge(1);
+            writer.Dispose();
+
+            // Should only be 3 segments in the index, because one of them exceeds the size limit
+            sis = new SegmentInfos();
+            sis.Read(dir);
+            Assert.AreEqual(3, sis.Count);
+        }
+
+        [Test]
+        public virtual void TestNumDocsLimit()
+        {
+            // tests that the max merge docs constraint is applied during forceMerge.
+            Directory dir = new RAMDirectory();
+
+            // Prepare an index w/ several small segments and a large one.
+            IndexWriterConfig conf = NewWriterConfig();
+            IndexWriter writer = new IndexWriter(dir, conf);
+
+            AddDocs(writer, 3);
+            AddDocs(writer, 3);
+            AddDocs(writer, 5);
+            AddDocs(writer, 3);
+            AddDocs(writer, 3);
+            AddDocs(writer, 3);
+            AddDocs(writer, 3);
+
+            writer.Dispose();
+
+            conf = NewWriterConfig();
+            LogMergePolicy lmp = new LogDocMergePolicy();
+            lmp.MaxMergeDocs = 3;
+            conf.SetMergePolicy(lmp);
+
+            writer = new IndexWriter(dir, conf);
+            writer.ForceMerge(1);
+            writer.Dispose();
+
+            // Should only be 3 segments in the index, because one of them exceeds the max merge docs limit
+            SegmentInfos sis = new SegmentInfos();
+            sis.Read(dir);
+            Assert.AreEqual(3, sis.Count);
+        }
+
+        [Test]
+        public virtual void TestLastSegmentTooLarge()
+        {
+            Directory dir = new RAMDirectory();
+
+            IndexWriterConfig conf = NewWriterConfig();
+            IndexWriter writer = new IndexWriter(dir, conf);
+
+            AddDocs(writer, 3);
+            AddDocs(writer, 3);
+            AddDocs(writer, 3);
+            AddDocs(writer, 5);
+
+            writer.Dispose();
+
+            conf = NewWriterConfig();
+            LogMergePolicy lmp = new LogDocMergePolicy();
+            lmp.MaxMergeDocs = 3;
+            conf.SetMergePolicy(lmp);
+
+            writer = new IndexWriter(dir, conf);
+            writer.ForceMerge(1);
+            writer.Dispose();
+
+            SegmentInfos sis = new SegmentInfos();
+            sis.Read(dir);
+            Assert.AreEqual(2, sis.Count);
+        }
+
+        [Test]
+        public virtual void TestFirstSegmentTooLarge()
+        {
+            Directory dir = new RAMDirectory();
+
+            IndexWriterConfig conf = NewWriterConfig();
+            IndexWriter writer = new IndexWriter(dir, conf);
+
+            AddDocs(writer, 5);
+            AddDocs(writer, 3);
+            AddDocs(writer, 3);
+            AddDocs(writer, 3);
+
+            writer.Dispose();
+
+            conf = NewWriterConfig();
+            LogMergePolicy lmp = new LogDocMergePolicy();
+            lmp.MaxMergeDocs = 3;
+            conf.SetMergePolicy(lmp);
+
+            writer = new IndexWriter(dir, conf);
+            writer.ForceMerge(1);
+            writer.Dispose();
+
+            SegmentInfos sis = new SegmentInfos();
+            sis.Read(dir);
+            Assert.AreEqual(2, sis.Count);
+        }
+
+        [Test]
+        public virtual void TestAllSegmentsSmall()
+        {
+            Directory dir = new RAMDirectory();
+
+            IndexWriterConfig conf = NewWriterConfig();
+            IndexWriter writer = new IndexWriter(dir, conf);
+
+            AddDocs(writer, 3);
+            AddDocs(writer, 3);
+            AddDocs(writer, 3);
+            AddDocs(writer, 3);
+
+            writer.Dispose();
+
+            conf = NewWriterConfig();
+            LogMergePolicy lmp = new LogDocMergePolicy();
+            lmp.MaxMergeDocs = 3;
+            conf.SetMergePolicy(lmp);
+
+            writer = new IndexWriter(dir, conf);
+            writer.ForceMerge(1);
+            writer.Dispose();
+
+            SegmentInfos sis = new SegmentInfos();
+            sis.Read(dir);
+            Assert.AreEqual(1, sis.Count);
+        }
+
+        [Test]
+        public virtual void TestAllSegmentsLarge()
+        {
+            Directory dir = new RAMDirectory();
+
+            IndexWriterConfig conf = NewWriterConfig();
+            IndexWriter writer = new IndexWriter(dir, conf);
+
+            AddDocs(writer, 3);
+            AddDocs(writer, 3);
+            AddDocs(writer, 3);
+
+            writer.Dispose();
+
+            conf = NewWriterConfig();
+            LogMergePolicy lmp = new LogDocMergePolicy();
+            lmp.MaxMergeDocs = 2;
+            conf.SetMergePolicy(lmp);
+
+            writer = new IndexWriter(dir, conf);
+            writer.ForceMerge(1);
+            writer.Dispose();
+
+            SegmentInfos sis = new SegmentInfos();
+            sis.Read(dir);
+            Assert.AreEqual(3, sis.Count);
+        }
+
+        [Test]
+        public virtual void TestOneLargeOneSmall()
+        {
+            Directory dir = new RAMDirectory();
+
+            IndexWriterConfig conf = NewWriterConfig();
+            IndexWriter writer = new IndexWriter(dir, conf);
+
+            AddDocs(writer, 3);
+            AddDocs(writer, 5);
+            AddDocs(writer, 3);
+            AddDocs(writer, 5);
+
+            writer.Dispose();
+
+            conf = NewWriterConfig();
+            LogMergePolicy lmp = new LogDocMergePolicy();
+            lmp.MaxMergeDocs = 3;
+            conf.SetMergePolicy(lmp);
+
+            writer = new IndexWriter(dir, conf);
+            writer.ForceMerge(1);
+            writer.Dispose();
+
+            SegmentInfos sis = new SegmentInfos();
+            sis.Read(dir);
+            Assert.AreEqual(4, sis.Count);
+        }
+
+        [Test]
+        public virtual void TestMergeFactor()
+        {
+            Directory dir = new RAMDirectory();
+
+            IndexWriterConfig conf = NewWriterConfig();
+            IndexWriter writer = new IndexWriter(dir, conf);
+
+            AddDocs(writer, 3);
+            AddDocs(writer, 3);
+            AddDocs(writer, 3);
+            AddDocs(writer, 3);
+            AddDocs(writer, 5);
+            AddDocs(writer, 3);
+            AddDocs(writer, 3);
+
+            writer.Dispose();
+
+            conf = NewWriterConfig();
+            LogMergePolicy lmp = new LogDocMergePolicy();
+            lmp.MaxMergeDocs = 3;
+            lmp.MergeFactor = 2;
+            conf.SetMergePolicy(lmp);
+
+            writer = new IndexWriter(dir, conf);
+            writer.ForceMerge(1);
+            writer.Dispose();
+
+            // Should only be 4 segments in the index, because of the merge factor and
+            // max merge docs settings.
+            SegmentInfos sis = new SegmentInfos();
+            sis.Read(dir);
+            Assert.AreEqual(4, sis.Count);
+        }
+
+        [Test]
+        public virtual void TestSingleMergeableSegment()
+        {
+            Directory dir = new RAMDirectory();
+
+            IndexWriterConfig conf = NewWriterConfig();
+            IndexWriter writer = new IndexWriter(dir, conf);
+
+            AddDocs(writer, 3);
+            AddDocs(writer, 5);
+            AddDocs(writer, 3);
+
+            // delete the last document, so that the last segment is merged.
+            writer.DeleteDocuments(new Term("id", "10"));
+            writer.Dispose();
+
+            conf = NewWriterConfig();
+            LogMergePolicy lmp = new LogDocMergePolicy();
+            lmp.MaxMergeDocs = 3;
+            conf.SetMergePolicy(lmp);
+
+            writer = new IndexWriter(dir, conf);
+            writer.ForceMerge(1);
+            writer.Dispose();
+
+            // Verify that the last segment does not have deletions.
+            SegmentInfos sis = new SegmentInfos();
+            sis.Read(dir);
+            Assert.AreEqual(3, sis.Count);
+            Assert.IsFalse(sis.Info(2).HasDeletions);
+        }
+
+        [Test]
+        public virtual void TestSingleNonMergeableSegment()
+        {
+            Directory dir = new RAMDirectory();
+
+            IndexWriterConfig conf = NewWriterConfig();
+            IndexWriter writer = new IndexWriter(dir, conf);
+
+            AddDocs(writer, 3, true);
+
+            writer.Dispose();
+
+            conf = NewWriterConfig();
+            LogMergePolicy lmp = new LogDocMergePolicy();
+            lmp.MaxMergeDocs = 3;
+            conf.SetMergePolicy(lmp);
+
+            writer = new IndexWriter(dir, conf);
+            writer.ForceMerge(1);
+            writer.Dispose();
+
+            // Verify that the index still has its single segment (there was nothing to merge).
+            SegmentInfos sis = new SegmentInfos();
+            sis.Read(dir);
+            Assert.AreEqual(1, sis.Count);
+        }
+
+        [Test]
+        public virtual void TestSingleMergeableTooLargeSegment()
+        {
+            Directory dir = new RAMDirectory();
+
+            IndexWriterConfig conf = NewWriterConfig();
+            IndexWriter writer = new IndexWriter(dir, conf);
+
+            AddDocs(writer, 5, true);
+
+            // delete the last document
+            writer.DeleteDocuments(new Term("id", "4"));
+            writer.Dispose();
+
+            conf = NewWriterConfig();
+            LogMergePolicy lmp = new LogDocMergePolicy();
+            lmp.MaxMergeDocs = 2;
+            conf.SetMergePolicy(lmp);
+
+            writer = new IndexWriter(dir, conf);
+            writer.ForceMerge(1);
+            writer.Dispose();
+
+            // Verify that the segment was not merged (it exceeds the max merge docs limit) and still has deletions.
+            SegmentInfos sis = new SegmentInfos();
+            sis.Read(dir);
+            Assert.AreEqual(1, sis.Count);
+            Assert.IsTrue(sis.Info(0).HasDeletions);
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/TestSnapshotDeletionPolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/TestSnapshotDeletionPolicy.cs b/src/Lucene.Net.Tests/Index/TestSnapshotDeletionPolicy.cs
new file mode 100644
index 0000000..3fb56f6
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/TestSnapshotDeletionPolicy.cs
@@ -0,0 +1,527 @@
+using System;
+using System.Collections.Generic;
+using System.Threading;
+using Lucene.Net.Documents;
+
+namespace Lucene.Net.Index
+{
+    using Lucene.Net.Support;
+    using NUnit.Framework;
+    using Directory = Lucene.Net.Store.Directory;
+    using Document = Documents.Document;
+    using FieldType = FieldType;
+    using IndexInput = Lucene.Net.Store.IndexInput;
+    using LuceneTestCase = Lucene.Net.Util.LuceneTestCase;
+
+    /*
+     * Licensed to the Apache Software Foundation (ASF) under one or more
+     * contributor license agreements.  See the NOTICE file distributed with
+     * this work for additional information regarding copyright ownership.
+     * The ASF licenses this file to You under the Apache License, Version 2.0
+     * (the "License"); you may not use this file except in compliance with
+     * the License.  You may obtain a copy of the License at
+     *
+     *     http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     * See the License for the specific language governing permissions and
+     * limitations under the License.
+     */
+
+    using MockAnalyzer = Lucene.Net.Analysis.MockAnalyzer;
+    using TextField = TextField;
+
+    //
+    // this was developed for Lucene In Action,
+    // http://lucenebook.com
+    //
+    [TestFixture]
+    public class TestSnapshotDeletionPolicy : LuceneTestCase
+    {
+        public const string INDEX_PATH = "test.snapshots";
+
+        protected internal virtual IndexWriterConfig GetConfig(Random random, IndexDeletionPolicy dp)
+        {
+            IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+            if (dp != null)
+            {
+                conf.SetIndexDeletionPolicy(dp);
+            }
+            return conf;
+        }
+
+        protected internal virtual void CheckSnapshotExists(Directory dir, IndexCommit c)
+        {
+            string segFileName = c.SegmentsFileName;
+            Assert.IsTrue(SlowFileExists(dir, segFileName), "segments file not found in directory: " + segFileName);
+        }
+
+        protected internal virtual void CheckMaxDoc(IndexCommit commit, int expectedMaxDoc)
+        {
+            IndexReader reader = DirectoryReader.Open(commit);
+            try
+            {
+                Assert.AreEqual(expectedMaxDoc, reader.MaxDoc);
+            }
+            finally
+            {
+                reader.Dispose();
+            }
+        }
+
+        protected internal virtual void PrepareIndexAndSnapshots(SnapshotDeletionPolicy sdp, IndexWriter writer, int numSnapshots)
+        {
+            for (int i = 0; i < numSnapshots; i++)
+            {
+                // create dummy document to trigger commit.
+                writer.AddDocument(new Document());
+                writer.Commit();
+                Snapshots.Add(sdp.Snapshot());
+            }
+        }
+
+        protected internal virtual SnapshotDeletionPolicy DeletionPolicy
+        {
+            get
+            {
+                return new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
+            }
+        }
+
+        protected internal virtual void AssertSnapshotExists(Directory dir, SnapshotDeletionPolicy sdp, int numSnapshots, bool checkIndexCommitSame)
+        {
+            for (int i = 0; i < numSnapshots; i++)
+            {
+                IndexCommit snapshot = Snapshots[i];
+                CheckMaxDoc(snapshot, i + 1);
+                CheckSnapshotExists(dir, snapshot);
+                if (checkIndexCommitSame)
+                {
+                    Assert.AreSame(snapshot, sdp.GetIndexCommit(snapshot.Generation));
+                }
+                else
+                {
+                    Assert.AreEqual(snapshot.Generation, sdp.GetIndexCommit(snapshot.Generation).Generation);
+                }
+            }
+        }
+
+        protected internal IList<IndexCommit> Snapshots;
+
+        [SetUp]
+        public override void SetUp()
+        {
+            base.SetUp();
+
+            this.Snapshots = new List<IndexCommit>();
+        }
+
+        [Test]
+        public virtual void TestSnapshotDeletionPolicy_Mem()
+        {
+            Directory fsDir = NewDirectory();
+            RunTest(Random(), fsDir);
+            fsDir.Dispose();
+        }
+
+        private void RunTest(Random random, Directory dir)
+        {
+            // Run for ~1 second
+            long stopTime = Environment.TickCount + 1000;
+
+            SnapshotDeletionPolicy dp = DeletionPolicy;
+            IndexWriter writer = new IndexWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).SetIndexDeletionPolicy(dp).SetMaxBufferedDocs(2));
+
+            // Verify we catch misuse (no commit has been made yet, so there is nothing to snapshot):
+            try
+            {
+                dp.Snapshot();
+                Assert.Fail("did not hit exception");
+            }
+#pragma warning disable 168
+            catch (InvalidOperationException ise)
+#pragma warning restore 168
+            {
+                // expected
+            }
+            dp = (SnapshotDeletionPolicy)writer.Config.IndexDeletionPolicy;
+            writer.Commit();
+
+            ThreadClass t = new ThreadAnonymousInnerClassHelper(stopTime, writer, NewField);
+
+            t.Start();
+
+            // While the above indexing thread is running, take many
+            // backups:
+            do
+            {
+                BackupIndex(dir, dp);
+                Thread.Sleep(20);
+            } while (t.IsAlive);
+
+            t.Join();
+
+            // Add one more document to force writer to commit a
+            // final segment, so deletion policy has a chance to
+            // delete again:
+            Document doc = new Document();
+            FieldType customType = new FieldType(TextField.TYPE_STORED);
+            customType.StoreTermVectors = true;
+            customType.StoreTermVectorPositions = true;
+            customType.StoreTermVectorOffsets = true;
+            doc.Add(NewField("content", "aaa", customType));
+            writer.AddDocument(doc);
+
+            // Make sure we don't have any leftover files in the
+            // directory:
+            writer.Dispose();
+            TestIndexWriter.AssertNoUnreferencedFiles(dir, "some files were not deleted but should have been");
+        }
+
+        private class ThreadAnonymousInnerClassHelper : ThreadClass
+        {
+            private long StopTime;
+            private IndexWriter Writer;
+            private readonly Func<string, string, FieldType, Field> _newFieldFunc;
+
+            /// <param name="newFieldFunc">
+            /// LUCENENET specific
+            /// Passed in because <see cref="LuceneTestCase.NewField(string, string, FieldType)"/>
+            /// is no longer static. 
+            /// </param>
+            public ThreadAnonymousInnerClassHelper(long stopTime, IndexWriter writer, Func<string, string, FieldType, Field> newFieldFunc)
+            {
+                this.StopTime = stopTime;
+                this.Writer = writer;
+                _newFieldFunc = newFieldFunc;
+            }
+
+            public override void Run()
+            {
+                Document doc = new Document();
+                FieldType customType = new FieldType(TextField.TYPE_STORED);
+                customType.StoreTermVectors = true;
+                customType.StoreTermVectorPositions = true;
+                customType.StoreTermVectorOffsets = true;
+                doc.Add(_newFieldFunc("content", "aaa", customType));
+                do
+                {
+                    for (int i = 0; i < 27; i++)
+                    {
+                        try
+                        {
+                            Writer.AddDocument(doc);
+                        }
+                        catch (Exception t)
+                        {
+                            Console.WriteLine(t.StackTrace);
+                            Assert.Fail("addDocument failed");
+                        }
+                        if (i % 2 == 0)
+                        {
+                            try
+                            {
+                                Writer.Commit();
+                            }
+                            catch (Exception e)
+                            {
+                                throw new Exception(e.Message, e);
+                            }
+                        }
+                    }
+#if !NETSTANDARD
+                    try
+                    {
+#endif 
+                        Thread.Sleep(1);
+#if !NETSTANDARD
+                    }
+                    catch (ThreadInterruptedException ie)
+                    {
+                        throw new ThreadInterruptedException("Thread Interrupted Exception", ie);
+                    }
+#endif
+                } while (Environment.TickCount < StopTime);
+            }
+        }
+
+        /// <summary>
+        /// Example showing how to use the SnapshotDeletionPolicy to take a backup.
+        /// this method does not really do a backup; instead, it reads every byte of
+        /// every file just to test that the files indeed exist and are readable even
+        /// while the index is changing.
+        /// </summary>
+        public virtual void BackupIndex(Directory dir, SnapshotDeletionPolicy dp)
+        {
+            // To backup an index we first take a snapshot:
+            IndexCommit snapshot = dp.Snapshot();
+            try
+            {
+                CopyFiles(dir, snapshot);
+            }
+            finally
+            {
+                // Make sure to release the snapshot, otherwise these
+                // files will never be deleted during this IndexWriter
+                // session:
+                dp.Release(snapshot);
+            }
+        }
+
+        private void CopyFiles(Directory dir, IndexCommit cp)
+        {
+            // While we hold the snapshot, and no matter how long
+            // we take to do the backup, the IndexWriter will
+            // never delete the files in the snapshot:
+            ICollection<string> files = cp.FileNames;
+            foreach (String fileName in files)
+            {
+                // NOTE: in a real backup you would not use
+                // ReadFile; you would need to use something else
+                // that copies the file to a backup location.  This
+                // could even be a spawned shell process (e.g. "tar",
+                // "zip") that takes the list of files and builds a
+                // backup.
+                ReadFile(dir, fileName);
+            }
+        }
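+
+        // LUCENENET NOTE: a minimal sketch (not part of the original test) of what a
+        // real backup copy could look like in place of ReadFile: it assumes the caller
+        // supplies a writable backup Directory (e.g. an FSDirectory) and relies on
+        // Directory.Copy to transfer each snapshotted file under the same name.
+        private void CopyFilesToBackup(Directory dir, IndexCommit cp, Directory backupDir)
+        {
+            foreach (string fileName in cp.FileNames)
+            {
+                // copy fileName from the live index directory into the backup directory
+                dir.Copy(backupDir, fileName, fileName, NewIOContext(Random()));
+            }
+        }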
+
+        internal byte[] Buffer = new byte[4096];
+
+        private void ReadFile(Directory dir, string name)
+        {
+            IndexInput input = dir.OpenInput(name, NewIOContext(Random()));
+            try
+            {
+                long size = dir.FileLength(name);
+                long bytesLeft = size;
+                while (bytesLeft > 0)
+                {
+                    int numToRead;
+                    if (bytesLeft < Buffer.Length)
+                    {
+                        numToRead = (int)bytesLeft;
+                    }
+                    else
+                    {
+                        numToRead = Buffer.Length;
+                    }
+                    input.ReadBytes(Buffer, 0, numToRead, false);
+                    bytesLeft -= numToRead;
+                }
+                // Don't do this in your real backups!  This is just
+                // to force a backup to take a somewhat long time, to
+                // make sure we are exercising the fact that the
+                // IndexWriter should not delete this file even when I
+                // take my time reading it.
+                Thread.Sleep(1);
+            }
+            finally
+            {
+                input.Dispose();
+            }
+        }
+
+        [Test]
+        public virtual void TestBasicSnapshots()
+        {
+            int numSnapshots = 3;
+
+            // Create 3 snapshots: snapshot0, snapshot1, snapshot2
+            Directory dir = NewDirectory();
+            IndexWriter writer = new IndexWriter(dir, GetConfig(Random(), DeletionPolicy));
+            SnapshotDeletionPolicy sdp = (SnapshotDeletionPolicy)writer.Config.IndexDeletionPolicy;
+            PrepareIndexAndSnapshots(sdp, writer, numSnapshots);
+            writer.Dispose();
+
+            Assert.AreEqual(numSnapshots, sdp.GetSnapshots().Count);
+            Assert.AreEqual(numSnapshots, sdp.SnapshotCount);
+            AssertSnapshotExists(dir, sdp, numSnapshots, true);
+
+            // open a reader on a snapshot - should succeed.
+            DirectoryReader.Open(Snapshots[0]).Dispose();
+
+            // open a new IndexWriter w/ no snapshots to keep and assert that all snapshots are gone.
+            sdp = DeletionPolicy;
+            writer = new IndexWriter(dir, GetConfig(Random(), sdp));
+            writer.DeleteUnusedFiles();
+            writer.Dispose();
+            Assert.AreEqual(1, DirectoryReader.ListCommits(dir).Count, "no snapshots should exist");
+            dir.Dispose();
+        }
+
+        [Test]
+        public virtual void TestMultiThreadedSnapshotting()
+        {
+            Directory dir = NewDirectory();
+            IndexWriter writer = new IndexWriter(dir, GetConfig(Random(), DeletionPolicy));
+            SnapshotDeletionPolicy sdp = (SnapshotDeletionPolicy)writer.Config.IndexDeletionPolicy;
+
+            ThreadClass[] threads = new ThreadClass[10];
+            IndexCommit[] snapshots = new IndexCommit[threads.Length];
+            for (int i = 0; i < threads.Length; i++)
+            {
+                int finalI = i;
+                threads[i] = new ThreadAnonymousInnerClassHelper2(this, writer, sdp, snapshots, finalI);
+                threads[i].Name = "t" + i;
+            }
+
+            foreach (ThreadClass t in threads)
+            {
+                t.Start();
+            }
+
+            foreach (ThreadClass t in threads)
+            {
+                t.Join();
+            }
+
+            // Do one last commit, so that after we release all snapshots, we stay w/ one commit
+            writer.AddDocument(new Document());
+            writer.Commit();
+
+            for (int i = 0; i < threads.Length; i++)
+            {
+                sdp.Release(snapshots[i]);
+                writer.DeleteUnusedFiles();
+            }
+            Assert.AreEqual(1, DirectoryReader.ListCommits(dir).Count);
+            writer.Dispose();
+            dir.Dispose();
+        }
+
+        private class ThreadAnonymousInnerClassHelper2 : ThreadClass
+        {
+            private readonly TestSnapshotDeletionPolicy OuterInstance;
+
+            private IndexWriter Writer;
+            private SnapshotDeletionPolicy Sdp;
+            private IndexCommit[] Snapshots;
+            private int FinalI;
+
+            public ThreadAnonymousInnerClassHelper2(TestSnapshotDeletionPolicy outerInstance, IndexWriter writer, SnapshotDeletionPolicy sdp, IndexCommit[] snapshots, int finalI)
+            {
+                this.OuterInstance = outerInstance;
+                this.Writer = writer;
+                this.Sdp = sdp;
+                this.Snapshots = snapshots;
+                this.FinalI = finalI;
+            }
+
+            public override void Run()
+            {
+                try
+                {
+                    Writer.AddDocument(new Document());
+                    Writer.Commit();
+                    Snapshots[FinalI] = Sdp.Snapshot();
+                }
+                catch (Exception e)
+                {
+                    throw new Exception(e.Message, e);
+                }
+            }
+        }
+
+        [Test]
+        public virtual void TestRollbackToOldSnapshot()
+        {
+            int numSnapshots = 2;
+            Directory dir = NewDirectory();
+
+            SnapshotDeletionPolicy sdp = DeletionPolicy;
+            IndexWriter writer = new IndexWriter(dir, GetConfig(Random(), sdp));
+            PrepareIndexAndSnapshots(sdp, writer, numSnapshots);
+            writer.Dispose();
+
+            // now open the writer on "snapshot0" - make sure it succeeds
+            writer = new IndexWriter(dir, GetConfig(Random(), sdp).SetIndexCommit(Snapshots[0]));
+            // this does the actual rollback
+            writer.Commit();
+            writer.DeleteUnusedFiles();
+            AssertSnapshotExists(dir, sdp, numSnapshots - 1, false);
+            writer.Dispose();
+
+            // but 'snapshot1' files will still exist (need to release snapshot before they can be deleted).
+            string segFileName = Snapshots[1].SegmentsFileName;
+            Assert.IsTrue(SlowFileExists(dir, segFileName), "snapshot files should exist in the directory: " + segFileName);
+
+            dir.Dispose();
+        }
+
+        [Test]
+        public virtual void TestReleaseSnapshot()
+        {
+            Directory dir = NewDirectory();
+            IndexWriter writer = new IndexWriter(dir, GetConfig(Random(), DeletionPolicy));
+            SnapshotDeletionPolicy sdp = (SnapshotDeletionPolicy)writer.Config.IndexDeletionPolicy;
+            PrepareIndexAndSnapshots(sdp, writer, 1);
+
+            // Create another commit - we must do that, because otherwise the "snapshot"
+            // files will still remain in the index, since it's the last commit.
+            writer.AddDocument(new Document());
+            writer.Commit();
+
+            // Release
+            string segFileName = Snapshots[0].SegmentsFileName;
+            sdp.Release(Snapshots[0]);
+            writer.DeleteUnusedFiles();
+            writer.Dispose();
+            Assert.IsFalse(SlowFileExists(dir, segFileName), "segments file should not be found in directory: " + segFileName);
+            dir.Dispose();
+        }
+
+        [Test]
+        public virtual void TestSnapshotLastCommitTwice()
+        {
+            Directory dir = NewDirectory();
+
+            IndexWriter writer = new IndexWriter(dir, GetConfig(Random(), DeletionPolicy));
+            SnapshotDeletionPolicy sdp = (SnapshotDeletionPolicy)writer.Config.IndexDeletionPolicy;
+            writer.AddDocument(new Document());
+            writer.Commit();
+
+            IndexCommit s1 = sdp.Snapshot();
+            IndexCommit s2 = sdp.Snapshot();
+            Assert.AreSame(s1, s2); // should be the same instance
+
+            // create another commit
+            writer.AddDocument(new Document());
+            writer.Commit();
+
+            // release "s1" should not delete "s2"
+            sdp.Release(s1);
+            writer.DeleteUnusedFiles();
+            CheckSnapshotExists(dir, s2);
+
+            writer.Dispose();
+            dir.Dispose();
+        }
+
+        [Test]
+        public virtual void TestMissingCommits()
+        {
+            // Tests the behavior of SnapshotDeletionPolicy when commits that were given
+            // at construction time are missing when OnInit() is called.
+            Directory dir = NewDirectory();
+            IndexWriter writer = new IndexWriter(dir, GetConfig(Random(), DeletionPolicy));
+            SnapshotDeletionPolicy sdp = (SnapshotDeletionPolicy)writer.Config.IndexDeletionPolicy;
+            writer.AddDocument(new Document());
+            writer.Commit();
+            IndexCommit s1 = sdp.Snapshot();
+
+            // create another commit, not snapshotted.
+            writer.AddDocument(new Document());
+            writer.Dispose();
+
+            // open a new writer w/ KeepOnlyLastCommit policy, so it will delete "s1"
+            // commit.
+            (new IndexWriter(dir, GetConfig(Random(), null))).Dispose();
+
+            Assert.IsFalse(SlowFileExists(dir, s1.SegmentsFileName), "snapshotted commit should not exist");
+            dir.Dispose();
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/TestStoredFieldsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/TestStoredFieldsFormat.cs b/src/Lucene.Net.Tests/Index/TestStoredFieldsFormat.cs
new file mode 100644
index 0000000..9c551f3
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/TestStoredFieldsFormat.cs
@@ -0,0 +1,141 @@
+using Lucene.Net.Attributes;
+using NUnit.Framework;
+
+namespace Lucene.Net.Index
+{
+    /*
+     * Licensed to the Apache Software Foundation (ASF) under one or more
+     * contributor license agreements.  See the NOTICE file distributed with
+     * this work for additional information regarding copyright ownership.
+     * The ASF licenses this file to You under the Apache License, Version 2.0
+     * (the "License"); you may not use this file except in compliance with
+     * the License.  You may obtain a copy of the License at
+     *
+     *     http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     * See the License for the specific language governing permissions and
+     * limitations under the License.
+     */
+
+    using Codec = Lucene.Net.Codecs.Codec;
+    using Lucene3xCodec = Lucene.Net.Codecs.Lucene3x.Lucene3xCodec;
+
+    /// <summary>
+    /// Tests with the default randomized codec. Not really redundant with other
+    /// specific instantiations, since we want to test some test-only implementations
+    /// like Asserting, as well as make it easy to write a codec and pass -Dtests.codec.
+    /// </summary>
+    [TestFixture]
+    public class TestStoredFieldsFormat : BaseStoredFieldsFormatTestCase
+    {
+        protected override Codec Codec
+        {
+            get
+            {
+                return Codec.Default;
+            }
+        }
+
+        [Test]
+        public override void TestWriteReadMerge()
+        {
+#pragma warning disable 612, 618
+            AssumeFalse("impersonation isn't good enough", Codec is Lucene3xCodec);
+#pragma warning restore 612, 618
+            // this test tries to switch up between the codec and another codec.
+            // for 3.x: we currently cannot take an index with existing 4.x segments
+            // and merge into newly formed 3.x segments.
+            base.TestWriteReadMerge();
+        }
+
+
+        #region BaseStoredFieldsFormatTestCase
+        // LUCENENET NOTE: Tests in an abstract base class are not pulled into the correct
+        // context in Visual Studio. This fixes that with the minimum amount of code necessary
+        // to run them in the correct context without duplicating all of the tests.
+
+        [Test]
+        public override void TestRandomStoredFields()
+        {
+            base.TestRandomStoredFields();
+        }
+
+        [Test]
+        // LUCENE-1727: make sure doc fields are stored in order
+        public override void TestStoredFieldsOrder()
+        {
+            base.TestStoredFieldsOrder();
+        }
+
+        [Test]
+        // LUCENE-1219
+        public override void TestBinaryFieldOffsetLength()
+        {
+            base.TestBinaryFieldOffsetLength();
+        }
+
+        [Test]
+        public override void TestNumericField()
+        {
+            base.TestNumericField();
+        }
+
+        [Test]
+        public override void TestIndexedBit()
+        {
+            base.TestIndexedBit();
+        }
+
+        [Test]
+        public override void TestReadSkip()
+        {
+            base.TestReadSkip();
+        }
+
+        [Test]
+        public override void TestEmptyDocs()
+        {
+            base.TestEmptyDocs();
+        }
+
+        [Test]
+        public override void TestConcurrentReads()
+        {
+            base.TestConcurrentReads();
+        }
+
+#if !NETSTANDARD
+        // LUCENENET: There is no Timeout on NUnit for .NET Core.
+        [Timeout(120000)]
+#endif
+        [Test, HasTimeout]
+        public override void TestBigDocuments()
+        {
+            base.TestBigDocuments();
+        }
+
+        [Test]
+        public override void TestBulkMergeWithDeletes()
+        {
+            base.TestBulkMergeWithDeletes();
+        }
+
+        #endregion
+
+        #region BaseIndexFileFormatTestCase
+        // LUCENENET NOTE: Tests in an abstract base class are not pulled into the correct
+        // context in Visual Studio. This fixes that with the minimum amount of code necessary
+        // to run them in the correct context without duplicating all of the tests.
+
+        [Test]
+        public override void TestMergeStability()
+        {
+            base.TestMergeStability();
+        }
+
+        #endregion
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/TestStressAdvance.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/TestStressAdvance.cs b/src/Lucene.Net.Tests/Index/TestStressAdvance.cs
new file mode 100644
index 0000000..c6c4521
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/TestStressAdvance.cs
@@ -0,0 +1,173 @@
+using System;
+using System.Collections.Generic;
+using Lucene.Net.Documents;
+
+namespace Lucene.Net.Index
+{
+    
+    using Lucene.Net.Store;
+
+    /*
+     * Licensed to the Apache Software Foundation (ASF) under one or more
+     * contributor license agreements.  See the NOTICE file distributed with
+     * this work for additional information regarding copyright ownership.
+     * The ASF licenses this file to You under the Apache License, Version 2.0
+     * (the "License"); you may not use this file except in compliance with
+     * the License.  You may obtain a copy of the License at
+     *
+     *     http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     * See the License for the specific language governing permissions and
+     * limitations under the License.
+     */
+
+    using Lucene.Net.Util;
+    using NUnit.Framework;
+    using DocIdSetIterator = Lucene.Net.Search.DocIdSetIterator;
+
+    [TestFixture]
+    public class TestStressAdvance : LuceneTestCase
+    {
+        [Test]
+        public virtual void TestStressAdvance_Mem()
+        {
+            for (int iter = 0; iter < 3; iter++)
+            {
+                if (VERBOSE)
+                {
+                    Console.WriteLine("\nTEST: iter=" + iter);
+                }
+                Directory dir = NewDirectory();
+                RandomIndexWriter w = new RandomIndexWriter(Random(), dir, Similarity, TimeZone);
+                HashSet<int> aDocs = new HashSet<int>();
+                Documents.Document doc = new Documents.Document();
+                Field f = NewStringField("field", "", Field.Store.NO);
+                doc.Add(f);
+                Field idField = NewStringField("id", "", Field.Store.YES);
+                doc.Add(idField);
+                int num = AtLeast(4097);
+                if (VERBOSE)
+                {
+                    Console.WriteLine("\nTEST: numDocs=" + num);
+                }
+                for (int id = 0; id < num; id++)
+                {
+                    if (Random().Next(4) == 3)
+                    {
+                        f.SetStringValue("a");
+                        aDocs.Add(id);
+                    }
+                    else
+                    {
+                        f.SetStringValue("b");
+                    }
+                    idField.SetStringValue("" + id);
+                    w.AddDocument(doc);
+                    if (VERBOSE)
+                    {
+                        Console.WriteLine("\nTEST: doc upto " + id);
+                    }
+                }
+
+                w.ForceMerge(1);
+
+                IList<int> aDocIDs = new List<int>();
+                IList<int> bDocIDs = new List<int>();
+
+                DirectoryReader r = w.Reader;
+                int[] idToDocID = new int[r.MaxDoc];
+                for (int docID = 0; docID < idToDocID.Length; docID++)
+                {
+                    int id = Convert.ToInt32(r.Document(docID).Get("id"));
+                    if (aDocs.Contains(id))
+                    {
+                        aDocIDs.Add(docID);
+                    }
+                    else
+                    {
+                        bDocIDs.Add(docID);
+                    }
+                }
+                TermsEnum te = GetOnlySegmentReader(r).Fields.GetTerms("field").GetIterator(null);
+
+                DocsEnum de = null;
+                for (int iter2 = 0; iter2 < 10; iter2++)
+                {
+                    if (VERBOSE)
+                    {
+                        Console.WriteLine("\nTEST: iter=" + iter + " iter2=" + iter2);
+                    }
+                    Assert.AreEqual(TermsEnum.SeekStatus.FOUND, te.SeekCeil(new BytesRef("a")));
+                    de = TestUtil.Docs(Random(), te, null, de, DocsEnum.FLAG_NONE);
+                    TestOne(de, aDocIDs);
+
+                    Assert.AreEqual(TermsEnum.SeekStatus.FOUND, te.SeekCeil(new BytesRef("b")));
+                    de = TestUtil.Docs(Random(), te, null, de, DocsEnum.FLAG_NONE);
+                    TestOne(de, bDocIDs);
+                }
+
+                w.Dispose();
+                r.Dispose();
+                dir.Dispose();
+            }
+        }
+
+        private void TestOne(DocsEnum docs, IList<int> expected)
+        {
+            if (VERBOSE)
+            {
+                Console.WriteLine("test");
+            }
+            int upto = -1;
+            while (upto < expected.Count)
+            {
+                if (VERBOSE)
+                {
+                    Console.WriteLine("  cycle upto=" + upto + " of " + expected.Count);
+                }
+                int docID;
+                if (Random().Next(4) == 1 || upto == expected.Count - 1)
+                {
+                    // test nextDoc()
+                    if (VERBOSE)
+                    {
+                        Console.WriteLine("    do nextDoc");
+                    }
+                    upto++;
+                    docID = docs.NextDoc();
+                }
+                else
+                {
+                    // test advance()
+                    int inc = TestUtil.NextInt(Random(), 1, expected.Count - 1 - upto);
+                    if (VERBOSE)
+                    {
+                        Console.WriteLine("    do advance inc=" + inc);
+                    }
+                    upto += inc;
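+                    // advance to a docID this term is known to match, so the enum
+                    // must land exactly on expected[upto]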
+                    docID = docs.Advance(expected[upto]);
+                }
+                if (upto == expected.Count)
+                {
+                    if (VERBOSE)
+                    {
+                        Console.WriteLine("  expect docID=" + DocIdSetIterator.NO_MORE_DOCS + " actual=" + docID);
+                    }
+                    Assert.AreEqual(DocIdSetIterator.NO_MORE_DOCS, docID);
+                }
+                else
+                {
+                    if (VERBOSE)
+                    {
+                        Console.WriteLine("  expect docID=" + expected[upto] + " actual=" + docID);
+                    }
+                    Assert.IsTrue(docID != DocIdSetIterator.NO_MORE_DOCS);
+                    Assert.AreEqual((int)expected[upto], docID);
+                }
+            }
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/TestStressIndexing.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/TestStressIndexing.cs b/src/Lucene.Net.Tests/Index/TestStressIndexing.cs
new file mode 100644
index 0000000..428a2d0
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/TestStressIndexing.cs
@@ -0,0 +1,237 @@
+using System;
+using System.Threading;
+using Lucene.Net.Attributes;
+using Lucene.Net.Documents;
+
+namespace Lucene.Net.Index
+{
+    
+    using Lucene.Net.Search;
+    using Lucene.Net.Store;
+    using Lucene.Net.Support;
+
+    /*
+     * Copyright 2004 The Apache Software Foundation
+     *
+     * Licensed under the Apache License, Version 2.0 (the "License");
+     * you may not use this file except in compliance with the License.
+     * You may obtain a copy of the License at
+     *
+     *     http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     * See the License for the specific language governing permissions and
+     * limitations under the License.
+     */
+
+    using Lucene.Net.Util;
+    using NUnit.Framework;
+    using MockAnalyzer = Lucene.Net.Analysis.MockAnalyzer;
+
+    [TestFixture]
+    public class TestStressIndexing : LuceneTestCase
+    {
+        private abstract class TimedThread : ThreadClass
+        {
+            internal volatile bool Failed;
+            internal int Count;
+            internal static int RUN_TIME_MSEC = AtLeast(1000);
+            internal TimedThread[] AllThreads;
+
+            public abstract void DoWork();
+
+            internal TimedThread(TimedThread[] threads)
+            {
+                this.AllThreads = threads;
+            }
+
+            public override void Run()
+            {
+                long stopTime = Environment.TickCount + RUN_TIME_MSEC;
+
+                Count = 0;
+
+                try
+                {
+                    do
+                    {
+                        if (AnyErrors())
+                        {
+                            break;
+                        }
+                        DoWork();
+                        Count++;
+                    } while (Environment.TickCount < stopTime);
+                }
+                catch (Exception e)
+                {
+                    Console.WriteLine(Thread.CurrentThread + ": exc");
+                    Console.WriteLine(e.StackTrace);
+                    Failed = true;
+                }
+            }
+
+            internal virtual bool AnyErrors()
+            {
+                for (int i = 0; i < AllThreads.Length; i++)
+                {
+                    if (AllThreads[i] != null && AllThreads[i].Failed)
+                    {
+                        return true;
+                    }
+                }
+                return false;
+            }
+        }
+
+        private class IndexerThread : TimedThread
+        {
+            private readonly Func<string, string, Field.Store, Field> NewStringFieldFunc;
+            private readonly Func<string, string, Field.Store, Field> NewTextFieldFunc;
+
+            internal IndexWriter Writer;
+            internal int NextID;
+
+            /// <param name="newStringField">
+            /// LUCENENET specific
+            /// Passed in because <see cref="LuceneTestCase.NewStringField(string, string, Field.Store)"/>
+            /// is no longer static.
+            /// </param>
+            /// <param name="newTextField">
+            /// LUCENENET specific
+            /// Passed in because <see cref="LuceneTestCase.NewTextField(string, string, Field.Store)"/>
+            /// is no longer static.
+            /// </param>
+            public IndexerThread(IndexWriter writer, TimedThread[] threads,
+                Func<string, string, Field.Store, Field> newStringField,
+                Func<string, string, Field.Store, Field> newTextField)
+                : base(threads)
+            {
+                this.Writer = writer;
+                NewStringFieldFunc = newStringField;
+                NewTextFieldFunc = newTextField;
+            }
+
+            public override void DoWork()
+            {
+                // Add 10 docs:
+                for (int j = 0; j < 10; j++)
+                {
+                    Documents.Document d = new Documents.Document();
+                    int n = Random().Next();
+                    d.Add(NewStringFieldFunc("id", Convert.ToString(NextID++), Field.Store.YES));
+                    d.Add(NewTextFieldFunc("contents", English.IntToEnglish(n), Field.Store.NO));
+                    Writer.AddDocument(d);
+                }
+
+                // Delete 5 docs (every other one of the 10 just added):
+                int deleteID = NextID - 1;
+                for (int j = 0; j < 5; j++)
+                {
+                    Writer.DeleteDocuments(new Term("id", "" + deleteID));
+                    deleteID -= 2;
+                }
+            }
+        }
+
+        private class SearcherThread : TimedThread
+        {
+            internal Directory Directory;
+            private readonly LuceneTestCase OuterInstance;
+
+            /// <param name="outerInstance">
+            /// LUCENENET specific
+            /// Passed in because <see cref="LuceneTestCase.NewSearcher(IndexReader)"/>
+            /// is no longer static.
+            /// </param>
+            public SearcherThread(Directory directory, TimedThread[] threads, LuceneTestCase outerInstance)
+                : base(threads)
+            {
+                OuterInstance = outerInstance;
+                this.Directory = directory;
+            }
+
+            public override void DoWork()
+            {
+                for (int i = 0; i < 100; i++)
+                {
+                    IndexReader ir = DirectoryReader.Open(Directory);
+                    IndexSearcher @is = OuterInstance.NewSearcher(ir);
+                    ir.Dispose();
+                }
+                Count += 100;
+            }
+        }
+
+        /*
+          Run one indexer and two searchers against a single index as
+          a stress test.
+        */
+
+        public virtual void RunStressTest(Directory directory, IConcurrentMergeScheduler mergeScheduler)
+        {
+            IndexWriter modifier = new IndexWriter(directory, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.CREATE).SetMaxBufferedDocs(10).SetMergeScheduler(mergeScheduler));
+            modifier.Commit();
+
+            TimedThread[] threads = new TimedThread[4];
+            int numThread = 0;
+
+            // One modifier that writes 10 docs then removes 5, over
+            // and over:
+            IndexerThread indexerThread = new IndexerThread(modifier, threads, NewStringField, NewTextField);
+            threads[numThread++] = indexerThread;
+            indexerThread.Start();
+
+            IndexerThread indexerThread2 = new IndexerThread(modifier, threads, NewStringField, NewTextField);
+            threads[numThread++] = indexerThread2;
+            indexerThread2.Start();
+
+            // Two searchers that constantly just re-instantiate the
+            // searcher:
+            SearcherThread searcherThread1 = new SearcherThread(directory, threads, this);
+            threads[numThread++] = searcherThread1;
+            searcherThread1.Start();
+
+            SearcherThread searcherThread2 = new SearcherThread(directory, threads, this);
+            threads[numThread++] = searcherThread2;
+            searcherThread2.Start();
+
+            for (int i = 0; i < numThread; i++)
+            {
+                threads[i].Join();
+            }
+
+            modifier.Dispose();
+
+            for (int i = 0; i < numThread; i++)
+            {
+                Assert.IsTrue(!threads[i].Failed);
+            }
+
+            //System.out.println("    Writer: " + indexerThread.count + " iterations");
+            //System.out.println("Searcher 1: " + searcherThread1.count + " searchers created");
+            //System.out.println("Searcher 2: " + searcherThread2.count + " searchers created");
+        }
+
+        /*
+          Run the above stress test against RAMDirectory and then
+          FSDirectory.
+        */
+
+        [Test]
+        public virtual void TestStressIndexAndSearching([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")]IConcurrentMergeScheduler scheduler)
+        {
+            Directory directory = NewDirectory();
+            MockDirectoryWrapper wrapper = directory as MockDirectoryWrapper;
+            if (wrapper != null)
+            {
+                wrapper.AssertNoUnrefencedFilesOnClose = true;
+            }
+
+            RunStressTest(directory, scheduler);
+            directory.Dispose();
+        }
+    }
+}
\ No newline at end of file