Posted to commits@lucenenet.apache.org by ni...@apache.org on 2017/02/26 23:37:32 UTC
[44/72] [abbrv] [partial] lucenenet git commit: Lucene.Net.Tests:
Removed \core directory and put its contents in root directory
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/Test2BSortedDocValues.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/Test2BSortedDocValues.cs b/src/Lucene.Net.Tests/Index/Test2BSortedDocValues.cs
new file mode 100644
index 0000000..8ad5aa9
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/Test2BSortedDocValues.cs
@@ -0,0 +1,168 @@
+using Lucene.Net.Documents;
+using NUnit.Framework;
+using System;
+
+namespace Lucene.Net.Index
+{
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ using BaseDirectoryWrapper = Lucene.Net.Store.BaseDirectoryWrapper;
+ using BytesRef = Lucene.Net.Util.BytesRef;
+ using Document = Documents.Document;
+ using LuceneTestCase = Lucene.Net.Util.LuceneTestCase;
+ using MockAnalyzer = Lucene.Net.Analysis.MockAnalyzer;
+ using MockDirectoryWrapper = Lucene.Net.Store.MockDirectoryWrapper;
+ using SortedDocValuesField = Lucene.Net.Documents.SortedDocValuesField;
+
+ [SuppressCodecs("Lucene3x")]
+ [Ignore("very slow")]
+ [TestFixture]
+ public class Test2BSortedDocValues : LuceneTestCase
+ {
+ // indexes int.MaxValue docs with a fixed-width (2 byte) binary field
+ [Test]
+ public virtual void TestFixedSorted([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")]IConcurrentMergeScheduler scheduler)
+ {
+ BaseDirectoryWrapper dir = NewFSDirectory(CreateTempDir("2BFixedSorted"));
+ if (dir is MockDirectoryWrapper)
+ {
+ ((MockDirectoryWrapper)dir).Throttling = MockDirectoryWrapper.Throttling_e.NEVER;
+ }
+
+ IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
+ .SetMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
+ .SetRAMBufferSizeMB(256.0)
+ .SetMergeScheduler(scheduler)
+ .SetMergePolicy(NewLogMergePolicy(false, 10))
+ .SetOpenMode(OpenMode.CREATE));
+
+ Document doc = new Document();
+ var bytes = new byte[2];
+ BytesRef data = new BytesRef(bytes);
+ SortedDocValuesField dvField = new SortedDocValuesField("dv", data);
+ doc.Add(dvField);
+
+ for (int i = 0; i < int.MaxValue; i++)
+ {
+ bytes[0] = (byte)(i >> 8);
+ bytes[1] = (byte)i;
+ w.AddDocument(doc);
+ if (i % 100000 == 0)
+ {
+ Console.WriteLine("indexed: " + i);
+ Console.Out.Flush();
+ }
+ }
+
+ w.ForceMerge(1);
+ w.Dispose();
+
+ Console.WriteLine("verifying...");
+ Console.Out.Flush();
+
+ DirectoryReader r = DirectoryReader.Open(dir);
+ int expectedValue = 0;
+ foreach (AtomicReaderContext context in r.Leaves)
+ {
+ AtomicReader reader = context.AtomicReader;
+ BytesRef scratch = new BytesRef();
+ BinaryDocValues dv = reader.GetSortedDocValues("dv");
+ for (int i = 0; i < reader.MaxDoc; i++)
+ {
+ bytes[0] = (byte)(expectedValue >> 8);
+ bytes[1] = (byte)expectedValue;
+ dv.Get(i, scratch);
+ Assert.AreEqual(data, scratch);
+ expectedValue++;
+ }
+ }
+
+ r.Dispose();
+ dir.Dispose();
+ }
+
+ // indexes int.MaxValue docs, each with a unique 4-byte value, to exercise more than 2B distinct ords
+ [Test]
+ public virtual void Test2BOrds([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")]IConcurrentMergeScheduler scheduler)
+ {
+ BaseDirectoryWrapper dir = NewFSDirectory(CreateTempDir("2BOrds"));
+ if (dir is MockDirectoryWrapper)
+ {
+ ((MockDirectoryWrapper)dir).Throttling = MockDirectoryWrapper.Throttling_e.NEVER;
+ }
+
+ var config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
+ .SetMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
+ .SetRAMBufferSizeMB(256.0)
+ .SetMergeScheduler(scheduler)
+ .SetMergePolicy(NewLogMergePolicy(false, 10))
+ .SetOpenMode(OpenMode.CREATE);
+ IndexWriter w = new IndexWriter(dir, config);
+
+ Document doc = new Document();
+ var bytes = new byte[4];
+ BytesRef data = new BytesRef(bytes);
+ SortedDocValuesField dvField = new SortedDocValuesField("dv", data);
+ doc.Add(dvField);
+
+ for (int i = 0; i < int.MaxValue; i++)
+ {
+ bytes[0] = (byte)(i >> 24);
+ bytes[1] = (byte)(i >> 16);
+ bytes[2] = (byte)(i >> 8);
+ bytes[3] = (byte)i;
+ w.AddDocument(doc);
+ if (i % 100000 == 0)
+ {
+ Console.WriteLine("indexed: " + i);
+ Console.Out.Flush();
+ }
+ }
+
+ w.ForceMerge(1);
+ w.Dispose();
+
+ Console.WriteLine("verifying...");
+ Console.Out.Flush();
+
+ DirectoryReader r = DirectoryReader.Open(dir);
+ int counter = 0;
+ foreach (AtomicReaderContext context in r.Leaves)
+ {
+ AtomicReader reader = context.AtomicReader;
+ BytesRef scratch = new BytesRef();
+ BinaryDocValues dv = reader.GetSortedDocValues("dv");
+ for (int i = 0; i < reader.MaxDoc; i++)
+ {
+ bytes[0] = (byte)(counter >> 24);
+ bytes[1] = (byte)(counter >> 16);
+ bytes[2] = (byte)(counter >> 8);
+ bytes[3] = (byte)counter;
+ counter++;
+ dv.Get(i, scratch);
+ Assert.AreEqual(data, scratch);
+ }
+ }
+
+ r.Dispose();
+ dir.Dispose();
+ }
+
+ // TODO: variable
+ }
+}
\ No newline at end of file
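For reference, both tests in this file pack the loop counter into the doc-values bytes big-endian and verify the same packing on read. A minimal standalone sketch of that round trip (plain C#, no Lucene types; the Pack16/Unpack16 helpers are illustrative only):

    using System;

    internal static class BigEndianPackingSketch
    {
        // Pack the low 16 bits of i big-endian, as TestFixedSorted does.
        private static byte[] Pack16(int i) => new[] { (byte)(i >> 8), (byte)i };

        // Recover the packed value for verification.
        private static int Unpack16(byte[] b) => (b[0] << 8) | b[1];

        private static void Main()
        {
            for (int i = 0; i < 200000; i += 65537)
            {
                // Only the low 16 bits survive, which is why the expected value
                // in TestFixedSorted wraps around every 65536 documents.
                Console.WriteLine($"{i} -> {Unpack16(Pack16(i))} (i & 0xFFFF = {i & 0xFFFF})");
            }
        }
    }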
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/Test2BTerms.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/Test2BTerms.cs b/src/Lucene.Net.Tests/Index/Test2BTerms.cs
new file mode 100644
index 0000000..97da141
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/Test2BTerms.cs
@@ -0,0 +1,317 @@
+using System;
+using System.Collections.Generic;
+using Lucene.Net.Documents;
+
+namespace Lucene.Net.Index
+{
+ using Lucene.Net.Analysis;
+ using Lucene.Net.Analysis.TokenAttributes;
+
+ using Lucene.Net.Search;
+ using Lucene.Net.Store;
+ using Lucene.Net.Support;
+
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ using Lucene.Net.Util;
+ using NUnit.Framework;
+ using System.Reflection;
+ using System.Runtime.CompilerServices;
+ using Codec = Lucene.Net.Codecs.Codec;
+
+ // NOTE: this test will fail w/ PreFlexRW codec! (Because
+ // this test uses full binary term space, but PreFlex cannot
+ // handle this since it requires that terms be UTF-8 bytes).
+ //
+ // Also, SimpleText codec will consume very large amounts of
+ // disk (but should run successfully). Best to run w/
+ // -Dtests.codec=Standard, and w/ plenty of RAM, eg:
+ //
+ // ant test -Dtest.slow=true -Dtests.heapsize=8g
+ //
+ // java -server -Xmx8g -d64 -cp .:lib/junit-4.10.jar:./build/classes/test:./build/classes/test-framework:./build/classes/java -Dlucene.version=4.0-dev -Dtests.directory=MMapDirectory -DtempDir=build -ea org.junit.runner.JUnitCore Lucene.Net.Index.Test2BTerms
+ //
+ [SuppressCodecs("SimpleText", "Memory", "Direct")]
+ [Ignore("SimpleText codec will consume very large amounts of memory.")]
+ [TestFixture]
+ public class Test2BTerms : LuceneTestCase
+ {
+ private const int TOKEN_LEN = 5;
+
+ private static readonly BytesRef Bytes = new BytesRef(TOKEN_LEN);
+
+ private sealed class MyTokenStream : TokenStream
+ {
+ internal readonly int TokensPerDoc;
+ internal int TokenCount;
+ public readonly IList<BytesRef> SavedTerms = new List<BytesRef>();
+ internal int NextSave;
+ internal long TermCounter;
+ internal readonly Random Random;
+
+ public MyTokenStream(Random random, int tokensPerDoc)
+ : base(new MyAttributeFactory(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY))
+ {
+ this.TokensPerDoc = tokensPerDoc;
+ AddAttribute<ITermToBytesRefAttribute>();
+ Bytes.Length = TOKEN_LEN;
+ this.Random = random;
+ NextSave = TestUtil.NextInt(random, 500000, 1000000);
+ }
+
+ public override bool IncrementToken()
+ {
+ ClearAttributes();
+ if (TokenCount >= TokensPerDoc)
+ {
+ return false;
+ }
+ int shift = 32;
+ for (int i = 0; i < 5; i++)
+ {
+ Bytes.Bytes[i] = unchecked((byte)((TermCounter >> shift) & 0xFF));
+ shift -= 8;
+ }
+ TermCounter++;
+ TokenCount++;
+ if (--NextSave == 0)
+ {
+ SavedTerms.Add(BytesRef.DeepCopyOf(Bytes));
+ Console.WriteLine("TEST: save term=" + Bytes);
+ NextSave = TestUtil.NextInt(Random, 500000, 1000000);
+ }
+ return true;
+ }
+
+ public override void Reset()
+ {
+ TokenCount = 0;
+ }
+
+ private sealed class MyTermAttributeImpl : Attribute, ITermToBytesRefAttribute
+ {
+ public void FillBytesRef()
+ {
+ // no-op: the bytes were already filled by our owner's IncrementToken
+ }
+
+ public BytesRef BytesRef
+ {
+ get
+ {
+ return Bytes;
+ }
+ }
+
+ public override void Clear()
+ {
+ }
+
+ public override bool Equals(object other)
+ {
+ return other == this;
+ }
+
+ public override int GetHashCode()
+ {
+ return RuntimeHelpers.GetHashCode(this);
+ }
+
+ public override void CopyTo(IAttribute target)
+ {
+ }
+
+ public override object Clone()
+ {
+ throw new System.NotSupportedException();
+ }
+ }
+
+ private sealed class MyAttributeFactory : AttributeFactory
+ {
+ internal readonly AttributeFactory @delegate;
+
+ public MyAttributeFactory(AttributeFactory @delegate)
+ {
+ this.@delegate = @delegate;
+ }
+
+ public override Attribute CreateAttributeInstance<T>()
+ {
+ var attClass = typeof(T);
+ if (attClass == typeof(ITermToBytesRefAttribute))
+ {
+ return new MyTermAttributeImpl();
+ }
+ if (attClass.GetTypeInfo().IsSubclassOf(typeof(CharTermAttribute)))
+ {
+ throw new System.ArgumentException("no");
+ }
+ return @delegate.CreateAttributeInstance<T>();
+ }
+ }
+ }
+
+ [Ignore("Very slow. Enable manually by removing Ignore.")]
+ [Test]
+ public virtual void Test2BTerms_Mem([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")]IConcurrentMergeScheduler scheduler)
+ {
+ if ("Lucene3x".Equals(Codec.Default.Name))
+ {
+ throw new Exception("this test cannot run with PreFlex codec");
+ }
+ Console.WriteLine("Starting Test2B");
+ long TERM_COUNT = ((long)int.MaxValue) + 100000000;
+
+ int TERMS_PER_DOC = TestUtil.NextInt(Random(), 100000, 1000000);
+
+ IList<BytesRef> savedTerms = null;
+
+ BaseDirectoryWrapper dir = NewFSDirectory(CreateTempDir("2BTerms"));
+ //MockDirectoryWrapper dir = NewFSDirectory(new File("/p/lucene/indices/2bindex"));
+ if (dir is MockDirectoryWrapper)
+ {
+ ((MockDirectoryWrapper)dir).Throttling = MockDirectoryWrapper.Throttling_e.NEVER;
+ }
+ dir.CheckIndexOnClose = false; // don't double-checkindex
+
+ if (true)
+ {
+ IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
+ .SetMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
+ .SetRAMBufferSizeMB(256.0)
+ .SetMergeScheduler(scheduler)
+ .SetMergePolicy(NewLogMergePolicy(false, 10))
+ .SetOpenMode(OpenMode.CREATE));
+
+ MergePolicy mp = w.Config.MergePolicy;
+ if (mp is LogByteSizeMergePolicy)
+ {
+ // 1 petabyte:
+ ((LogByteSizeMergePolicy)mp).MaxMergeMB = 1024 * 1024 * 1024;
+ }
+
+ Documents.Document doc = new Documents.Document();
+ MyTokenStream ts = new MyTokenStream(Random(), TERMS_PER_DOC);
+
+ FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
+ customType.IndexOptions = IndexOptions.DOCS_ONLY;
+ customType.OmitNorms = true;
+ Field field = new Field("field", ts, customType);
+ doc.Add(field);
+ //w.setInfoStream(System.out);
+ int numDocs = (int)(TERM_COUNT / TERMS_PER_DOC);
+
+ Console.WriteLine("TERMS_PER_DOC=" + TERMS_PER_DOC);
+ Console.WriteLine("numDocs=" + numDocs);
+
+ for (int i = 0; i < numDocs; i++)
+ {
+ long t0 = Environment.TickCount;
+ w.AddDocument(doc);
+ Console.WriteLine(i + " of " + numDocs + " " + (Environment.TickCount - t0) + " msec");
+ }
+ savedTerms = ts.SavedTerms;
+
+ Console.WriteLine("TEST: full merge");
+ w.ForceMerge(1);
+ Console.WriteLine("TEST: close writer");
+ w.Dispose();
+ }
+
+ Console.WriteLine("TEST: open reader");
+ IndexReader r = DirectoryReader.Open(dir);
+ if (savedTerms == null)
+ {
+ savedTerms = FindTerms(r);
+ }
+ int numSavedTerms = savedTerms.Count;
+ IList<BytesRef> bigOrdTerms = new List<BytesRef>(savedTerms.SubList(numSavedTerms - 10, numSavedTerms));
+ Console.WriteLine("TEST: test big ord terms...");
+ TestSavedTerms(r, bigOrdTerms);
+ Console.WriteLine("TEST: test all saved terms...");
+ TestSavedTerms(r, savedTerms);
+ r.Dispose();
+
+ Console.WriteLine("TEST: now CheckIndex...");
+ CheckIndex.Status status = TestUtil.CheckIndex(dir);
+ long tc = status.SegmentInfos[0].TermIndexStatus.TermCount;
+ Assert.IsTrue(tc > int.MaxValue, "count " + tc + " is not > " + int.MaxValue);
+
+ dir.Dispose();
+ Console.WriteLine("TEST: done!");
+ }
+
+ private IList<BytesRef> FindTerms(IndexReader r)
+ {
+ Console.WriteLine("TEST: findTerms");
+ TermsEnum termsEnum = MultiFields.GetTerms(r, "field").GetIterator(null);
+ IList<BytesRef> savedTerms = new List<BytesRef>();
+ int nextSave = TestUtil.NextInt(Random(), 500000, 1000000);
+ BytesRef term;
+ while ((term = termsEnum.Next()) != null)
+ {
+ if (--nextSave == 0)
+ {
+ savedTerms.Add(BytesRef.DeepCopyOf(term));
+ Console.WriteLine("TEST: add " + term);
+ nextSave = TestUtil.NextInt(Random(), 500000, 1000000);
+ }
+ }
+ return savedTerms;
+ }
+
+ private void TestSavedTerms(IndexReader r, IList<BytesRef> terms)
+ {
+ Console.WriteLine("TEST: run " + terms.Count + " terms on reader=" + r);
+ IndexSearcher s = NewSearcher(r);
+ Collections.Shuffle(terms);
+ TermsEnum termsEnum = MultiFields.GetTerms(r, "field").GetIterator(null);
+ bool failed = false;
+ for (int iter = 0; iter < 10 * terms.Count; iter++)
+ {
+ BytesRef term = terms[Random().Next(terms.Count)];
+ Console.WriteLine("TEST: search " + term);
+ long t0 = Environment.TickCount;
+ int count = s.Search(new TermQuery(new Term("field", term)), 1).TotalHits;
+ if (count <= 0)
+ {
+ Console.WriteLine(" FAILED: count=" + count);
+ failed = true;
+ }
+ long t1 = Environment.TickCount;
+ Console.WriteLine(" took " + (t1 - t0) + " millis");
+
+ TermsEnum.SeekStatus result = termsEnum.SeekCeil(term);
+ if (result != TermsEnum.SeekStatus.FOUND)
+ {
+ if (result == TermsEnum.SeekStatus.END)
+ {
+ Console.WriteLine(" FAILED: got END");
+ }
+ else
+ {
+ Console.WriteLine(" FAILED: wrong term: got " + termsEnum.Term);
+ }
+ failed = true;
+ }
+ }
+ Assert.IsFalse(failed);
+ }
+ }
+}
\ No newline at end of file
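MyTokenStream above feeds IndexWriter raw binary terms: each token is the 5-byte big-endian encoding of a running counter, so every one of the roughly 2.2 billion terms stays distinct. A standalone sketch of just that encoding step (plain C#; the Encode helper is illustrative, not part of the test):

    using System;

    internal static class TermBytesSketch
    {
        // Encode the low 40 bits of a counter big-endian into 5 bytes,
        // mirroring the shift loop in MyTokenStream.IncrementToken.
        private static byte[] Encode(long termCounter)
        {
            var bytes = new byte[5];
            int shift = 32;
            for (int i = 0; i < 5; i++)
            {
                bytes[i] = unchecked((byte)((termCounter >> shift) & 0xFF));
                shift -= 8;
            }
            return bytes;
        }

        private static void Main()
        {
            // 40 bits comfortably cover TERM_COUNT = int.MaxValue + 100000000.
            foreach (long c in new long[] { 0, 255, 1L << 31, (1L << 31) + 100000000 })
                Console.WriteLine($"{c,12} -> {BitConverter.ToString(Encode(c))}");
        }
    }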
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/Test4GBStoredFields.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/Test4GBStoredFields.cs b/src/Lucene.Net.Tests/Index/Test4GBStoredFields.cs
new file mode 100644
index 0000000..212eca2
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/Test4GBStoredFields.cs
@@ -0,0 +1,123 @@
+using System;
+using Lucene.Net.Documents;
+
+namespace Lucene.Net.Index
+{
+ using Lucene.Net.Randomized.Generators;
+ using NUnit.Framework;
+ using BytesRef = Lucene.Net.Util.BytesRef;
+ using Document = Documents.Document;
+ using Field = Lucene.Net.Documents.Field;
+ using FieldType = Lucene.Net.Documents.FieldType;
+ using LuceneTestCase = Lucene.Net.Util.LuceneTestCase;
+ using MMapDirectory = Lucene.Net.Store.MMapDirectory;
+
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ using MockAnalyzer = Lucene.Net.Analysis.MockAnalyzer;
+ using MockDirectoryWrapper = Lucene.Net.Store.MockDirectoryWrapper;
+
+ /// <summary>
+ /// this test creates an index with one segment that is a little larger than 4GB.
+ /// </summary>
+ [SuppressCodecs("SimpleText")]
+ [TestFixture]
+ public class Test4GBStoredFields : LuceneTestCase
+ {
+ [Ignore("//LUCENENET NOTE: This was marked Nightly in Java")]
+ [Test]
+ public virtual void Test([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")]IConcurrentMergeScheduler scheduler)
+ {
+ MockDirectoryWrapper dir = new MockDirectoryWrapper(Random(), new MMapDirectory(CreateTempDir("4GBStoredFields")));
+ dir.Throttling = MockDirectoryWrapper.Throttling_e.NEVER;
+
+ var config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
+ .SetMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
+ .SetRAMBufferSizeMB(256.0)
+ .SetMergeScheduler(scheduler)
+ .SetMergePolicy(NewLogMergePolicy(false, 10))
+ .SetOpenMode(OpenMode.CREATE);
+ IndexWriter w = new IndexWriter(dir, config);
+
+ MergePolicy mp = w.Config.MergePolicy;
+ if (mp is LogByteSizeMergePolicy)
+ {
+ // 1 petabyte:
+ ((LogByteSizeMergePolicy)mp).MaxMergeMB = 1024 * 1024 * 1024;
+ }
+
+ Document doc = new Document();
+ FieldType ft = new FieldType();
+ ft.IsIndexed = false;
+ ft.IsStored = true;
+ ft.Freeze();
+ int valueLength = RandomInts.NextIntBetween(Random(), 1 << 13, 1 << 20);
+ var value = new byte[valueLength];
+ for (int i = 0; i < valueLength; ++i)
+ {
+ // random so that even compressing codecs can't compress it
+ value[i] = (byte)Random().Next(256);
+ }
+ Field f = new Field("fld", value, ft);
+ doc.Add(f);
+
+ int numDocs = (int)((1L << 32) / valueLength + 100);
+ for (int i = 0; i < numDocs; ++i)
+ {
+ w.AddDocument(doc);
+ if (VERBOSE && i % (numDocs / 10) == 0)
+ {
+ Console.WriteLine(i + " of " + numDocs + "...");
+ }
+ }
+ w.ForceMerge(1);
+ w.Dispose();
+ if (VERBOSE)
+ {
+ bool found = false;
+ foreach (string file in dir.ListAll())
+ {
+ if (file.EndsWith(".fdt"))
+ {
+ long fileLength = dir.FileLength(file);
+ if (fileLength >= 1L << 32)
+ {
+ found = true;
+ }
+ Console.WriteLine("File length of " + file + " : " + fileLength);
+ }
+ }
+ if (!found)
+ {
+ Console.WriteLine("No .fdt file larger than 4GB, test bug?");
+ }
+ }
+
+ DirectoryReader rd = DirectoryReader.Open(dir);
+ Document sd = rd.Document(numDocs - 1);
+ Assert.IsNotNull(sd);
+ Assert.AreEqual(1, sd.Fields.Count);
+ BytesRef valueRef = sd.GetBinaryValue("fld");
+ Assert.IsNotNull(valueRef);
+ Assert.AreEqual(new BytesRef(value), valueRef);
+ rd.Dispose();
+
+ dir.Dispose();
+ }
+ }
+}
\ No newline at end of file
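The sizing logic in Test4GBStoredFields picks numDocs so that numDocs * valueLength lands about 100 documents past 2^32 bytes, which is what forces the stored-fields (.fdt) file over the 4 GB boundary the test asserts on. A quick standalone check of that arithmetic with one plausible valueLength (illustrative only):

    using System;

    internal static class FdtSizeSketch
    {
        private static void Main()
        {
            int valueLength = 1 << 16; // suppose the random length came out at 64 KB
            int numDocs = (int)((1L << 32) / valueLength + 100);
            long totalBytes = (long)numDocs * valueLength;
            Console.WriteLine($"{numDocs} docs x {valueLength} bytes = {totalBytes}");
            Console.WriteLine($"exceeds 4 GB: {totalBytes > (1L << 32)}"); // True
        }
    }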
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/TestAddIndexes.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/TestAddIndexes.cs b/src/Lucene.Net.Tests/Index/TestAddIndexes.cs
new file mode 100644
index 0000000..5389e74
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/TestAddIndexes.cs
@@ -0,0 +1,1396 @@
+using Lucene.Net.Codecs;
+using Lucene.Net.Documents;
+using Lucene.Net.Support;
+using NUnit.Framework;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Threading;
+
+namespace Lucene.Net.Index
+{
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ using AlreadyClosedException = Lucene.Net.Store.AlreadyClosedException;
+ using BaseDirectoryWrapper = Lucene.Net.Store.BaseDirectoryWrapper;
+ using Codec = Lucene.Net.Codecs.Codec;
+ using Directory = Lucene.Net.Store.Directory;
+ using DocIdSetIterator = Lucene.Net.Search.DocIdSetIterator;
+ using Document = Documents.Document;
+ using Field = Lucene.Net.Documents.Field;
+ using FieldType = Lucene.Net.Documents.FieldType;
+ using FilterCodec = Lucene.Net.Codecs.FilterCodec;
+ using IOUtils = Lucene.Net.Util.IOUtils;
+ using LockObtainFailedException = Lucene.Net.Store.LockObtainFailedException;
+ using Lucene46Codec = Lucene.Net.Codecs.Lucene46.Lucene46Codec;
+ using LuceneTestCase = Lucene.Net.Util.LuceneTestCase;
+ using MockAnalyzer = Lucene.Net.Analysis.MockAnalyzer;
+ using MockDirectoryWrapper = Lucene.Net.Store.MockDirectoryWrapper;
+ using PhraseQuery = Lucene.Net.Search.PhraseQuery;
+ using PostingsFormat = Lucene.Net.Codecs.PostingsFormat;
+ using Pulsing41PostingsFormat = Lucene.Net.Codecs.Pulsing.Pulsing41PostingsFormat;
+ using RAMDirectory = Lucene.Net.Store.RAMDirectory;
+ using StringField = Lucene.Net.Documents.StringField;
+ using TestUtil = Lucene.Net.Util.TestUtil;
+ using TextField = Lucene.Net.Documents.TextField;
+
+ [TestFixture]
+ public class TestAddIndexes : LuceneTestCase
+ {
+ [Test]
+ public virtual void TestSimpleCase()
+ {
+ // main directory
+ Directory dir = NewDirectory();
+ // two auxiliary directories
+ Directory aux = NewDirectory();
+ Directory aux2 = NewDirectory();
+
+ IndexWriter writer = null;
+
+ writer = NewWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.CREATE));
+ // add 100 documents
+ AddDocs(writer, 100);
+ Assert.AreEqual(100, writer.MaxDoc);
+ writer.Dispose();
+ TestUtil.CheckIndex(dir);
+
+ writer = NewWriter(aux, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.CREATE).SetMergePolicy(NewLogMergePolicy(false)));
+ // add 40 documents in separate files
+ AddDocs(writer, 40);
+ Assert.AreEqual(40, writer.MaxDoc);
+ writer.Dispose();
+
+ writer = NewWriter(aux2, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.CREATE));
+ // add 50 documents in compound files
+ AddDocs2(writer, 50);
+ Assert.AreEqual(50, writer.MaxDoc);
+ writer.Dispose();
+
+ // test doc count before segments are merged
+ writer = NewWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND));
+ Assert.AreEqual(100, writer.MaxDoc);
+ writer.AddIndexes(aux, aux2);
+ Assert.AreEqual(190, writer.MaxDoc);
+ writer.Dispose();
+ TestUtil.CheckIndex(dir);
+
+ // make sure the old index is correct
+ VerifyNumDocs(aux, 40);
+
+ // make sure the new index is correct
+ VerifyNumDocs(dir, 190);
+
+ // now add another set in.
+ Directory aux3 = NewDirectory();
+ writer = NewWriter(aux3, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
+ // add 40 documents
+ AddDocs(writer, 40);
+ Assert.AreEqual(40, writer.MaxDoc);
+ writer.Dispose();
+
+ // test doc count before segments are merged
+ writer = NewWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND));
+ Assert.AreEqual(190, writer.MaxDoc);
+ writer.AddIndexes(aux3);
+ Assert.AreEqual(230, writer.MaxDoc);
+ writer.Dispose();
+
+ // make sure the new index is correct
+ VerifyNumDocs(dir, 230);
+
+ VerifyTermDocs(dir, new Term("content", "aaa"), 180);
+
+ VerifyTermDocs(dir, new Term("content", "bbb"), 50);
+
+ // now fully merge it.
+ writer = NewWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND));
+ writer.ForceMerge(1);
+ writer.Dispose();
+
+ // make sure the new index is correct
+ VerifyNumDocs(dir, 230);
+
+ VerifyTermDocs(dir, new Term("content", "aaa"), 180);
+
+ VerifyTermDocs(dir, new Term("content", "bbb"), 50);
+
+ // now add a single document
+ Directory aux4 = NewDirectory();
+ writer = NewWriter(aux4, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
+ AddDocs2(writer, 1);
+ writer.Dispose();
+
+ writer = NewWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND));
+ Assert.AreEqual(230, writer.MaxDoc);
+ writer.AddIndexes(aux4);
+ Assert.AreEqual(231, writer.MaxDoc);
+ writer.Dispose();
+
+ VerifyNumDocs(dir, 231);
+
+ VerifyTermDocs(dir, new Term("content", "bbb"), 51);
+ dir.Dispose();
+ aux.Dispose();
+ aux2.Dispose();
+ aux3.Dispose();
+ aux4.Dispose();
+ }
+
+ [Test]
+ public virtual void TestWithPendingDeletes()
+ {
+ // main directory
+ Directory dir = NewDirectory();
+ // auxiliary directory
+ Directory aux = NewDirectory();
+
+ SetUpDirs(dir, aux);
+ IndexWriter writer = NewWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND));
+ writer.AddIndexes(aux);
+
+ // Adds 10 docs, then replaces them with another 10
+ // docs, so 10 pending deletes:
+ for (int i = 0; i < 20; i++)
+ {
+ Document doc = new Document();
+ doc.Add(NewStringField("id", "" + (i % 10), Field.Store.NO));
+ doc.Add(NewTextField("content", "bbb " + i, Field.Store.NO));
+ writer.UpdateDocument(new Term("id", "" + (i % 10)), doc);
+ }
+ // Deletes one of the 10 added docs, leaving 9:
+ PhraseQuery q = new PhraseQuery();
+ q.Add(new Term("content", "bbb"));
+ q.Add(new Term("content", "14"));
+ writer.DeleteDocuments(q);
+
+ writer.ForceMerge(1);
+ writer.Commit();
+
+ VerifyNumDocs(dir, 1039);
+ VerifyTermDocs(dir, new Term("content", "aaa"), 1030);
+ VerifyTermDocs(dir, new Term("content", "bbb"), 9);
+
+ writer.Dispose();
+ dir.Dispose();
+ aux.Dispose();
+ }
+
+ [Test]
+ public virtual void TestWithPendingDeletes2()
+ {
+ // main directory
+ Directory dir = NewDirectory();
+ // auxiliary directory
+ Directory aux = NewDirectory();
+
+ SetUpDirs(dir, aux);
+ IndexWriter writer = NewWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND));
+
+ // Adds 10 docs, then replaces them with another 10
+ // docs, so 10 pending deletes:
+ for (int i = 0; i < 20; i++)
+ {
+ Document doc = new Document();
+ doc.Add(NewStringField("id", "" + (i % 10), Field.Store.NO));
+ doc.Add(NewTextField("content", "bbb " + i, Field.Store.NO));
+ writer.UpdateDocument(new Term("id", "" + (i % 10)), doc);
+ }
+
+ writer.AddIndexes(aux);
+
+ // Deletes one of the 10 added docs, leaving 9:
+ PhraseQuery q = new PhraseQuery();
+ q.Add(new Term("content", "bbb"));
+ q.Add(new Term("content", "14"));
+ writer.DeleteDocuments(q);
+
+ writer.ForceMerge(1);
+ writer.Commit();
+
+ VerifyNumDocs(dir, 1039);
+ VerifyTermDocs(dir, new Term("content", "aaa"), 1030);
+ VerifyTermDocs(dir, new Term("content", "bbb"), 9);
+
+ writer.Dispose();
+ dir.Dispose();
+ aux.Dispose();
+ }
+
+ [Test]
+ public virtual void TestWithPendingDeletes3()
+ {
+ // main directory
+ Directory dir = NewDirectory();
+ // auxiliary directory
+ Directory aux = NewDirectory();
+
+ SetUpDirs(dir, aux);
+ IndexWriter writer = NewWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND));
+
+ // Adds 10 docs, then replaces them with another 10
+ // docs, so 10 pending deletes:
+ for (int i = 0; i < 20; i++)
+ {
+ Document doc = new Document();
+ doc.Add(NewStringField("id", "" + (i % 10), Field.Store.NO));
+ doc.Add(NewTextField("content", "bbb " + i, Field.Store.NO));
+ writer.UpdateDocument(new Term("id", "" + (i % 10)), doc);
+ }
+
+ // Deletes one of the 10 added docs, leaving 9:
+ PhraseQuery q = new PhraseQuery();
+ q.Add(new Term("content", "bbb"));
+ q.Add(new Term("content", "14"));
+ writer.DeleteDocuments(q);
+
+ writer.AddIndexes(aux);
+
+ writer.ForceMerge(1);
+ writer.Commit();
+
+ VerifyNumDocs(dir, 1039);
+ VerifyTermDocs(dir, new Term("content", "aaa"), 1030);
+ VerifyTermDocs(dir, new Term("content", "bbb"), 9);
+
+ writer.Dispose();
+ dir.Dispose();
+ aux.Dispose();
+ }
+
+ // case 0: add self or exceed maxMergeDocs, expect exception
+ [Test]
+ public virtual void TestAddSelf()
+ {
+ // main directory
+ Directory dir = NewDirectory();
+ // auxiliary directory
+ Directory aux = NewDirectory();
+
+ IndexWriter writer = null;
+
+ writer = NewWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
+ // add 100 documents
+ AddDocs(writer, 100);
+ Assert.AreEqual(100, writer.MaxDoc);
+ writer.Dispose();
+
+ writer = NewWriter(aux, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.CREATE).SetMaxBufferedDocs(1000).SetMergePolicy(NewLogMergePolicy(false)));
+ // add 40 documents in separate files, then re-create the index with 100 documents
+ AddDocs(writer, 40);
+ writer.Dispose();
+ writer = NewWriter(aux, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.CREATE).SetMaxBufferedDocs(1000).SetMergePolicy(NewLogMergePolicy(false)));
+ AddDocs(writer, 100);
+ writer.Dispose();
+
+ writer = NewWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND));
+ try
+ {
+ // cannot add self
+ writer.AddIndexes(aux, dir);
+ Assert.Fail("did not hit expected exception");
+ }
+#pragma warning disable 168
+ catch (System.ArgumentException e)
+#pragma warning restore 168
+ {
+ Assert.AreEqual(100, writer.MaxDoc);
+ }
+ writer.Dispose();
+
+ // make sure the index is correct
+ VerifyNumDocs(dir, 100);
+ dir.Dispose();
+ aux.Dispose();
+ }
+
+ // in all the remaining tests, make the doc count of the oldest segment
+ // in dir large so that it is never merged in addIndexes()
+ // case 1: no tail segments
+ [Test]
+ public virtual void TestNoTailSegments()
+ {
+ // main directory
+ Directory dir = NewDirectory();
+ // auxiliary directory
+ Directory aux = NewDirectory();
+
+ SetUpDirs(dir, aux);
+
+ IndexWriter writer = NewWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND).SetMaxBufferedDocs(10).SetMergePolicy(NewLogMergePolicy(4)));
+ AddDocs(writer, 10);
+
+ writer.AddIndexes(aux);
+ Assert.AreEqual(1040, writer.MaxDoc);
+ Assert.AreEqual(1000, writer.GetDocCount(0));
+ writer.Dispose();
+
+ // make sure the index is correct
+ VerifyNumDocs(dir, 1040);
+ dir.Dispose();
+ aux.Dispose();
+ }
+
+ // case 2: tail segments, invariants hold, no copy
+ [Test]
+ public virtual void TestNoCopySegments()
+ {
+ // main directory
+ Directory dir = NewDirectory();
+ // auxiliary directory
+ Directory aux = NewDirectory();
+
+ SetUpDirs(dir, aux);
+
+ IndexWriter writer = NewWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND).SetMaxBufferedDocs(9).SetMergePolicy(NewLogMergePolicy(4)));
+ AddDocs(writer, 2);
+
+ writer.AddIndexes(aux);
+ Assert.AreEqual(1032, writer.MaxDoc);
+ Assert.AreEqual(1000, writer.GetDocCount(0));
+ writer.Dispose();
+
+ // make sure the index is correct
+ VerifyNumDocs(dir, 1032);
+ dir.Dispose();
+ aux.Dispose();
+ }
+
+ // case 3: tail segments, invariants hold, copy, invariants hold
+ [Test]
+ public virtual void TestNoMergeAfterCopy()
+ {
+ // main directory
+ Directory dir = NewDirectory();
+ // auxiliary directory
+ Directory aux = NewDirectory();
+
+ SetUpDirs(dir, aux);
+
+ IndexWriter writer = NewWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND).SetMaxBufferedDocs(10).SetMergePolicy(NewLogMergePolicy(4)));
+
+ writer.AddIndexes(aux, new MockDirectoryWrapper(Random(), new RAMDirectory(aux, NewIOContext(Random()))));
+ Assert.AreEqual(1060, writer.MaxDoc);
+ Assert.AreEqual(1000, writer.GetDocCount(0));
+ writer.Dispose();
+
+ // make sure the index is correct
+ VerifyNumDocs(dir, 1060);
+ dir.Dispose();
+ aux.Dispose();
+ }
+
+ // case 4: tail segments, invariants hold, copy, invariants not hold
+ [Test]
+ public virtual void TestMergeAfterCopy()
+ {
+ // main directory
+ Directory dir = NewDirectory();
+ // auxiliary directory
+ Directory aux = NewDirectory();
+
+ SetUpDirs(dir, aux, true);
+
+ IndexWriterConfig dontMergeConfig = (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))).SetMergePolicy(NoMergePolicy.COMPOUND_FILES);
+ IndexWriter writer = new IndexWriter(aux, dontMergeConfig);
+ for (int i = 0; i < 20; i++)
+ {
+ writer.DeleteDocuments(new Term("id", "" + i));
+ }
+ writer.Dispose();
+ IndexReader reader = DirectoryReader.Open(aux);
+ Assert.AreEqual(10, reader.NumDocs);
+ reader.Dispose();
+
+ writer = NewWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND).SetMaxBufferedDocs(4).SetMergePolicy(NewLogMergePolicy(4)));
+
+ if (VERBOSE)
+ {
+ Console.WriteLine("\nTEST: now addIndexes");
+ }
+ writer.AddIndexes(aux, new MockDirectoryWrapper(Random(), new RAMDirectory(aux, NewIOContext(Random()))));
+ Assert.AreEqual(1020, writer.MaxDoc);
+ Assert.AreEqual(1000, writer.GetDocCount(0));
+ writer.Dispose();
+ dir.Dispose();
+ aux.Dispose();
+ }
+
+ // case 5: tail segments, invariants not hold
+ [Test]
+ public virtual void TestMoreMerges()
+ {
+ // main directory
+ Directory dir = NewDirectory();
+ // auxiliary directory
+ Directory aux = NewDirectory();
+ Directory aux2 = NewDirectory();
+
+ SetUpDirs(dir, aux, true);
+
+ IndexWriter writer = NewWriter(aux2, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.CREATE).SetMaxBufferedDocs(100).SetMergePolicy(NewLogMergePolicy(10)));
+ writer.AddIndexes(aux);
+ Assert.AreEqual(30, writer.MaxDoc);
+ Assert.AreEqual(3, writer.SegmentCount);
+ writer.Dispose();
+
+ IndexWriterConfig dontMergeConfig = (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))).SetMergePolicy(NoMergePolicy.COMPOUND_FILES);
+ writer = new IndexWriter(aux, dontMergeConfig);
+ for (int i = 0; i < 27; i++)
+ {
+ writer.DeleteDocuments(new Term("id", "" + i));
+ }
+ writer.Dispose();
+ IndexReader reader = DirectoryReader.Open(aux);
+ Assert.AreEqual(3, reader.NumDocs);
+ reader.Dispose();
+
+ dontMergeConfig = (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))).SetMergePolicy(NoMergePolicy.COMPOUND_FILES);
+ writer = new IndexWriter(aux2, dontMergeConfig);
+ for (int i = 0; i < 8; i++)
+ {
+ writer.DeleteDocuments(new Term("id", "" + i));
+ }
+ writer.Dispose();
+ reader = DirectoryReader.Open(aux2);
+ Assert.AreEqual(22, reader.NumDocs);
+ reader.Dispose();
+
+ writer = NewWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND).SetMaxBufferedDocs(6).SetMergePolicy(NewLogMergePolicy(4)));
+
+ writer.AddIndexes(aux, aux2);
+ Assert.AreEqual(1040, writer.MaxDoc);
+ Assert.AreEqual(1000, writer.GetDocCount(0));
+ writer.Dispose();
+ dir.Dispose();
+ aux.Dispose();
+ aux2.Dispose();
+ }
+
+ private IndexWriter NewWriter(Directory dir, IndexWriterConfig conf)
+ {
+ conf.SetMergePolicy(new LogDocMergePolicy());
+ IndexWriter writer = new IndexWriter(dir, conf);
+ return writer;
+ }
+
+ private void AddDocs(IndexWriter writer, int numDocs)
+ {
+ for (int i = 0; i < numDocs; i++)
+ {
+ Document doc = new Document();
+ doc.Add(NewTextField("content", "aaa", Field.Store.NO));
+ writer.AddDocument(doc);
+ }
+ }
+
+ private void AddDocs2(IndexWriter writer, int numDocs)
+ {
+ for (int i = 0; i < numDocs; i++)
+ {
+ Document doc = new Document();
+ doc.Add(NewTextField("content", "bbb", Field.Store.NO));
+ writer.AddDocument(doc);
+ }
+ }
+
+ private void VerifyNumDocs(Directory dir, int numDocs)
+ {
+ IndexReader reader = DirectoryReader.Open(dir);
+ Assert.AreEqual(numDocs, reader.MaxDoc);
+ Assert.AreEqual(numDocs, reader.NumDocs);
+ reader.Dispose();
+ }
+
+ private void VerifyTermDocs(Directory dir, Term term, int numDocs)
+ {
+ IndexReader reader = DirectoryReader.Open(dir);
+ DocsEnum docsEnum = TestUtil.Docs(Random(), reader, term.Field, term.Bytes, null, null, DocsEnum.FLAG_NONE);
+ int count = 0;
+ while (docsEnum.NextDoc() != DocIdSetIterator.NO_MORE_DOCS)
+ {
+ count++;
+ }
+ Assert.AreEqual(numDocs, count);
+ reader.Dispose();
+ }
+
+ private void SetUpDirs(Directory dir, Directory aux)
+ {
+ SetUpDirs(dir, aux, false);
+ }
+
+ private void SetUpDirs(Directory dir, Directory aux, bool withID)
+ {
+ IndexWriter writer = null;
+
+ writer = NewWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.CREATE).SetMaxBufferedDocs(1000));
+ // add 1000 documents in 1 segment
+ if (withID)
+ {
+ AddDocsWithID(writer, 1000, 0);
+ }
+ else
+ {
+ AddDocs(writer, 1000);
+ }
+ Assert.AreEqual(1000, writer.MaxDoc);
+ Assert.AreEqual(1, writer.SegmentCount);
+ writer.Dispose();
+
+ writer = NewWriter(aux, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.CREATE).SetMaxBufferedDocs(1000).SetMergePolicy(NewLogMergePolicy(false, 10)));
+ // add 30 documents in 3 segments
+ for (int i = 0; i < 3; i++)
+ {
+ if (withID)
+ {
+ AddDocsWithID(writer, 10, 10 * i);
+ }
+ else
+ {
+ AddDocs(writer, 10);
+ }
+ writer.Dispose();
+ writer = NewWriter(aux, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND).SetMaxBufferedDocs(1000).SetMergePolicy(NewLogMergePolicy(false, 10)));
+ }
+ Assert.AreEqual(30, writer.MaxDoc);
+ Assert.AreEqual(3, writer.SegmentCount);
+ writer.Dispose();
+ }
+
+ // LUCENE-1270
+ [Test]
+ public virtual void TestHangOnClose()
+ {
+ Directory dir = NewDirectory();
+ LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
+ lmp.NoCFSRatio = 0.0;
+ lmp.MergeFactor = 100;
+ IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(5).SetMergePolicy(lmp));
+
+ Document doc = new Document();
+ FieldType customType = new FieldType(TextField.TYPE_STORED);
+ customType.StoreTermVectors = true;
+ customType.StoreTermVectorPositions = true;
+ customType.StoreTermVectorOffsets = true;
+ doc.Add(NewField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType));
+ for (int i = 0; i < 60; i++)
+ {
+ writer.AddDocument(doc);
+ }
+
+ Document doc2 = new Document();
+ FieldType customType2 = new FieldType();
+ customType2.IsStored = true;
+ doc2.Add(NewField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2));
+ doc2.Add(NewField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2));
+ doc2.Add(NewField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2));
+ doc2.Add(NewField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2));
+ for (int i = 0; i < 10; i++)
+ {
+ writer.AddDocument(doc2);
+ }
+ writer.Dispose();
+
+ Directory dir2 = NewDirectory();
+ lmp = new LogByteSizeMergePolicy();
+ lmp.MinMergeMB = 0.0001;
+ lmp.NoCFSRatio = 0.0;
+ lmp.MergeFactor = 4;
+ writer = new IndexWriter(dir2, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergeScheduler(new SerialMergeScheduler()).SetMergePolicy(lmp));
+ writer.AddIndexes(dir);
+ writer.Dispose();
+ dir.Dispose();
+ dir2.Dispose();
+ }
+
+ // TODO: these are also in TestIndexWriter... add a simple doc-writing method
+ // like this to LuceneTestCase?
+ private void AddDoc(IndexWriter writer)
+ {
+ Document doc = new Document();
+ doc.Add(NewTextField("content", "aaa", Field.Store.NO));
+ writer.AddDocument(doc);
+ }
+
+ private abstract class RunAddIndexesThreads
+ {
+ private readonly TestAddIndexes OuterInstance;
+
+ internal Directory Dir, Dir2;
+ internal const int NUM_INIT_DOCS = 17;
+ internal IndexWriter Writer2;
+ internal readonly IList<Exception> Failures = new List<Exception>();
+ internal volatile bool DidClose;
+ internal readonly IndexReader[] Readers;
+ internal readonly int NUM_COPY;
+ internal const int NUM_THREADS = 5;
+ internal readonly ThreadClass[] Threads = new ThreadClass[NUM_THREADS];
+
+ public RunAddIndexesThreads(TestAddIndexes outerInstance, int numCopy)
+ {
+ this.OuterInstance = outerInstance;
+ NUM_COPY = numCopy;
+ Dir = new MockDirectoryWrapper(Random(), new RAMDirectory());
+ IndexWriter writer = new IndexWriter(Dir, (IndexWriterConfig)new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2));
+ for (int i = 0; i < NUM_INIT_DOCS; i++)
+ {
+ outerInstance.AddDoc(writer);
+ }
+ writer.Dispose();
+
+ Dir2 = NewDirectory();
+ Writer2 = new IndexWriter(Dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
+ Writer2.Commit();
+
+ Readers = new IndexReader[NUM_COPY];
+ for (int i = 0; i < NUM_COPY; i++)
+ {
+ Readers[i] = DirectoryReader.Open(Dir);
+ }
+ }
+
+ internal virtual void LaunchThreads(int numIter)
+ {
+ for (int i = 0; i < NUM_THREADS; i++)
+ {
+ Threads[i] = new ThreadAnonymousInnerClassHelper(this, numIter);
+ }
+
+ for (int i = 0; i < NUM_THREADS; i++)
+ {
+ Threads[i].Start();
+ }
+ }
+
+ private class ThreadAnonymousInnerClassHelper : ThreadClass
+ {
+ private readonly RunAddIndexesThreads OuterInstance;
+
+ private int NumIter;
+
+ public ThreadAnonymousInnerClassHelper(RunAddIndexesThreads outerInstance, int numIter)
+ {
+ this.OuterInstance = outerInstance;
+ this.NumIter = numIter;
+ }
+
+ public override void Run()
+ {
+ try
+ {
+ Directory[] dirs = new Directory[OuterInstance.NUM_COPY];
+ for (int k = 0; k < OuterInstance.NUM_COPY; k++)
+ {
+ dirs[k] = new MockDirectoryWrapper(Random(), new RAMDirectory(OuterInstance.Dir, NewIOContext(Random())));
+ }
+
+ int j = 0;
+
+ while (true)
+ {
+ // System.out.println(Thread.currentThread().getName() + ": iter j=" + j);
+ if (NumIter > 0 && j == NumIter)
+ {
+ break;
+ }
+ OuterInstance.DoBody(j++, dirs);
+ }
+ }
+ catch (Exception t)
+ {
+ OuterInstance.Handle(t);
+ }
+ }
+ }
+
+ internal virtual void JoinThreads()
+ {
+ for (int i = 0; i < NUM_THREADS; i++)
+ {
+ Threads[i].Join();
+ }
+ }
+
+ internal virtual void Close(bool doWait)
+ {
+ DidClose = true;
+ Writer2.Dispose(doWait);
+ }
+
+ internal virtual void CloseDir()
+ {
+ for (int i = 0; i < NUM_COPY; i++)
+ {
+ Readers[i].Dispose();
+ }
+ Dir2.Dispose();
+ }
+
+ internal abstract void DoBody(int j, Directory[] dirs);
+
+ internal abstract void Handle(Exception t);
+ }
+
+ private class CommitAndAddIndexes : RunAddIndexesThreads
+ {
+ private readonly TestAddIndexes OuterInstance;
+
+ public CommitAndAddIndexes(TestAddIndexes outerInstance, int numCopy)
+ : base(outerInstance, numCopy)
+ {
+ this.OuterInstance = outerInstance;
+ }
+
+ internal override void Handle(Exception t)
+ {
+ Console.Error.WriteLine(t.StackTrace);
+ lock (Failures)
+ {
+ Failures.Add(t);
+ }
+ }
+
+ internal override void DoBody(int j, Directory[] dirs)
+ {
+ switch (j % 5)
+ {
+ case 0:
+ if (VERBOSE)
+ {
+ Console.WriteLine(Thread.CurrentThread.Name + ": TEST: addIndexes(Dir[]) then full merge");
+ }
+ Writer2.AddIndexes(dirs);
+ Writer2.ForceMerge(1);
+ break;
+
+ case 1:
+ if (VERBOSE)
+ {
+ Console.WriteLine(Thread.CurrentThread.Name + ": TEST: addIndexes(Dir[])");
+ }
+ Writer2.AddIndexes(dirs);
+ break;
+
+ case 2:
+ if (VERBOSE)
+ {
+ Console.WriteLine(Thread.CurrentThread.Name + ": TEST: addIndexes(IndexReader[])");
+ }
+ Writer2.AddIndexes(Readers);
+ break;
+
+ case 3:
+ if (VERBOSE)
+ {
+ Console.WriteLine(Thread.CurrentThread.Name + ": TEST: addIndexes(Dir[]) then maybeMerge");
+ }
+ Writer2.AddIndexes(dirs);
+ Writer2.MaybeMerge();
+ break;
+
+ case 4:
+ if (VERBOSE)
+ {
+ Console.WriteLine(Thread.CurrentThread.Name + ": TEST: commit");
+ }
+ Writer2.Commit();
+ break;
+ }
+ }
+ }
+
+ // LUCENE-1335: test simultaneous addIndexes & commits
+ // from multiple threads
+ [Test]
+ public virtual void TestAddIndexesWithThreads()
+ {
+ int NUM_ITER = TEST_NIGHTLY ? 15 : 5;
+ const int NUM_COPY = 3;
+ CommitAndAddIndexes c = new CommitAndAddIndexes(this, NUM_COPY);
+ c.LaunchThreads(NUM_ITER);
+
+ for (int i = 0; i < 100; i++)
+ {
+ AddDoc(c.Writer2);
+ }
+
+ c.JoinThreads();
+
+ int expectedNumDocs = 100 + NUM_COPY * (4 * NUM_ITER / 5) * RunAddIndexesThreads.NUM_THREADS * RunAddIndexesThreads.NUM_INIT_DOCS;
+ Assert.AreEqual(expectedNumDocs, c.Writer2.NumDocs, "expected num docs don't match - failures: " + Environment.NewLine
+ + string.Join(Environment.NewLine, c.Failures.Select(x => x.ToString())));
+
+ c.Close(true);
+
+ Assert.IsTrue(c.Failures.Count == 0, "found unexpected failures: " + c.Failures);
+
+ IndexReader reader = DirectoryReader.Open(c.Dir2);
+ Assert.AreEqual(expectedNumDocs, reader.NumDocs);
+ reader.Dispose();
+
+ c.CloseDir();
+ }
+
+ private class CommitAndAddIndexes2 : CommitAndAddIndexes
+ {
+ private readonly TestAddIndexes OuterInstance;
+
+ public CommitAndAddIndexes2(TestAddIndexes outerInstance, int numCopy)
+ : base(outerInstance, numCopy)
+ {
+ this.OuterInstance = outerInstance;
+ }
+
+ internal override void Handle(Exception t)
+ {
+ if (!(t is AlreadyClosedException) && !(t is System.NullReferenceException))
+ {
+ Console.Error.WriteLine(t.StackTrace);
+ lock (Failures)
+ {
+ Failures.Add(t);
+ }
+ }
+ }
+ }
+
+ // LUCENE-1335: test simultaneous addIndexes & close
+ [Test]
+ public virtual void TestAddIndexesWithClose()
+ {
+ const int NUM_COPY = 3;
+ CommitAndAddIndexes2 c = new CommitAndAddIndexes2(this, NUM_COPY);
+ //c.writer2.setInfoStream(System.out);
+ c.LaunchThreads(-1);
+
+ // Close w/o first stopping/joining the threads
+ c.Close(true);
+ //c.writer2.Dispose();
+
+ c.JoinThreads();
+
+ c.CloseDir();
+
+ Assert.IsTrue(c.Failures.Count == 0);
+ }
+
+ private class CommitAndAddIndexes3 : RunAddIndexesThreads
+ {
+ private readonly TestAddIndexes OuterInstance;
+
+ public CommitAndAddIndexes3(TestAddIndexes outerInstance, int numCopy)
+ : base(outerInstance, numCopy)
+ {
+ this.OuterInstance = outerInstance;
+ }
+
+ internal override void DoBody(int j, Directory[] dirs)
+ {
+ switch (j % 5)
+ {
+ case 0:
+ if (VERBOSE)
+ {
+ Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": addIndexes + full merge");
+ }
+ Writer2.AddIndexes(dirs);
+ Writer2.ForceMerge(1);
+ break;
+
+ case 1:
+ if (VERBOSE)
+ {
+ Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": addIndexes");
+ }
+ Writer2.AddIndexes(dirs);
+ break;
+
+ case 2:
+ if (VERBOSE)
+ {
+ Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": addIndexes(IR[])");
+ }
+ Writer2.AddIndexes(Readers);
+ break;
+
+ case 3:
+ if (VERBOSE)
+ {
+ Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": full merge");
+ }
+ Writer2.ForceMerge(1);
+ break;
+
+ case 4:
+ if (VERBOSE)
+ {
+ Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": commit");
+ }
+ Writer2.Commit();
+ break;
+ }
+ }
+
+ internal override void Handle(Exception t)
+ {
+ bool report = true;
+
+ if (t is AlreadyClosedException || t is MergePolicy.MergeAbortedException || t is System.NullReferenceException)
+ {
+ report = !DidClose;
+ }
+ else if (t is FileNotFoundException/* || t is NoSuchFileException*/)
+ {
+ report = !DidClose;
+ }
+ else if (t is IOException)
+ {
+ Exception t2 = t.InnerException;
+ if (t2 is MergePolicy.MergeAbortedException)
+ {
+ report = !DidClose;
+ }
+ }
+ if (report)
+ {
+ Console.Out.WriteLine(t.StackTrace);
+ lock (Failures)
+ {
+ Failures.Add(t);
+ }
+ }
+ }
+ }
+
+ // LUCENE-1335: test simultaneous addIndexes & close
+ [Test]
+ public virtual void TestAddIndexesWithCloseNoWait()
+ {
+ const int NUM_COPY = 50;
+ CommitAndAddIndexes3 c = new CommitAndAddIndexes3(this, NUM_COPY);
+ c.LaunchThreads(-1);
+
+ Thread.Sleep(TestUtil.NextInt(Random(), 10, 500));
+
+ // Close w/o first stopping/joining the threads
+ if (VERBOSE)
+ {
+ Console.WriteLine("TEST: now close(false)");
+ }
+ c.Close(false);
+
+ c.JoinThreads();
+
+ if (VERBOSE)
+ {
+ Console.WriteLine("TEST: done join threads");
+ }
+ c.CloseDir();
+
+ Assert.IsTrue(c.Failures.Count == 0);
+ }
+
+ // LUCENE-1335: test simultaneous addIndexes & close
+ [Test]
+ public virtual void TestAddIndexesWithRollback()
+ {
+ int NUM_COPY = TEST_NIGHTLY ? 50 : 5;
+ CommitAndAddIndexes3 c = new CommitAndAddIndexes3(this, NUM_COPY);
+ c.LaunchThreads(-1);
+
+ Thread.Sleep(TestUtil.NextInt(Random(), 10, 500));
+
+ // Close w/o first stopping/joining the threads
+ if (VERBOSE)
+ {
+ Console.WriteLine("TEST: now force rollback");
+ }
+ c.DidClose = true;
+ c.Writer2.Rollback();
+
+ c.JoinThreads();
+
+ c.CloseDir();
+
+ Assert.IsTrue(c.Failures.Count == 0);
+ }
+
+ // LUCENE-2996: tests that addIndexes(IndexReader) applies existing deletes correctly.
+ [Test]
+ public virtual void TestExistingDeletes()
+ {
+ Directory[] dirs = new Directory[2];
+ for (int i = 0; i < dirs.Length; i++)
+ {
+ dirs[i] = NewDirectory();
+ IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
+ IndexWriter writer = new IndexWriter(dirs[i], conf);
+ Document doc = new Document();
+ doc.Add(new StringField("id", "myid", Field.Store.NO));
+ writer.AddDocument(doc);
+ writer.Dispose();
+ }
+
+ IndexWriterConfig conf_ = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
+ IndexWriter writer_ = new IndexWriter(dirs[0], conf_);
+
+ // Now delete the document
+ writer_.DeleteDocuments(new Term("id", "myid"));
+ IndexReader r = DirectoryReader.Open(dirs[1]);
+ try
+ {
+ writer_.AddIndexes(r);
+ }
+ finally
+ {
+ r.Dispose();
+ }
+ writer_.Commit();
+ Assert.AreEqual(1, writer_.NumDocs, "Documents from the incoming index should not have been deleted");
+ writer_.Dispose();
+
+ foreach (Directory dir in dirs)
+ {
+ dir.Dispose();
+ }
+ }
+
+ // just like addDocs but with ID, starting from docStart
+ private void AddDocsWithID(IndexWriter writer, int numDocs, int docStart)
+ {
+ for (int i = 0; i < numDocs; i++)
+ {
+ Document doc = new Document();
+ doc.Add(NewTextField("content", "aaa", Field.Store.NO));
+ doc.Add(NewTextField("id", "" + (docStart + i), Field.Store.YES));
+ writer.AddDocument(doc);
+ }
+ }
+
+ [Test]
+ public virtual void TestSimpleCaseCustomCodec()
+ {
+ // main directory
+ Directory dir = NewDirectory();
+ // two auxiliary directories
+ Directory aux = NewDirectory();
+ Directory aux2 = NewDirectory();
+ Codec codec = new CustomPerFieldCodec();
+ IndexWriter writer = null;
+
+ writer = NewWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.CREATE).SetCodec(codec));
+ // add 100 documents
+ AddDocsWithID(writer, 100, 0);
+ Assert.AreEqual(100, writer.MaxDoc);
+ writer.Commit();
+ writer.Dispose();
+ TestUtil.CheckIndex(dir);
+
+ writer = NewWriter(aux, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.CREATE).SetCodec(codec).SetMaxBufferedDocs(10).SetMergePolicy(NewLogMergePolicy(false)));
+ // add 40 documents in separate files
+ AddDocs(writer, 40);
+ Assert.AreEqual(40, writer.MaxDoc);
+ writer.Commit();
+ writer.Dispose();
+
+ writer = NewWriter(aux2, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.CREATE).SetCodec(codec));
+ // add 50 documents in compound files
+ AddDocs2(writer, 50);
+ Assert.AreEqual(50, writer.MaxDoc);
+ writer.Commit();
+ writer.Dispose();
+
+ // test doc count before segments are merged
+ writer = NewWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND).SetCodec(codec));
+ Assert.AreEqual(100, writer.MaxDoc);
+ writer.AddIndexes(aux, aux2);
+ Assert.AreEqual(190, writer.MaxDoc);
+ writer.Dispose();
+
+ dir.Dispose();
+ aux.Dispose();
+ aux2.Dispose();
+ }
+
+ private sealed class CustomPerFieldCodec : Lucene46Codec
+ {
+ internal readonly PostingsFormat SimpleTextFormat;
+ internal readonly PostingsFormat DefaultFormat;
+ internal readonly PostingsFormat MockSepFormat;
+
+ public CustomPerFieldCodec()
+ {
+ SimpleTextFormat = Codecs.PostingsFormat.ForName("SimpleText");
+ DefaultFormat = Codecs.PostingsFormat.ForName("Lucene41");
+ MockSepFormat = Codecs.PostingsFormat.ForName("MockSep");
+ }
+
+ public override PostingsFormat GetPostingsFormatForField(string field)
+ {
+ if (field.Equals("id"))
+ {
+ return SimpleTextFormat;
+ }
+ else if (field.Equals("content"))
+ {
+ return MockSepFormat;
+ }
+ else
+ {
+ return DefaultFormat;
+ }
+ }
+ }
+
+ // LUCENE-2790: tests that the non-CFS files were deleted by addIndexes
+ [Test]
+ public virtual void TestNonCFSLeftovers()
+ {
+ Directory[] dirs = new Directory[2];
+ for (int i = 0; i < dirs.Length; i++)
+ {
+ dirs[i] = new RAMDirectory();
+ IndexWriter w = new IndexWriter(dirs[i], new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
+ Document d = new Document();
+ FieldType customType = new FieldType(TextField.TYPE_STORED);
+ customType.StoreTermVectors = true;
+ d.Add(new Field("c", "v", customType));
+ w.AddDocument(d);
+ w.Dispose();
+ }
+
+ IndexReader[] readers = new IndexReader[] { DirectoryReader.Open(dirs[0]), DirectoryReader.Open(dirs[1]) };
+
+ Directory dir = new MockDirectoryWrapper(Random(), new RAMDirectory());
+ IndexWriterConfig conf = (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))).SetMergePolicy(NewLogMergePolicy(true));
+ MergePolicy lmp = conf.MergePolicy;
+ // Force creation of CFS:
+ lmp.NoCFSRatio = 1.0;
+ lmp.MaxCFSSegmentSizeMB = double.PositiveInfinity;
+ IndexWriter w3 = new IndexWriter(dir, conf);
+ w3.AddIndexes(readers);
+ w3.Dispose();
+ // we should now see segments_X,
+ // segments.gen, _Y.cfs, _Y.cfe, _Z.si
+ Assert.AreEqual(5, dir.ListAll().Length, "Only one compound segment should exist, but got: " + Arrays.ToString(dir.ListAll()));
+ dir.Dispose();
+ }
+
+ [CodecName("NotRegistered")]
+ private sealed class UnRegisteredCodec : FilterCodec
+ {
+ public UnRegisteredCodec()
+ : base(new Lucene46Codec())
+ {
+ }
+ }
+
+ /*
+ * simple test that ensures we get the expected exceptions
+ */
+ [Test]
+ public virtual void TestAddIndexMissingCodec()
+ {
+ BaseDirectoryWrapper toAdd = NewDirectory();
+ // Disable checkIndex, else we get an exception because
+ // of the unregistered codec:
+ toAdd.CheckIndexOnClose = false;
+ {
+ IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
+ conf.SetCodec(new UnRegisteredCodec());
+ using (var w = new IndexWriter(toAdd, conf))
+ {
+ Document doc = new Document();
+ FieldType customType = new FieldType();
+ customType.IsIndexed = true;
+ doc.Add(NewField("foo", "bar", customType));
+ w.AddDocument(doc);
+ }
+ }
+
+ {
+ using (Directory dir = NewDirectory())
+ {
+ IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
+ conf.SetCodec(TestUtil.AlwaysPostingsFormat(new Pulsing41PostingsFormat(1 + Random().Next(20))));
+ IndexWriter w = new IndexWriter(dir, conf);
+ try
+ {
+ w.AddIndexes(toAdd);
+ Assert.Fail("no such codec");
+ }
+#pragma warning disable 168
+ catch (System.ArgumentException ex)
+#pragma warning restore 168
+ {
+ // expected
+ }
+ finally
+ {
+ w.Dispose();
+ }
+ using (IndexReader open = DirectoryReader.Open(dir))
+ {
+ Assert.AreEqual(0, open.NumDocs);
+ }
+ }
+ }
+
+ try
+ {
+ DirectoryReader.Open(toAdd);
+ Assert.Fail("no such codec");
+ }
+#pragma warning disable 168
+ catch (System.ArgumentException ex)
+#pragma warning restore 168
+ {
+ // expected
+ }
+ toAdd.Dispose();
+ }
+
+ // LUCENE-3575
+ [Test]
+ public virtual void TestFieldNamesChanged()
+ {
+ Directory d1 = NewDirectory();
+ RandomIndexWriter w = new RandomIndexWriter(Random(), d1, Similarity, TimeZone);
+ Document doc = new Document();
+ doc.Add(NewStringField("f1", "doc1 field1", Field.Store.YES));
+ doc.Add(NewStringField("id", "1", Field.Store.YES));
+ w.AddDocument(doc);
+ IndexReader r1 = w.Reader;
+ w.Dispose();
+
+ Directory d2 = NewDirectory();
+ w = new RandomIndexWriter(Random(), d2, Similarity, TimeZone);
+ doc = new Document();
+ doc.Add(NewStringField("f2", "doc2 field2", Field.Store.YES));
+ doc.Add(NewStringField("id", "2", Field.Store.YES));
+ w.AddDocument(doc);
+ IndexReader r2 = w.Reader;
+ w.Dispose();
+
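+ // merge the two indexes, whose schemas use disjoint field names (f1 vs f2)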
+ Directory d3 = NewDirectory();
+ w = new RandomIndexWriter(Random(), d3, Similarity, TimeZone);
+ w.AddIndexes(r1, r2);
+ r1.Dispose();
+ d1.Dispose();
+ r2.Dispose();
+ d2.Dispose();
+
+ IndexReader r3 = w.Reader;
+ w.Dispose();
+ Assert.AreEqual(2, r3.NumDocs);
+ for (int docID = 0; docID < 2; docID++)
+ {
+ Document d = r3.Document(docID);
+ if (d.Get("id").Equals("1"))
+ {
+ Assert.AreEqual("doc1 field1", d.Get("f1"));
+ }
+ else
+ {
+ Assert.AreEqual("doc2 field2", d.Get("f2"));
+ }
+ }
+ r3.Dispose();
+ d3.Dispose();
+ }
+
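+ // addIndexes of an empty reader must not leave empty segments in the destination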
+ [Test]
+ public virtual void TestAddEmpty()
+ {
+ Directory d1 = NewDirectory();
+ RandomIndexWriter w = new RandomIndexWriter(Random(), d1, Similarity, TimeZone);
+ MultiReader empty = new MultiReader();
+ w.AddIndexes(empty);
+ w.Dispose();
+ DirectoryReader dr = DirectoryReader.Open(d1);
+ foreach (AtomicReaderContext ctx in dr.Leaves)
+ {
+ Assert.IsTrue(ctx.Reader.MaxDoc > 0, "empty segments should be dropped by addIndexes");
+ }
+ dr.Dispose();
+ d1.Dispose();
+ }
+
+ // Currently it's impossible to end up with a segment with all documents
+ // deleted, as such segments are dropped. Still, to validate that addIndexes
+ // works with such segments, or with readers that end up in such a state, we
+ // fake an all-deleted segment.
+ [Test]
+ public virtual void TestFakeAllDeleted()
+ {
+ Directory src = NewDirectory(), dest = NewDirectory();
+ RandomIndexWriter w = new RandomIndexWriter(Random(), src, Similarity, TimeZone);
+ w.AddDocument(new Document());
+ IndexReader allDeletedReader = new AllDeletedFilterReader((AtomicReader)w.Reader.Leaves[0].Reader);
+ w.Dispose();
+
+ w = new RandomIndexWriter(Random(), dest, Similarity, TimeZone);
+ w.AddIndexes(allDeletedReader);
+ w.Dispose();
+ DirectoryReader dr = DirectoryReader.Open(dest);
+ foreach (AtomicReaderContext ctx in dr.Leaves)
+ {
+ Assert.IsTrue(ctx.Reader.MaxDoc > 0, "empty segments should be dropped by addIndexes");
+ }
+ dr.Dispose();
+ allDeletedReader.Dispose();
+ src.Dispose();
+ dest.Dispose();
+ }
+
+ /// <summary>
+ /// Make sure an open IndexWriter on an incoming Directory
+ /// causes a LockObtainFailedException
+ /// </summary>
+ [Test]
+ public virtual void TestLocksBlock()
+ {
+ Directory src = NewDirectory();
+ RandomIndexWriter w1 = new RandomIndexWriter(Random(), src, Similarity, TimeZone);
+ w1.AddDocument(new Document());
+ w1.Commit();
+
+ Directory dest = NewDirectory();
+
+ IndexWriterConfig iwc = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
+ iwc.SetWriteLockTimeout(1);
+ RandomIndexWriter w2 = new RandomIndexWriter(Random(), dest, iwc);
+
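+ // w1 still holds the write lock on src, so AddIndexes cannot acquire it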
+ try
+ {
+ w2.AddIndexes(src);
+ Assert.Fail("did not hit expected exception");
+ }
+#pragma warning disable 168
+ catch (LockObtainFailedException lofe)
+#pragma warning restore 168
+ {
+ // expected
+ }
+
+ IOUtils.Close(w1, w2, src, dest);
+ }
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96822396/src/Lucene.Net.Tests/Index/TestAllFilesHaveChecksumFooter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Index/TestAllFilesHaveChecksumFooter.cs b/src/Lucene.Net.Tests/Index/TestAllFilesHaveChecksumFooter.cs
new file mode 100644
index 0000000..8d71391
--- /dev/null
+++ b/src/Lucene.Net.Tests/Index/TestAllFilesHaveChecksumFooter.cs
@@ -0,0 +1,114 @@
+using Lucene.Net.Documents;
+using NUnit.Framework;
+using System;
+
+namespace Lucene.Net.Index
+{
+ using CodecUtil = Lucene.Net.Codecs.CodecUtil;
+ using CompoundFileDirectory = Lucene.Net.Store.CompoundFileDirectory;
+ using Directory = Lucene.Net.Store.Directory;
+ using Document = Documents.Document;
+ using Field = Field;
+ using IndexInput = Lucene.Net.Store.IndexInput;
+ using IOUtils = Lucene.Net.Util.IOUtils;
+ using Lucene46Codec = Lucene.Net.Codecs.Lucene46.Lucene46Codec;
+ using LuceneTestCase = Lucene.Net.Util.LuceneTestCase;
+
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ using MockAnalyzer = Lucene.Net.Analysis.MockAnalyzer;
+ using NumericDocValuesField = NumericDocValuesField;
+ using TestUtil = Lucene.Net.Util.TestUtil;
+
+ /// <summary>
+ /// Test that a plain default codec writes CRC32 footers in all files.
+ /// </summary>
+ [TestFixture]
+ public class TestAllFilesHaveChecksumFooter : LuceneTestCase
+ {
+ [Test]
+ public virtual void Test()
+ {
+ Directory dir = NewDirectory();
+ IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
+ conf.SetCodec(new Lucene46Codec());
+ RandomIndexWriter riw = new RandomIndexWriter(Random(), dir, conf);
+ Document doc = new Document();
+ // these fields should sometimes get term vectors, etc.
+ Field idField = NewStringField("id", "", Field.Store.NO);
+ Field bodyField = NewTextField("body", "", Field.Store.NO);
+ Field dvField = new NumericDocValuesField("dv", 5);
+ doc.Add(idField);
+ doc.Add(bodyField);
+ doc.Add(dvField);
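+ // randomly commit and delete while indexing, so several segment
+ // generations and file types get written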
+ for (int i = 0; i < 100; i++)
+ {
+ idField.SetStringValue(Convert.ToString(i));
+ bodyField.SetStringValue(TestUtil.RandomUnicodeString(Random()));
+ riw.AddDocument(doc);
+ if (Random().Next(7) == 0)
+ {
+ riw.Commit();
+ }
+ if (Random().Next(20) == 0)
+ {
+ riw.DeleteDocuments(new Term("id", Convert.ToString(i)));
+ }
+ }
+ riw.Dispose();
+ CheckFooters(dir);
+ dir.Dispose();
+ }
+
+ private void CheckFooters(Directory dir)
+ {
+ foreach (string file in dir.ListAll())
+ {
+ if (file.Equals(IndexWriter.WRITE_LOCK_NAME))
+ {
+ continue; // write.lock has no footer, that's ok
+ }
+ if (file.EndsWith(IndexFileNames.COMPOUND_FILE_EXTENSION))
+ {
+ CompoundFileDirectory cfsDir = new CompoundFileDirectory(dir, file, NewIOContext(Random()), false);
+ CheckFooters(cfsDir); // recurse into cfs
+ cfsDir.Dispose();
+ }
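+ // no continue here: the .cfs container file itself must also end in a valid checksum footer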
+ IndexInput @in = null;
+ bool success = false;
+ try
+ {
+ @in = dir.OpenInput(file, NewIOContext(Random()));
+ CodecUtil.ChecksumEntireFile(@in);
+ success = true;
+ }
+ finally
+ {
+ if (success)
+ {
+ IOUtils.Close(@in);
+ }
+ else
+ {
+ IOUtils.CloseWhileHandlingException(@in);
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file