Posted to commits@lucenenet.apache.org by ar...@apache.org on 2007/08/11 18:56:44 UTC

svn commit: r564939 [4/8] - in /incubator/lucene.net/trunk/C#/src: ./ Demo/ Demo/DeleteFiles/ Demo/DemoLib/ Demo/IndexFiles/ Demo/IndexHtml/ Demo/SearchFiles/ Lucene.Net/ Lucene.Net/Analysis/Standard/ Lucene.Net/Document/ Lucene.Net/Index/ Lucene.Net/Q...

Added: incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriterMergePolicy.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestIndexWriterMergePolicy.cs?view=auto&rev=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriterMergePolicy.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriterMergePolicy.cs Sat Aug 11 09:56:37 2007
@@ -0,0 +1,287 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NUnit.Framework;
+
+using WhitespaceAnalyzer = Lucene.Net.Analysis.WhitespaceAnalyzer;
+using Document = Lucene.Net.Documents.Document;
+using Field = Lucene.Net.Documents.Field;
+using Directory = Lucene.Net.Store.Directory;
+using RAMDirectory = Lucene.Net.Store.RAMDirectory;
+
+namespace Lucene.Net.Index
+{
+	
+    [TestFixture]
+    public class TestIndexWriterMergePolicy
+	{
+		
+		// Test the normal case
+        [Test]
+		public virtual void  TestNormalCase()
+		{
+			Directory dir = new RAMDirectory();
+			
+			IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+			writer.SetMaxBufferedDocs(10);
+			writer.SetMergeFactor(10);
+			
+			for (int i = 0; i < 100; i++)
+			{
+				AddDoc(writer);
+				CheckInvariants(writer);
+			}
+			
+			writer.Close();
+		}
+		
+		// Test that there is no over-merging
+        [Test]
+		public virtual void  TestNoOverMerge()
+		{
+			Directory dir = new RAMDirectory();
+			
+			IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+			writer.SetMaxBufferedDocs(10);
+			writer.SetMergeFactor(10);
+			
+			bool noOverMerge = false;
+			for (int i = 0; i < 100; i++)
+			{
+				AddDoc(writer);
+				CheckInvariants(writer);
+				if (writer.GetRamSegmentCount() + writer.GetSegmentCount() >= 18)
+				{
+					noOverMerge = true;
+				}
+			}
+			Assert.IsTrue(noOverMerge);
+			
+			writer.Close();
+		}
+		
+		// Test the case where flush is forced after every AddDoc
+        [Test]
+		public virtual void  TestForceFlush()
+		{
+			Directory dir = new RAMDirectory();
+			
+			IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+			writer.SetMaxBufferedDocs(10);
+			writer.SetMergeFactor(10);
+			
+			for (int i = 0; i < 100; i++)
+			{
+				AddDoc(writer);
+				writer.Close();
+				
+				writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
+				writer.SetMaxBufferedDocs(10);
+				writer.SetMergeFactor(10);
+				CheckInvariants(writer);
+			}
+			
+			writer.Close();
+		}
+		
+		// Test the case where mergeFactor changes
+        [Test]
+		public virtual void  TestMergeFactorChange()
+		{
+			Directory dir = new RAMDirectory();
+			
+			IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+			writer.SetMaxBufferedDocs(10);
+			writer.SetMergeFactor(100);
+			
+			for (int i = 0; i < 250; i++)
+			{
+				AddDoc(writer);
+				CheckInvariants(writer);
+			}
+			
+			writer.SetMergeFactor(5);
+			
+			// merge policy only fixes segments on levels where merges
+			// have been triggered, so check invariants after all adds
+			for (int i = 0; i < 10; i++)
+			{
+				AddDoc(writer);
+			}
+			CheckInvariants(writer);
+			
+			writer.Close();
+		}
+		
+		// Test the case where both mergeFactor and maxBufferedDocs change
+        [Test]
+		public virtual void  TestMaxBufferedDocsChange()
+		{
+			Directory dir = new RAMDirectory();
+			
+			IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+			writer.SetMaxBufferedDocs(101);
+			writer.SetMergeFactor(101);
+			
+			// leftmost* segment has 1 doc
+			// rightmost* segment has 100 docs
+			for (int i = 1; i <= 100; i++)
+			{
+				for (int j = 0; j < i; j++)
+				{
+					AddDoc(writer);
+					CheckInvariants(writer);
+				}
+				writer.Close();
+				
+				writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
+				writer.SetMaxBufferedDocs(101);
+				writer.SetMergeFactor(101);
+			}
+			
+			writer.SetMaxBufferedDocs(10);
+			writer.SetMergeFactor(10);
+			
+			// merge policy only fixes segments on levels where merges
+			// have been triggered, so check invariants after all adds
+			for (int i = 0; i < 100; i++)
+			{
+				AddDoc(writer);
+			}
+			CheckInvariants(writer);
+			
+			for (int i = 100; i < 1000; i++)
+			{
+				AddDoc(writer);
+			}
+			CheckInvariants(writer);
+			
+			writer.Close();
+		}
+		
+		// Test the case where a merge results in no doc at all
+        [Test]
+		public virtual void  TestMergeDocCount0()
+		{
+			Directory dir = new RAMDirectory();
+			
+			IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+			writer.SetMaxBufferedDocs(10);
+			writer.SetMergeFactor(100);
+			
+			for (int i = 0; i < 250; i++)
+			{
+				AddDoc(writer);
+				CheckInvariants(writer);
+			}
+			writer.Close();
+			
+			IndexReader reader = IndexReader.Open(dir);
+			reader.DeleteDocuments(new Term("content", "aaa"));
+			reader.Close();
+			
+			writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
+			writer.SetMaxBufferedDocs(10);
+			writer.SetMergeFactor(5);
+			
+			// merge factor is changed, so check invariants after all adds
+			for (int i = 0; i < 10; i++)
+			{
+				AddDoc(writer);
+			}
+			CheckInvariants(writer);
+			Assert.AreEqual(10, writer.DocCount());
+			
+			writer.Close();
+		}
+		
+		private void  AddDoc(IndexWriter writer)
+		{
+			Lucene.Net.Documents.Document doc = new Lucene.Net.Documents.Document();
+			doc.Add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
+			writer.AddDocument(doc);
+		}
+		
+		private void  CheckInvariants(IndexWriter writer)
+		{
+			int maxBufferedDocs = writer.GetMaxBufferedDocs();
+			int mergeFactor = writer.GetMergeFactor();
+			int maxMergeDocs = writer.GetMaxMergeDocs();
+			
+			int ramSegmentCount = writer.GetRamSegmentCount();
+			Assert.IsTrue(ramSegmentCount < maxBufferedDocs);
+			
+			int lowerBound = - 1;
+			int upperBound = maxBufferedDocs;
+			int numSegments = 0;
+			
+			int segmentCount = writer.GetSegmentCount();
+			for (int i = segmentCount - 1; i >= 0; i--)
+			{
+				int docCount = writer.GetDocCount(i);
+				Assert.IsTrue(docCount > lowerBound);
+				
+				if (docCount <= upperBound)
+				{
+					numSegments++;
+				}
+				else
+				{
+					if (upperBound * mergeFactor <= maxMergeDocs)
+					{
+						Assert.IsTrue(numSegments < mergeFactor);
+					}
+					
+					do 
+					{
+						lowerBound = upperBound;
+						upperBound *= mergeFactor;
+					}
+					while (docCount > upperBound);
+					numSegments = 1;
+				}
+			}
+			if (upperBound * mergeFactor <= maxMergeDocs)
+			{
+				Assert.IsTrue(numSegments < mergeFactor);
+			}
+			
+			System.String[] files = writer.GetDirectory().List();
+			int segmentCfsCount = 0;
+			for (int i = 0; i < files.Length; i++)
+			{
+				if (files[i].EndsWith(".cfs"))
+				{
+					segmentCfsCount++;
+				}
+			}
+			Assert.AreEqual(segmentCount, segmentCfsCount);
+		}
+		
+		private void  PrintSegmentDocCounts(IndexWriter writer)
+		{
+			int segmentCount = writer.GetSegmentCount();
+			System.Console.Out.WriteLine("" + segmentCount + " segments total");
+			for (int i = 0; i < segmentCount; i++)
+			{
+				System.Console.Out.WriteLine("  segment " + i + " has " + writer.GetDocCount(i) + " docs");
+			}
+		}
+	}
+}
\ No newline at end of file

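A note for readers tracing CheckInvariants() in the new test above: with maxBufferedDocs M and mergeFactor F, the merge policy groups segments into levels, where a segment holding d docs belongs to the smallest level n with d <= M * F^n, and each level is expected to hold fewer than F segments. A hypothetical helper (not part of this commit) that computes that level under the same assumptions:

    // Hypothetical sketch: compute the merge level of a segment from its
    // doc count. Level 0 holds segments with up to maxBufferedDocs docs;
    // every higher level multiplies the upper bound by mergeFactor.
    public class MergeLevels
    {
        public static int LevelOf(int docCount, int maxBufferedDocs, int mergeFactor)
        {
            int level = 0;
            long upperBound = maxBufferedDocs;
            while (docCount > upperBound)
            {
                upperBound *= mergeFactor;
                level++;
            }
            return level;
        }
    }
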
Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriterMerging.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestIndexWriterMerging.cs?view=diff&rev=564939&r1=564938&r2=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriterMerging.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestIndexWriterMerging.cs Sat Aug 11 09:56:37 2007
@@ -16,12 +16,14 @@
  */
 
 using System;
+
+using NUnit.Framework;
+
+using Directory = Lucene.Net.Store.Directory;
+using RAMDirectory = Lucene.Net.Store.RAMDirectory;
 using StandardAnalyzer = Lucene.Net.Analysis.Standard.StandardAnalyzer;
 using Document = Lucene.Net.Documents.Document;
 using Field = Lucene.Net.Documents.Field;
-using Directory = Lucene.Net.Store.Directory;
-using RAMDirectory = Lucene.Net.Store.RAMDirectory;
-using NUnit.Framework;
 
 namespace Lucene.Net.Index
 {
@@ -64,9 +66,9 @@
 			
             writer.AddIndexes(new Directory[]{indexA, indexB});
             writer.Close();
-            merged.Close();
 			
             fail = VerifyIndex(merged, 0);
+            merged.Close();
 			
             Assert.IsFalse(fail, "The merged index is invalid");
         }

Added: incubator/lucene.net/trunk/C#/src/Test/Index/TestLazyBug.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestLazyBug.cs?view=auto&rev=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestLazyBug.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestLazyBug.cs Sat Aug 11 09:56:37 2007
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NUnit.Framework;
+
+using Analyzer = Lucene.Net.Analysis.Analyzer;
+using SimpleAnalyzer = Lucene.Net.Analysis.SimpleAnalyzer;
+using Lucene.Net.Documents;
+using Directory = Lucene.Net.Store.Directory;
+using RAMDirectory = Lucene.Net.Store.RAMDirectory;
+
+namespace Lucene.Net.Index
+{
+	
+	
+	/// <summary> Test demonstrating EOF bug on the last field of the last doc 
+	/// if other docs have already been accessed.
+	/// </summary>
+	[TestFixture]
+    public class TestLazyBug
+	{
+		public class AnonymousClassFieldSelector : FieldSelector
+		{
+			public virtual FieldSelectorResult Accept(System.String f)
+			{
+				if (f.Equals(Lucene.Net.Index.TestLazyBug.MAGIC_FIELD))
+				{
+					return FieldSelectorResult.LOAD;
+				}
+				return FieldSelectorResult.LAZY_LOAD;
+			}
+		}
+		
+		public static int BASE_SEED = 13;
+		
+		public static int NUM_DOCS = 500;
+		public static int NUM_FIELDS = 100;
+		
+		private static System.String[] data = new System.String[]{"now", "is the time", "for all good men", "to come to the aid", "of their country!", "this string contains big chars:{\u0111 \u0222 \u0333 \u1111 \u2222 \u3333}", "this string is a bigger string, mary had a little lamb, little lamb, little lamb!"};
+		
+		private static System.Collections.Hashtable dataset = new System.Collections.Hashtable();
+		
+		private static System.String MAGIC_FIELD = "f" + (NUM_FIELDS / 3);
+		
+		private static FieldSelector SELECTOR;
+		
+		private static Directory MakeIndex()
+		{
+			Directory dir = new RAMDirectory();
+			try
+			{
+				System.Random r = new System.Random((System.Int32) (BASE_SEED + 42));
+				Analyzer analyzer = new SimpleAnalyzer();
+				IndexWriter writer = new IndexWriter(dir, analyzer, true);
+				
+				writer.SetUseCompoundFile(false);
+				
+				for (int d = 1; d <= NUM_DOCS; d++)
+				{
+					Lucene.Net.Documents.Document doc = new Lucene.Net.Documents.Document();
+					for (int f = 1; f <= NUM_FIELDS; f++)
+					{
+						doc.Add(new Field("f" + f, data[f % data.Length] + '#' + data[r.Next(data.Length)], Field.Store.YES, Field.Index.TOKENIZED));
+					}
+					writer.AddDocument(doc);
+				}
+				writer.Close();
+			}
+			catch (System.Exception e)
+			{
+				throw new System.SystemException("", e);
+			}
+			return dir;
+		}
+		
+		public static void  DoTest(int[] docs)
+		{
+            if (dataset.Count == 0)
+            {
+                for (int i = 0; i < data.Length; i++)
+                {
+                    dataset.Add(data[i], data[i]);
+                }
+            }
+
+			Directory dir = MakeIndex();
+			IndexReader reader = IndexReader.Open(dir);
+			for (int i = 0; i < docs.Length; i++)
+			{
+				Lucene.Net.Documents.Document d = reader.Document(docs[i], SELECTOR);
+				System.String trash = d.Get(MAGIC_FIELD);
+				
+				System.Collections.IList fields = d.GetFields();
+				for (System.Collections.IEnumerator fi = fields.GetEnumerator(); fi.MoveNext(); )
+				{
+					Fieldable f = null;
+					try
+					{
+						f = (Fieldable) fi.Current;
+						System.String fname = f.Name();
+						System.String fval = f.StringValue();
+						Assert.IsNotNull(fval, docs[i] + " FIELD: " + fname);
+						System.String[] vals = fval.Split('#');
+						if (!dataset.Contains(vals[0]) || !dataset.Contains(vals[1]))
+						{
+							Assert.Fail("FIELD:" + fname + ",VAL:" + fval);
+						}
+					}
+					catch (System.Exception e)
+					{
+						throw new Exception(docs[i] + " WTF: " + f.Name(), e);
+					}
+				}
+			}
+			reader.Close();
+		}
+		
+        [Test]
+		public virtual void  TestLazyWorks()
+		{
+			DoTest(new int[]{399});
+		}
+		
+        [Test]
+		public virtual void  TestLazyAlsoWorks()
+		{
+			DoTest(new int[]{399, 150});
+		}
+		
+        [Test]
+		public virtual void  TestLazyBroken()
+		{
+			DoTest(new int[]{150, 399});
+		}
+
+		static TestLazyBug()
+		{
+			SELECTOR = new AnonymousClassFieldSelector();
+		}
+	}
+}
\ No newline at end of file

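The FieldSelector in TestLazyBug above is the crux of the test: only MAGIC_FIELD is loaded eagerly, every other field is materialized on first access, and the EOF shows up when the lazily loaded last field of the last doc is finally read. Application code using the same pattern would look roughly like this sketch (SingleEagerFieldSelector is a hypothetical name; it assumes the same FieldSelector/FieldSelectorResult API and using directives as the file above):

    // Sketch: eagerly load one named field, lazy-load everything else.
    public class SingleEagerFieldSelector : FieldSelector
    {
        private System.String eagerField;

        public SingleEagerFieldSelector(System.String eagerField)
        {
            this.eagerField = eagerField;
        }

        public virtual FieldSelectorResult Accept(System.String fieldName)
        {
            if (fieldName.Equals(eagerField))
            {
                return FieldSelectorResult.LOAD;
            }
            return FieldSelectorResult.LAZY_LOAD;
        }
    }
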
Added: incubator/lucene.net/trunk/C#/src/Test/Index/TestLazyProxSkipping.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestLazyProxSkipping.cs?view=auto&rev=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestLazyProxSkipping.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestLazyProxSkipping.cs Sat Aug 11 09:56:37 2007
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NUnit.Framework;
+
+using WhitespaceAnalyzer = Lucene.Net.Analysis.WhitespaceAnalyzer;
+using Document = Lucene.Net.Documents.Document;
+using Field = Lucene.Net.Documents.Field;
+using Hits = Lucene.Net.Search.Hits;
+using IndexSearcher = Lucene.Net.Search.IndexSearcher;
+using PhraseQuery = Lucene.Net.Search.PhraseQuery;
+using Searcher = Lucene.Net.Search.Searcher;
+using Directory = Lucene.Net.Store.Directory;
+using IndexInput = Lucene.Net.Store.IndexInput;
+using RAMDirectory = Lucene.Net.Store.RAMDirectory;
+
+namespace Lucene.Net.Index
+{
+	
+	/// <summary> Tests lazy skipping on the proximity file.
+	/// 
+	/// </summary>
+	[TestFixture]
+    public class TestLazyProxSkipping
+	{
+		private Searcher searcher;
+		private int seeksCounter = 0;
+		
+		private System.String field = "tokens";
+		private System.String term1 = "xx";
+		private System.String term2 = "yy";
+		private System.String term3 = "zz";
+		
+		private void  CreateIndex(int numHits)
+		{
+			int numDocs = 500;
+			
+			Directory directory = new RAMDirectory();
+			IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
+			
+			for (int i = 0; i < numDocs; i++)
+			{
+				Lucene.Net.Documents.Document doc = new Lucene.Net.Documents.Document();
+				System.String content;
+				if (i % (numDocs / numHits) == 0)
+				{
+					// add a document that matches the query "term1 term2"
+					content = this.term1 + " " + this.term2;
+				}
+				else if (i % 15 == 0)
+				{
+					// add a document that only contains term1
+					content = this.term1 + " " + this.term1;
+				}
+				else
+				{
+					// add a document that contains term2 but not term 1
+					content = this.term3 + " " + this.term2;
+				}
+				
+				doc.Add(new Field(this.field, content, Field.Store.YES, Field.Index.TOKENIZED));
+				writer.AddDocument(doc);
+			}
+			
+			// make sure the index has only a single segment
+			writer.Optimize();
+			writer.Close();
+			
+			// the index is a single segment, thus IndexReader.open() returns an instance of SegmentReader
+			SegmentReader reader = (SegmentReader) IndexReader.Open(directory);
+			
+			// we decorate the proxStream with a wrapper class that lets us count the number of calls to seek()
+			reader.ProxStream = new SeeksCountingStream(this, reader.ProxStream);
+			
+			this.searcher = new IndexSearcher(reader);
+		}
+		
+		private Hits Search()
+		{
+			// create PhraseQuery "term1 term2" and search
+			PhraseQuery pq = new PhraseQuery();
+			pq.Add(new Term(this.field, this.term1));
+			pq.Add(new Term(this.field, this.term2));
+			return this.searcher.Search(pq);
+		}
+		
+		private void  PerformTest(int numHits)
+		{
+			CreateIndex(numHits);
+			this.seeksCounter = 0;
+			Hits hits = Search();
+			// verify that the right number of docs was found
+			Assert.AreEqual(numHits, hits.Length());
+			
+			// check that the number of calls to seek() matches the number of hits
+			Assert.AreEqual(numHits, this.seeksCounter);
+		}
+		
+        [Test]
+		public virtual void  TestLazySkipping()
+		{
+			// test whether only the minimum number of seek() calls is performed
+			PerformTest(5);
+			PerformTest(10);
+		}
+		
+		
+		// Extends IndexInput so that we can count the number
+		// of invocations of seek()
+		internal class SeeksCountingStream : IndexInput, System.ICloneable
+		{
+			private void  InitBlock(TestLazyProxSkipping enclosingInstance)
+			{
+				this.enclosingInstance = enclosingInstance;
+			}
+			private TestLazyProxSkipping enclosingInstance;
+			public TestLazyProxSkipping Enclosing_Instance
+			{
+				get
+				{
+					return enclosingInstance;
+				}
+				
+			}
+			private IndexInput input;
+			
+			
+			internal SeeksCountingStream(TestLazyProxSkipping enclosingInstance, IndexInput input)
+			{
+				InitBlock(enclosingInstance);
+				this.input = input;
+			}
+			
+			public override byte ReadByte()
+			{
+				return this.input.ReadByte();
+			}
+			
+			public override void  ReadBytes(byte[] b, int offset, int len)
+			{
+				this.input.ReadBytes(b, offset, len);
+			}
+			
+			public override void  Close()
+			{
+				this.input.Close();
+			}
+			
+			public override long GetFilePointer()
+			{
+				return this.input.GetFilePointer();
+			}
+			
+			public override void  Seek(long pos)
+			{
+				Enclosing_Instance.seeksCounter++;
+				this.input.Seek(pos);
+			}
+			
+			public override long Length()
+			{
+				return this.input.Length();
+			}
+			
+			public override System.Object Clone()
+			{
+				return new SeeksCountingStream(enclosingInstance, (IndexInput) this.input.Clone());
+			}
+		}
+	}
+}
\ No newline at end of file

Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestMultiReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestMultiReader.cs?view=diff&rev=564939&r1=564938&r2=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestMultiReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestMultiReader.cs Sat Aug 11 09:56:37 2007
@@ -16,8 +16,12 @@
  */
 
 using System;
+
 using NUnit.Framework;
+
+using StandardAnalyzer = Lucene.Net.Analysis.Standard.StandardAnalyzer;
 using Document = Lucene.Net.Documents.Document;
+using Field = Lucene.Net.Documents.Field;
 using Directory = Lucene.Net.Store.Directory;
 using RAMDirectory = Lucene.Net.Store.RAMDirectory;
 
@@ -35,7 +39,11 @@
 		private SegmentReader[] readers = new SegmentReader[2];
 		private SegmentInfos sis = new SegmentInfos();
 		
-		// This is needed if for the test to pass and mimic what happens wiht JUnit
+        // public TestMultiReader(System.String s)
+        // {
+        // }
+		
+        // This is needed for the test to pass and to mimic what happens with JUnit
         // For some reason, JUnit is creating a new member variable for each sub-test
         // but NUnit is not -- who is wrong/right, I don't know.
         private void SetUpInternal()        // {{Aroush-1.9}} See note above
@@ -100,7 +108,22 @@
 			Assert.AreEqual(1, reader.NumDocs());
 			reader.UndeleteAll();
 			Assert.AreEqual(2, reader.NumDocs());
-		}
+			
+            // Ensure undeleteAll survives commit/close/reopen:
+            reader.Commit();
+            reader.Close();
+            sis.Read(dir);
+            reader = new MultiReader(dir, sis, false, readers);
+            Assert.AreEqual(2, reader.NumDocs());
+			
+            reader.DeleteDocument(0);
+            Assert.AreEqual(1, reader.NumDocs());
+            reader.Commit();
+            reader.Close();
+            sis.Read(dir);
+            reader = new MultiReader(dir, sis, false, readers);
+            Assert.AreEqual(1, reader.NumDocs());
+        }
 		
 		[Test]
 		public virtual void  TestTermVectors()
@@ -108,5 +131,36 @@
 			MultiReader reader = new MultiReader(dir, sis, false, readers);
 			Assert.IsTrue(reader != null);
 		}
-	}
+		
+        /* known to fail, see https://issues.apache.org/jira/browse/LUCENE-781
+        public void testIsCurrent() throws IOException {
+        RAMDirectory ramDir1=new RAMDirectory();
+        addDoc(ramDir1, "test foo", true);
+        RAMDirectory ramDir2=new RAMDirectory();
+        addDoc(ramDir2, "test blah", true);
+        IndexReader[] readers = new IndexReader[]{IndexReader.open(ramDir1), IndexReader.open(ramDir2)};
+        MultiReader mr = new MultiReader(readers);
+        assertTrue(mr.isCurrent());   // just opened, must be current
+        addDoc(ramDir1, "more text", false);
+        assertFalse(mr.isCurrent());   // has been modified, not current anymore
+        addDoc(ramDir2, "even more text", false);
+        assertFalse(mr.isCurrent());   // has been modified even more, not current anymore
+        try {
+        mr.getVersion();
+        fail();
+        } catch (UnsupportedOperationException e) {
+        // expected exception
+        }
+        mr.close();
+        }
+		
+        private void addDoc(RAMDirectory ramDir1, String s, boolean create) throws IOException {
+        IndexWriter iw = new IndexWriter(ramDir1, new StandardAnalyzer(), create);
+        Document doc = new Document();
+        doc.add(new Field("body", s, Field.Store.YES, Field.Index.TOKENIZED));
+        iw.addDocument(doc);
+        iw.close();
+        }
+        */
+    }
 }

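The block commented out at the end of TestMultiReader above is still the Java original from LUCENE-781. A direct C# rendering, for whenever that issue is resolved, might look like the following sketch; it is untested by definition, since the behavior it checks is known to fail, it assumes the port maps UnsupportedOperationException to System.NotSupportedException, and AddDoc simply mirrors the commented-out Java helper:

    // Sketch: C# translation of the commented-out Java testIsCurrent().
    [Test]
    public virtual void TestIsCurrent()
    {
        RAMDirectory ramDir1 = new RAMDirectory();
        AddDoc(ramDir1, "test foo", true);
        RAMDirectory ramDir2 = new RAMDirectory();
        AddDoc(ramDir2, "test blah", true);
        IndexReader[] subReaders = new IndexReader[] { IndexReader.Open(ramDir1), IndexReader.Open(ramDir2) };
        MultiReader mr = new MultiReader(subReaders);
        Assert.IsTrue(mr.IsCurrent());    // just opened, must be current
        AddDoc(ramDir1, "more text", false);
        Assert.IsFalse(mr.IsCurrent());   // has been modified, not current anymore
        AddDoc(ramDir2, "even more text", false);
        Assert.IsFalse(mr.IsCurrent());   // has been modified even more, not current anymore
        try
        {
            mr.GetVersion();
            Assert.Fail();
        }
        catch (System.NotSupportedException)
        {
            // expected exception
        }
        mr.Close();
    }

    private void AddDoc(RAMDirectory ramDir, System.String s, bool create)
    {
        IndexWriter iw = new IndexWriter(ramDir, new StandardAnalyzer(), create);
        Document doc = new Document();
        doc.Add(new Field("body", s, Field.Store.YES, Field.Index.TOKENIZED));
        iw.AddDocument(doc);
        iw.Close();
    }
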
Added: incubator/lucene.net/trunk/C#/src/Test/Index/TestNorms.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestNorms.cs?view=auto&rev=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestNorms.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestNorms.cs Sat Aug 11 09:56:37 2007
@@ -0,0 +1,280 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NUnit.Framework;
+
+using Analyzer = Lucene.Net.Analysis.Analyzer;
+using StandardAnalyzer = Lucene.Net.Analysis.Standard.StandardAnalyzer;
+using Document = Lucene.Net.Documents.Document;
+using Field = Lucene.Net.Documents.Field;
+using Index = Lucene.Net.Documents.Field.Index;
+using Store = Lucene.Net.Documents.Field.Store;
+using DefaultSimilarity = Lucene.Net.Search.DefaultSimilarity;
+using Similarity = Lucene.Net.Search.Similarity;
+using Directory = Lucene.Net.Store.Directory;
+using FSDirectory = Lucene.Net.Store.FSDirectory;
+
+namespace Lucene.Net.Index
+{
+	
+	/// <summary> Test that norms info is preserved during index life, including separate norms, addDocument, addIndexes, optimize.</summary>
+    [TestFixture]
+    public class TestNorms
+	{
+		
+		[Serializable]
+		private class SimilarityOne : DefaultSimilarity
+		{
+			public SimilarityOne(TestNorms enclosingInstance)
+			{
+				InitBlock(enclosingInstance);
+			}
+			private void  InitBlock(TestNorms enclosingInstance)
+			{
+				this.enclosingInstance = enclosingInstance;
+			}
+			private TestNorms enclosingInstance;
+			public TestNorms Enclosing_Instance
+			{
+				get
+				{
+					return enclosingInstance;
+				}
+				
+			}
+			public override float LengthNorm(System.String fieldName, int numTerms)
+			{
+				return 1;
+			}
+		}
+		
+		private const int NUM_FIELDS = 10;
+		
+		private Similarity similarityOne;
+		private Analyzer anlzr;
+		private int numDocNorms;
+		private System.Collections.ArrayList norms;
+		private System.Collections.ArrayList modifiedNorms;
+		private float lastNorm = 0;
+		private float normDelta = (float) 0.001;
+		
+		
+        [SetUp]
+		protected internal virtual void  SetUp()
+		{
+			similarityOne = new SimilarityOne(this);
+			anlzr = new StandardAnalyzer();
+		}
+		
+        [TearDown]
+		protected internal virtual void  TearDown()
+		{
+		}
+		
+		/// <summary> Test that norms values are preserved as the index is maintained.
+		/// Including separate norms.
+		/// Including merging indexes with separate norms.
+		/// Including optimize. 
+		/// </summary>
+		[Test]
+        public virtual void  _TestNorms()
+		{
+			// tmp dir
+			System.String tempDir = System.IO.Path.GetTempPath();
+			if (tempDir == null)
+			{
+				throw new System.IO.IOException("java.io.tmpdir undefined, cannot run test");
+			}
+			
+			// test with a single index: index1
+			System.IO.FileInfo indexDir1 = new System.IO.FileInfo(System.IO.Path.Combine(tempDir, "lucenetestindex1"));
+			Directory dir1 = FSDirectory.GetDirectory(indexDir1);
+			
+			norms = new System.Collections.ArrayList();
+			modifiedNorms = new System.Collections.ArrayList();
+			
+			CreateIndex(dir1);
+			DoTestNorms(dir1);
+			
+			// test with a single index: index2
+			System.Collections.ArrayList norms1 = norms;
+			System.Collections.ArrayList modifiedNorms1 = modifiedNorms;
+			int numDocNorms1 = numDocNorms;
+			
+			norms = new System.Collections.ArrayList();
+			modifiedNorms = new System.Collections.ArrayList();
+			numDocNorms = 0;
+			
+			System.IO.FileInfo indexDir2 = new System.IO.FileInfo(System.IO.Path.Combine(tempDir, "lucenetestindex2"));
+			Directory dir2 = FSDirectory.GetDirectory(indexDir2);
+			
+			CreateIndex(dir2);
+			DoTestNorms(dir2);
+			
+			// add index1 and index2 to a third index: index3
+			System.IO.FileInfo indexDir3 = new System.IO.FileInfo(System.IO.Path.Combine(tempDir, "lucenetestindex3"));
+			Directory dir3 = FSDirectory.GetDirectory(indexDir3);
+			
+			CreateIndex(dir3);
+			IndexWriter iw = new IndexWriter(dir3, anlzr, false);
+			iw.SetMaxBufferedDocs(5);
+			iw.SetMergeFactor(3);
+			iw.AddIndexes(new Directory[]{dir1, dir2});
+			iw.Close();
+			
+			norms1.AddRange(norms);
+			norms = norms1;
+			modifiedNorms1.AddRange(modifiedNorms);
+			modifiedNorms = modifiedNorms1;
+			numDocNorms += numDocNorms1;
+			
+			// test with index3
+			VerifyIndex(dir3);
+			DoTestNorms(dir3);
+			
+			// now with optimize
+			iw = new IndexWriter(dir3, anlzr, false);
+			iw.SetMaxBufferedDocs(5);
+			iw.SetMergeFactor(3);
+			iw.Optimize();
+			iw.Close();
+			VerifyIndex(dir3);
+			
+			dir1.Close();
+			dir2.Close();
+			dir3.Close();
+		}
+		
+		private void  DoTestNorms(Directory dir)
+		{
+			for (int i = 0; i < 5; i++)
+			{
+				AddDocs(dir, 12, true);
+				VerifyIndex(dir);
+				ModifyNormsForF1(dir);
+				VerifyIndex(dir);
+				AddDocs(dir, 12, false);
+				VerifyIndex(dir);
+				ModifyNormsForF1(dir);
+				VerifyIndex(dir);
+			}
+		}
+		
+		private void  CreateIndex(Directory dir)
+		{
+			IndexWriter iw = new IndexWriter(dir, anlzr, true);
+			iw.SetMaxBufferedDocs(5);
+			iw.SetMergeFactor(3);
+			iw.SetSimilarity(similarityOne);
+			iw.SetUseCompoundFile(true);
+			iw.Close();
+		}
+		
+		private void  ModifyNormsForF1(Directory dir)
+		{
+			IndexReader ir = IndexReader.Open(dir);
+			int n = ir.MaxDoc();
+			for (int i = 0; i < n; i += 3)
+			{
+				// modify for every third doc
+				int k = (i * 3) % modifiedNorms.Count;
+				float origNorm = (float) ((System.Single) modifiedNorms[i]);
+				float newNorm = (float) ((System.Single) modifiedNorms[k]);
+				//System.out.println("Modifying: for "+i+" from "+origNorm+" to "+newNorm);
+				//System.out.println("      and: for "+k+" from "+newNorm+" to "+origNorm);
+				modifiedNorms[i] = (float) newNorm;
+				modifiedNorms[k] = (float) origNorm;
+				ir.SetNorm(i, "f" + 1, newNorm);
+				ir.SetNorm(k, "f" + 1, origNorm);
+			}
+			ir.Close();
+		}
+		
+		
+		private void  VerifyIndex(Directory dir)
+		{
+			IndexReader ir = IndexReader.Open(dir);
+			for (int i = 0; i < NUM_FIELDS; i++)
+			{
+				System.String field = "f" + i;
+				byte[] b = ir.Norms(field);
+				Assert.AreEqual(numDocNorms, b.Length, "number of norms mismatches");
+				System.Collections.ArrayList storedNorms = (i == 1?modifiedNorms:norms);
+				for (int j = 0; j < b.Length; j++)
+				{
+					float norm = Similarity.DecodeNorm(b[j]);
+					float norm1 = (float) ((System.Single) storedNorms[j]);
+					Assert.AreEqual(norm, norm1, 0.000001, "stored norm value of " + field + " for doc " + j + " is " + norm + " - a mismatch!");
+				}
+			}
+		}
+		
+		private void  AddDocs(Directory dir, int ndocs, bool compound)
+		{
+			IndexWriter iw = new IndexWriter(dir, anlzr, false);
+			iw.SetMaxBufferedDocs(5);
+			iw.SetMergeFactor(3);
+			iw.SetSimilarity(similarityOne);
+			iw.SetUseCompoundFile(compound);
+			for (int i = 0; i < ndocs; i++)
+			{
+				iw.AddDocument(NewDoc());
+			}
+			iw.Close();
+		}
+		
+		// create the next document
+		private Lucene.Net.Documents.Document NewDoc()
+		{
+			Lucene.Net.Documents.Document d = new Lucene.Net.Documents.Document();
+			float boost = NextNorm();
+			for (int i = 0; i < 10; i++)
+			{
+				Field f = new Field("f" + i, "v" + i, Lucene.Net.Documents.Field.Store.NO, Lucene.Net.Documents.Field.Index.UN_TOKENIZED);
+				f.SetBoost(boost);
+				d.Add(f);
+			}
+			return d;
+		}
+		
+		// return unique norm values that are unchanged by encoding/decoding
+		private float NextNorm()
+		{
+			float norm = lastNorm + normDelta;
+			do 
+			{
+				float norm1 = Similarity.DecodeNorm(Similarity.EncodeNorm(norm));
+				if (norm1 > lastNorm)
+				{
+					//System.out.println(norm1+" > "+lastNorm);
+					norm = norm1;
+					break;
+				}
+				norm += normDelta;
+			}
+			while (true);
+			norms.Insert(numDocNorms, (float) norm);
+			modifiedNorms.Insert(numDocNorms, (float) norm);
+			//System.out.println("creating norm("+numDocNorms+"): "+norm);
+			numDocNorms++;
+			lastNorm = (norm > 10 ? 0 : norm); // there's a limit to how many distinct values can be stored in a single byte
+			return norm;
+		}
+	}
+}
\ No newline at end of file

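NextNorm() above walks in encode/decode space because norms are persisted as a single byte: Similarity.EncodeNorm() quantizes the float, so incrementing by normDelta alone could produce two values that collapse to the same stored byte. A tiny self-contained illustration of that round trip (NormRoundTrip is a hypothetical program, using the same EncodeNorm/DecodeNorm API as the test):

    // Sketch: a norm value only survives storage up to one-byte precision,
    // which is why the test generates values that differ after the round trip.
    public class NormRoundTrip
    {
        public static void Main(System.String[] args)
        {
            float norm = 0.5f;
            byte encoded = Lucene.Net.Search.Similarity.EncodeNorm(norm);
            float decoded = Lucene.Net.Search.Similarity.DecodeNorm(encoded);
            System.Console.Out.WriteLine(norm + " -> " + encoded + " -> " + decoded);
        }
    }
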
Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestParallelReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestParallelReader.cs?view=diff&rev=564939&r1=564938&r2=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestParallelReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestParallelReader.cs Sat Aug 11 09:56:37 2007
@@ -16,12 +16,19 @@
  */
 
 using System;
+
 using NUnit.Framework;
+
 using StandardAnalyzer = Lucene.Net.Analysis.Standard.StandardAnalyzer;
 using Document = Lucene.Net.Documents.Document;
 using Field = Lucene.Net.Documents.Field;
-using Lucene.Net.Search;
-using Searchable = Lucene.Net.Search.Searchable;
+using MapFieldSelector = Lucene.Net.Documents.MapFieldSelector;
+using BooleanQuery = Lucene.Net.Search.BooleanQuery;
+using Hits = Lucene.Net.Search.Hits;
+using IndexSearcher = Lucene.Net.Search.IndexSearcher;
+using Query = Lucene.Net.Search.Query;
+using Searcher = Lucene.Net.Search.Searcher;
+using TermQuery = Lucene.Net.Search.TermQuery;
 using Occur = Lucene.Net.Search.BooleanClause.Occur;
 using Directory = Lucene.Net.Store.Directory;
 using RAMDirectory = Lucene.Net.Store.RAMDirectory;
@@ -75,20 +82,31 @@
 			Assert.IsTrue(CollectionContains(fieldNames, "f3"));
 			Assert.IsTrue(CollectionContains(fieldNames, "f4"));
 		}
-
-        public static bool CollectionContains(System.Collections.ICollection col, System.String val)
+		
+        [Test]
+        public virtual void  TestDocument()
         {
-            for (System.Collections.IEnumerator iterator = col.GetEnumerator(); iterator.MoveNext(); )
-            {
-                System.Collections.DictionaryEntry fi = (System.Collections.DictionaryEntry) iterator.Current;
-                System.String s = fi.Key.ToString();
-                if (s == val)
-                    return true;
-            }
-            return false;
+            Directory dir1 = GetDir1();
+            Directory dir2 = GetDir2();
+            ParallelReader pr = new ParallelReader();
+            pr.Add(IndexReader.Open(dir1));
+            pr.Add(IndexReader.Open(dir2));
+			
+            Lucene.Net.Documents.Document doc11 = pr.Document(0, new MapFieldSelector(new System.String[]{"f1"}));
+            Lucene.Net.Documents.Document doc24 = pr.Document(1, new MapFieldSelector(new System.Collections.ArrayList(new System.String[]{"f4"})));
+            Lucene.Net.Documents.Document doc223 = pr.Document(1, new MapFieldSelector(new System.String[]{"f2", "f3"}));
+			
+            Assert.AreEqual(1, doc11.GetFields().Count);
+            Assert.AreEqual(1, doc24.GetFields().Count);
+            Assert.AreEqual(2, doc223.GetFields().Count);
+			
+            Assert.AreEqual("v1", doc11.Get("f1"));
+            Assert.AreEqual("v2", doc24.Get("f4"));
+            Assert.AreEqual("v2", doc223.Get("f2"));
+            Assert.AreEqual("v2", doc223.Get("f3"));
         }
 		
-		[Test]
+        [Test]
         public virtual void  TestIncompatibleIndexes()
 		{
 			// two documents:
@@ -196,5 +214,17 @@
 			w2.Close();
 			return dir2;
 		}
-	}
+
+        public static bool CollectionContains(System.Collections.ICollection col, System.String val)
+        {
+            for (System.Collections.IEnumerator iterator = col.GetEnumerator(); iterator.MoveNext(); )
+            {
+                System.Collections.DictionaryEntry fi = (System.Collections.DictionaryEntry) iterator.Current;
+                System.String s = fi.Key.ToString();
+                if (s == val)
+                    return true;
+            }
+            return false;
+        }
+    }
 }

Added: incubator/lucene.net/trunk/C#/src/Test/Index/TestParallelTermEnum.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestParallelTermEnum.cs?view=auto&rev=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestParallelTermEnum.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestParallelTermEnum.cs Sat Aug 11 09:56:37 2007
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NUnit.Framework;
+
+using SimpleAnalyzer = Lucene.Net.Analysis.SimpleAnalyzer;
+using Document = Lucene.Net.Documents.Document;
+using Field = Lucene.Net.Documents.Field;
+using Index = Lucene.Net.Documents.Field.Index;
+using Store = Lucene.Net.Documents.Field.Store;
+using RAMDirectory = Lucene.Net.Store.RAMDirectory;
+
+namespace Lucene.Net.Index
+{
+	
+    [TestFixture]
+    public class TestParallelTermEnum
+	{
+		private IndexReader ir1;
+		private IndexReader ir2;
+		
+        [SetUp]
+		protected internal virtual void  SetUp()
+		{
+			Lucene.Net.Documents.Document doc;
+			
+			RAMDirectory rd1 = new RAMDirectory();
+			IndexWriter iw1 = new IndexWriter(rd1, new SimpleAnalyzer(), true);
+			
+			doc = new Lucene.Net.Documents.Document();
+			doc.Add(new Field("field1", "the quick brown fox jumps", Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.TOKENIZED));
+			doc.Add(new Field("field2", "the quick brown fox jumps", Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.TOKENIZED));
+			doc.Add(new Field("field4", "", Lucene.Net.Documents.Field.Store.NO, Lucene.Net.Documents.Field.Index.TOKENIZED));
+			iw1.AddDocument(doc);
+			
+			iw1.Close();
+			RAMDirectory rd2 = new RAMDirectory();
+			IndexWriter iw2 = new IndexWriter(rd2, new SimpleAnalyzer(), true);
+			
+			doc = new Lucene.Net.Documents.Document();
+			doc.Add(new Field("field0", "", Lucene.Net.Documents.Field.Store.NO, Lucene.Net.Documents.Field.Index.TOKENIZED));
+			doc.Add(new Field("field1", "the fox jumps over the lazy dog", Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.TOKENIZED));
+			doc.Add(new Field("field3", "the fox jumps over the lazy dog", Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.TOKENIZED));
+			iw2.AddDocument(doc);
+			
+			iw2.Close();
+			
+			this.ir1 = IndexReader.Open(rd1);
+			this.ir2 = IndexReader.Open(rd2);
+		}
+		
+        [TearDown]
+		protected internal virtual void  TearDown()
+		{
+			ir1.Close();
+			ir2.Close();
+		}
+		
+        [Test]
+		public virtual void  Test1()
+		{
+			ParallelReader pr = new ParallelReader();
+			pr.Add(ir1);
+			pr.Add(ir2);
+			
+			TermDocs td = pr.TermDocs();
+			
+			TermEnum te = pr.Terms();
+			Assert.IsTrue(te.Next());
+			Assert.AreEqual("field1:brown", te.Term().ToString());
+			td.Seek(te.Term());
+			Assert.IsTrue(td.Next());
+			Assert.AreEqual(0, td.Doc());
+			Assert.IsFalse(td.Next());
+			Assert.IsTrue(te.Next());
+			Assert.AreEqual("field1:fox", te.Term().ToString());
+			td.Seek(te.Term());
+			Assert.IsTrue(td.Next());
+			Assert.AreEqual(0, td.Doc());
+			Assert.IsFalse(td.Next());
+			Assert.IsTrue(te.Next());
+			Assert.AreEqual("field1:jumps", te.Term().ToString());
+			td.Seek(te.Term());
+			Assert.IsTrue(td.Next());
+			Assert.AreEqual(0, td.Doc());
+			Assert.IsFalse(td.Next());
+			Assert.IsTrue(te.Next());
+			Assert.AreEqual("field1:quick", te.Term().ToString());
+			td.Seek(te.Term());
+			Assert.IsTrue(td.Next());
+			Assert.AreEqual(0, td.Doc());
+			Assert.IsFalse(td.Next());
+			Assert.IsTrue(te.Next());
+			Assert.AreEqual("field1:the", te.Term().ToString());
+			td.Seek(te.Term());
+			Assert.IsTrue(td.Next());
+			Assert.AreEqual(0, td.Doc());
+			Assert.IsFalse(td.Next());
+			Assert.IsTrue(te.Next());
+			Assert.AreEqual("field2:brown", te.Term().ToString());
+			td.Seek(te.Term());
+			Assert.IsTrue(td.Next());
+			Assert.AreEqual(0, td.Doc());
+			Assert.IsFalse(td.Next());
+			Assert.IsTrue(te.Next());
+			Assert.AreEqual("field2:fox", te.Term().ToString());
+			td.Seek(te.Term());
+			Assert.IsTrue(td.Next());
+			Assert.AreEqual(0, td.Doc());
+			Assert.IsFalse(td.Next());
+			Assert.IsTrue(te.Next());
+			Assert.AreEqual("field2:jumps", te.Term().ToString());
+			td.Seek(te.Term());
+			Assert.IsTrue(td.Next());
+			Assert.AreEqual(0, td.Doc());
+			Assert.IsFalse(td.Next());
+			Assert.IsTrue(te.Next());
+			Assert.AreEqual("field2:quick", te.Term().ToString());
+			td.Seek(te.Term());
+			Assert.IsTrue(td.Next());
+			Assert.AreEqual(0, td.Doc());
+			Assert.IsFalse(td.Next());
+			Assert.IsTrue(te.Next());
+			Assert.AreEqual("field2:the", te.Term().ToString());
+			td.Seek(te.Term());
+			Assert.IsTrue(td.Next());
+			Assert.AreEqual(0, td.Doc());
+			Assert.IsFalse(td.Next());
+			Assert.IsTrue(te.Next());
+			Assert.AreEqual("field3:dog", te.Term().ToString());
+			td.Seek(te.Term());
+			Assert.IsTrue(td.Next());
+			Assert.AreEqual(0, td.Doc());
+			Assert.IsFalse(td.Next());
+			Assert.IsTrue(te.Next());
+			Assert.AreEqual("field3:fox", te.Term().ToString());
+			td.Seek(te.Term());
+			Assert.IsTrue(td.Next());
+			Assert.AreEqual(0, td.Doc());
+			Assert.IsFalse(td.Next());
+			Assert.IsTrue(te.Next());
+			Assert.AreEqual("field3:jumps", te.Term().ToString());
+			td.Seek(te.Term());
+			Assert.IsTrue(td.Next());
+			Assert.AreEqual(0, td.Doc());
+			Assert.IsFalse(td.Next());
+			Assert.IsTrue(te.Next());
+			Assert.AreEqual("field3:lazy", te.Term().ToString());
+			td.Seek(te.Term());
+			Assert.IsTrue(td.Next());
+			Assert.AreEqual(0, td.Doc());
+			Assert.IsFalse(td.Next());
+			Assert.IsTrue(te.Next());
+			Assert.AreEqual("field3:over", te.Term().ToString());
+			td.Seek(te.Term());
+			Assert.IsTrue(td.Next());
+			Assert.AreEqual(0, td.Doc());
+			Assert.IsFalse(td.Next());
+			Assert.IsTrue(te.Next());
+			Assert.AreEqual("field3:the", te.Term().ToString());
+			td.Seek(te.Term());
+			Assert.IsTrue(td.Next());
+			Assert.AreEqual(0, td.Doc());
+			Assert.IsFalse(td.Next());
+			Assert.IsFalse(te.Next());
+		}
+	}
+}
\ No newline at end of file

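The long assertion chain in Test1 above encodes the iteration contract of ParallelReader: Terms() walks fields in lexicographic order across both sub-readers (field1 and field2 from the first index, field3 from the second; field0 and field4 hold empty values and so contribute no terms), with each field's terms sorted, and every term matches the single shared doc 0. The same walk, condensed into a hypothetical helper:

    // Sketch: print every field:term of a ParallelReader and its matching
    // docs; Test1 above asserts this exact sequence term by term.
    private void DumpTerms(ParallelReader pr)
    {
        TermEnum te = pr.Terms();
        TermDocs td = pr.TermDocs();
        while (te.Next())
        {
            System.Console.Out.Write(te.Term().ToString());
            td.Seek(te.Term());
            while (td.Next())
            {
                System.Console.Out.Write(" doc=" + td.Doc());
            }
            System.Console.Out.WriteLine();
        }
    }
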
Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestSegmentMerger.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestSegmentMerger.cs?view=diff&rev=564939&r1=564938&r2=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestSegmentMerger.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestSegmentMerger.cs Sat Aug 11 09:56:37 2007
@@ -16,10 +16,12 @@
  */
 
 using System;
+
 using NUnit.Framework;
-using Document = Lucene.Net.Documents.Document;
+
 using Directory = Lucene.Net.Store.Directory;
 using RAMDirectory = Lucene.Net.Store.RAMDirectory;
+using Document = Lucene.Net.Documents.Document;
 
 namespace Lucene.Net.Index
 {
@@ -42,6 +44,10 @@
 		private SegmentReader reader2 = null;
 		
 		
+        // public TestSegmentMerger(System.String s)
+        // {
+        // }
+		
         // This is needed for the test to pass and to mimic what happens with JUnit
         // For some reason, JUnit is creating a new member variable for each sub-test
         // but NUnit is not -- who is wrong/right, I don't know.
@@ -95,7 +101,7 @@
 			merger.CloseReaders();
 			Assert.IsTrue(docsMerged == 2);
 			//Should be able to open a new SegmentReader against the new directory
-			SegmentReader mergedReader = SegmentReader.Get(new SegmentInfo(mergedSegment, docsMerged, mergedDir));
+			SegmentReader mergedReader = SegmentReader.Get(new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, true));
 			Assert.IsTrue(mergedReader != null);
 			Assert.IsTrue(mergedReader.NumDocs() == 2);
 			Lucene.Net.Documents.Document newDoc1 = mergedReader.Document(0);
@@ -113,7 +119,7 @@
 			System.Collections.ICollection stored = mergedReader.GetFieldNames(IndexReader.FieldOption.INDEXED_WITH_TERMVECTOR);
 			Assert.IsTrue(stored != null);
 			//System.out.println("stored size: " + stored.size());
-			Assert.IsTrue(stored.Count == 2);
+			Assert.IsTrue(stored.Count == 4, "We do not have 4 fields that were indexed with term vector");
 			
 			TermFreqVector vector = mergedReader.GetTermFreqVector(0, DocHelper.TEXT_FIELD_2_KEY);
 			Assert.IsTrue(vector != null);

Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestSegmentReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestSegmentReader.cs?view=diff&rev=564939&r1=564938&r2=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestSegmentReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestSegmentReader.cs Sat Aug 11 09:56:37 2007
@@ -16,9 +16,11 @@
  */
 
 using System;
+
 using NUnit.Framework;
+
 using Document = Lucene.Net.Documents.Document;
-using Field = Lucene.Net.Documents.Field;
+using Fieldable = Lucene.Net.Documents.Fieldable;
 using DefaultSimilarity = Lucene.Net.Search.DefaultSimilarity;
 using RAMDirectory = Lucene.Net.Store.RAMDirectory;
 
@@ -32,6 +34,10 @@
 		private Lucene.Net.Documents.Document testDoc = new Lucene.Net.Documents.Document();
 		private SegmentReader reader = null;
 		
+        // public TestSegmentReader(System.String s)
+        // {
+        // }
+		
         // This is needed for the test to pass and to mimic what happens with JUnit
         // For some reason, JUnit is creating a new member variable for each sub-test
         // but NUnit is not -- who is wrong/right, I don't know.
@@ -78,7 +84,7 @@
 			//There are 2 unstored fields on the document that are not preserved across writing
 			Assert.IsTrue(DocHelper.NumFields(result) == DocHelper.NumFields(testDoc) - DocHelper.unstored.Count);
 			
-            foreach (Field field in result.Fields())
+            foreach (Lucene.Net.Documents.Field field in result.Fields())
 			{
 				Assert.IsTrue(field != null);
 				Assert.IsTrue(DocHelper.nameValues.Contains(field.Name()));
@@ -197,7 +203,7 @@
 			// test omit norms
 			for (int i = 0; i < DocHelper.fields.Length; i++)
 			{
-				Field f = DocHelper.fields[i];
+				Lucene.Net.Documents.Field f = DocHelper.fields[i];
 				if (f.IsIndexed())
 				{
 					Assert.AreEqual(reader.HasNorms(f.Name()), !f.GetOmitNorms());
@@ -239,7 +245,7 @@
 			
 			TermFreqVector[] results = reader.GetTermFreqVectors(0);
 			Assert.IsTrue(results != null);
-			Assert.IsTrue(results.Length == 2);
+			Assert.IsTrue(results.Length == 4, "We do not have 4 term freq vectors, we have: " + results.Length);
 		}
 	}
 }

Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestSegmentTermDocs.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestSegmentTermDocs.cs?view=diff&rev=564939&r1=564938&r2=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestSegmentTermDocs.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestSegmentTermDocs.cs Sat Aug 11 09:56:37 2007
@@ -16,12 +16,14 @@
  */
 
 using System;
+
 using NUnit.Framework;
+
+using RAMDirectory = Lucene.Net.Store.RAMDirectory;
+using Directory = Lucene.Net.Store.Directory;
 using WhitespaceAnalyzer = Lucene.Net.Analysis.WhitespaceAnalyzer;
 using Document = Lucene.Net.Documents.Document;
 using Field = Lucene.Net.Documents.Field;
-using Directory = Lucene.Net.Store.Directory;
-using RAMDirectory = Lucene.Net.Store.RAMDirectory;
 
 namespace Lucene.Net.Index
 {
@@ -32,6 +34,10 @@
 		private Lucene.Net.Documents.Document testDoc = new Lucene.Net.Documents.Document();
 		private Directory dir = new RAMDirectory();
 		
+        // public TestSegmentTermDocs(System.String s)
+        // {
+        // }
+		
         // This is needed for the test to pass and to mimic what happens with JUnit
         // For some reason, JUnit is creating a new member variable for each sub-test
         // but NUnit is not -- who is wrong/right, I don't know.
@@ -86,7 +92,7 @@
 		{
 			{
 				//After adding the document, we should be able to read it back in
-				SegmentReader reader = SegmentReader.Get(new SegmentInfo("test", 3, dir));
+				SegmentReader reader = SegmentReader.Get(new SegmentInfo("test", 1, dir));
 				Assert.IsTrue(reader != null);
 				SegmentTermDocs segTermDocs = new SegmentTermDocs(reader);
 				Assert.IsTrue(segTermDocs != null);
@@ -96,7 +102,7 @@
 			}
 			{
 				//After adding the document, we should be able to read it back in
-				SegmentReader reader = SegmentReader.Get(new SegmentInfo("test", 3, dir));
+				SegmentReader reader = SegmentReader.Get(new SegmentInfo("test", 1, dir));
 				Assert.IsTrue(reader != null);
 				SegmentTermDocs segTermDocs = new SegmentTermDocs(reader);
 				Assert.IsTrue(segTermDocs != null);

Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestSegmentTermEnum.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestSegmentTermEnum.cs?view=diff&rev=564939&r1=564938&r2=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestSegmentTermEnum.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestSegmentTermEnum.cs Sat Aug 11 09:56:37 2007
@@ -16,7 +16,9 @@
  */
 
 using System;
+
 using NUnit.Framework;
+
 using WhitespaceAnalyzer = Lucene.Net.Analysis.WhitespaceAnalyzer;
 using Document = Lucene.Net.Documents.Document;
 using Field = Lucene.Net.Documents.Field;

Added: incubator/lucene.net/trunk/C#/src/Test/Index/TestStressIndexing.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestStressIndexing.cs?view=auto&rev=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestStressIndexing.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestStressIndexing.cs Sat Aug 11 09:56:37 2007
@@ -0,0 +1,229 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NUnit.Framework;
+
+using Lucene.Net.Util;
+using Lucene.Net.Store;
+using Lucene.Net.Documents;
+using Lucene.Net.Analysis;
+using Lucene.Net.Index;
+using Lucene.Net.Search;
+using Searchable = Lucene.Net.Search.Searchable;
+using Lucene.Net.QueryParsers;
+
+namespace Lucene.Net.Index
+{
+	
+    [TestFixture]
+	public class TestStressIndexing
+	{
+		private static readonly Analyzer ANALYZER = new SimpleAnalyzer();
+		private static readonly System.Random RANDOM = new System.Random();
+		private static Searcher SEARCHER;
+		
+		private static int RUN_TIME_SEC = 15;
+		
+		private class IndexerThread : SupportClass.ThreadClass
+		{
+			internal IndexModifier modifier;
+			internal int nextID;
+			public int count;
+			internal bool failed;
+			
+			public IndexerThread(IndexModifier modifier)
+			{
+				this.modifier = modifier;
+			}
+			
+			override public void  Run()
+			{
+				long stopTime = (System.DateTime.Now.Ticks - 621355968000000000) / 10000 + 1000 * Lucene.Net.Index.TestStressIndexing.RUN_TIME_SEC;
+				try
+				{
+					while (true)
+					{
+						
+						if ((System.DateTime.Now.Ticks - 621355968000000000) / 10000 > stopTime)
+						{
+							break;
+						}
+						
+						// Add 10 docs:
+						for (int j = 0; j < 10; j++)
+						{
+							Lucene.Net.Documents.Document d = new Lucene.Net.Documents.Document();
+							int n = Lucene.Net.Index.TestStressIndexing.RANDOM.Next();
+							d.Add(new Field("id", System.Convert.ToString(nextID++), Field.Store.YES, Field.Index.UN_TOKENIZED));
+							d.Add(new Field("contents", English.IntToEnglish(n), Field.Store.NO, Field.Index.TOKENIZED));
+							modifier.AddDocument(d);
+						}
+						
+						// Delete 5 docs:
+						int deleteID = nextID;
+						for (int j = 0; j < 5; j++)
+						{
+							modifier.DeleteDocuments(new Term("id", "" + deleteID));
+							deleteID -= 2;
+						}
+						
+						count++;
+					}
+					
+					modifier.Close();
+				}
+				catch (System.Exception e)
+				{
+					System.Console.Out.WriteLine(e.ToString());
+					System.Console.Error.WriteLine(e.StackTrace);
+					failed = true;
+				}
+			}
+		}
+		
+		private class SearcherThread : SupportClass.ThreadClass
+		{
+			private Directory directory;
+			public int count;
+			internal bool failed;
+			
+			public SearcherThread(Directory directory)
+			{
+				this.directory = directory;
+			}
+			
+			override public void  Run()
+			{
+				long stopTime = (System.DateTime.Now.Ticks - 621355968000000000) / 10000 + 1000 * Lucene.Net.Index.TestStressIndexing.RUN_TIME_SEC;
+				try
+				{
+					while (true)
+					{
+						for (int i = 0; i < 100; i++)
+						{
+							(new IndexSearcher(directory)).Close();
+						}
+						count += 100;
+						if ((System.DateTime.Now.Ticks - 621355968000000000) / 10000 > stopTime)
+						{
+							break;
+						}
+					}
+				}
+				catch (System.Exception e)
+				{
+					//UPGRADE_TODO: The equivalent in .NET for method 'java.lang.Throwable.toString' may return a different value. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1043'"
+					System.Console.Out.WriteLine(e.ToString());
+					System.Console.Error.WriteLine(e.StackTrace);
+					failed = true;
+				}
+			}
+		}
+		
+		/*
+		Run one indexer and 2 searchers against single index as
+		stress test.
+		*/
+		public virtual void  RunStressTest(Directory directory)
+		{
+			IndexModifier modifier = new IndexModifier(directory, ANALYZER, true);
+			
+			// One modifier that writes 10 docs then removes 5, over
+			// and over:
+			IndexerThread indexerThread = new IndexerThread(modifier);
+			indexerThread.Start();
+			
+			// Two searchers that constantly just re-instantiate the searcher:
+			SearcherThread searcherThread1 = new SearcherThread(directory);
+			searcherThread1.Start();
+			
+			SearcherThread searcherThread2 = new SearcherThread(directory);
+			searcherThread2.Start();
+			
+			indexerThread.Join();
+			searcherThread1.Join();
+			searcherThread2.Join();
+			Assert.IsTrue(!indexerThread.failed, "hit unexpected exception in indexer");
+			Assert.IsTrue(!searcherThread1.failed, "hit unexpected exception in search1");
+			Assert.IsTrue(!searcherThread2.failed, "hit unexpected exception in search2");
+			//System.Console.Out.WriteLine("    Writer: " + indexerThread.count + " iterations");
+			//System.Console.Out.WriteLine("Searcher 1: " + searcherThread1.count + " searchers created");
+			//System.Console.Out.WriteLine("Searcher 2: " + searcherThread2.count + " searchers created");
+		}
+		
+		/*
+		Run the above stress test first against a RAMDirectory
+		and then against an FSDirectory.
+		*/
+        [Test]
+		public virtual void  TestStressIndexAndSearching()
+		{
+			
+			// First in a RAM directory:
+			Directory directory = new RAMDirectory();
+			RunStressTest(directory);
+			directory.Close();
+			
+			// Second in an FSDirectory:
+			System.String tempDir = System.IO.Path.GetTempPath();
+			System.IO.FileInfo dirPath = new System.IO.FileInfo(System.IO.Path.Combine(tempDir, "lucene.test.stress"));
+			directory = FSDirectory.GetDirectory(dirPath);
+			RunStressTest(directory);
+			directory.Close();
+			RmDir(dirPath);
+		}
+		
+		private void  RmDir(System.IO.FileInfo dir)
+		{
+			// Delete each entry in the directory, then the directory itself.
+			System.IO.FileInfo[] files = SupportClass.FileSupport.GetFiles(dir);
+			for (int i = 0; i < files.Length; i++)
+			{
+				if (System.IO.File.Exists(files[i].FullName))
+					System.IO.File.Delete(files[i].FullName);
+				else if (System.IO.Directory.Exists(files[i].FullName))
+					System.IO.Directory.Delete(files[i].FullName);
+			}
+			if (System.IO.File.Exists(dir.FullName))
+				System.IO.File.Delete(dir.FullName);
+			else if (System.IO.Directory.Exists(dir.FullName))
+				System.IO.Directory.Delete(dir.FullName);
+		}
+	}
+}
\ No newline at end of file
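
The expression (System.DateTime.Now.Ticks - 621355968000000000) / 10000, which recurs throughout these converted tests, is the mechanical translation of Java's System.currentTimeMillis(): 621355968000000000 is DateTime.Ticks at 1970-01-01, and dividing the 100 ns ticks by 10000 yields milliseconds. A minimal helper the expression could be factored into (the name CurrentTimeMillis is illustrative, not part of this commit):

    private static long CurrentTimeMillis()
    {
        // Ticks are 100 ns intervals since 0001-01-01; subtract the tick
        // count at the Unix epoch and divide by 10000 (ticks per ms).
        return (System.DateTime.Now.Ticks - 621355968000000000) / 10000;
    }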

Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestTermVectorsReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestTermVectorsReader.cs?view=diff&rev=564939&r1=564938&r2=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestTermVectorsReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestTermVectorsReader.cs Sat Aug 11 09:56:37 2007
@@ -16,7 +16,9 @@
  */
 
 using System;
+
 using NUnit.Framework;
+
 using RAMDirectory = Lucene.Net.Store.RAMDirectory;
 
 namespace Lucene.Net.Index

Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestTermVectorsWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestTermVectorsWriter.cs?view=diff&rev=564939&r1=564938&r2=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestTermVectorsWriter.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestTermVectorsWriter.cs Sat Aug 11 09:56:37 2007
@@ -16,7 +16,9 @@
  */
 
 using System;
+
 using NUnit.Framework;
+
 using StandardAnalyzer = Lucene.Net.Analysis.Standard.StandardAnalyzer;
 using Document = Lucene.Net.Documents.Document;
 using Field = Lucene.Net.Documents.Field;

Added: incubator/lucene.net/trunk/C#/src/Test/Index/TestTermdocPerf.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestTermdocPerf.cs?view=auto&rev=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestTermdocPerf.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestTermdocPerf.cs Sat Aug 11 09:56:37 2007
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NUnit.Framework;
+
+using Directory = Lucene.Net.Store.Directory;
+using RAMDirectory = Lucene.Net.Store.RAMDirectory;
+using Analyzer = Lucene.Net.Analysis.Analyzer;
+using TokenStream = Lucene.Net.Analysis.TokenStream;
+using Token = Lucene.Net.Analysis.Token;
+using Document = Lucene.Net.Documents.Document;
+using Field = Lucene.Net.Documents.Field;
+
+namespace Lucene.Net.Index
+{
+	
+	/// <author>  yonik
+	/// </author>
+	/// <version>  $Id$
+	/// </version>
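+	/// <summary> A TokenStream that returns the same token num times before signalling end-of-stream. </summary>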
+	class RepeatingTokenStream : TokenStream
+	{
+		public int num;
+		internal Token t;
+		
+		public RepeatingTokenStream(System.String val)
+		{
+			t = new Token(val, 0, val.Length);
+		}
+		
+		public override Token Next()
+		{
+			// Emit the token 'num' times, then return null to end the stream.
+			return --num < 0 ? null : t;
+		}
+	}
+	
+	[TestFixture]
+	public class TestTermdocPerf
+	{
+		private class AnonymousClassAnalyzer : Analyzer
+		{
+			public AnonymousClassAnalyzer(System.Random random, float percentDocs, Lucene.Net.Index.RepeatingTokenStream ts, int maxTF, TestTermdocPerf enclosingInstance)
+			{
+				InitBlock(random, percentDocs, ts, maxTF, enclosingInstance);
+			}
+			private void  InitBlock(System.Random random, float percentDocs, Lucene.Net.Index.RepeatingTokenStream ts, int maxTF, TestTermdocPerf enclosingInstance)
+			{
+				this.random = random;
+				this.percentDocs = percentDocs;
+				this.ts = ts;
+				this.maxTF = maxTF;
+				this.enclosingInstance = enclosingInstance;
+			}
+			private System.Random random;
+			private float percentDocs;
+			private Lucene.Net.Index.RepeatingTokenStream ts;
+			private int maxTF;
+			private TestTermdocPerf enclosingInstance;
+			public TestTermdocPerf Enclosing_Instance
+			{
+				get
+				{
+					return enclosingInstance;
+				}
+				
+			}
+			public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
+			{
+				if ((float) random.NextDouble() < percentDocs)
+					ts.num = random.Next(maxTF) + 1;
+				else
+					ts.num = 0;
+				return ts;
+			}
+		}
+		
+		internal virtual void  AddDocs(Directory dir, int ndocs, System.String field, System.String val, int maxTF, float percentDocs)
+		{
+			System.Random random = new System.Random(0);
+			RepeatingTokenStream ts = new RepeatingTokenStream(val);
+			
+			Analyzer analyzer = new AnonymousClassAnalyzer(random, percentDocs, ts, maxTF, this);
+			
+			Lucene.Net.Documents.Document doc = new Lucene.Net.Documents.Document();
+			doc.Add(new Field(field, val, Field.Store.NO, Field.Index.NO_NORMS));
+			IndexWriter writer = new IndexWriter(dir, analyzer, true);
+			writer.SetMaxBufferedDocs(100);
+			writer.SetMergeFactor(100);
+			
+			for (int i = 0; i < ndocs; i++)
+			{
+				writer.AddDocument(doc);
+			}
+			
+			writer.Optimize();
+			writer.Close();
+		}
+		
+		
+		public virtual int DoTest(int iter, int ndocs, int maxTF, float percentDocs)
+		{
+			Directory dir = new RAMDirectory();
+			
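+			// (Ticks - 621355968000000000) / 10000 converts .NET ticks to milliseconds since the Unix epoch.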
+			long start = (System.DateTime.Now.Ticks - 621355968000000000) / 10000;
+			AddDocs(dir, ndocs, "foo", "val", maxTF, percentDocs);
+			long end = (System.DateTime.Now.Ticks - 621355968000000000) / 10000;
+			System.Console.Out.WriteLine("milliseconds for creation of " + ndocs + " docs = " + (end - start));
+			
+			IndexReader reader = IndexReader.Open(dir);
+			TermEnum tenum = reader.Terms(new Term("foo", "val"));
+			TermDocs tdocs = reader.TermDocs();
+			
+			start = (System.DateTime.Now.Ticks - 621355968000000000) / 10000;
+			
+			int ret = 0;
+			for (int i = 0; i < iter; i++)
+			{
+				tdocs.Seek(tenum);
+				while (tdocs.Next())
+				{
+					ret += tdocs.Doc();
+				}
+			}
+			
+			end = (System.DateTime.Now.Ticks - 621355968000000000) / 10000;
+			System.Console.Out.WriteLine("milliseconds for " + iter + " TermDocs iteration: " + (end - start));
+			
+			return ret;
+		}
+		
+        [Test]
+		public virtual void  TestTermDocPerf()
+		{
+			// Performance benchmark; uncomment to run. Measures TermDocs
+			// iteration when roughly 10% of documents contain the term:
+			// DoTest(100000, 10000, 3, 0.1f);
+		}
+	}
+}
\ No newline at end of file
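
For reference, invoking the disabled benchmark would look like the sketch below; the values mirror the commented-out call and the parameter meanings come from DoTest's signature (the checksum name is illustrative):

    // 100000 TermDocs scans over an index of 10000 docs, with a maximum
    // within-document term frequency of 3 and the term present in roughly
    // 10% of documents; DoTest returns the accumulated doc ids.
    int checksum = DoTest(100000, 10000, 3, 0.1f);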

Modified: incubator/lucene.net/trunk/C#/src/Test/Index/TestWordlistLoader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/TestWordlistLoader.cs?view=diff&rev=564939&r1=564938&r2=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/Index/TestWordlistLoader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/Index/TestWordlistLoader.cs Sat Aug 11 09:56:37 2007
@@ -16,7 +16,9 @@
  */
 
 using System;
+
 using NUnit.Framework;
+
 using WordlistLoader = Lucene.Net.Analysis.WordlistLoader;
 
 namespace Lucene.Net.Index

Added: incubator/lucene.net/trunk/C#/src/Test/Index/index.prelockless.cfs.zip
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/index.prelockless.cfs.zip?view=auto&rev=564939
==============================================================================
Binary file - no diff available.

Propchange: incubator/lucene.net/trunk/C#/src/Test/Index/index.prelockless.cfs.zip
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: incubator/lucene.net/trunk/C#/src/Test/Index/index.prelockless.nocfs.zip
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/Index/index.prelockless.nocfs.zip?view=auto&rev=564939
==============================================================================
Binary file - no diff available.

Propchange: incubator/lucene.net/trunk/C#/src/Test/Index/index.prelockless.nocfs.zip
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Modified: incubator/lucene.net/trunk/C#/src/Test/IndexTest.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/IndexTest.cs?view=diff&rev=564939&r1=564938&r2=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/IndexTest.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/IndexTest.cs Sat Aug 11 09:56:37 2007
@@ -16,6 +16,7 @@
  */
 
 using System;
+
 using SimpleAnalyzer = Lucene.Net.Analysis.SimpleAnalyzer;
 using FileDocument = Lucene.Net.Demo.FileDocument;
 using IndexWriter = Lucene.Net.Index.IndexWriter;

Modified: incubator/lucene.net/trunk/C#/src/Test/QueryParser/TestMultiAnalyzer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/QueryParser/TestMultiAnalyzer.cs?view=diff&rev=564939&r1=564938&r2=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/QueryParser/TestMultiAnalyzer.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/QueryParser/TestMultiAnalyzer.cs Sat Aug 11 09:56:37 2007
@@ -16,15 +16,16 @@
  */
 
 using System;
+
 using NUnit.Framework;
+
+using Query = Lucene.Net.Search.Query;
 using Analyzer = Lucene.Net.Analysis.Analyzer;
 using LowerCaseFilter = Lucene.Net.Analysis.LowerCaseFilter;
 using Token = Lucene.Net.Analysis.Token;
 using TokenFilter = Lucene.Net.Analysis.TokenFilter;
 using TokenStream = Lucene.Net.Analysis.TokenStream;
 using StandardTokenizer = Lucene.Net.Analysis.Standard.StandardTokenizer;
-using ParseException = Lucene.Net.QueryParsers.ParseException;
-using QueryParser = Lucene.Net.QueryParsers.QueryParser;
 
 namespace Lucene.Net.QueryParser
 {

Modified: incubator/lucene.net/trunk/C#/src/Test/QueryParser/TestMultiFieldQueryParser.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Test/QueryParser/TestMultiFieldQueryParser.cs?view=diff&rev=564939&r1=564938&r2=564939
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Test/QueryParser/TestMultiFieldQueryParser.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Test/QueryParser/TestMultiFieldQueryParser.cs Sat Aug 11 09:56:37 2007
@@ -16,7 +16,9 @@
  */
 
 using System;
+
 using NUnit.Framework;
+
 using Analyzer = Lucene.Net.Analysis.Analyzer;
 using Token = Lucene.Net.Analysis.Token;
 using TokenStream = Lucene.Net.Analysis.TokenStream;
@@ -24,15 +26,13 @@
 using Document = Lucene.Net.Documents.Document;
 using Field = Lucene.Net.Documents.Field;
 using IndexWriter = Lucene.Net.Index.IndexWriter;
-using MultiFieldQueryParser = Lucene.Net.QueryParsers.MultiFieldQueryParser;
-using ParseException = Lucene.Net.QueryParsers.ParseException;
-using QueryParser = Lucene.Net.QueryParsers.QueryParser;
 using BooleanClause = Lucene.Net.Search.BooleanClause;
 using Hits = Lucene.Net.Search.Hits;
 using IndexSearcher = Lucene.Net.Search.IndexSearcher;
 using Query = Lucene.Net.Search.Query;
 using Directory = Lucene.Net.Store.Directory;
 using RAMDirectory = Lucene.Net.Store.RAMDirectory;
+using MultiFieldQueryParser = Lucene.Net.QueryParsers.MultiFieldQueryParser;
 
 namespace Lucene.Net.QueryParser
 {
@@ -100,7 +100,37 @@
 			Assert.AreEqual("+(b:\"aa bb cc\" t:\"aa bb cc\") +(b:\"dd ee\" t:\"dd ee\")", q.ToString());
 		}
 		
-		[Test]
+        [Test]
+        public virtual void  TestBoostsSimple()
+        {
+            System.Collections.IDictionary boosts = new System.Collections.Hashtable();
+            boosts["b"] = (float) 5;
+            boosts["t"] = (float) 10;
+            System.String[] fields = new System.String[]{"b", "t"};
+            MultiFieldQueryParser mfqp = new MultiFieldQueryParser(fields, new StandardAnalyzer(), boosts);
+			
+			
+            //Check for simple
+            Query q = mfqp.Parse("one");
+            Assert.AreEqual("b:one^5.0 t:one^10.0", q.ToString());
+			
+            //Check for AND
+            q = mfqp.Parse("one AND two");
+            Assert.AreEqual("+(b:one^5.0 t:one^10.0) +(b:two^5.0 t:two^10.0)", q.ToString());
+			
+            //Check for OR
+            q = mfqp.Parse("one OR two");
+            Assert.AreEqual("(b:one^5.0 t:one^10.0) (b:two^5.0 t:two^10.0)", q.ToString());
+			
+            //Check for AND and a field
+            q = mfqp.Parse("one AND two AND foo:test");
+            Assert.AreEqual("+(b:one^5.0 t:one^10.0) +(b:two^5.0 t:two^10.0) +foo:test", q.ToString());
+			
+            q = mfqp.Parse("one^3 AND two^4");
+            Assert.AreEqual("+((b:one^5.0 t:one^10.0)^3.0) +((b:two^5.0 t:two^10.0)^4.0)", q.ToString());
+        }
+		
+        [Test]
         public virtual void  TestStaticMethod1()
 		{
 			System.String[] fields = new System.String[]{"b", "t"};
@@ -260,7 +290,7 @@
 		}
 		
 		/// <summary> Return empty tokens for field "f1".</summary>
-		private class AnalyzerReturningNull:Analyzer
+		private class AnalyzerReturningNull : Analyzer
 		{
 			internal StandardAnalyzer stdAnalyzer = new StandardAnalyzer();
 			
@@ -280,7 +310,7 @@
 				}
 			}
 			
-			private class EmptyTokenStream:TokenStream
+			private class EmptyTokenStream : TokenStream
 			{
 				public override Token Next()
 				{