Posted to commits@lucenenet.apache.org by do...@apache.org on 2009/07/29 20:04:24 UTC

svn commit: r798995 [11/35] - in /incubator/lucene.net/trunk/C#/src: Lucene.Net/ Lucene.Net/Analysis/ Lucene.Net/Analysis/Standard/ Lucene.Net/Document/ Lucene.Net/Index/ Lucene.Net/QueryParser/ Lucene.Net/Search/ Lucene.Net/Search/Function/ Lucene.Net...

Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IntBlockPool.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/IntBlockPool.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IntBlockPool.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/IntBlockPool.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Lucene.Net.Index
+{
+    internal sealed class IntBlockPool
+    {
+
+        public int[][] buffers = new int[10][];
+
+        internal int bufferUpto = -1;                        // Which buffer we are up to
+        public int intUpto = DocumentsWriter.INT_BLOCK_SIZE;             // Where we are in head buffer
+
+        public int[] buffer;                              // Current head buffer
+        public int intOffset = -DocumentsWriter.INT_BLOCK_SIZE;          // Current head offset
+
+        private readonly DocumentsWriter docWriter;
+        internal readonly bool trackAllocations;
+
+        public IntBlockPool(DocumentsWriter docWriter, bool trackAllocations)
+        {
+            this.docWriter = docWriter;
+            this.trackAllocations = trackAllocations;
+        }
+
+        public void reset()
+        {
+            if (bufferUpto != -1)
+            {
+                if (bufferUpto > 0)
+                    // Recycle all but the first buffer
+                    docWriter.RecycleIntBlocks(buffers, 1, 1 + bufferUpto);
+
+                // Reuse first buffer
+                bufferUpto = 0;
+                intUpto = 0;
+                intOffset = 0;
+                buffer = buffers[0];
+            }
+        }
+
+        public void nextBuffer()
+        {
+            if (1 + bufferUpto == buffers.Length)
+            {
+                int[][] newBuffers = new int[(int)(buffers.Length * 1.5)][];
+                System.Array.Copy(buffers, 0, newBuffers, 0, buffers.Length);
+                buffers = newBuffers;
+            }
+            buffer = buffers[1 + bufferUpto] = docWriter.GetIntBlock(trackAllocations);
+            bufferUpto++;
+
+            intUpto = 0;
+            intOffset += DocumentsWriter.INT_BLOCK_SIZE;
+        }
+    }
+}

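A note on the class just added: IntBlockPool hands out fixed-size int blocks from a jagged array that grows by 1.5x when full, and reset() recycles every buffer except the first back to the DocumentsWriter. Below is a minimal standalone sketch of the same allocate/grow/recycle pattern; the SimpleIntBlockPool name, the BLOCK_SIZE value, and the Stack-based free list are illustrative stand-ins for DocumentsWriter.GetIntBlock/RecycleIntBlocks, not part of this commit:

    using System;
    using System.Collections.Generic;

    internal sealed class SimpleIntBlockPool
    {
        public const int BLOCK_SIZE = 8192;        // assumed size; stands in for DocumentsWriter.INT_BLOCK_SIZE

        private int[][] buffers = new int[10][];
        private readonly Stack<int[]> freeBlocks = new Stack<int[]>(); // stands in for DocumentsWriter recycling

        private int bufferUpto = -1;               // index of the current head buffer
        public int intUpto = BLOCK_SIZE;           // next free slot; starts "full" so the first write forces NextBuffer()
        public int[] buffer;                       // current head buffer
        public int intOffset = -BLOCK_SIZE;        // absolute offset of the head buffer

        public void NextBuffer()
        {
            if (1 + bufferUpto == buffers.Length)
            {
                // Grow the jagged array by 1.5x, as the committed code does
                int[][] newBuffers = new int[(int)(buffers.Length * 1.5)][];
                Array.Copy(buffers, 0, newBuffers, 0, buffers.Length);
                buffers = newBuffers;
            }
            buffer = buffers[1 + bufferUpto] = freeBlocks.Count > 0 ? freeBlocks.Pop() : new int[BLOCK_SIZE];
            bufferUpto++;
            intUpto = 0;
            intOffset += BLOCK_SIZE;
        }

        public void Reset()
        {
            if (bufferUpto != -1)
            {
                // Recycle all but the first buffer, then reuse buffer 0
                for (int i = 1; i <= bufferUpto; i++)
                    freeBlocks.Push(buffers[i]);
                bufferUpto = 0;
                intUpto = 0;
                intOffset = 0;
                buffer = buffers[0];
            }
        }
    }

Starting intUpto at BLOCK_SIZE and intOffset at -BLOCK_SIZE means a caller's usual "if (intUpto == BLOCK_SIZE) NextBuffer();" check lazily allocates the first block on first use.
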
Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/InvertedDocConsumer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/InvertedDocConsumer.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/InvertedDocConsumer.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/InvertedDocConsumer.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System.Collections.Generic;
+
+namespace Lucene.Net.Index
+{
+    internal abstract class InvertedDocConsumer
+    {
+        /// <summary>  Add a new thread </summary>
+        internal abstract InvertedDocConsumerPerThread addThread(DocInverterPerThread docInverterPerThread);
+
+        /// <summary>  Abort (called after hitting AbortException) </summary>
+        internal abstract void abort();
+
+        /// <summary>  Flush a new segment </summary>
+        internal abstract void flush(IDictionary<object,ICollection<object>> threadsAndFields, DocumentsWriter.FlushState state);
+
+        /// <summary>  Close doc stores </summary>
+        internal abstract void closeDocStore(DocumentsWriter.FlushState state);
+
+        /// <summary>  Attempt to free RAM, returning true if any RAM was freed </summary>
+        internal abstract bool freeRAM();
+
+        internal FieldInfos fieldInfos;
+
+        internal virtual void setFieldInfos(FieldInfos fieldInfos)
+        {
+            this.fieldInfos = fieldInfos;
+        }
+    }
+}

Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/InvertedDocConsumerPerField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/InvertedDocConsumerPerField.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/InvertedDocConsumerPerField.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/InvertedDocConsumerPerField.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using Fieldable = Lucene.Net.Documents.Fieldable;
+using Token = Lucene.Net.Analysis.Token;
+
+namespace Lucene.Net.Index
+{
+    internal abstract class InvertedDocConsumerPerField
+    {
+        // Called once per field, and is given all Fieldable
+        // occurrences for this field in the document.  Return
+        // true if you wish to see inverted tokens for these
+        // fields:
+        internal abstract bool start(Fieldable[] fields, int count);
+
+        // Called once per inverted token
+        internal abstract void add(Token token);
+
+        // Called once per field per document, after all Fieldable
+        // occurrences are inverted
+        internal abstract void finish();
+
+        // Called on hitting an aborting exception
+        internal abstract void abort();
+    }
+}
\ No newline at end of file

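The comments in InvertedDocConsumerPerField spell out a per-field protocol: start() runs once per field with all Fieldable occurrences, add() once per inverted token, and finish() once per field per document. Here is a self-contained sketch of a consumer that follows that protocol; SimpleToken and TokenCountingConsumer are illustrative stand-ins for Lucene's Token and the abstract class above, not part of this commit:

    using System;

    internal sealed class SimpleToken             // stand-in for Lucene.Net.Analysis.Token
    {
        public readonly string Term;
        public SimpleToken(string term) { Term = term; }
    }

    internal sealed class TokenCountingConsumer
    {
        private int tokenCount;

        // Once per field; returning true asks the inverter to deliver tokens
        public bool Start(string[] fieldValues, int count)
        {
            tokenCount = 0;
            return count > 0;
        }

        // Once per inverted token
        public void Add(SimpleToken token)
        {
            tokenCount++;
        }

        // Once per field per document, after all occurrences are inverted
        public void Finish()
        {
            Console.WriteLine("field produced " + tokenCount + " tokens");
        }
    }
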
Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/InvertedDocConsumerPerThread.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/InvertedDocConsumerPerThread.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/InvertedDocConsumerPerThread.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/InvertedDocConsumerPerThread.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Lucene.Net.Index
+{
+    internal abstract class InvertedDocConsumerPerThread
+    {
+        internal abstract void startDocument();
+        internal abstract InvertedDocConsumerPerField addField(DocInverterPerField docInverterPerField, FieldInfo fieldInfo);
+        internal abstract DocumentsWriter.DocWriter finishDocument();
+        internal abstract void abort();
+    }
+}

Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/InvertedDocEndConsumer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/InvertedDocEndConsumer.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/InvertedDocEndConsumer.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/InvertedDocEndConsumer.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System.Collections.Generic;
+
+namespace Lucene.Net.Index
+{
+    internal abstract class InvertedDocEndConsumer
+    {
+        internal abstract InvertedDocEndConsumerPerThread addThread(DocInverterPerThread docInverterPerThread);
+        internal abstract void flush(IDictionary<object,ICollection<object>> threadsAndFields, DocumentsWriter.FlushState state);
+        internal abstract void closeDocStore(DocumentsWriter.FlushState state);
+        internal abstract void abort();
+        internal abstract void setFieldInfos(FieldInfos fieldInfos);
+    }
+}

Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/InvertedDocEndConsumerPerField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/InvertedDocEndConsumerPerField.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/InvertedDocEndConsumerPerField.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/InvertedDocEndConsumerPerField.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Lucene.Net.Index
+{
+    internal abstract class InvertedDocEndConsumerPerField
+    {
+        internal abstract void finish();
+        internal abstract void abort();
+    }
+}

Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/InvertedDocEndConsumerPerThread.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/InvertedDocEndConsumerPerThread.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/InvertedDocEndConsumerPerThread.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/InvertedDocEndConsumerPerThread.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Lucene.Net.Index
+{
+    internal abstract class InvertedDocEndConsumerPerThread
+    {
+        internal abstract void startDocument();
+        internal abstract InvertedDocEndConsumerPerField addField(DocInverterPerField docInverterPerField, FieldInfo fieldInfo);
+        internal abstract void finishDocument();
+        internal abstract void abort();
+    }
+}

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/KeepOnlyLastCommitDeletionPolicy.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/KeepOnlyLastCommitDeletionPolicy.cs?rev=798995&r1=798994&r2=798995&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/KeepOnlyLastCommitDeletionPolicy.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/KeepOnlyLastCommitDeletionPolicy.cs Wed Jul 29 18:04:12 2009
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-using System;
+using System.Collections.Generic;
 
 namespace Lucene.Net.Index
 {
@@ -30,21 +30,21 @@
 	{
 		
 		/// <summary> Deletes all commits except the most recent one.</summary>
-		public void  OnInit(System.Collections.IList commits)
+        public void OnInit(List<IndexCommitPoint> commits)
 		{
 			// Note that commits.Count should normally be 1:
 			OnCommit(commits);
 		}
 		
 		/// <summary> Deletes all commits except the most recent one.</summary>
-		public void  OnCommit(System.Collections.IList commits)
+        public void OnCommit(List<IndexCommitPoint> commits)
 		{
 			// Note that commits.Count should normally be 2 (if not
 			// called by OnInit above):
 			int size = commits.Count;
 			for (int i = 0; i < size - 1; i++)
 			{
-				((IndexCommitPoint) commits[i]).Delete();
+				commits[i].Delete();
 			}
 		}
 	}

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogMergePolicy.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/LogMergePolicy.cs?rev=798995&r1=798994&r2=798995&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogMergePolicy.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/LogMergePolicy.cs Wed Jul 29 18:04:12 2009
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-using System;
+using System.Collections.Generic;
 
 using Directory = Lucene.Net.Store.Directory;
 
@@ -154,7 +154,7 @@
 		
 		abstract protected internal long Size(SegmentInfo info);
 		
-		private bool IsOptimized(SegmentInfos infos, IndexWriter writer, int maxNumSegments, System.Collections.Hashtable segmentsToOptimize)
+		private bool IsOptimized(SegmentInfos infos, IndexWriter writer, int maxNumSegments, Dictionary<SegmentInfo,SegmentInfo> segmentsToOptimize)
 		{
 			int numSegments = infos.Count;
 			int numToOptimize = 0;
@@ -162,7 +162,7 @@
 			for (int i = 0; i < numSegments && numToOptimize <= maxNumSegments; i++)
 			{
 				SegmentInfo info = infos.Info(i);
-				if (segmentsToOptimize.Contains(info))
+				if (segmentsToOptimize.ContainsKey(info))
 				{
 					numToOptimize++;
 					optimizeInfo = info;
@@ -190,7 +190,7 @@
 		/// (mergeFactor at a time) so the {@link MergeScheduler}
 		/// in use may make use of concurrency. 
 		/// </summary>
-		public override MergeSpecification FindMergesForOptimize(SegmentInfos infos, IndexWriter writer, int maxNumSegments, System.Collections.Hashtable segmentsToOptimize)
+		public override MergeSpecification FindMergesForOptimize(SegmentInfos infos, IndexWriter writer, int maxNumSegments, Dictionary<SegmentInfo,SegmentInfo> segmentsToOptimize)
 		{
 			MergeSpecification spec;
 			
@@ -206,7 +206,7 @@
 				while (last > 0)
 				{
 					SegmentInfo info = infos.Info(--last);
-					if (segmentsToOptimize.Contains(info))
+					if (segmentsToOptimize.ContainsKey(info))
 					{
 						last++;
 						break;
@@ -280,7 +280,59 @@
 			
 			return spec;
 		}
-		
+
+        /// <summary>
+        /// Finds merges necessary to expunge all deletes from the
+        /// index.  We simply merge adjacent segments that have
+        /// deletes, up to mergeFactor at a time.
+        /// </summary>
+        public override MergeSpecification FindMergesToExpungeDeletes(SegmentInfos segmentInfos, IndexWriter writer)
+        {
+            this.writer = writer;
+
+            int numSegments = segmentInfos.Count;
+
+            Message("findMergesToExpungeDeletes: " + numSegments + " segments");
+
+            MergeSpecification spec = new MergeSpecification();
+            int firstSegmentWithDeletions = -1;
+            for (int i = 0; i < numSegments; i++)
+            {
+                SegmentInfo info = segmentInfos.Info(i);
+                if (info.HasDeletions())
+                {
+                    Message("  segment " + info.name + " has deletions");
+                    if (firstSegmentWithDeletions == -1)
+                        firstSegmentWithDeletions = i;
+                    else if (i - firstSegmentWithDeletions == mergeFactor)
+                    {
+                        // We've seen mergeFactor segments in a row with
+                        // deletions, so force a merge now:
+                        Message("  add merge " + firstSegmentWithDeletions + " to " + (i - 1) + " inclusive");
+                        spec.Add(new OneMerge(segmentInfos.Range(firstSegmentWithDeletions, i), useCompoundFile));
+                        firstSegmentWithDeletions = i;
+                    }
+                }
+                else if (firstSegmentWithDeletions != -1)
+                {
+                    // End of a sequence of segments with deletions, so,
+                    // merge those past segments even if it's fewer than
+                    // mergeFactor segments
+                    Message("  add merge " + firstSegmentWithDeletions + " to " + (i - 1) + " inclusive");
+                    spec.Add(new OneMerge(segmentInfos.Range(firstSegmentWithDeletions, i), useCompoundFile));
+                    firstSegmentWithDeletions = -1;
+                }
+            }
+
+            if (firstSegmentWithDeletions != -1)
+            {
+                Message("  add merge " + firstSegmentWithDeletions + " to " + (numSegments - 1) + " inclusive");
+                spec.Add(new OneMerge(segmentInfos.Range(firstSegmentWithDeletions, numSegments), useCompoundFile));
+            }
+
+            return spec;
+        }
+
 		/// <summary>Checks if any merges are now necessary and returns a
 		/// {@link MergePolicy.MergeSpecification} if so.  A merge
 		/// is necessary when there are more than {@link

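FindMergesToExpungeDeletes groups adjacent segments that have deletions into runs of at most mergeFactor and emits one merge per run. A standalone sketch of just that grouping step follows; the ExpungeDeletesDemo name and the bool[] input are illustrative simplifications of the SegmentInfos the real method walks:

    using System.Collections.Generic;

    internal static class ExpungeDeletesDemo
    {
        // Returns [start, end) index ranges of adjacent segments with
        // deletions, cut into runs of at most mergeFactor, mirroring the
        // loop in FindMergesToExpungeDeletes above.
        public static List<int[]> FindRanges(bool[] hasDeletions, int mergeFactor)
        {
            List<int[]> ranges = new List<int[]>();
            int first = -1;
            for (int i = 0; i < hasDeletions.Length; i++)
            {
                if (hasDeletions[i])
                {
                    if (first == -1)
                        first = i;
                    else if (i - first == mergeFactor)
                    {
                        // mergeFactor segments in a row with deletions: force a merge now
                        ranges.Add(new int[] { first, i });
                        first = i;
                    }
                }
                else if (first != -1)
                {
                    // A segment without deletions ends the current run
                    ranges.Add(new int[] { first, i });
                    first = -1;
                }
            }
            if (first != -1)
                ranges.Add(new int[] { first, hasDeletions.Length });
            return ranges;
        }
    }

For hasDeletions = {true, true, true, false, true} and mergeFactor = 2, this yields the ranges [0,2), [2,3), and [4,5), which are the same cuts the method above would log via Message().
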
Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergeDocIDRemapper.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/MergeDocIDRemapper.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergeDocIDRemapper.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergeDocIDRemapper.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,124 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Lucene.Net.Index
+{
+    /// <summary>
+    /// Remaps docIDs after a merge has completed, where the
+    /// merged segments had at least one deletion.  This is used
+    /// to renumber the buffered deletes in IndexWriter when a
+    /// merge of segments with deletions commits.
+    /// </summary>
+    internal sealed class MergeDocIDRemapper
+    {
+        internal int[] starts;                                 // used for binary search of mapped docID
+        internal int[] newStarts;                              // starts, minus the deletes
+        internal int[][] docMaps;                              // maps docIDs in the merged set
+        internal int minDocID;                                 // minimum docID that needs renumbering
+        internal int maxDocID;                                 // 1+ the max docID that needs renumbering
+        internal int docShift;                                 // total # deleted docs that were compacted by this merge
+
+        public MergeDocIDRemapper(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergedDocCount)
+        {
+            this.docMaps = docMaps;
+            SegmentInfo firstSegment = merge.segments.Info(0);
+            int i = 0;
+            while (true)
+            {
+                SegmentInfo info = infos.Info(i);
+                if (info.Equals(firstSegment))
+                    break;
+                minDocID += info.docCount;
+                i++;
+            }
+
+            int numDocs = 0;
+            for (int j = 0; j < docMaps.Length; i++, j++)
+            {
+                numDocs += infos.Info(i).docCount;
+                System.Diagnostics.Debug.Assert(infos.Info(i).Equals(merge.segments.Info(j)));
+            }
+            maxDocID = minDocID + numDocs;
+
+            starts = new int[docMaps.Length];
+            newStarts = new int[docMaps.Length];
+
+            starts[0] = minDocID;
+            newStarts[0] = minDocID;
+            for (i = 1; i < docMaps.Length; i++)
+            {
+                int lastDocCount = merge.segments.Info(i - 1).docCount;
+                starts[i] = starts[i - 1] + lastDocCount;
+                newStarts[i] = newStarts[i - 1] + lastDocCount - delCounts[i - 1];
+            }
+            docShift = numDocs - mergedDocCount;
+
+            // There are rare cases when docShift is 0.  It happens
+            // if you try to delete a docID that's out of bounds,
+            // because the SegmentReader still allocates deletedDocs
+            // and pretends it has deletions ... so we can't make
+            // this assert here
+            // assert docShift > 0;
+
+            // Make sure it all adds up:
+            System.Diagnostics.Debug.Assert(docShift == maxDocID - (newStarts[docMaps.Length - 1] + merge.segments.Info(docMaps.Length - 1).docCount - delCounts[docMaps.Length - 1]));
+        }
+
+        public int Remap(int oldDocID)
+        {
+            if (oldDocID < minDocID)
+                // Unaffected by merge
+                return oldDocID;
+            else if (oldDocID >= maxDocID)
+                // This doc was "after" the merge, so simple shift
+                return oldDocID - docShift;
+            else
+            {
+                // Binary search to locate this document & find its new docID
+                int lo = 0;                                      // search starts array
+                int hi = docMaps.Length - 1;                  // for first element less
+
+                while (hi >= lo)
+                {
+                    int mid = (lo + hi) >> 1;
+                    int midValue = starts[mid];
+                    if (oldDocID < midValue)
+                        hi = mid - 1;
+                    else if (oldDocID > midValue)
+                        lo = mid + 1;
+                    else
+                    {                                      // found a match
+                        while (mid + 1 < docMaps.Length && starts[mid + 1] == midValue)
+                        {
+                            mid++;                                  // scan to last match
+                        }
+                        if (docMaps[mid] != null)
+                            return newStarts[mid] + docMaps[mid][oldDocID - starts[mid]];
+                        else
+                            return newStarts[mid] + oldDocID - starts[mid];
+                    }
+                }
+                if (docMaps[hi] != null)
+                    return newStarts[hi] + docMaps[hi][oldDocID - starts[hi]];
+                else
+                    return newStarts[hi] + oldDocID - starts[hi];
+            }
+        }
+    }
+}
+
+

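Remap first classifies oldDocID as before, inside, or after the merged range, then binary-searches starts to find the owning segment and applies that segment's docMap (null when the segment had no deletions). A simplified sketch of the same lookup; a linear scan stands in for the binary search, and the parameter layout is illustrative:

    internal static class DocIDRemapDemo
    {
        public static int Remap(int oldDocID, int minDocID, int maxDocID, int docShift,
                                int[] starts, int[] newStarts, int[][] docMaps)
        {
            if (oldDocID < minDocID)
                return oldDocID;                    // before the merged range: unaffected
            if (oldDocID >= maxDocID)
                return oldDocID - docShift;         // after the merged range: simple shift

            // Find the segment owning oldDocID (a binary search in the committed code)
            int seg = docMaps.Length - 1;
            while (seg > 0 && oldDocID < starts[seg])
                seg--;

            if (docMaps[seg] != null)
                return newStarts[seg] + docMaps[seg][oldDocID - starts[seg]];
            return newStarts[seg] + oldDocID - starts[seg];     // no deletions in this segment
        }
    }

For example, with starts = {10, 15}, newStarts = {10, 13} (segment 0 lost two docs), a null docMap for segment 1, minDocID = 10, maxDocID = 20, and docShift = 2, oldDocID 16 remaps to 13 + (16 - 15) = 14.
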
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergePolicy.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/MergePolicy.cs?rev=798995&r1=798994&r2=798995&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergePolicy.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MergePolicy.cs Wed Jul 29 18:04:12 2009
@@ -15,13 +15,12 @@
  * limitations under the License.
  */
 
-using System;
-
 using Directory = Lucene.Net.Store.Directory;
 
+using System.Collections.Generic;
+
 namespace Lucene.Net.Index
 {
-	
 	/// <summary> <p>Expert: a MergePolicy determines the sequence of
 	/// primitive merge operations to be used for overall merge
 	/// and optimize operations.</p>
@@ -145,14 +144,14 @@
 				}
 			}
 			
-			internal virtual System.String SegString(Directory dir)
+			internal virtual string SegString(Directory dir)
 			{
 				System.Text.StringBuilder b = new System.Text.StringBuilder();
 				int numSegments = segments.Count;
 				for (int i = 0; i < numSegments; i++)
 				{
 					if (i > 0)
-						b.Append(" ");
+						b.Append(' ');
 					b.Append(segments.Info(i).SegString(dir));
 				}
 				if (info != null)
@@ -173,20 +172,20 @@
 			
 			/// <summary> The subset of segments to be included in the primitive merge.</summary>
 			
-			public System.Collections.IList merges = new System.Collections.ArrayList();
+			public List<OneMerge> merges = new List<OneMerge>();
 			
-			public virtual void  Add(OneMerge merge)
+			public virtual void Add(OneMerge merge)
 			{
 				merges.Add(merge);
 			}
 			
-			public virtual System.String SegString(Directory dir)
+			public virtual string SegString(Directory dir)
 			{
 				System.Text.StringBuilder b = new System.Text.StringBuilder();
 				b.Append("MergeSpec:\n");
 				int count = merges.Count;
 				for (int i = 0; i < count; i++)
-					b.Append("  ").Append(1 + i).Append(": ").Append(((OneMerge) merges[i]).SegString(dir));
+					b.Append("  ").Append(1 + i).Append(": ").Append(merges[i].SegString(dir));
 				return b.ToString();
 			}
 		}
@@ -194,24 +193,47 @@
 		/// <summary>Exception thrown if there are any problems while
 		/// executing a merge. 
 		/// </summary>
-		[Serializable]
+		[System.Serializable]
 		public class MergeException:System.SystemException
 		{
-			public MergeException(System.String message) : base(message)
-			{
-			}
-			public MergeException(System.Exception exc) : base(null, exc)
+            private Directory dir;
+            [System.Obsolete("use MergePolicy.MergeException(string, Directory) instead")]
+            public MergeException(string message)
+                : base(message)
+            {
+            }
+            public MergeException(string message, Directory dir)
+                : base(message)
+            {
+                this.dir = dir;
+            }
+            [System.Obsolete("use MergePolicy.MergeException(System.Exception, Directory) instead")]
+            public MergeException(System.Exception exc)
+                : base(null, exc)
 			{
 			}
-		}
+            public MergeException(System.Exception exc, Directory dir)
+                : base(null, exc)
+            {
+                this.dir = dir;
+            }
+            /// <summary>
+            /// Returns the Directory of the index that hit the exception.
+            /// </summary>
+            /// <returns>the Directory of the index that hit the exception</returns>
+            public Directory GetDirectory()
+            {
+                return dir;
+            }
+        }
 		
-		[Serializable]
+		[System.Serializable]
 		public class MergeAbortedException:System.IO.IOException
 		{
 			public MergeAbortedException():base("merge is aborted")
 			{
 			}
-			public MergeAbortedException(System.String message):base(message)
+			public MergeAbortedException(string message):base(message)
 			{
 			}
 		}
@@ -247,8 +269,20 @@
 		/// SegmentInfo instances that must be merged away.  This
 		/// may be a subset of all SegmentInfos.
 		/// </param>
-		public abstract MergeSpecification FindMergesForOptimize(SegmentInfos segmentInfos, IndexWriter writer, int maxSegmentCount, System.Collections.Hashtable segmentsToOptimize);
-		
+		public abstract MergeSpecification FindMergesForOptimize(SegmentInfos segmentInfos, IndexWriter writer, int maxSegmentCount, Dictionary<SegmentInfo,SegmentInfo> segmentsToOptimize);
+
+        /// <summary>
+        /// Determine what set of merge operations is necessary in
+        /// order to expunge all deletes from the index.
+        /// </summary>
+        /// <param name="segmentInfos"></param>
+        /// <param name="writer"></param>
+        /// <returns></returns>
+        public virtual MergeSpecification FindMergesToExpungeDeletes(SegmentInfos segmentInfos, IndexWriter writer)
+        {
+            throw new System.SystemException("not implemented");
+        }
+
 		/// <summary> Release all resources for the policy.</summary>
 		public abstract void  Close();
 		

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultiLevelSkipListReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/MultiLevelSkipListReader.cs?rev=798995&r1=798994&r2=798995&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultiLevelSkipListReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultiLevelSkipListReader.cs Wed Jul 29 18:04:12 2009
@@ -308,7 +308,7 @@
 				this.pos = (int) (pos - pointer);
 			}
 			
-			//override public System.Object Clone()  // {{Aroush-2.3.1}} Do we need this?
+			//override public object Clone()  // {{Aroush-2.3.1}} Do we need this?
 			//{
 			//	return null;
 			//}

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultiReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/MultiReader.cs?rev=798995&r1=798994&r2=798995&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultiReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultiReader.cs Wed Jul 29 18:04:12 2009
@@ -29,8 +29,6 @@
 	/// <summary>An IndexReader which reads multiple indexes, appending their content.
 	/// 
 	/// </summary>
-	/// <version>  $Id: MultiReader.java 596004 2007-11-17 21:34:23Z buschmi $
-	/// </version>
 	public class MultiReader : IndexReader
 	{
 		protected internal IndexReader[] subReaders;
@@ -71,7 +69,7 @@
 		
 		private void  Initialize(IndexReader[] subReaders, bool closeSubReaders)
 		{
-			this.subReaders = subReaders;
+			this.subReaders = (IndexReader[]) subReaders.Clone();
 			starts = new int[subReaders.Length + 1]; // build starts array
 			decrefOnClose = new bool[subReaders.Length];
 			for (int i = 0; i < subReaders.Length; i++)
@@ -179,7 +177,7 @@
 									newSubReaders[i].Close();
 								}
 							}
-							catch (System.IO.IOException ignore)
+							catch (System.IO.IOException)
 							{
 								// keep going - we want to clean up as much as possible
 							}
@@ -348,7 +346,10 @@
 		
 		protected internal override void  DoSetNorm(int n, System.String field, byte value_Renamed)
 		{
-			normsCache.Remove(field); // clear cache
+            lock (normsCache)
+            {
+                normsCache.Remove(field); // clear cache
+            }
 			int i = ReaderIndex(n); // find segment num
 			subReaders[i].SetNorm(n - starts[i], field, value_Renamed); // dispatch
 		}
@@ -410,7 +411,7 @@
 			}
 		}
 		
-		public override System.Collections.ICollection GetFieldNames(IndexReader.FieldOption fieldNames)
+		public override System.Collections.Generic.ICollection<string> GetFieldNames(IndexReader.FieldOption fieldNames)
 		{
 			EnsureOpen();
 			return MultiSegmentReader.GetFieldNames(fieldNames, this.subReaders);

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultiSegmentReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/MultiSegmentReader.cs?rev=798995&r1=798994&r2=798995&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultiSegmentReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultiSegmentReader.cs Wed Jul 29 18:04:12 2009
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-using System;
+using System.Collections.Generic;
 
 using Document = Lucene.Net.Documents.Document;
 using FieldSelector = Lucene.Net.Documents.FieldSelector;
@@ -29,13 +29,13 @@
 	{
 		protected internal SegmentReader[] subReaders;
 		private int[] starts; // 1st docno for each segment
-		private System.Collections.Hashtable normsCache = System.Collections.Hashtable.Synchronized(new System.Collections.Hashtable());
+        private Dictionary<string, byte[]> normsCache = new Dictionary<string, byte[]>();
 		private int maxDoc = 0;
 		private int numDocs = - 1;
 		private bool hasDeletions = false;
 		
 		/// <summary>Construct reading the named set of readers. </summary>
-		internal MultiSegmentReader(Directory directory, SegmentInfos sis, bool closeDirectory):base(directory, sis, closeDirectory)
+		internal MultiSegmentReader(Directory directory, SegmentInfos sis, bool closeDirectory, bool readOnly):base(directory, sis, closeDirectory, readOnly)
 		{
 			// To reduce the chance of hitting FileNotFound
 			// (and having to retry), we open segments in
@@ -47,7 +47,7 @@
 			{
 				try
 				{
-					readers[i] = SegmentReader.Get(sis.Info(i));
+					readers[i] = SegmentReader.Get(readOnly, sis.Info(i));
 				}
 				catch (System.IO.IOException e)
 				{
@@ -58,7 +58,7 @@
 						{
 							readers[i].Close();
 						}
-						catch (System.IO.IOException ignore)
+						catch (System.IO.IOException)
 						{
 							// keep going - we want to clean up as much as possible
 						}
@@ -71,19 +71,20 @@
 		}
 		
 		/// <summary>This constructor is only used for {@link #Reopen()} </summary>
-		internal MultiSegmentReader(Directory directory, SegmentInfos infos, bool closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, System.Collections.IDictionary oldNormsCache):base(directory, infos, closeDirectory)
+		internal MultiSegmentReader(Directory directory, SegmentInfos infos, bool closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, Dictionary<string, byte[]> oldNormsCache, bool readOnly)
+            : base(directory, infos, closeDirectory, readOnly)
 		{
 			
 			// we put the old SegmentReaders in a map, that allows us
 			// to lookup a reader using its segment name
-			System.Collections.IDictionary segmentReaders = new System.Collections.Hashtable();
+            Dictionary<string, int> segmentReaders = new Dictionary<string, int>();
 			
 			if (oldReaders != null)
 			{
 				// create a Map SegmentName->SegmentReader
 				for (int i = 0; i < oldReaders.Length; i++)
 				{
-					segmentReaders[oldReaders[i].GetSegmentName()] = (System.Int32) i;
+					segmentReaders[oldReaders[i].GetSegmentName()] = i;
 				}
 			}
 			
@@ -96,16 +97,17 @@
 			for (int i = infos.Count - 1; i >= 0; i--)
 			{
 				// find SegmentReader for this segment
-				Object oldReaderIndex = segmentReaders[infos.Info(i).name];
-				if (oldReaderIndex == null)
+                int oldReaderIndex;
+                if (!segmentReaders.ContainsKey(infos.Info(i).name))
 				{
 					// this is a new segment, no old SegmentReader can be reused
 					newReaders[i] = null;
 				}
 				else
 				{
-					// there is an old reader for this segment - we'll try to reopen it
-					newReaders[i] = oldReaders[(System.Int32) oldReaderIndex];
+                    oldReaderIndex = segmentReaders[infos.Info(i).name];
+                    // there is an old reader for this segment - we'll try to reopen it
+					newReaders[i] = oldReaders[oldReaderIndex];
 				}
 				
 				bool success = false;
@@ -115,7 +117,7 @@
 					if (newReaders[i] == null || infos.Info(i).GetUseCompoundFile() != newReaders[i].GetSegmentInfo().GetUseCompoundFile())
 					{
 						// this is a new reader; in case we hit an exception we can close it safely
-						newReader = SegmentReader.Get(infos.Info(i));
+						newReader = SegmentReader.Get(readOnly, infos.Info(i));
 					}
 					else
 					{
@@ -158,7 +160,7 @@
 										newReaders[i].DecRef();
 									}
 								}
-								catch (System.IO.IOException ignore)
+								catch (System.IO.IOException)
 								{
 									// keep going - we want to clean up as much as possible
 								}
@@ -174,39 +176,45 @@
 			// try to copy unchanged norms from the old normsCache to the new one
 			if (oldNormsCache != null)
 			{
-				System.Collections.IEnumerator it = oldNormsCache.Keys.GetEnumerator();
+				IEnumerator<KeyValuePair<string, byte[]>> it = oldNormsCache.GetEnumerator();
 				while (it.MoveNext())
 				{
-					System.String field = (System.String) it.Current;
+                    KeyValuePair<string, byte[]> entry = it.Current;
+					string field = entry.Key;
 					if (!HasNorms(field))
 					{
 						continue;
 					}
 					
-					byte[] oldBytes = (byte[]) oldNormsCache[field];
-					
+					byte[] oldBytes = entry.Value;
 					byte[] bytes = new byte[MaxDoc()];
 					
 					for (int i = 0; i < subReaders.Length; i++)
 					{
-						Object oldReaderIndex = segmentReaders[subReaders[i].GetSegmentName()];
-						
-						// this SegmentReader was not re-opened, we can copy all of its norms 
-						if (oldReaderIndex != null && (oldReaders[(System.Int32) oldReaderIndex] == subReaders[i] || oldReaders[(System.Int32) oldReaderIndex].norms[field] == subReaders[i].norms[field]))
-						{
-							// we don't have to synchronize here: either this constructor is called from a SegmentReader,
-							// in which case no old norms cache is present, or it is called from MultiReader.reopen(),
-							// which is synchronized
-							Array.Copy(oldBytes, oldStarts[(System.Int32) oldReaderIndex], bytes, starts[i], starts[i + 1] - starts[i]);
-						}
-						else
-						{
-							subReaders[i].Norms(field, bytes, starts[i]);
-						}
-					}
+                        if (segmentReaders.ContainsKey(subReaders[i].GetSegmentName()))
+                        {
+                            int oldReaderIndex = segmentReaders[subReaders[i].GetSegmentName()];
+                            // this SegmentReader was not re-opened, we can copy all of its norms 
+                            if (oldReaders[oldReaderIndex] == subReaders[i] || oldReaders[oldReaderIndex].norms[field] == subReaders[i].norms[field])
+                            {
+                                // we don't have to synchronize here: either this constructor is called from a SegmentReader,
+                                // in which case no old norms cache is present, or it is called from MultiReader.reopen(),
+                                // which is synchronized
+                                System.Array.Copy(oldBytes, oldStarts[oldReaderIndex], bytes, starts[i], starts[i + 1] - starts[i]);
+                            }
+                            else
+                            {
+                                subReaders[i].Norms(field, bytes, starts[i]);
+                            }
+                        }
+                        else
+                        {
+                            subReaders[i].Norms(field, bytes, starts[i]);
+                        }
+                    }
 					
 					normsCache[field] = bytes; // update cache
-				}
+                }
 			}
 		}
 		
@@ -232,14 +240,17 @@
 				if (infos.Count == 1)
 				{
 					// The index has only one segment now, so we can't refresh the MultiSegmentReader.
-					// Return a new SegmentReader instead
-					SegmentReader newReader = SegmentReader.Get(infos, infos.Info(0), false);
-					return newReader;
-				}
-				else
-				{
-					return new MultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache);
+					// Return a new [ReadOnly]SegmentReader instead
+					return SegmentReader.Get(readOnly, infos, infos.Info(0), false);
 				}
+                else if (readOnly)
+                {
+                    return new ReadOnlyMultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache);
+                }
+                else
+                {
+                    return new MultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache, false);
+                }
 			}
 		}
 		
@@ -250,7 +261,7 @@
 			return subReaders[i].GetTermFreqVectors(n - starts[i]); // dispatch to segment
 		}
 		
-		public override TermFreqVector GetTermFreqVector(int n, System.String field)
+		public override TermFreqVector GetTermFreqVector(int n, string field)
 		{
 			EnsureOpen();
 			int i = ReaderIndex(n); // find segment num
@@ -258,7 +269,7 @@
 		}
 		
 		
-		public override void  GetTermFreqVector(int docNumber, System.String field, TermVectorMapper mapper)
+		public override void  GetTermFreqVector(int docNumber, string field, TermVectorMapper mapper)
 		{
 			EnsureOpen();
 			int i = ReaderIndex(docNumber); // find segment num
@@ -371,7 +382,7 @@
 			return hi;
 		}
 		
-		public override bool HasNorms(System.String field)
+		public override bool HasNorms(string field)
 		{
 			EnsureOpen();
 			for (int i = 0; i < subReaders.Length; i++)
@@ -390,36 +401,39 @@
 			return ones;
 		}
 		
-		public override byte[] Norms(System.String field)
+		public override byte[] Norms(string field)
 		{
 			lock (this)
 			{
 				EnsureOpen();
-				byte[] bytes = (byte[]) normsCache[field];
-				if (bytes != null)
-					return bytes; // cache hit
-				if (!HasNorms(field))
-					return fakeNorms();
-				
+                byte[] bytes = normsCache.ContainsKey(field) ? normsCache[field] : null;
+
+                if (bytes != null)
+                    return bytes; // cache hit
+
+                if (!HasNorms(field))
+                    return fakeNorms();
+
 				bytes = new byte[MaxDoc()];
 				for (int i = 0; i < subReaders.Length; i++)
 					subReaders[i].Norms(field, bytes, starts[i]);
-				normsCache[field] = bytes; // update cache
-				return bytes;
+                normsCache[field] = bytes; // update cache
+
+                return bytes;
 			}
 		}
 		
-		public override void  Norms(System.String field, byte[] result, int offset)
+		public override void  Norms(string field, byte[] result, int offset)
 		{
 			lock (this)
 			{
 				EnsureOpen();
-				byte[] bytes = (byte[]) normsCache[field];
+				byte[] bytes = normsCache.ContainsKey(field)? normsCache[field] : null;
 				if (bytes == null && !HasNorms(field))
 					bytes = fakeNorms();
 				if (bytes != null)
 				// cache hit
-					Array.Copy(bytes, 0, result, offset, MaxDoc());
+					System.Array.Copy(bytes, 0, result, offset, MaxDoc());
 				
 				for (int i = 0; i < subReaders.Length; i++)
 				// read from segments
@@ -427,9 +441,12 @@
 			}
 		}
 		
-		protected internal override void  DoSetNorm(int n, System.String field, byte value_Renamed)
+		protected internal override void  DoSetNorm(int n, string field, byte value_Renamed)
 		{
-			normsCache.Remove(field); // clear cache
+            lock (normsCache)
+            {
+                normsCache.Remove(field); // clear cache
+            }
 			int i = ReaderIndex(n); // find segment num
 			subReaders[i].SetNorm(n - starts[i], field, value_Renamed); // dispatch
 		}
@@ -503,24 +520,23 @@
 			}
 		}
 		
-		public override System.Collections.ICollection GetFieldNames(IndexReader.FieldOption fieldNames)
+		public override System.Collections.Generic.ICollection<string> GetFieldNames(IndexReader.FieldOption fieldNames)
 		{
 			EnsureOpen();
 			return GetFieldNames(fieldNames, this.subReaders);
 		}
 		
-		internal static System.Collections.ICollection GetFieldNames(IndexReader.FieldOption fieldNames, IndexReader[] subReaders)
+		internal static System.Collections.Generic.ICollection<string> GetFieldNames(IndexReader.FieldOption fieldNames, IndexReader[] subReaders)
 		{
 			// maintain a unique set of field names
-			System.Collections.Hashtable fieldSet = new System.Collections.Hashtable();
+            System.Collections.Generic.Dictionary<string, string> fieldSet = new System.Collections.Generic.Dictionary<string, string>();
 			for (int i = 0; i < subReaders.Length; i++)
 			{
 				IndexReader reader = subReaders[i];
-                System.Collections.IEnumerator names = reader.GetFieldNames(fieldNames).GetEnumerator();
+                System.Collections.Generic.IEnumerator<string> names = reader.GetFieldNames(fieldNames).GetEnumerator();
                 while (names.MoveNext())
 				{
-					if (!fieldSet.ContainsKey(names.Current))
-						fieldSet.Add(names.Current, names.Current);
+                    fieldSet[names.Current] = names.Current;
 				}
 			}
 			return fieldSet.Keys;

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultipleTermPositions.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/MultipleTermPositions.cs?rev=798995&r1=798994&r2=798995&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultipleTermPositions.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/MultipleTermPositions.cs Wed Jul 29 18:04:12 2009
@@ -23,10 +23,7 @@
 {
 	
 	/// <summary> Describe class <code>MultipleTermPositions</code> here.
-	/// 
 	/// </summary>
-	/// <author>  Anders Nielsen
-	/// </author>
 	/// <version>  1.0
 	/// </version>
 	public class MultipleTermPositions : TermPositions
@@ -52,7 +49,7 @@
 				return (TermPositions) Top();
 			}
 			
-			public override bool LessThan(System.Object a, System.Object b)
+			public override bool LessThan(object a, object b)
 			{
 				return ((TermPositions) a).Doc() < ((TermPositions) b).Doc();
 			}

Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/NormsWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/NormsWriter.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/NormsWriter.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/NormsWriter.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,209 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System.Collections.Generic;
+
+using IndexOutput = Lucene.Net.Store.IndexOutput;
+using Similarity = Lucene.Net.Search.Similarity;
+
+namespace Lucene.Net.Index
+{
+    // TODO FI: norms could actually be stored as doc store
+
+    /** Writes norms.  Each thread X field accumulates the norms
+     *  for the doc/fields it saw, then the flush method below
+     *  merges all of these together into a single _X.nrm file.
+     */
+
+    internal sealed class NormsWriter : InvertedDocEndConsumer
+    {
+
+        private static readonly byte defaultNorm = Similarity.EncodeNorm(1.0f);
+
+        private FieldInfos fieldInfos;
+        
+        internal override InvertedDocEndConsumerPerThread addThread(DocInverterPerThread docInverterPerThread)
+        {
+            return new NormsWriterPerThread(docInverterPerThread, this);
+        }
+
+        internal override void abort() { }
+
+        // We only write the _X.nrm file at flush
+        internal void files(ICollection<object> files) { }
+
+        internal override void setFieldInfos(FieldInfos fieldInfos)
+        {
+            this.fieldInfos = fieldInfos;
+        }
+
+        /** Produce _X.nrm if any document had a field with norms
+         *  not disabled */
+        internal override void flush(IDictionary<object, ICollection<object>> threadsAndFields, DocumentsWriter.FlushState state)
+        {
+
+            IDictionary<object, object> byField = new Dictionary<object, object>();
+
+            // Typically, each thread will have encountered the same
+            // field.  So first we collate by field, ie, all
+            // per-thread field instances that correspond to the
+            // same FieldInfo
+            IEnumerator<KeyValuePair<object, ICollection<object>>> it = threadsAndFields.GetEnumerator();
+            while (it.MoveNext())
+            {
+                KeyValuePair<object, ICollection<object>> entry = it.Current;
+
+                ICollection<object> fields = entry.Value;
+                IEnumerator<object> fieldsIt = fields.GetEnumerator();
+                List<object> fieldsToRemove = new List<object>(fields.Count);
+
+                while (fieldsIt.MoveNext())
+                {
+                    NormsWriterPerField perField = (NormsWriterPerField)fieldsIt.Current;
+
+                    if (perField.upto > 0)
+                    {
+                        // It has some norms
+                        IList<object> l;
+                        if (byField.ContainsKey(perField.fieldInfo))
+                        {
+                            l = (IList<object>)byField[perField.fieldInfo];
+                        }
+                        else
+                        {
+                            l = new List<object>();
+                            byField[perField.fieldInfo] = l;
+                        }
+                        //IList<object> l = (IList<object>)byField[perField.fieldInfo];
+                        //if (l == null)
+                        //{
+                        //    l = new List<object>();
+                        //    byField[perField.fieldInfo] = l;
+                        //}
+                        l.Add(perField);
+                    }
+                    else
+                    {
+                        // Remove this field since we haven't seen it
+                        // since the previous flush
+                        fieldsToRemove.Add(perField);
+                        //fields.Remove(perField);
+                    }
+                }
+                for (int i = 0; i < fieldsToRemove.Count; i++) fields.Remove(fieldsToRemove[i]);
+            }
+
+            string normsFileName = state.segmentName + "." + IndexFileNames.NORMS_EXTENSION;
+            state.flushedFiles[normsFileName] = normsFileName;
+            IndexOutput normsOut = state.directory.CreateOutput(normsFileName);
+
+            try
+            {
+                normsOut.WriteBytes(SegmentMerger.NORMS_HEADER, 0, SegmentMerger.NORMS_HEADER.Length);
+
+                int numField = fieldInfos.Size();
+
+                int normCount = 0;
+
+                for (int fieldNumber = 0; fieldNumber < numField; fieldNumber++)
+                {
+
+                    FieldInfo fieldInfo = fieldInfos.FieldInfo(fieldNumber);
+
+                    List<object> toMerge;
+                    int upto = 0;
+                    if (byField.ContainsKey(fieldInfo))
+                    {
+                        toMerge = (List<object>)byField[fieldInfo];
+
+                        int numFields = toMerge.Count;
+
+                        normCount++;
+
+                        NormsWriterPerField[] fields = new NormsWriterPerField[numFields];
+                        int[] uptos = new int[numFields];
+
+                        for (int j = 0; j < numFields; j++)
+                            fields[j] = (NormsWriterPerField)toMerge[j];
+
+                        int numLeft = numFields;
+
+                        while (numLeft > 0)
+                        {
+
+                            System.Diagnostics.Debug.Assert(uptos[0] < fields[0].docIDs.Length, " uptos[0]=" + uptos[0] + " len=" + (fields[0].docIDs.Length));
+
+                            int minLoc = 0;
+                            int minDocID = fields[0].docIDs[uptos[0]];
+
+                            for (int j = 1; j < numLeft; j++)
+                            {
+                                int docID = fields[j].docIDs[uptos[j]];
+                                if (docID < minDocID)
+                                {
+                                    minDocID = docID;
+                                    minLoc = j;
+                                }
+                            }
+
+                            System.Diagnostics.Debug.Assert(minDocID < state.numDocsInRAM);
+
+                            // Fill hole
+                            for (; upto < minDocID; upto++)
+                                normsOut.WriteByte(defaultNorm);
+
+                            normsOut.WriteByte(fields[minLoc].norms[uptos[minLoc]]);
+                            (uptos[minLoc])++;
+                            upto++;
+
+                            if (uptos[minLoc] == fields[minLoc].upto)
+                            {
+                                fields[minLoc].reset();
+                                if (minLoc != numLeft - 1)
+                                {
+                                    fields[minLoc] = fields[numLeft - 1];
+                                    uptos[minLoc] = uptos[numLeft - 1];
+                                }
+                                numLeft--;
+                            }
+                        }
+
+                        // Fill final hole with defaultNorm
+                        for (; upto < state.numDocsInRAM; upto++)
+                            normsOut.WriteByte(defaultNorm);
+                    }
+                    else if (fieldInfo.isIndexed && !fieldInfo.omitNorms)
+                    {
+                        normCount++;
+                        // Fill entire field with default norm:
+                        for (; upto < state.numDocsInRAM; upto++)
+                            normsOut.WriteByte(defaultNorm);
+                    }
+
+                    System.Diagnostics.Debug.Assert(4 + normCount * state.numDocsInRAM == normsOut.GetFilePointer(), ".nrm file size mismatch: expected=" + (4 + normCount * state.numDocsInRAM) + " actual=" + normsOut.GetFilePointer());
+                }
+
+            }
+            finally
+            {
+                normsOut.Close();
+            }
+        }
+
+        internal override void closeDocStore(DocumentsWriter.FlushState state) { }
+    }
+}
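
A note on the merge above: each thread contributed, per field, a sorted list of (docID, norm) pairs, and Flush repeatedly picks the list with the smallest current docID, padding any docID gaps with defaultNorm so the .nrm file carries exactly one byte per document for every normed field. The following self-contained C# sketch shows the same hole-filling merge against an in-memory array (hypothetical names, not part of this commit; each input list is assumed sorted and non-empty):

    using System;
    using System.Collections.Generic;

    static class NormsMergeSketch
    {
        // Merge sorted (docID, norm) lists into one dense byte per document,
        // writing defaultNorm into every hole -- the same idea NormsWriter.Flush
        // implements directly against an IndexOutput.
        public static byte[] Merge(List<int[]> docIDs, List<byte[]> norms,
                                   int numDocs, byte defaultNorm)
        {
            byte[] result = new byte[numDocs];
            for (int i = 0; i < numDocs; i++)
                result[i] = defaultNorm;

            int numLeft = docIDs.Count;
            int[] uptos = new int[numLeft];
            while (numLeft > 0)
            {
                // pick the list whose current docID is smallest
                int minLoc = 0;
                for (int j = 1; j < numLeft; j++)
                    if (docIDs[j][uptos[j]] < docIDs[minLoc][uptos[minLoc]])
                        minLoc = j;

                result[docIDs[minLoc][uptos[minLoc]]] = norms[minLoc][uptos[minLoc]];
                uptos[minLoc]++;

                if (uptos[minLoc] == docIDs[minLoc].Length)
                {
                    // list exhausted: swap the last live list into this slot
                    docIDs[minLoc] = docIDs[numLeft - 1];
                    norms[minLoc] = norms[numLeft - 1];
                    uptos[minLoc] = uptos[numLeft - 1];
                    numLeft--;
                }
            }
            return result;
        }

        static void Main()
        {
            List<int[]> ids = new List<int[]>();
            List<byte[]> ns = new List<byte[]>();
            ids.Add(new int[] { 0, 3 }); ns.Add(new byte[] { 10, 30 });
            ids.Add(new int[] { 1 });    ns.Add(new byte[] { 20 });
            // docs 2 and 4 produced no norm, so they get the default
            Console.WriteLine(BitConverter.ToString(Merge(ids, ns, 5, 0x77)));
            // prints 0A-14-77-1E-77
        }
    }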

Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/NormsWriterPerField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/NormsWriterPerField.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/NormsWriterPerField.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/NormsWriterPerField.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using ArrayUtil = Lucene.Net.Util.ArrayUtil;
+using Similarity = Lucene.Net.Search.Similarity;
+
+namespace Lucene.Net.Index
+{
+    internal sealed class NormsWriterPerField : InvertedDocEndConsumerPerField, System.IComparable
+    {
+        internal readonly NormsWriterPerThread perThread;
+        internal readonly FieldInfo fieldInfo;
+        internal readonly DocumentsWriter.DocState docState;
+
+        // Holds all docID/norm pairs we've seen
+        internal int[] docIDs = new int[1];
+        internal byte[] norms = new byte[1];
+        internal int upto;
+
+        internal readonly DocInverter.FieldInvertState fieldState;
+
+        public void reset()
+        {
+            // Shrink back if we are overallocated now:
+            docIDs = ArrayUtil.Shrink(docIDs, upto);
+            norms = ArrayUtil.Shrink(norms, upto);
+            upto = 0;
+        }
+
+        public NormsWriterPerField(DocInverterPerField docInverterPerField, NormsWriterPerThread perThread, FieldInfo fieldInfo)
+        {
+            this.perThread = perThread;
+            this.fieldInfo = fieldInfo;
+            docState = perThread.docState;
+            fieldState = docInverterPerField.fieldState;
+        }
+
+        internal override void abort()
+        {
+            upto = 0;
+        }
+
+        public int CompareTo(object other)
+        {
+            return string.CompareOrdinal(fieldInfo.name, ((NormsWriterPerField)other).fieldInfo.name);
+        }
+
+        internal override void finish()
+        {
+            System.Diagnostics.Debug.Assert(docIDs.Length == norms.Length);
+            if (fieldInfo.isIndexed && !fieldInfo.omitNorms)
+            {
+                if (docIDs.Length <= upto)
+                {
+                    System.Diagnostics.Debug.Assert(docIDs.Length == upto);
+                    docIDs = ArrayUtil.Grow(docIDs, 1 + upto);
+                    norms = ArrayUtil.Grow(norms, 1 + upto);
+                }
+                float norm = fieldState.boost * docState.similarity.LengthNorm(fieldInfo.name, fieldState.length);
+                norms[upto] = Similarity.EncodeNorm(norm);
+                docIDs[upto] = docState.docID;
+                upto++;
+            }
+        }
+    }
+}
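
finish() above appends one (docID, encoded norm) pair per indexed document, growing the two parallel arrays in lock step. A minimal sketch of that append-with-growth pattern follows; the EncodeNorm stand-in is only a placeholder, since the real Similarity.EncodeNorm packs the float into a lossy one-byte format:

    using System;

    class NormBuffer
    {
        // parallel arrays of docIDs and encoded norms, grown together
        // (the ArrayUtil.Grow pattern used above)
        int[] docIDs = new int[1];
        byte[] norms = new byte[1];
        int upto;

        public void Add(int docID, float boost, float lengthNorm)
        {
            if (upto == docIDs.Length)
            {
                Array.Resize(ref docIDs, docIDs.Length * 2);
                Array.Resize(ref norms, norms.Length * 2);
            }
            docIDs[upto] = docID;
            norms[upto] = EncodeNorm(boost * lengthNorm);
            upto++;
        }

        // placeholder encoder: clamps to one byte; NOT the real format
        static byte EncodeNorm(float f)
        {
            int v = (int)(f * 255.0f);
            if (v < 0) v = 0;
            if (v > 255) v = 255;
            return (byte)v;
        }

        static void Main()
        {
            NormBuffer b = new NormBuffer();
            b.Add(0, 1.0f, 0.5f);          // boost 1.0, lengthNorm 0.5
            Console.WriteLine(b.norms[0]); // 127 under the placeholder encoding
        }
    }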

Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/NormsWriterPerThread.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/NormsWriterPerThread.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/NormsWriterPerThread.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/NormsWriterPerThread.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Lucene.Net.Index
+{
+    internal sealed class NormsWriterPerThread : InvertedDocEndConsumerPerThread
+    {
+        internal readonly NormsWriter normsWriter;
+        internal readonly DocumentsWriter.DocState docState;
+
+        public NormsWriterPerThread(DocInverterPerThread docInverterPerThread, NormsWriter normsWriter)
+        {
+            this.normsWriter = normsWriter;
+            docState = docInverterPerThread.docState;
+        }
+
+        internal override InvertedDocEndConsumerPerField addField(DocInverterPerField docInverterPerField, FieldInfo fieldInfo)
+        {
+            return new NormsWriterPerField(docInverterPerField, this, fieldInfo);
+        }
+
+        internal override void abort() { }
+
+        internal override void startDocument() { }
+        internal override void finishDocument() { }
+
+        internal bool freeRAM()
+        {
+            return false;
+        }
+    }
+}
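
NormsWriterPerThread itself is plumbing: the indexing chain creates one such consumer per writer thread, and each of those fabricates a per-field consumer on demand. A toy sketch of that two-level layering (hypothetical names, not Lucene's API):

    using System;
    using System.Collections.Generic;

    // one consumer per thread; it creates one consumer per field it sees
    abstract class PerThreadConsumer
    {
        public abstract PerFieldConsumer AddField(string fieldName);
    }

    abstract class PerFieldConsumer
    {
        public abstract void Finish(int docID);
    }

    class NormsPerThread : PerThreadConsumer
    {
        Dictionary<string, PerFieldConsumer> fields =
            new Dictionary<string, PerFieldConsumer>();

        public override PerFieldConsumer AddField(string fieldName)
        {
            PerFieldConsumer f;
            if (!fields.TryGetValue(fieldName, out f))
            {
                f = new NormsPerField(fieldName);
                fields[fieldName] = f;
            }
            return f;
        }
    }

    class NormsPerField : PerFieldConsumer
    {
        readonly string name;
        public NormsPerField(string name) { this.name = name; }

        public override void Finish(int docID)
        {
            Console.WriteLine("field " + name + ": record norm for doc " + docID);
        }
    }

    class Demo
    {
        static void Main()
        {
            PerThreadConsumer t = new NormsPerThread();
            t.AddField("body").Finish(0);
            t.AddField("body").Finish(1); // same per-field instance is reused
        }
    }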

Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ParallelReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/ParallelReader.cs?rev=798995&r1=798994&r2=798995&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ParallelReader.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ParallelReader.cs Wed Jul 29 18:04:12 2009
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-using System;
+using System.Collections.Generic;
 
 using Document = Lucene.Net.Documents.Document;
 using FieldSelector = Lucene.Net.Documents.FieldSelector;
@@ -45,12 +45,12 @@
 	/// </summary>
 	public class ParallelReader : IndexReader
 	{
-		private System.Collections.ArrayList readers = new System.Collections.ArrayList();
-		private System.Collections.IList decrefOnClose = new System.Collections.ArrayList(); // remember which subreaders to decRef on close
+		private List<IndexReader> readers = new List<IndexReader>();
+		private List<bool> decrefOnClose = new List<bool>(); // remember which subreaders to decRef on close
 		internal bool incRefReaders = false;
-		private System.Collections.SortedList fieldToReader = new System.Collections.SortedList();
-		private System.Collections.IDictionary readerToFields = new System.Collections.Hashtable();
-		private System.Collections.IList storedFieldReaders = new System.Collections.ArrayList();
+        private SortedDictionary<string, IndexReader> fieldToReader = new SortedDictionary<string, IndexReader>();
+		private Dictionary<IndexReader, ICollection<string>> readerToFields = new Dictionary<IndexReader, ICollection<string>>();
+        private List<IndexReader> storedFieldReaders = new List<IndexReader>();
 		
 		private int maxDoc;
 		private int numDocs;
@@ -109,17 +109,15 @@
 			if (reader.NumDocs() != numDocs)
 				throw new System.ArgumentException("All readers must have same numDocs: " + numDocs + "!=" + reader.NumDocs());
 			
-			System.Collections.ICollection fields = reader.GetFieldNames(IndexReader.FieldOption.ALL);
+			ICollection<string> fields = reader.GetFieldNames(IndexReader.FieldOption.ALL);
 			readerToFields[reader] = fields;
-			System.Collections.IEnumerator i = fields.GetEnumerator();
+			IEnumerator<string> i = fields.GetEnumerator();
 			while (i.MoveNext())
 			{
-                //System.Collections.DictionaryEntry fi = (System.Collections.DictionaryEntry) i.Current;
-
                 //// update fieldToReader map
-                //System.String field = fi.Key.ToString();
-                System.String field = (String) i.Current;
-				if (fieldToReader[field] == null)
+                string field = i.Current;
+                //if (fieldToReader[field] == null)
+                if (!fieldToReader.ContainsKey(field))
 					fieldToReader[field] = reader;
 			}
 			
@@ -158,7 +156,7 @@
 			
 			bool reopened = false;
 			System.Collections.IList newReaders = new System.Collections.ArrayList();
-			System.Collections.IList newDecrefOnClose = new System.Collections.ArrayList();
+            List<bool> newDecrefOnClose = new List<bool>();
 			
 			bool success = false;
 			
@@ -167,7 +165,7 @@
 				
 				for (int i = 0; i < readers.Count; i++)
 				{
-					IndexReader oldReader = (IndexReader) readers[i];
+					IndexReader oldReader = readers[i];
 					IndexReader newReader = oldReader.Reopen();
 					newReaders.Add(newReader);
 					// if at least one of the subreaders was updated we remember that
@@ -183,7 +181,7 @@
 					ParallelReader pr = new ParallelReader();
 					for (int i = 0; i < readers.Count; i++)
 					{
-						IndexReader oldReader = (IndexReader) readers[i];
+						IndexReader oldReader = readers[i];
 						IndexReader newReader = (IndexReader) newReaders[i];
 						if (newReader == oldReader)
 						{
@@ -220,7 +218,7 @@
 						if (r != null)
 						{
 							try
-							{
+							{   
 								if (((System.Boolean) newDecrefOnClose[i]))
 								{
 									r.DecRef();
@@ -230,7 +228,7 @@
 									r.Close();
 								}
 							}
-							catch (System.IO.IOException ignore)
+							catch (System.IO.IOException)
 							{
 								// keep going - we want to clean up as much as possible
 							}
@@ -264,7 +262,7 @@
 		{
 			// Don't call ensureOpen() here (it could affect performance)
 			if (readers.Count > 0)
-				return ((IndexReader) readers[0]).IsDeleted(n);
+				return readers[0].IsDeleted(n);
 			return false;
 		}
 		
@@ -273,7 +271,7 @@
 		{
 			for (int i = 0; i < readers.Count; i++)
 			{
-				((IndexReader) readers[i]).DeleteDocument(n);
+				readers[i].DeleteDocument(n);
 			}
 			hasDeletions = true;
 		}
@@ -283,7 +281,7 @@
 		{
 			for (int i = 0; i < readers.Count; i++)
 			{
-				((IndexReader) readers[i]).UndeleteAll();
+				readers[i].UndeleteAll();
 			}
 			hasDeletions = false;
 		}
@@ -295,15 +293,15 @@
 			Document result = new Document();
 			for (int i = 0; i < storedFieldReaders.Count; i++)
 			{
-				IndexReader reader = (IndexReader) storedFieldReaders[i];
+				IndexReader reader = storedFieldReaders[i];
 				
 				bool include = (fieldSelector == null);
 				if (!include)
 				{
-					System.Collections.IEnumerator it = ((System.Collections.ICollection) readerToFields[reader]).GetEnumerator();
+					IEnumerator<string> it = readerToFields[reader].GetEnumerator();
 					while (it.MoveNext())
 					{
-						if (fieldSelector.Accept((System.String)it.Current) != FieldSelectorResult.NO_LOAD)
+						if (fieldSelector.Accept(it.Current) != FieldSelectorResult.NO_LOAD)
 						{
 							include = true;
 							break;
@@ -326,24 +324,24 @@
 		public override TermFreqVector[] GetTermFreqVectors(int n)
 		{
 			EnsureOpen();
-			System.Collections.ArrayList results = new System.Collections.ArrayList();
-			System.Collections.IEnumerator i = new System.Collections.Hashtable(fieldToReader).GetEnumerator();
+            List<TermFreqVector> results = new List<TermFreqVector>();
+			IEnumerator<KeyValuePair<string, IndexReader>> i = fieldToReader.GetEnumerator();
 			while (i.MoveNext())
 			{
-				System.Collections.DictionaryEntry e = (System.Collections.DictionaryEntry) i.Current;
-				System.String field = (System.String) e.Key;
-				IndexReader reader = (IndexReader) e.Value;
+                KeyValuePair<string, IndexReader> e = i.Current;
+				string field = e.Key;
+				IndexReader reader = e.Value;
 				TermFreqVector vector = reader.GetTermFreqVector(n, field);
 				if (vector != null)
 					results.Add(vector);
 			}
-			return (TermFreqVector[]) (results.ToArray(typeof(TermFreqVector)));
+			return results.ToArray();
 		}
 		
 		public override TermFreqVector GetTermFreqVector(int n, System.String field)
 		{
 			EnsureOpen();
-			IndexReader reader = ((IndexReader) fieldToReader[field]);
+			IndexReader reader; fieldToReader.TryGetValue(field, out reader);
 			return reader == null ? null : reader.GetTermFreqVector(n, field);
 		}
 		
@@ -351,7 +349,7 @@
 		public override void  GetTermFreqVector(int docNumber, System.String field, TermVectorMapper mapper)
 		{
 			EnsureOpen();
-			IndexReader reader = ((IndexReader) fieldToReader[field]);
+			IndexReader reader; fieldToReader.TryGetValue(field, out reader);
 			if (reader != null)
 			{
 				reader.GetTermFreqVector(docNumber, field, mapper);
@@ -376,28 +374,28 @@
 		public override bool HasNorms(System.String field)
 		{
 			EnsureOpen();
-			IndexReader reader = ((IndexReader) fieldToReader[field]);
+			IndexReader reader; fieldToReader.TryGetValue(field, out reader);
 			return reader == null ? false : reader.HasNorms(field);
 		}
 		
 		public override byte[] Norms(System.String field)
 		{
 			EnsureOpen();
-			IndexReader reader = ((IndexReader) fieldToReader[field]);
+			IndexReader reader; fieldToReader.TryGetValue(field, out reader);
 			return reader == null ? null : reader.Norms(field);
 		}
 		
 		public override void  Norms(System.String field, byte[] result, int offset)
 		{
 			EnsureOpen();
-			IndexReader reader = ((IndexReader) fieldToReader[field]);
+			IndexReader reader; fieldToReader.TryGetValue(field, out reader);
 			if (reader != null)
 				reader.Norms(field, result, offset);
 		}
 		
 		protected internal override void  DoSetNorm(int n, System.String field, byte value_Renamed)
 		{
-			IndexReader reader = ((IndexReader) fieldToReader[field]);
+			IndexReader reader; fieldToReader.TryGetValue(field, out reader);
 			if (reader != null)
 				reader.DoSetNorm(n, field, value_Renamed);
 		}
@@ -450,7 +448,7 @@
 		{
 			for (int i = 0; i < readers.Count; i++)
 			{
-				if (!((IndexReader) readers[i]).IsCurrent())
+				if (!readers[i].IsCurrent())
 				{
 					return false;
 				}
@@ -465,7 +463,7 @@
 		{
 			for (int i = 0; i < readers.Count; i++)
 			{
-				if (!((IndexReader) readers[i]).IsOptimized())
+				if (!readers[i].IsOptimized())
 				{
 					return false;
 				}
@@ -486,13 +484,13 @@
 		// for testing
 		public /*internal*/ virtual IndexReader[] GetSubReaders()
 		{
-			return (IndexReader[]) readers.ToArray(typeof(IndexReader));
+			return readers.ToArray();
 		}
 		
 		protected internal override void  DoCommit()
 		{
 			for (int i = 0; i < readers.Count; i++)
-				((IndexReader) readers[i]).Commit();
+				readers[i].Commit();
 		}
 		
 		protected internal override void  DoClose()
@@ -501,33 +499,30 @@
 			{
 				for (int i = 0; i < readers.Count; i++)
 				{
-					if (((System.Boolean) decrefOnClose[i]))
+					if (decrefOnClose[i])
 					{
-						((IndexReader) readers[i]).DecRef();
+						readers[i].DecRef();
 					}
 					else
 					{
-						((IndexReader) readers[i]).Close();
+						readers[i].Close();
 					}
 				}
 			}
 		}
 		
-		public override System.Collections.ICollection GetFieldNames(IndexReader.FieldOption fieldNames)
+		public override System.Collections.Generic.ICollection<string> GetFieldNames(IndexReader.FieldOption fieldNames)
 		{
 			EnsureOpen();
-			System.Collections.Hashtable fieldSet = new System.Collections.Hashtable();
+            System.Collections.Generic.Dictionary<string, string> fieldSet = new System.Collections.Generic.Dictionary<string, string>();
 			for (int i = 0; i < readers.Count; i++)
 			{
-				IndexReader reader = ((IndexReader) readers[i]);
-				System.Collections.ICollection names = reader.GetFieldNames(fieldNames);
-				for (System.Collections.IEnumerator iterator = names.GetEnumerator(); iterator.MoveNext(); )
+				IndexReader reader = readers[i];
+				System.Collections.Generic.ICollection<string> names = reader.GetFieldNames(fieldNames);
+				for (System.Collections.Generic.IEnumerator<string> iterator = names.GetEnumerator(); iterator.MoveNext(); )
 				{
-                    System.String s = (System.String)iterator.Current;
-					if (fieldSet.ContainsKey(s) == false)
-					{
-						fieldSet.Add(s, s);
-					}
+                    string s = iterator.Current;
+                    fieldSet[s] = s;
 				}
 			}
 			return fieldSet.Keys;
@@ -549,22 +544,26 @@
 				
 			}
 			private System.String field;
-			private System.Collections.IEnumerator fieldIterator;
+			private IEnumerator<string> fieldIterator;
 			private TermEnum termEnum;
 			
 			public ParallelTermEnum(ParallelReader enclosingInstance)
 			{
 				InitBlock(enclosingInstance);
-				field = ((System.String) Enclosing_Instance.fieldToReader.GetKey(0));
-				if (field != null)
-					termEnum = ((IndexReader) Enclosing_Instance.fieldToReader[field]).Terms();
+                IEnumerator<string> e = Enclosing_Instance.fieldToReader.Keys.GetEnumerator();
+                if (e.MoveNext())
+                {
+                    field = e.Current;
+                    if (field != null)
+                        termEnum = Enclosing_Instance.fieldToReader[field].Terms();
+                }
 			}
 			
 			public ParallelTermEnum(ParallelReader enclosingInstance, Term term)
 			{
 				InitBlock(enclosingInstance);
 				field = term.Field();
-				IndexReader reader = ((IndexReader) Enclosing_Instance.fieldToReader[field]);
+				IndexReader reader; Enclosing_Instance.fieldToReader.TryGetValue(field, out reader);
 				if (reader != null)
 					termEnum = reader.Terms(term);
 			}
@@ -575,7 +574,7 @@
 					return false;
 				
 				// another term in this field?
-				if (termEnum.Next() && (System.Object) termEnum.Term().Field() == (System.Object) field)
+				if (termEnum.Next() && (object) termEnum.Term().Field() == (object) field)
 					return true; // yes, keep going
 				
 				termEnum.Close(); // close old termEnum
@@ -583,15 +582,14 @@
 				// find the next field with terms, if any
 				if (fieldIterator == null)
 				{
-					fieldIterator = SupportClass.TailMap(Enclosing_Instance.fieldToReader, field).Keys.GetEnumerator();
-                    fieldIterator.MoveNext();                     // Skip field to get next one
+					fieldIterator = SupportClass.CollectionsSupport.TailMap(Enclosing_Instance.fieldToReader, field).Keys.GetEnumerator();
 				}
 				while (fieldIterator.MoveNext())
 				{
-					field = ((System.String) fieldIterator.Current);
-					termEnum = ((IndexReader) Enclosing_Instance.fieldToReader[field]).Terms(new Term(field, ""));
+					field = fieldIterator.Current;
+					termEnum = Enclosing_Instance.fieldToReader[field].Terms(new Term(field));
 					Term term = termEnum.Term();
-					if (term != null && (System.Object) term.Field() == (System.Object) field)
+					if (term != null && (object) term.Field() == (object) field)
 						return true;
 					else
 						termEnum.Close();
@@ -662,7 +660,7 @@
 			
 			public virtual void  Seek(Term term)
 			{
-				IndexReader reader = ((IndexReader) Enclosing_Instance.fieldToReader[term.Field()]);
+				IndexReader reader; Enclosing_Instance.fieldToReader.TryGetValue(term.Field(), out reader);
 				termDocs = reader != null ? reader.TermDocs(term) : null;
 			}
 			
@@ -730,7 +728,7 @@
 			
 			public override void  Seek(Term term)
 			{
-				IndexReader reader = ((IndexReader) Enclosing_Instance.fieldToReader[term.Field()]);
+				IndexReader reader; Enclosing_Instance.fieldToReader.TryGetValue(term.Field(), out reader);
 				termDocs = reader != null ? reader.TermPositions(term) : null;
 			}
 			

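Most of the ParallelReader changes above swap the untyped System.Collections containers for generic ones, which removes the downcasts but also changes missing-key behavior: the old Hashtable and SortedList indexers return null for an absent key, while a generic dictionary indexer throws KeyNotFoundException, hence the ContainsKey/TryGetValue probes. A short illustration (IndexReaderStub is a hypothetical stand-in):

    using System;
    using System.Collections;
    using System.Collections.Generic;

    class IndexReaderStub { }

    class MissingKeySemantics
    {
        static void Main()
        {
            Hashtable legacy = new Hashtable();
            Console.WriteLine(legacy["absent"] == null);  // True: null, no exception

            SortedDictionary<string, IndexReaderStub> map =
                new SortedDictionary<string, IndexReaderStub>();
            // map["absent"] here would throw KeyNotFoundException, so probe first:
            if (!map.ContainsKey("absent"))
                map["absent"] = new IndexReaderStub();

            IndexReaderStub r;
            if (map.TryGetValue("absent", out r))         // the other safe probe
                Console.WriteLine("found: " + r);
        }
    }
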
Modified: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/Payload.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/Payload.cs?rev=798995&r1=798994&r2=798995&view=diff
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/Payload.cs (original)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/Payload.cs Wed Jul 29 18:04:12 2009
@@ -19,6 +19,8 @@
 
 using Token = Lucene.Net.Analysis.Token;
 using TokenStream = Lucene.Net.Analysis.TokenStream;
+using ArrayUtil = Lucene.Net.Util.ArrayUtil;
+
 
 namespace Lucene.Net.Index
 {
@@ -162,10 +164,47 @@
 		/// <summary> Clones this payload by creating a copy of the underlying
 		/// byte array.
 		/// </summary>
-		public virtual System.Object Clone()
+		public virtual object Clone()
 		{
-			Payload clone = new Payload(this.ToByteArray());
-			return clone;
+            // start with a shallow copy of data
+            Payload clone = (Payload) base.MemberwiseClone();
+            // only copy the part of data that belongs to this payload
+            if (offset == 0 && length == data.Length)
+                // it is the whole thing so just clone it
+                clone.data = (byte[])data.Clone();
+            else
+            {
+                // just get the part
+                clone.data = this.ToByteArray();
+                clone.offset = 0;
+            }
+            return clone;
 		}
+
+        public override bool Equals(object obj)
+        {
+            if (obj == this)
+                return true;
+            if (obj is Payload)
+            {
+                Payload other = (Payload)obj;
+                if (length == other.length)
+                {
+                    for (int i = 0; i < length; i++)
+                        if (data[offset + i] != other.data[other.offset + i])
+                            return false;
+                    return true;
+                }
+                else
+                    return false;
+            }
+            else
+                return false;
+        }
+
+        public override int GetHashCode()
+        {
+            return ArrayUtil.HashCode(data, offset, offset + length);
+        }
 	}
 }
\ No newline at end of file
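
The rewritten Equals compares only the [offset, offset + length) window of each backing array, so two payloads agree whenever their visible bytes match, however the underlying buffers are shared or sliced. A small sketch of that windowed comparison (hypothetical helper, not the committed API):

    using System;

    static class ByteRangeEquality
    {
        // value equality over a window of each array -- the contract
        // Payload.Equals implements for its (data, offset, length) triple
        public static bool RangesEqual(byte[] x, int xOff, byte[] y, int yOff, int len)
        {
            for (int i = 0; i < len; i++)
                if (x[xOff + i] != y[yOff + i])
                    return false;
            return true;
        }

        static void Main()
        {
            byte[] shared = new byte[] { 9, 1, 2, 3, 9 };
            byte[] copy = new byte[] { 1, 2, 3 };
            // the [1, 4) slice of shared holds the same bytes as copy
            Console.WriteLine(RangesEqual(shared, 1, copy, 0, 3)); // True
        }
    }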

Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/RawPostingList.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/RawPostingList.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/RawPostingList.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/RawPostingList.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Lucene.Net.Index
+{
+    /** This is the base class for an in-memory posting list,
+     *  keyed by a Token.  {@link TermsHash} maintains a hash
+     *  table holding one instance of this per unique Token.
+     *  Consumers of TermsHash ({@link TermsHashConsumer}) must
+     *  subclass this class with its own concrete class.
+     *  {@link FreqProxTermsWriter.RawPostingList} is the
+     *  subclass used for the freq/prox postings, and {@link
+     *  TermVectorsTermsWriter.PostingList} is the subclass
+     *  used to hold TermVectors postings. */
+
+    abstract class RawPostingList
+    {
+        internal readonly static int BYTES_SIZE = DocumentsWriter.OBJECT_HEADER_BYTES + 3 * DocumentsWriter.INT_NUM_BYTE;
+        internal int textStart;
+        internal int intStart;
+        internal int byteStart;
+    }
+}
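
BYTES_SIZE is the RAM-accounting estimate for one posting object: an object header plus the class's three int fields. A sketch of how such a constant feeds a memory budget, with assumed values for the two DocumentsWriter constants (8-byte object header, 4-byte int; the real values live in DocumentsWriter):

    using System;

    static class PostingRamEstimate
    {
        // assumed constants; DocumentsWriter defines the real ones
        const int OBJECT_HEADER_BYTES = 8;
        const int INT_NUM_BYTE = 4;

        // one RawPostingList = header + textStart + intStart + byteStart
        const int BYTES_SIZE = OBJECT_HEADER_BYTES + 3 * INT_NUM_BYTE;

        static void Main()
        {
            int numUniqueTerms = 1000000;
            long bytes = (long) numUniqueTerms * BYTES_SIZE;
            Console.WriteLine("~" + (bytes / (1024 * 1024)) + " MB in posting objects");
        }
    }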

Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ReadOnlyMultiSegmentReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/ReadOnlyMultiSegmentReader.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ReadOnlyMultiSegmentReader.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ReadOnlyMultiSegmentReader.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System.Collections.Generic;
+
+using Directory = Lucene.Net.Store.Directory;
+
+namespace Lucene.Net.Index
+{
+    internal class ReadOnlyMultiSegmentReader : MultiSegmentReader
+    {
+        internal ReadOnlyMultiSegmentReader(Directory directory, SegmentInfos sis, bool closeDirectory)
+            : base(directory, sis, closeDirectory, true)
+        {
+        }
+
+        internal ReadOnlyMultiSegmentReader(Directory directory, SegmentInfos infos, bool closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, Dictionary<string, byte[]> oldNormsCache)
+            : base(directory, infos, closeDirectory, oldReaders, oldStarts, oldNormsCache, true)
+        {
+        }
+
+        protected internal override void AcquireWriteLock()
+        {
+            ReadOnlySegmentReader.NoWrite();
+        }
+    }
+}

Added: incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ReadOnlySegmentReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/C%23/src/Lucene.Net/Index/ReadOnlySegmentReader.cs?rev=798995&view=auto
==============================================================================
--- incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ReadOnlySegmentReader.cs (added)
+++ incubator/lucene.net/trunk/C#/src/Lucene.Net/Index/ReadOnlySegmentReader.cs Wed Jul 29 18:04:12 2009
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Lucene.Net.Index
+{
+    internal class ReadOnlySegmentReader : SegmentReader
+    {
+        internal static void NoWrite()
+        {
+            throw new System.Exception("This IndexReader cannot make any changes to the index (it was opened with readOnly = true)");
+        }
+
+        protected internal override void AcquireWriteLock()
+        {
+            NoWrite();
+        }
+
+        // Not synchronized
+        public override bool IsDeleted(int n)
+        {
+            return deletedDocs != null && deletedDocs.Get(n);
+        }
+    }
+}
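
Every mutating operation on an IndexReader funnels through AcquireWriteLock, so overriding it with NoWrite() is enough for both read-only readers to reject deletes, undeletes, and norm updates. A hedged usage sketch (the readOnly overload of IndexReader.Open and an existing index at the given path are assumed):

    using System;
    using Lucene.Net.Index;
    using Lucene.Net.Store;

    class ReadOnlyUsage
    {
        static void Main()
        {
            Directory dir = FSDirectory.GetDirectory("/tmp/index");
            IndexReader reader = IndexReader.Open(dir, true); // readOnly = true
            try
            {
                reader.DeleteDocument(0); // routed through AcquireWriteLock
            }
            catch (Exception e)
            {
                // "This IndexReader cannot make any changes to the index ..."
                Console.WriteLine(e.Message);
            }
            finally
            {
                reader.Close();
            }
        }
    }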