Posted to commits@lucenenet.apache.org by cc...@apache.org on 2011/11/09 22:03:52 UTC

[Lucene.Net] svn commit: r1199962 [7/14] - in /incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk: src/core/ src/core/Analysis/ src/core/Analysis/Standard/ src/core/Document/ src/core/Index/ src/core/QueryParser/ src/core/Search/ src/core/Search/Function/ src/cor...

Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/InvertedDocEndConsumer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/InvertedDocEndConsumer.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/InvertedDocEndConsumer.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/InvertedDocEndConsumer.cs Wed Nov  9 21:03:47 2011
@@ -16,6 +16,7 @@
  */
 
 using System;
+using System.Collections.Generic;
 
 namespace Lucene.Net.Index
 {
@@ -23,7 +24,7 @@ namespace Lucene.Net.Index
 	abstract class InvertedDocEndConsumer
 	{
 		public abstract InvertedDocEndConsumerPerThread AddThread(DocInverterPerThread docInverterPerThread);
-		public abstract void  Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state);
+        public abstract void Flush(IDictionary<InvertedDocEndConsumerPerThread, ICollection<InvertedDocEndConsumerPerField>> threadsAndFields, SegmentWriteState state);
 		internal abstract void  CloseDocStore(SegmentWriteState state);
 		public abstract void  Abort();
 		internal abstract void  SetFieldInfos(FieldInfos fieldInfos);

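The Flush signature above moves from the untyped IDictionary to IDictionary<InvertedDocEndConsumerPerThread, ICollection<InvertedDocEndConsumerPerField>>, so implementations can walk threads and their fields without casts. A minimal sketch of a consumer written against the new signature (the NoOpDocEndConsumer class is hypothetical, for illustration only):

    using System.Collections.Generic;

    sealed class NoOpDocEndConsumer : InvertedDocEndConsumer
    {
        public override void Flush(
            IDictionary<InvertedDocEndConsumerPerThread,
                        ICollection<InvertedDocEndConsumerPerField>> threadsAndFields,
            SegmentWriteState state)
        {
            foreach (KeyValuePair<InvertedDocEndConsumerPerThread,
                                  ICollection<InvertedDocEndConsumerPerField>> entry in threadsAndFields)
            {
                foreach (InvertedDocEndConsumerPerField field in entry.Value)
                {
                    // per-field flush work goes here; strongly typed, no casts
                }
            }
        }

        // Stubs so the sketch is complete; a real consumer does actual work here.
        public override InvertedDocEndConsumerPerThread AddThread(DocInverterPerThread docInverterPerThread) { return null; }
        internal override void CloseDocStore(SegmentWriteState state) { }
        public override void Abort() { }
        internal override void SetFieldInfos(FieldInfos fieldInfos) { }
    }
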
Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/LogByteSizeMergePolicy.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/LogByteSizeMergePolicy.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/LogByteSizeMergePolicy.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/LogByteSizeMergePolicy.cs Wed Nov  9 21:03:47 2011
@@ -23,7 +23,7 @@ namespace Lucene.Net.Index
 	/// <summary>This is a <see cref="LogMergePolicy" /> that measures size of a
 	/// segment as the total byte size of the segment's files. 
 	/// </summary>
-	public class LogByteSizeMergePolicy:LogMergePolicy
+	public class LogByteSizeMergePolicy : LogMergePolicy
 	{
 		
 		/// <seealso cref="SetMinMergeMB">
@@ -33,9 +33,10 @@ namespace Lucene.Net.Index
 		/// <summary>Default maximum segment size.  A segment of this size</summary>
 		/// <seealso cref="SetMaxMergeMB">
 		/// </seealso>
-		public static readonly long DEFAULT_MAX_MERGE_MB = System.Int64.MaxValue;
+		public static readonly long DEFAULT_MAX_MERGE_MB = long.MaxValue;
 		
-		public LogByteSizeMergePolicy(IndexWriter writer):base(writer)
+		public LogByteSizeMergePolicy(IndexWriter writer)
+            : base(writer)
 		{
 			minMergeSize = (long) (DEFAULT_MIN_MERGE_MB * 1024 * 1024);
             //mgarski - the line below causes an overflow in .NET, resulting in a negative number...

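The mgarski note above refers to the byte conversion: with DEFAULT_MAX_MERGE_MB now long.MaxValue, multiplying by 1024 * 1024 wraps under C#'s default unchecked arithmetic and produces a negative size. A small illustration of the failure mode and one possible saturating guard (the guard is illustrative, not the code this commit uses):

    long mb = long.MaxValue;
    long bytes = mb * 1024 * 1024;             // unchecked: wraps to -1048576

    // guard: saturate at long.MaxValue instead of overflowing
    long safeBytes = mb > long.MaxValue / (1024 * 1024)
                         ? long.MaxValue
                         : mb * 1024 * 1024;
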
Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/LogDocMergePolicy.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/LogDocMergePolicy.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/LogDocMergePolicy.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/LogDocMergePolicy.cs Wed Nov  9 21:03:47 2011
@@ -25,7 +25,7 @@ namespace Lucene.Net.Index
 	/// into account). 
 	/// </summary>
 	
-	public class LogDocMergePolicy:LogMergePolicy
+	public class LogDocMergePolicy : LogMergePolicy
 	{
 		
 		/// <seealso cref="SetMinMergeDocs">

Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/LogMergePolicy.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/LogMergePolicy.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/LogMergePolicy.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/LogMergePolicy.cs Wed Nov  9 21:03:47 2011
@@ -16,6 +16,7 @@
  */
 
 using System;
+using System.Collections.Generic;
 
 namespace Lucene.Net.Index
 {
@@ -38,7 +39,7 @@ namespace Lucene.Net.Index
 	/// file(s) for the segment.<p/>
 	/// </summary>
 	
-	public abstract class LogMergePolicy:MergePolicy
+	public abstract class LogMergePolicy : MergePolicy
 	{
 		
 		/// <summary>Defines the allowed range of log(size) for each
@@ -205,9 +206,16 @@ namespace Lucene.Net.Index
 			return calibrateSizeByDeletes;
 		}
 		
+        [Obsolete("This method is being replaced by Dispose()")]
 		public override void  Close()
 		{
+            Dispose();
 		}
+
+        public override void Dispose()
+        {
+            // Nothing to release: LogMergePolicy holds no disposable resources.
+        }
 		
 		abstract protected internal long Size(SegmentInfo info);
 		
@@ -239,7 +247,7 @@ namespace Lucene.Net.Index
 			}
 		}
 		
-		private bool IsOptimized(SegmentInfos infos, int maxNumSegments, System.Collections.Hashtable segmentsToOptimize)
+		private bool IsOptimized(SegmentInfos infos, int maxNumSegments, ISet<SegmentInfo> segmentsToOptimize)
 		{
 			int numSegments = infos.Count;
 			int numToOptimize = 0;
@@ -277,7 +285,7 @@ namespace Lucene.Net.Index
 		/// (mergeFactor at a time) so the <see cref="MergeScheduler" />
 		/// in use may make use of concurrency. 
 		/// </summary>
-		public override MergeSpecification FindMergesForOptimize(SegmentInfos infos, int maxNumSegments, System.Collections.Hashtable segmentsToOptimize)
+		public override MergeSpecification FindMergesForOptimize(SegmentInfos infos, int maxNumSegments, ISet<SegmentInfo> segmentsToOptimize)
 		{
 			MergeSpecification spec;
 			
@@ -558,14 +566,14 @@ namespace Lucene.Net.Index
             else
             {
                 long totSize = 0;
-                for (int i = 0; i < infos.Count; i++)
+                foreach(SegmentInfo info in infos)
                 {
-                    totSize += Size(infos.Info(i));
+                    totSize += Size(info);
                 }
                 long mergeSize = 0;
-                for (int i = 0; i < infosToMerge.Count; i++)
+                foreach(SegmentInfo info in infosToMerge)
                 {
-                    mergeSize += Size(infosToMerge.Info(i));
+                    mergeSize += Size(info);
                 }
 
                 doCFS = mergeSize <= noCFSRatio * totSize;

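IsOptimized and FindMergesForOptimize now take a typed ISet<SegmentInfo> instead of a Hashtable used as a set, and the compound-file ratio loop enumerates SegmentInfos directly. A hedged sketch of the caller side under the new signature (policy and infos are assumed to be an existing LogMergePolicy and SegmentInfos; maxNumSegments of 1 matches the documented optimize case):

    using System.Collections.Generic;

    ISet<SegmentInfo> segmentsToOptimize = new HashSet<SegmentInfo>();
    foreach (SegmentInfo si in infos)          // every segment must be merged away
        segmentsToOptimize.Add(si);

    MergePolicy.MergeSpecification spec =
        policy.FindMergesForOptimize(infos, 1, segmentsToOptimize);
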
Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/MergePolicy.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/MergePolicy.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/MergePolicy.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/MergePolicy.cs Wed Nov  9 21:03:47 2011
@@ -16,7 +16,7 @@
  */
 
 using System;
-
+using System.Collections.Generic;
 using Directory = Lucene.Net.Store.Directory;
 
 namespace Lucene.Net.Index
@@ -58,7 +58,7 @@ namespace Lucene.Net.Index
 	/// these APIs.
 	/// </summary>
 	
-	public abstract class MergePolicy
+	public abstract class MergePolicy : IDisposable
 	{
 		
 		/// <summary>OneMerge provides the information necessary to perform
@@ -71,21 +71,19 @@ namespace Lucene.Net.Index
 		public class OneMerge
 		{
 			
-			internal SegmentInfo info; // used by IndexWriter
-			internal bool mergeDocStores; // used by IndexWriter
-			internal bool optimize; // used by IndexWriter
-			internal bool registerDone; // used by IndexWriter
-			internal long mergeGen; // used by IndexWriter
-			internal bool isExternal; // used by IndexWriter
-			internal int maxNumSegmentsOptimize; // used by IndexWriter
-			internal SegmentReader[] readers; // used by IndexWriter
-			internal SegmentReader[] readersClone; // used by IndexWriter
+			internal SegmentInfo info;              // used by IndexWriter
+			internal bool mergeDocStores;           // used by IndexWriter
+			internal bool optimize;                 // used by IndexWriter
+			internal bool registerDone;             // used by IndexWriter
+			internal long mergeGen;                 // used by IndexWriter
+			internal bool isExternal;               // used by IndexWriter
+			internal int maxNumSegmentsOptimize;    // used by IndexWriter
+			internal SegmentReader[] readers;       // used by IndexWriter
+			internal SegmentReader[] readersClone;  // used by IndexWriter
 			internal SegmentInfos segments;
 			internal bool useCompoundFile;
 			internal bool aborted;
 			internal System.Exception error;
-
-            internal volatile bool mergeDone;     // used by IndexWriter
 			
 			public OneMerge(SegmentInfos segments, bool useCompoundFile)
 			{
@@ -184,7 +182,7 @@ namespace Lucene.Net.Index
 			
 			/// <summary> The subset of segments to be included in the primitive merge.</summary>
 			
-			public System.Collections.IList merges = new System.Collections.ArrayList();
+			public IList<OneMerge> merges = new List<OneMerge>();
 			
 			public virtual void  Add(OneMerge merge)
 			{
@@ -197,7 +195,7 @@ namespace Lucene.Net.Index
 				b.Append("MergeSpec:\n");
 				int count = merges.Count;
 				for (int i = 0; i < count; i++)
-					b.Append("  ").Append(1 + i).Append(": ").Append(((OneMerge) merges[i]).SegString(dir));
+					b.Append("  ").Append(1 + i).Append(": ").Append(merges[i].SegString(dir));
 				return b.ToString();
 			}
 		}
@@ -209,24 +207,12 @@ namespace Lucene.Net.Index
 		public class MergeException:System.SystemException
 		{
 			private Directory dir;
-			/// <deprecated>
-			/// Use <see cref="MergePolicy.MergeException(String,Directory)" /> instead 
-			/// </deprecated>
-            [Obsolete("Use MergePolicy.MergeException(String,Directory) instead ")]
-			public MergeException(System.String message):base(message)
-			{
-			}
+
 			public MergeException(System.String message, Directory dir):base(message)
 			{
 				this.dir = dir;
 			}
-			/// <deprecated>
-			/// Use <see cref="MergePolicy.MergeException(Exception,Directory)" /> instead 
-			/// </deprecated>
-            [Obsolete("Use MergePolicy.MergeException(Throwable,Directory) instead ")]
-			public MergeException(System.Exception exc):base(null, exc)
-			{
-			}
+
 			public MergeException(System.Exception exc, Directory dir):base(null, exc)
 			{
 				this.dir = dir;
@@ -267,23 +253,24 @@ namespace Lucene.Net.Index
 		/// <param name="segmentInfos">the total set of segments in the index
 		/// </param>
 		public abstract MergeSpecification FindMerges(SegmentInfos segmentInfos);
-		
-		/// <summary> Determine what set of merge operations is necessary in order to optimize
-		/// the index. <see cref="IndexWriter" /> calls this when its
-		/// <see cref="IndexWriter.Optimize()" /> method is called. This call is always
-		/// synchronized on the <see cref="IndexWriter" /> instance so only one thread at a
-		/// time will call this method.
-		/// 
-		/// </summary>
-		/// <param name="segmentInfos">the total set of segments in the index
-		/// </param>
-		/// <param name="maxSegmentCount">requested maximum number of segments in the index (currently this
-		/// is always 1)
-		/// </param>
-		/// <param name="segmentsToOptimize">contains the specific SegmentInfo instances that must be merged
-		/// away. This may be a subset of all SegmentInfos.
-		/// </param>
-		public abstract MergeSpecification FindMergesForOptimize(SegmentInfos segmentInfos, int maxSegmentCount, System.Collections.Hashtable segmentsToOptimize);
+
+	    /// <summary> Determine what set of merge operations is necessary in order to optimize
+	    /// the index. <see cref="IndexWriter" /> calls this when its
+	    /// <see cref="IndexWriter.Optimize()" /> method is called. This call is always
+	    /// synchronized on the <see cref="IndexWriter" /> instance so only one thread at a
+	    /// time will call this method.
+	    /// 
+	    /// </summary>
+	    /// <param name="segmentInfos">the total set of segments in the index
+	    /// </param>
+	    /// <param name="maxSegmentCount">requested maximum number of segments in the index (currently this
+	    /// is always 1)
+	    /// </param>
+	    /// <param name="segmentsToOptimize">contains the specific SegmentInfo instances that must be merged
+	    /// away. This may be a subset of all SegmentInfos.
+	    /// </param>
+	    public abstract MergeSpecification FindMergesForOptimize(SegmentInfos segmentInfos, int maxSegmentCount,
+	                                                             ISet<SegmentInfo> segmentsToOptimize);
 		
 		/// <summary> Determine what set of merge operations is necessary in order to expunge all
 		/// deletes from the index.
@@ -292,9 +279,13 @@ namespace Lucene.Net.Index
 		/// <param name="segmentInfos">the total set of segments in the index
 		/// </param>
 		public abstract MergeSpecification FindMergesToExpungeDeletes(SegmentInfos segmentInfos);
-		
-		/// <summary> Release all resources for the policy.</summary>
+
+        /// <summary> Release all resources for the policy.</summary>
+        [Obsolete("This method is being replaced by Dispose()")]
 		public abstract void  Close();
+
+        /// <summary> Release all resources for the policy.</summary>
+	    public abstract void Dispose();
 		
 		/// <summary> Returns true if a newly flushed (not from merge)
 		/// segment should use the compound file format.

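With MergePolicy implementing IDisposable and Close() marked [Obsolete] in favor of Dispose(), call sites can adopt the standard C# disposal pattern. A hedged usage sketch (writer is an assumed, already-constructed IndexWriter):

    using (MergePolicy policy = new LogByteSizeMergePolicy(writer))
    {
        // configure and use the policy...
    }   // Dispose() runs here, replacing the obsolete policy.Close()
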
Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/MultiReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/MultiReader.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/MultiReader.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/MultiReader.cs Wed Nov  9 21:03:47 2011
@@ -16,7 +16,7 @@
  */
 
 using System;
-
+using System.Linq;
 using Document = Lucene.Net.Documents.Document;
 using FieldSelector = Lucene.Net.Documents.FieldSelector;
 using MultiTermDocs = Lucene.Net.Index.DirectoryReader.MultiTermDocs;
@@ -26,481 +26,462 @@ using DefaultSimilarity = Lucene.Net.Sea
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>An IndexReader which reads multiple indexes, appending their content.
-	/// 
-	/// </summary>
-	/// <version>  $Id: MultiReader.java 782406 2009-06-07 16:31:18Z mikemccand $
-	/// </version>
-	public class MultiReader:IndexReader, System.ICloneable
-	{
-		protected internal IndexReader[] subReaders;
-		private int[] starts; // 1st docno for each segment
-		private bool[] decrefOnClose; // remember which subreaders to decRef on close
-		private System.Collections.IDictionary normsCache = new System.Collections.Hashtable();
-		private int maxDoc = 0;
-		private int numDocs = - 1;
-		private bool hasDeletions = false;
-		
-		/// <summary> <p/>Construct a MultiReader aggregating the named set of (sub)readers.
-		/// Directory locking for delete, undeleteAll, and setNorm operations is
-		/// left to the subreaders. <p/>
-		/// <p/>Note that all subreaders are closed if this Multireader is closed.<p/>
-		/// </summary>
-		/// <param name="subReaders">set of (sub)readers
-		/// </param>
-		/// <throws>  IOException </throws>
-		public MultiReader(IndexReader[] subReaders)
-		{
-			Initialize(subReaders, true);
-		}
-		
-		/// <summary> <p/>Construct a MultiReader aggregating the named set of (sub)readers.
-		/// Directory locking for delete, undeleteAll, and setNorm operations is
-		/// left to the subreaders. <p/>
-		/// </summary>
-		/// <param name="closeSubReaders">indicates whether the subreaders should be closed
-		/// when this MultiReader is closed
-		/// </param>
-		/// <param name="subReaders">set of (sub)readers
-		/// </param>
-		/// <throws>  IOException </throws>
-		public MultiReader(IndexReader[] subReaders, bool closeSubReaders)
-		{
-			Initialize(subReaders, closeSubReaders);
-		}
-		
-		private void  Initialize(IndexReader[] subReaders, bool closeSubReaders)
-		{
-			this.subReaders = new IndexReader[subReaders.Length];
-			subReaders.CopyTo(this.subReaders, 0);
-			starts = new int[subReaders.Length + 1]; // build starts array
-			decrefOnClose = new bool[subReaders.Length];
-			for (int i = 0; i < subReaders.Length; i++)
-			{
-				starts[i] = maxDoc;
-				maxDoc += subReaders[i].MaxDoc(); // compute maxDocs
-				
-				if (!closeSubReaders)
-				{
-					subReaders[i].IncRef();
-					decrefOnClose[i] = true;
-				}
-				else
-				{
-					decrefOnClose[i] = false;
-				}
-				
-				if (subReaders[i].HasDeletions())
-					hasDeletions = true;
-			}
-			starts[subReaders.Length] = maxDoc;
-		}
-		
-		/// <summary> Tries to reopen the subreaders.
-		/// <br/>
-		/// If one or more subreaders could be re-opened (i. e. subReader.reopen() 
-		/// returned a new instance != subReader), then a new MultiReader instance 
-		/// is returned, otherwise this instance is returned.
-		/// <p/>
-		/// A re-opened instance might share one or more subreaders with the old 
-		/// instance. Index modification operations result in undefined behavior
-		/// when performed before the old instance is closed.
-		/// (see <see cref="IndexReader.Reopen()" />).
-		/// <p/>
-		/// If subreaders are shared, then the reference count of those
-		/// readers is increased to ensure that the subreaders remain open
-		/// until the last referring reader is closed.
-		/// 
-		/// </summary>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  IOException if there is a low-level IO error  </throws>
-		public override IndexReader Reopen()
-		{
-			lock (this)
-			{
-				return DoReopen(false);
-			}
-		}
-		
-		/// <summary> Clones the subreaders.
-		/// (see <see cref="IndexReader.Clone()" />).
-		/// <br/>
-		/// <p/>
-		/// If subreaders are shared, then the reference count of those
-		/// readers is increased to ensure that the subreaders remain open
-		/// until the last referring reader is closed.
-		/// </summary>
-		public override System.Object Clone()
-		{
-			try
-			{
-				return DoReopen(true);
-			}
-			catch (System.Exception ex)
-			{
-				throw new System.SystemException(ex.Message, ex);
-			}
-		}
-		
-		/// <summary> If clone is true then we clone each of the subreaders</summary>
-		/// <param name="doClone">
-		/// </param>
-		/// <returns> New IndexReader, or same one (this) if
-		/// reopen/clone is not necessary
-		/// </returns>
-		/// <throws>  CorruptIndexException </throws>
-		/// <throws>  IOException </throws>
-		protected internal virtual IndexReader DoReopen(bool doClone)
-		{
-			EnsureOpen();
-			
-			bool reopened = false;
-			IndexReader[] newSubReaders = new IndexReader[subReaders.Length];
-			
-			bool success = false;
-			try
-			{
-				for (int i = 0; i < subReaders.Length; i++)
-				{
-					if (doClone)
-						newSubReaders[i] = (IndexReader) subReaders[i].Clone();
-					else
-						newSubReaders[i] = subReaders[i].Reopen();
-					// if at least one of the subreaders was updated we remember that
-					// and return a new MultiReader
-					if (newSubReaders[i] != subReaders[i])
-					{
-						reopened = true;
-					}
-				}
-				success = true;
-			}
-			finally
-			{
-				if (!success && reopened)
-				{
-					for (int i = 0; i < newSubReaders.Length; i++)
-					{
-						if (newSubReaders[i] != subReaders[i])
-						{
-							try
-							{
-								newSubReaders[i].Close();
-							}
-							catch (System.IO.IOException ignore)
-							{
-								// keep going - we want to clean up as much as possible
-							}
-						}
-					}
-				}
-			}
-			
-			if (reopened)
-			{
-				bool[] newDecrefOnClose = new bool[subReaders.Length];
-				for (int i = 0; i < subReaders.Length; i++)
-				{
-					if (newSubReaders[i] == subReaders[i])
-					{
-						newSubReaders[i].IncRef();
-						newDecrefOnClose[i] = true;
-					}
-				}
-				MultiReader mr = new MultiReader(newSubReaders);
-				mr.decrefOnClose = newDecrefOnClose;
-				mr.SetDisableFakeNorms(GetDisableFakeNorms());
-				return mr;
-			}
-			else
-			{
-				return this;
-			}
-		}
-		
-		public override TermFreqVector[] GetTermFreqVectors(int n)
-		{
-			EnsureOpen();
-			int i = ReaderIndex(n); // find segment num
-			return subReaders[i].GetTermFreqVectors(n - starts[i]); // dispatch to segment
-		}
-		
-		public override TermFreqVector GetTermFreqVector(int n, System.String field)
-		{
-			EnsureOpen();
-			int i = ReaderIndex(n); // find segment num
-			return subReaders[i].GetTermFreqVector(n - starts[i], field);
-		}
-		
-		
-		public override void  GetTermFreqVector(int docNumber, System.String field, TermVectorMapper mapper)
-		{
-			EnsureOpen();
-			int i = ReaderIndex(docNumber); // find segment num
-			subReaders[i].GetTermFreqVector(docNumber - starts[i], field, mapper);
-		}
-		
-		public override void  GetTermFreqVector(int docNumber, TermVectorMapper mapper)
-		{
-			EnsureOpen();
-			int i = ReaderIndex(docNumber); // find segment num
-			subReaders[i].GetTermFreqVector(docNumber - starts[i], mapper);
-		}
-		
-		public override bool IsOptimized()
-		{
-			return false;
-		}
-		
-		public override int NumDocs()
-		{
-			// Don't call ensureOpen() here (it could affect performance)
+    
+    /// <summary>An IndexReader which reads multiple indexes, appending 
+    /// their content.
+    /// </summary>
+    public class MultiReader:IndexReader, System.ICloneable
+    {
+        protected internal IndexReader[] subReaders;
+        private int[] starts; // 1st docno for each segment
+        private bool[] decrefOnClose; // remember which subreaders to decRef on close
+        private System.Collections.Generic.IDictionary<string, byte[]> normsCache = new SupportClass.HashMap<string,byte[]>();
+        private int maxDoc = 0;
+        private int numDocs = - 1;
+        private bool hasDeletions = false;
+        
+        /// <summary> <p/>Construct a MultiReader aggregating the named set of (sub)readers.
+        /// Directory locking for delete, undeleteAll, and setNorm operations is
+        /// left to the subreaders. <p/>
+        /// <p/>Note that all subreaders are closed if this MultiReader is closed.<p/>
+        /// </summary>
+        /// <param name="subReaders">set of (sub)readers
+        /// </param>
+        /// <throws>  IOException </throws>
+        public MultiReader(params IndexReader[] subReaders)
+        {
+            Initialize(subReaders, true);
+        }
+        
+        /// <summary> <p/>Construct a MultiReader aggregating the named set of (sub)readers.
+        /// Directory locking for delete, undeleteAll, and setNorm operations is
+        /// left to the subreaders. <p/>
+        /// </summary>
+        /// <param name="closeSubReaders">indicates whether the subreaders should be closed
+        /// when this MultiReader is closed
+        /// </param>
+        /// <param name="subReaders">set of (sub)readers
+        /// </param>
+        /// <throws>  IOException </throws>
+        public MultiReader(IndexReader[] subReaders, bool closeSubReaders)
+        {
+            Initialize(subReaders, closeSubReaders);
+        }
+        
+        private void  Initialize(IndexReader[] subReaders, bool closeSubReaders)
+        {
+            // Deep copy
+            this.subReaders = subReaders.ToArray();
+            starts = new int[subReaders.Length + 1]; // build starts array
+            decrefOnClose = new bool[subReaders.Length];
+            for (int i = 0; i < subReaders.Length; i++)
+            {
+                starts[i] = maxDoc;
+                maxDoc += subReaders[i].MaxDoc(); // compute maxDocs
+                
+                if (!closeSubReaders)
+                {
+                    subReaders[i].IncRef();
+                    decrefOnClose[i] = true;
+                }
+                else
+                {
+                    decrefOnClose[i] = false;
+                }
+                
+                if (subReaders[i].HasDeletions())
+                    hasDeletions = true;
+            }
+            starts[subReaders.Length] = maxDoc;
+        }
+        
+        /// <summary> Tries to reopen the subreaders.
+        /// <br/>
+        /// If one or more subreaders could be re-opened (i.e. subReader.reopen() 
+        /// returned a new instance != subReader), then a new MultiReader instance 
+        /// is returned, otherwise this instance is returned.
+        /// <p/>
+        /// A re-opened instance might share one or more subreaders with the old 
+        /// instance. Index modification operations result in undefined behavior
+        /// when performed before the old instance is closed.
+        /// (see <see cref="IndexReader.Reopen()" />).
+        /// <p/>
+        /// If subreaders are shared, then the reference count of those
+        /// readers is increased to ensure that the subreaders remain open
+        /// until the last referring reader is closed.
+        /// 
+        /// </summary>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  IOException if there is a low-level IO error  </throws>
+        public override IndexReader Reopen()
+        {
+            lock (this)
+            {
+                return DoReopen(false);
+            }
+        }
+        
+        /// <summary> Clones the subreaders.
+        /// (see <see cref="IndexReader.Clone()" />).
+        /// <br/>
+        /// <p/>
+        /// If subreaders are shared, then the reference count of those
+        /// readers is increased to ensure that the subreaders remain open
+        /// until the last referring reader is closed.
+        /// </summary>
+        public override System.Object Clone()
+        {
+            try
+            {
+                return DoReopen(true);
+            }
+            catch (System.Exception ex)
+            {
+                throw new System.SystemException(ex.Message, ex);
+            }
+        }
+        
+        /// <summary> If clone is true then we clone each of the subreaders</summary>
+        /// <param name="doClone">
+        /// </param>
+        /// <returns> New IndexReader, or same one (this) if
+        /// reopen/clone is not necessary
+        /// </returns>
+        /// <throws>  CorruptIndexException </throws>
+        /// <throws>  IOException </throws>
+        protected internal virtual IndexReader DoReopen(bool doClone)
+        {
+            EnsureOpen();
+            
+            bool reopened = false;
+            IndexReader[] newSubReaders = new IndexReader[subReaders.Length];
+            
+            bool success = false;
+            try
+            {
+                for (int i = 0; i < subReaders.Length; i++)
+                {
+                    if (doClone)
+                        newSubReaders[i] = (IndexReader) subReaders[i].Clone();
+                    else
+                        newSubReaders[i] = subReaders[i].Reopen();
+                    // if at least one of the subreaders was updated we remember that
+                    // and return a new MultiReader
+                    if (newSubReaders[i] != subReaders[i])
+                    {
+                        reopened = true;
+                    }
+                }
+                success = true;
+            }
+            finally
+            {
+                if (!success && reopened)
+                {
+                    for (int i = 0; i < newSubReaders.Length; i++)
+                    {
+                        if (newSubReaders[i] != subReaders[i])
+                        {
+                            try
+                            {
+                                newSubReaders[i].Close();
+                            }
+                            catch (System.IO.IOException ignore)
+                            {
+                                // keep going - we want to clean up as much as possible
+                            }
+                        }
+                    }
+                }
+            }
+            
+            if (reopened)
+            {
+                bool[] newDecrefOnClose = new bool[subReaders.Length];
+                for (int i = 0; i < subReaders.Length; i++)
+                {
+                    if (newSubReaders[i] == subReaders[i])
+                    {
+                        newSubReaders[i].IncRef();
+                        newDecrefOnClose[i] = true;
+                    }
+                }
+                MultiReader mr = new MultiReader(newSubReaders);
+                mr.decrefOnClose = newDecrefOnClose;
+                return mr;
+            }
+            else
+            {
+                return this;
+            }
+        }
+        
+        public override TermFreqVector[] GetTermFreqVectors(int n)
+        {
+            EnsureOpen();
+            int i = ReaderIndex(n); // find segment num
+            return subReaders[i].GetTermFreqVectors(n - starts[i]); // dispatch to segment
+        }
+        
+        public override TermFreqVector GetTermFreqVector(int n, System.String field)
+        {
+            EnsureOpen();
+            int i = ReaderIndex(n); // find segment num
+            return subReaders[i].GetTermFreqVector(n - starts[i], field);
+        }
+        
+        
+        public override void  GetTermFreqVector(int docNumber, System.String field, TermVectorMapper mapper)
+        {
+            EnsureOpen();
+            int i = ReaderIndex(docNumber); // find segment num
+            subReaders[i].GetTermFreqVector(docNumber - starts[i], field, mapper);
+        }
+        
+        public override void  GetTermFreqVector(int docNumber, TermVectorMapper mapper)
+        {
+            EnsureOpen();
+            int i = ReaderIndex(docNumber); // find segment num
+            subReaders[i].GetTermFreqVector(docNumber - starts[i], mapper);
+        }
+        
+        public override bool IsOptimized()
+        {
+            return false;
+        }
+        
+        public override int NumDocs()
+        {
+            // Don't call ensureOpen() here (it could affect performance)
             // NOTE: multiple threads may wind up init'ing
             // numDocs... but that's harmless
-			if (numDocs == - 1)
-			{
-				// check cache
-				int n = 0; // cache miss--recompute
-				for (int i = 0; i < subReaders.Length; i++)
-					n += subReaders[i].NumDocs(); // sum from readers
-				numDocs = n;
-			}
-			return numDocs;
-		}
-		
-		public override int MaxDoc()
-		{
-			// Don't call ensureOpen() here (it could affect performance)
-			return maxDoc;
-		}
-		
-		// inherit javadoc
-		public override Document Document(int n, FieldSelector fieldSelector)
-		{
-			EnsureOpen();
-			int i = ReaderIndex(n); // find segment num
-			return subReaders[i].Document(n - starts[i], fieldSelector); // dispatch to segment reader
-		}
-		
-		public override bool IsDeleted(int n)
-		{
-			// Don't call ensureOpen() here (it could affect performance)
-			int i = ReaderIndex(n); // find segment num
-			return subReaders[i].IsDeleted(n - starts[i]); // dispatch to segment reader
-		}
-		
-		public override bool HasDeletions()
-		{
-			// Don't call ensureOpen() here (it could affect performance)
-			return hasDeletions;
-		}
-		
-		protected internal override void  DoDelete(int n)
-		{
-			numDocs = - 1; // invalidate cache
-			int i = ReaderIndex(n); // find segment num
-			subReaders[i].DeleteDocument(n - starts[i]); // dispatch to segment reader
-			hasDeletions = true;
-		}
-		
-		protected internal override void  DoUndeleteAll()
-		{
-			for (int i = 0; i < subReaders.Length; i++)
-				subReaders[i].UndeleteAll();
-			
-			hasDeletions = false;
-			numDocs = - 1; // invalidate cache
-		}
-		
-		private int ReaderIndex(int n)
-		{
-			// find reader for doc n:
-			return DirectoryReader.ReaderIndex(n, this.starts, this.subReaders.Length);
-		}
-		
-		public override bool HasNorms(System.String field)
-		{
-			EnsureOpen();
-			for (int i = 0; i < subReaders.Length; i++)
-			{
-				if (subReaders[i].HasNorms(field))
-					return true;
-			}
-			return false;
-		}
-		
-		private byte[] ones;
-		private byte[] FakeNorms()
-		{
-			if (ones == null)
-				ones = SegmentReader.CreateFakeNorms(MaxDoc());
-			return ones;
-		}
-		
-		public override byte[] Norms(System.String field)
-		{
-			lock (this)
-			{
-				EnsureOpen();
-				byte[] bytes = (byte[]) normsCache[field];
-				if (bytes != null)
-					return bytes; // cache hit
-				if (!HasNorms(field))
-					return GetDisableFakeNorms()?null:FakeNorms();
-				
-				bytes = new byte[MaxDoc()];
-				for (int i = 0; i < subReaders.Length; i++)
-					subReaders[i].Norms(field, bytes, starts[i]);
-				normsCache[field] = bytes; // update cache
-				return bytes;
-			}
-		}
-		
-		public override void  Norms(System.String field, byte[] result, int offset)
-		{
-			lock (this)
-			{
-				EnsureOpen();
-				byte[] bytes = (byte[]) normsCache[field];
-				for (int i = 0; i < subReaders.Length; i++)
-				// read from segments
-					subReaders[i].Norms(field, result, offset + starts[i]);
-				
-				if (bytes == null && !HasNorms(field))
-				{
+            if (numDocs == - 1)
+            {
+                // check cache
+                int n = 0; // cache miss--recompute
+                for (int i = 0; i < subReaders.Length; i++)
+                    n += subReaders[i].NumDocs(); // sum from readers
+                numDocs = n;
+            }
+            return numDocs;
+        }
+        
+        public override int MaxDoc()
+        {
+            // Don't call ensureOpen() here (it could affect performance)
+            return maxDoc;
+        }
+        
+        // inherit javadoc
+        public override Document Document(int n, FieldSelector fieldSelector)
+        {
+            EnsureOpen();
+            int i = ReaderIndex(n); // find segment num
+            return subReaders[i].Document(n - starts[i], fieldSelector); // dispatch to segment reader
+        }
+        
+        public override bool IsDeleted(int n)
+        {
+            // Don't call ensureOpen() here (it could affect performance)
+            int i = ReaderIndex(n); // find segment num
+            return subReaders[i].IsDeleted(n - starts[i]); // dispatch to segment reader
+        }
+        
+        public override bool HasDeletions()
+        {
+            // Don't call ensureOpen() here (it could affect performance)
+            return hasDeletions;
+        }
+        
+        protected internal override void  DoDelete(int n)
+        {
+            numDocs = - 1; // invalidate cache
+            int i = ReaderIndex(n); // find segment num
+            subReaders[i].DeleteDocument(n - starts[i]); // dispatch to segment reader
+            hasDeletions = true;
+        }
+        
+        protected internal override void  DoUndeleteAll()
+        {
+            for (int i = 0; i < subReaders.Length; i++)
+                subReaders[i].UndeleteAll();
+            
+            hasDeletions = false;
+            numDocs = - 1; // invalidate cache
+        }
+        
+        private int ReaderIndex(int n)
+        {
+            // find reader for doc n:
+            return DirectoryReader.ReaderIndex(n, this.starts, this.subReaders.Length);
+        }
+        
+        public override bool HasNorms(System.String field)
+        {
+            EnsureOpen();
+            for (int i = 0; i < subReaders.Length; i++)
+            {
+                if (subReaders[i].HasNorms(field))
+                    return true;
+            }
+            return false;
+        }
+        
+        public override byte[] Norms(System.String field)
+        {
+            lock (this)
+            {
+                EnsureOpen();
+                byte[] bytes = normsCache[field];
+                if (bytes != null)
+                    return bytes; // cache hit
+                if (!HasNorms(field))
+                    return null;
+                
+                bytes = new byte[MaxDoc()];
+                for (int i = 0; i < subReaders.Length; i++)
+                    subReaders[i].Norms(field, bytes, starts[i]);
+                normsCache[field] = bytes; // update cache
+                return bytes;
+            }
+        }
+        
+        public override void  Norms(System.String field, byte[] result, int offset)
+        {
+            lock (this)
+            {
+                EnsureOpen();
+                byte[] bytes = normsCache[field];
+                for (int i = 0; i < subReaders.Length; i++)
+                // read from segments
+                    subReaders[i].Norms(field, result, offset + starts[i]);
+                
+                if (bytes == null && !HasNorms(field))
+                {
                     for (int i = offset; i < result.Length; i++)
                     {
                         result[i] = (byte) DefaultSimilarity.EncodeNorm(1.0f);
                     }
-				}
-				else if (bytes != null)
-				{
-					// cache hit
-					Array.Copy(bytes, 0, result, offset, MaxDoc());
-				}
-				else
-				{
-					for (int i = 0; i < subReaders.Length; i++)
-					{
-						// read from segments
-						subReaders[i].Norms(field, result, offset + starts[i]);
-					}
-				}
-			}
-		}
-		
-		protected internal override void  DoSetNorm(int n, System.String field, byte value_Renamed)
-		{
-			lock (normsCache.SyncRoot)
-			{
-				normsCache.Remove(field); // clear cache
-			}
-			int i = ReaderIndex(n); // find segment num
-			subReaders[i].SetNorm(n - starts[i], field, value_Renamed); // dispatch
-		}
-		
-		public override TermEnum Terms()
-		{
-			EnsureOpen();
-			return new MultiTermEnum(this, subReaders, starts, null);
-		}
-		
-		public override TermEnum Terms(Term term)
-		{
-			EnsureOpen();
-			return new MultiTermEnum(this, subReaders, starts, term);
-		}
-		
-		public override int DocFreq(Term t)
-		{
-			EnsureOpen();
-			int total = 0; // sum freqs in segments
-			for (int i = 0; i < subReaders.Length; i++)
-				total += subReaders[i].DocFreq(t);
-			return total;
-		}
-		
-		public override TermDocs TermDocs()
-		{
-			EnsureOpen();
-			return new MultiTermDocs(this, subReaders, starts);
-		}
-		
-		public override TermPositions TermPositions()
-		{
-			EnsureOpen();
-			return new MultiTermPositions(this, subReaders, starts);
-		}
-		
-		/// <deprecated> 
-		/// </deprecated>
-        [Obsolete]
-		protected internal override void  DoCommit()
-		{
-			DoCommit(null);
-		}
+                }
+                else if (bytes != null)
+                {
+                    // cache hit
+                    Array.Copy(bytes, 0, result, offset, MaxDoc());
+                }
+                else
+                {
+                    for (int i = 0; i < subReaders.Length; i++)
+                    {
+                        // read from segments
+                        subReaders[i].Norms(field, result, offset + starts[i]);
+                    }
+                }
+            }
+        }
+        
+        protected internal override void  DoSetNorm(int n, System.String field, byte value_Renamed)
+        {
+            lock (normsCache)
+            {
+                normsCache.Remove(field); // clear cache
+            }
+            int i = ReaderIndex(n); // find segment num
+            subReaders[i].SetNorm(n - starts[i], field, value_Renamed); // dispatch
+        }
+        
+        public override TermEnum Terms()
+        {
+            EnsureOpen();
+            return new MultiTermEnum(this, subReaders, starts, null);
+        }
+        
+        public override TermEnum Terms(Term term)
+        {
+            EnsureOpen();
+            return new MultiTermEnum(this, subReaders, starts, term);
+        }
+        
+        public override int DocFreq(Term t)
+        {
+            EnsureOpen();
+            int total = 0; // sum freqs in segments
+            for (int i = 0; i < subReaders.Length; i++)
+                total += subReaders[i].DocFreq(t);
+            return total;
+        }
+        
+        public override TermDocs TermDocs()
+        {
+            EnsureOpen();
+            return new MultiTermDocs(this, subReaders, starts);
+        }
+        
+        public override TermPositions TermPositions()
+        {
+            EnsureOpen();
+            return new MultiTermPositions(this, subReaders, starts);
+        }
 
         protected internal override void DoCommit(System.Collections.Generic.IDictionary<string, string> commitUserData)
-		{
-			for (int i = 0; i < subReaders.Length; i++)
-				subReaders[i].Commit(commitUserData);
-		}
-		
-		protected internal override void  DoClose()
-		{
-			lock (this)
-			{
-				for (int i = 0; i < subReaders.Length; i++)
-				{
-					if (decrefOnClose[i])
-					{
-						subReaders[i].DecRef();
-					}
-					else
-					{
-						subReaders[i].Close();
-					}
-				}
-			}
+        {
+            for (int i = 0; i < subReaders.Length; i++)
+                subReaders[i].Commit(commitUserData);
+        }
+        
+        protected internal override void  DoClose()
+        {
+            lock (this)
+            {
+                for (int i = 0; i < subReaders.Length; i++)
+                {
+                    if (decrefOnClose[i])
+                    {
+                        subReaders[i].DecRef();
+                    }
+                    else
+                    {
+                        subReaders[i].Close();
+                    }
+                }
+            }
 
             // NOTE: only needed in case someone had asked for
             // FieldCache for top-level reader (which is generally
             // not a good idea):
             Lucene.Net.Search.FieldCache_Fields.DEFAULT.Purge(this);
-		}
+        }
 
         public override System.Collections.Generic.ICollection<string> GetFieldNames(IndexReader.FieldOption fieldNames)
-		{
-			EnsureOpen();
-			return DirectoryReader.GetFieldNames(fieldNames, this.subReaders);
-		}
-		
-		/// <summary> Checks recursively if all subreaders are up to date. </summary>
-		public override bool IsCurrent()
-		{
-			for (int i = 0; i < subReaders.Length; i++)
-			{
-				if (!subReaders[i].IsCurrent())
-				{
-					return false;
-				}
-			}
-			
-			// all subreaders are up to date
-			return true;
-		}
-		
-		/// <summary>Not implemented.</summary>
-		/// <throws>  UnsupportedOperationException </throws>
-		public override long GetVersion()
-		{
-			throw new System.NotSupportedException("MultiReader does not support this method.");
-		}
-		
-		public override IndexReader[] GetSequentialSubReaders()
-		{
-			return subReaders;
-		}
-	}
+        {
+            EnsureOpen();
+            return DirectoryReader.GetFieldNames(fieldNames, this.subReaders);
+        }
+        
+        /// <summary> Checks recursively if all subreaders are up to date. </summary>
+        public override bool IsCurrent()
+        {
+            for (int i = 0; i < subReaders.Length; i++)
+            {
+                if (!subReaders[i].IsCurrent())
+                {
+                    return false;
+                }
+            }
+            
+            // all subreaders are up to date
+            return true;
+        }
+        
+        /// <summary>Not implemented.</summary>
+        /// <throws>  UnsupportedOperationException </throws>
+        public override long GetVersion()
+        {
+            throw new System.NotSupportedException("MultiReader does not support this method.");
+        }
+        
+        public override IndexReader[] GetSequentialSubReaders()
+        {
+            return subReaders;
+        }
+    }
 }
\ No newline at end of file

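Because the first constructor is now declared with params IndexReader[], small aggregations need no explicit array at the call site. A hedged usage sketch (dir1 and dir2 are assumed Directory instances, and IndexReader.Open(directory, readOnly) is assumed as the opening call):

    IndexReader r1 = IndexReader.Open(dir1, true);
    IndexReader r2 = IndexReader.Open(dir2, true);

    // params overload: same as new MultiReader(new IndexReader[] { r1, r2 });
    // closing multi also closes r1 and r2 (the single-argument constructor
    // passes closeSubReaders = true).
    MultiReader multi = new MultiReader(r1, r2);
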
Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/MultipleTermPositions.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/MultipleTermPositions.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/MultipleTermPositions.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/MultipleTermPositions.cs Wed Nov  9 21:03:47 2011
@@ -16,8 +16,7 @@
  */
 
 using System;
-
-using PriorityQueue = Lucene.Net.Util.PriorityQueue;
+using Lucene.Net.Util;
 
 namespace Lucene.Net.Index
 {
@@ -28,30 +27,25 @@ namespace Lucene.Net.Index
 	/// </summary>
 	public class MultipleTermPositions : TermPositions
 	{
-		
-		private sealed class TermPositionsQueue:PriorityQueue
+		private sealed class TermPositionsQueue : PriorityQueue<TermPositions>
 		{
-			internal TermPositionsQueue(System.Collections.IList termPositions)
+			internal TermPositionsQueue(System.Collections.Generic.IList<TermPositions> termPositions)
 			{
 				Initialize(termPositions.Count);
 				
-				System.Collections.IEnumerator i = termPositions.GetEnumerator();
-				while (i.MoveNext())
-				{
-					TermPositions tp = (TermPositions) i.Current;
+				foreach(TermPositions tp in termPositions)
 					if (tp.Next())
-						Put(tp);
-				}
+						Add(tp);
 			}
 			
 			internal TermPositions Peek()
 			{
-				return (TermPositions) Top();
+				return Top();
 			}
 			
-			public override bool LessThan(System.Object a, System.Object b)
+			public override bool LessThan(TermPositions a, TermPositions b)
 			{
-				return ((TermPositions) a).Doc() < ((TermPositions) b).Doc();
+				return a.Doc() < b.Doc();
 			}
 		}
 		
@@ -120,7 +114,10 @@ namespace Lucene.Net.Index
 		/// </exception>
 		public MultipleTermPositions(IndexReader indexReader, Term[] terms)
 		{
-			System.Collections.IList termPositions = new System.Collections.ArrayList();
+            // TODO: Java implementation uses a LinkedList, which has different performance costs
+            //       for methods, particularly inserts.  If inserts are done, it may be beneficial
+            //       from a performance point of view to implement Java's version of LinkedList<T>
+			var termPositions = new System.Collections.Generic.List<TermPositions>();
 			
 			for (int i = 0; i < terms.Length; i++)
 				termPositions.Add(indexReader.TermPositions(terms[i]));
@@ -146,7 +143,7 @@ namespace Lucene.Net.Index
 					_posList.add(tp.NextPosition());
 				
 				if (tp.Next())
-					_termPositionsQueue.AdjustTop();
+					_termPositionsQueue.UpdateTop();
 				else
 				{
 					_termPositionsQueue.Pop();
@@ -170,9 +167,9 @@ namespace Lucene.Net.Index
 		{
 			while (_termPositionsQueue.Peek() != null && target > _termPositionsQueue.Peek().Doc())
 			{
-				TermPositions tp = (TermPositions) _termPositionsQueue.Pop();
+				TermPositions tp = _termPositionsQueue.Pop();
 				if (tp.SkipTo(target))
-					_termPositionsQueue.Put(tp);
+					_termPositionsQueue.Add(tp);
 				else
 					tp.Close();
 			}
@@ -192,19 +189,19 @@ namespace Lucene.Net.Index
 		public void  Close()
 		{
 			while (_termPositionsQueue.Size() > 0)
-				((TermPositions) _termPositionsQueue.Pop()).Close();
+				_termPositionsQueue.Pop().Close();
 		}
 		
 		/// <summary> Not implemented.</summary>
 		/// <throws>  UnsupportedOperationException </throws>
-		public virtual void  Seek(Term arg0)
+		public virtual void Seek(Term arg0)
 		{
 			throw new System.NotSupportedException();
 		}
 		
 		/// <summary> Not implemented.</summary>
 		/// <throws>  UnsupportedOperationException </throws>
-		public virtual void  Seek(TermEnum termEnum)
+		public virtual void Seek(TermEnum termEnum)
 		{
 			throw new System.NotSupportedException();
 		}

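TermPositionsQueue now extends the generic Lucene.Net.Util.PriorityQueue<T>, so LessThan is typed and Top/Pop/Add/UpdateTop need no casts. A minimal sketch of the same subclass pattern, using an int min-queue purely for illustration:

    sealed class IntQueue : Lucene.Net.Util.PriorityQueue<int>
    {
        internal IntQueue(int size)
        {
            Initialize(size);              // reserve capacity, as the diff does
        }

        public override bool LessThan(int a, int b)
        {
            return a < b;                  // smallest element surfaces at Top()
        }
    }

    // usage: Add() enqueues, Top() peeks, Pop() removes the least element
    IntQueue q = new IntQueue(8);
    q.Add(3);
    q.Add(1);
    int least = q.Top();                   // 1
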
Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/NormsWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/NormsWriter.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/NormsWriter.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/NormsWriter.cs Wed Nov  9 21:03:47 2011
@@ -16,7 +16,7 @@
  */
 
 using System;
-
+using System.Collections.Generic;
 using IndexOutput = Lucene.Net.Store.IndexOutput;
 using Similarity = Lucene.Net.Search.Similarity;
 
@@ -30,7 +30,7 @@ namespace Lucene.Net.Index
 	/// merges all of these together into a single _X.nrm file.
 	/// </summary>
 	
-	sealed class NormsWriter:InvertedDocEndConsumer
+	sealed class NormsWriter : InvertedDocEndConsumer
 	{
 		
 		private static readonly byte defaultNorm;
@@ -45,7 +45,7 @@ namespace Lucene.Net.Index
 		}
 		
 		// We only write the _X.nrm file at flush
-		internal void  Files(System.Collections.ICollection files)
+		internal void  Files(ICollection<string> files)
 		{
 		}
 		
@@ -57,35 +57,31 @@ namespace Lucene.Net.Index
 		/// <summary>Produce _X.nrm if any document had a field with norms
 		/// not disabled 
 		/// </summary>
-		public override void  Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state)
+        public override void Flush(IDictionary<InvertedDocEndConsumerPerThread,ICollection<InvertedDocEndConsumerPerField>> threadsAndFields, SegmentWriteState state)
 		{
-			
-			System.Collections.IDictionary byField = new System.Collections.Hashtable();
+
+            IDictionary<FieldInfo, IList<NormsWriterPerField>> byField = new SupportClass.HashMap<FieldInfo, IList<NormsWriterPerField>>();
 			
 			// Typically, each thread will have encountered the same
 			// field.  So first we collate by field, ie, all
 			// per-thread field instances that correspond to the
 			// same FieldInfo
-			System.Collections.IEnumerator it = new System.Collections.Hashtable(threadsAndFields).GetEnumerator();
-			while (it.MoveNext())
+			foreach(var entry in threadsAndFields)
 			{
-				System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
-				
-				System.Collections.ICollection fields = (System.Collections.ICollection) entry.Value;
-				System.Collections.IEnumerator fieldsIt = fields.GetEnumerator();
-                System.Collections.ArrayList fieldsToRemove = new System.Collections.ArrayList();
-				
+				ICollection<InvertedDocEndConsumerPerField> fields = entry.Value;
+				IEnumerator<InvertedDocEndConsumerPerField> fieldsIt = fields.GetEnumerator();
+			    var fieldsToRemove = new HashSet<NormsWriterPerField>();
 				while (fieldsIt.MoveNext())
 				{
-					NormsWriterPerField perField = (NormsWriterPerField) ((System.Collections.DictionaryEntry) fieldsIt.Current).Key;
+					NormsWriterPerField perField = (NormsWriterPerField) fieldsIt.Current;
 					
 					if (perField.upto > 0)
 					{
 						// It has some norms
-						System.Collections.IList l = (System.Collections.IList) byField[perField.fieldInfo];
+						IList<NormsWriterPerField> l = byField[perField.fieldInfo];
 						if (l == null)
 						{
-							l = new System.Collections.ArrayList();
+							l = new List<NormsWriterPerField>();
 							byField[perField.fieldInfo] = l;
 						}
 						l.Add(perField);
@@ -97,16 +93,14 @@ namespace Lucene.Net.Index
                         fieldsToRemove.Add(perField);
 					}
 				}
-
-                System.Collections.Hashtable fieldsHT = (System.Collections.Hashtable)fields;
-                for (int i = 0; i < fieldsToRemove.Count; i++)
+                foreach (var field in fieldsToRemove)
                 {
-                    fieldsHT.Remove(fieldsToRemove[i]);    
+                    fields.Remove(field);
                 }
 			}
 			
 			System.String normsFileName = state.segmentName + "." + IndexFileNames.NORMS_EXTENSION;
-			state.flushedFiles[normsFileName] = normsFileName;
+			state.flushedFiles.Add(normsFileName);
 			IndexOutput normsOut = state.directory.CreateOutput(normsFileName);
 			
 			try
@@ -122,7 +116,7 @@ namespace Lucene.Net.Index
 					
 					FieldInfo fieldInfo = fieldInfos.FieldInfo(fieldNumber);
 					
-					System.Collections.IList toMerge = (System.Collections.IList) byField[fieldInfo];
+					IList<NormsWriterPerField> toMerge = byField[fieldInfo];
 					int upto = 0;
 					if (toMerge != null)
 					{
@@ -135,7 +129,7 @@ namespace Lucene.Net.Index
 						int[] uptos = new int[numFields];
 						
 						for (int j = 0; j < numFields; j++)
-							fields[j] = (NormsWriterPerField) toMerge[j];
+							fields[j] = toMerge[j];
 						
 						int numLeft = numFields;
 						

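The lookups here (and MultiReader's normsCache above) read through the indexer and test for null, which a plain Dictionary<TKey, TValue> would turn into a KeyNotFoundException on a missing key; SupportClass.HashMap instead mirrors Java's Map.get. A minimal sketch of that contract (illustrative only, not the actual SupportClass source):

    public class HashMap<TKey, TValue> : System.Collections.Generic.Dictionary<TKey, TValue>
    {
        // Java-style get: return default(TValue) (null for reference types)
        // on a miss instead of throwing.
        public new TValue this[TKey key]
        {
            get
            {
                TValue value;
                return TryGetValue(key, out value) ? value : default(TValue);
            }
            set { base[key] = value; }
        }
    }
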
Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ParallelReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ParallelReader.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ParallelReader.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ParallelReader.cs Wed Nov  9 21:03:47 2011
@@ -24,8 +24,6 @@ using Fieldable = Lucene.Net.Documents.F
 
 namespace Lucene.Net.Index
 {
-	
-	
 	/// <summary>An IndexReader which reads multiple, parallel indexes.  Each index added
 	/// must have the same number of documents, but typically each contains
 	/// different fields.  Each document contains the union of the fields of all
@@ -503,14 +501,6 @@ namespace Lucene.Net.Index
 		{
 			return (IndexReader[]) readers.ToArray(typeof(IndexReader));
 		}
-		
-		/// <deprecated> 
-		/// </deprecated>
-        [Obsolete]
-		protected internal override void  DoCommit()
-		{
-			DoCommit(null);
-		}
 
         protected internal override void DoCommit(System.Collections.Generic.IDictionary<string, string> commitUserData)
 		{

Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ReadOnlyDirectoryReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ReadOnlyDirectoryReader.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ReadOnlyDirectoryReader.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ReadOnlyDirectoryReader.cs Wed Nov  9 21:03:47 2011
@@ -27,12 +27,13 @@ namespace Lucene.Net.Index
 		internal ReadOnlyDirectoryReader(Directory directory, SegmentInfos sis, IndexDeletionPolicy deletionPolicy, int termInfosIndexDivisor):base(directory, sis, deletionPolicy, true, termInfosIndexDivisor)
 		{
 		}
-		
-		internal ReadOnlyDirectoryReader(Directory directory, SegmentInfos infos, SegmentReader[] oldReaders, int[] oldStarts, System.Collections.IDictionary oldNormsCache, bool doClone, int termInfosIndexDivisor):base(directory, infos, oldReaders, oldStarts, oldNormsCache, true, doClone, termInfosIndexDivisor)
-		{
-		}
-		
-		internal ReadOnlyDirectoryReader(IndexWriter writer, SegmentInfos infos, int termInfosIndexDivisor):base(writer, infos, termInfosIndexDivisor)
+
+        internal ReadOnlyDirectoryReader(Directory directory, SegmentInfos infos, SegmentReader[] oldReaders, int[] oldStarts, System.Collections.Generic.IDictionary<string, byte[]> oldNormsCache, bool doClone, int termInfosIndexDivisor)
+            : base(directory, infos, oldReaders, oldStarts, oldNormsCache, true, doClone, termInfosIndexDivisor)
+        {
+        }
+
+	    internal ReadOnlyDirectoryReader(IndexWriter writer, SegmentInfos infos, int termInfosIndexDivisor):base(writer, infos, termInfosIndexDivisor)
 		{
 		}
 		

Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/SegmentInfo.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/SegmentInfo.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/SegmentInfo.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/SegmentInfo.cs Wed Nov  9 21:03:47 2011
@@ -16,7 +16,7 @@
  */
 
 using System;
-
+using System.Collections.Generic;
 using Directory = Lucene.Net.Store.Directory;
 using IndexInput = Lucene.Net.Store.IndexInput;
 using IndexOutput = Lucene.Net.Store.IndexOutput;
@@ -34,59 +34,59 @@ namespace Lucene.Net.Index
 	public sealed class SegmentInfo : System.ICloneable
 	{
 		
-		internal const int NO = - 1; // e.g. no norms; no deletes;
-		internal const int YES = 1; // e.g. have norms; have deletes;
-		internal const int CHECK_DIR = 0; // e.g. must check dir to see if there are norms/deletions
-		internal const int WITHOUT_GEN = 0; // a file name that has no GEN in it. 
-		
-		public System.String name; // unique name in dir
-		public int docCount; // number of docs in seg
-		public Directory dir; // where segment resides
-		
-		private bool preLockless; // true if this is a segments file written before
-		// lock-less commits (2.1)
-		
-		private long delGen; // current generation of del file; NO if there
-		// are no deletes; CHECK_DIR if it's a pre-2.1 segment
-		// (and we must check filesystem); YES or higher if
-		// there are deletes at generation N
-		
-		private long[] normGen; // current generation of each field's norm file.
-		// If this array is null, for lockLess this means no 
-		// separate norms.  For preLockLess this means we must 
-		// check filesystem. If this array is not null, its 
-		// values mean: NO says this field has no separate  
-		// norms; CHECK_DIR says it is a preLockLess segment and    
-		// filesystem must be checked; >= YES says this field  
-		// has separate norms with the specified generation
-		
-		private sbyte isCompoundFile; // NO if it is not; YES if it is; CHECK_DIR if it's
-		// pre-2.1 (ie, must check file system to see
-		// if <name>.cfs and <name>.nrm exist)         
-		
-		private bool hasSingleNormFile; // true if this segment maintains norms in a single file; 
-		// false otherwise
-		// this is currently false for segments populated by DocumentWriter
-		// and true for newly created merged segments (both
-		// compound and non compound).
-		
-		private System.Collections.Generic.IList<string> files; // cached list of files that this segment uses
-		// in the Directory
-		
-		internal long sizeInBytes = - 1; // total byte size of all of our files (computed on demand)
-		
-		private int docStoreOffset; // if this segment shares stored fields & vectors, this
-		// offset is where in that file this segment's docs begin
-		private System.String docStoreSegment; // name used to derive fields/vectors file we share with
-		// other segments
-		private bool docStoreIsCompoundFile; // whether doc store files are stored in compound file (*.cfx)
+		internal const int NO = - 1;            // e.g. no norms; no deletes;
+		internal const int YES = 1;             // e.g. have norms; have deletes;
+		internal const int CHECK_DIR = 0;       // e.g. must check dir to see if there are norms/deletions
+		internal const int WITHOUT_GEN = 0;     // a file name that has no GEN in it. 
+		
+		public System.String name;              // unique name in dir
+		public int docCount;                    // number of docs in seg
+		public Directory dir;                   // where segment resides
+		
+		private bool preLockless;               // true if this is a segments file written before
+		                                        // lock-less commits (2.1)
+		
+		private long delGen;                    // current generation of del file; NO if there
+		                                        // are no deletes; CHECK_DIR if it's a pre-2.1 segment
+		                                        // (and we must check filesystem); YES or higher if
+		                                        // there are deletes at generation N
+		
+		private long[] normGen;                 // current generation of each field's norm file.
+		                                        // If this array is null, for lockLess this means no 
+		                                        // separate norms.  For preLockLess this means we must 
+		                                        // check filesystem. If this array is not null, its 
+		                                        // values mean: NO says this field has no separate  
+		                                        // norms; CHECK_DIR says it is a preLockLess segment and    
+		                                        // filesystem must be checked; >= YES says this field  
+		                                        // has separate norms with the specified generation
+		
+		private sbyte isCompoundFile;           // NO if it is not; YES if it is; CHECK_DIR if it's
+		                                        // pre-2.1 (ie, must check file system to see
+		                                        // if <name>.cfs and <name>.nrm exist)         
+		
+		private bool hasSingleNormFile;         // true if this segment maintains norms in a single file; 
+		                                        // false otherwise
+		                                        // this is currently false for segments populated by DocumentWriter
+		                                        // and true for newly created merged segments (both
+		                                        // compound and non compound).
+		
+		private IList<string> files;            // cached list of files that this segment uses
+		                                        // in the Directory
+		
+		internal long sizeInBytes = - 1;        // total byte size of all of our files (computed on demand)
+		
+		private int docStoreOffset;             // if this segment shares stored fields & vectors, this
+		                                        // offset is where in that file this segment's docs begin
+		private System.String docStoreSegment;  // name used to derive fields/vectors file we share with
+		                                        // other segments
+		private bool docStoreIsCompoundFile;    // whether doc store files are stored in compound file (*.cfx)
 		
-		private int delCount; // How many deleted docs in this segment, or -1 if not yet known
-		// (if it's an older index)
+		private int delCount;                   // How many deleted docs in this segment, or -1 if not yet known
+		                                        // (if it's an older index)
 		
-		private bool hasProx; // True if this segment has any fields with omitTermFreqAndPositions==false
+		private bool hasProx;                   // True if this segment has any fields with omitTermFreqAndPositions==false
 
-        private System.Collections.Generic.IDictionary<string, string> diagnostics;
+        private IDictionary<string, string> diagnostics;
 		
 		public override System.String ToString()
 		{
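An aside on the generation scheme documented in the field comments above: NO,
CHECK_DIR, and YES-or-higher each imply a different way of locating the
per-segment deletions file. A hedged sketch; the real code delegates to
IndexFileNames, and the exact file-name encoding is only approximated here:

    // Illustrative only -- not the committed code.
    string DelFileName(string name, long delGen)
    {
        if (delGen == NO) return null;                  // segment has no deletions
        if (delGen == CHECK_DIR) return name + ".del";  // pre-2.1: probe the directory
        return name + "_" + delGen + ".del";            // lockless: generation in the name
    }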
@@ -151,14 +151,12 @@ namespace Lucene.Net.Index
 			delCount = src.delCount;
 		}
 		
-		// must be Map<String, String>
-        internal void SetDiagnostics(System.Collections.Generic.IDictionary<string, string> diagnostics)
+        internal void SetDiagnostics(IDictionary<string, string> diagnostics)
 		{
 			this.diagnostics = diagnostics;
 		}
 		
-		// returns Map<String, String>
-        public System.Collections.Generic.IDictionary<string, string> GetDiagnostics()
+        public IDictionary<string, string> GetDiagnostics()
 		{
 			return diagnostics;
 		}
@@ -242,7 +240,7 @@ namespace Lucene.Net.Index
 				}
 				else
 				{
-					diagnostics = new System.Collections.Generic.Dictionary<string,string>();
+					diagnostics = new Dictionary<string,string>();
 				}
 			}
 			else
@@ -257,7 +255,7 @@ namespace Lucene.Net.Index
 				docStoreSegment = null;
 				delCount = - 1;
 				hasProx = true;
-				diagnostics = new System.Collections.Generic.Dictionary<string,string>();
+				diagnostics = new Dictionary<string,string>();
 			}
 		}
 		
@@ -294,12 +292,12 @@ namespace Lucene.Net.Index
 		{
 			if (sizeInBytes == - 1)
 			{
-				System.Collections.Generic.IList<string> files = Files();
+				IList<string> files = Files();
 				int size = files.Count;
 				sizeInBytes = 0;
 				for (int i = 0; i < size; i++)
 				{
-					System.String fileName = (System.String) files[i];
+					System.String fileName = files[i];
 					// We don't count bytes used by a shared doc store
 					// against this segment:
 					if (docStoreOffset == - 1 || !IndexFileNames.IsDocStoreFile(fileName))
@@ -368,6 +366,7 @@ namespace Lucene.Net.Index
 			si.hasProx = hasProx;
 			si.preLockless = preLockless;
 			si.hasSingleNormFile = hasSingleNormFile;
             if (this.diagnostics != null)
             {
-                si.diagnostics = new System.Collections.Generic.Dictionary<string, string>();
+                // Copy via HashMap so lookups of missing keys return null (Java semantics).
+                si.diagnostics = new SupportClass.HashMap<string, string>(this.diagnostics);
@@ -450,18 +449,20 @@ namespace Lucene.Net.Index
 					// This means this segment was saved with pre-LOCKLESS
 					// code.  So we must fallback to the original
 					// directory list check:
-					System.String[] result = dir.List();
+					System.String[] result = dir.ListAll();
 					if (result == null)
 					{
-						throw new System.IO.IOException("cannot read directory " + dir + ": list() returned null");
+                        throw new System.IO.IOException("cannot read directory " + dir + ": ListAll() returned null");
 					}
-					
+
+				    IndexFileNameFilter filter = IndexFileNameFilter.GetFilter();
 					System.String pattern;
 					pattern = name + ".s";
 					int patternLength = pattern.Length;
 					for (int i = 0; i < result.Length; i++)
 					{
-						if (result[i].StartsWith(pattern) && System.Char.IsDigit(result[i][patternLength]))
+					    string fileName = result[i];
+						if (filter.Accept(null, fileName) && fileName.StartsWith(pattern) && char.IsDigit(fileName[patternLength]))
 							return true;
 					}
 					return false;
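Concretely, for a segment named "_3" the pattern above is "_3.s", so a file
"_3.s0" (separate norms for field 0) matches while "_3.cfs" does not. The same
predicate in isolation:

    string pattern = "_3" + ".s";
    string candidate = "_3.s0";
    bool isSeparateNorms = candidate.StartsWith(pattern)
                           && char.IsDigit(candidate[pattern.Length]);  // true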
@@ -690,7 +691,7 @@ namespace Lucene.Net.Index
 			return hasProx;
 		}
 		
-		private void  AddIfExists(System.Collections.Generic.IList<string> files, System.String fileName)
+		private void  AddIfExists(IList<string> files, System.String fileName)
 		{
 			if (dir.FileExists(fileName))
 				files.Add(fileName);
@@ -702,7 +703,7 @@ namespace Lucene.Net.Index
 		* modify it.
 		*/
 		
-		public System.Collections.Generic.IList<string> Files()
+		public IList<string> Files()
 		{
 			
 			if (files != null)
@@ -711,7 +712,7 @@ namespace Lucene.Net.Index
 				return files;
 			}
 
-            System.Collections.Generic.List<string> fileList = new System.Collections.Generic.List<string>();
+            var fileList = new System.Collections.Generic.List<string>();
 			
 			bool useCompoundFile = GetUseCompoundFile();
 			
@@ -865,16 +866,14 @@ namespace Lucene.Net.Index
 		/// </summary>
 		public  override bool Equals(System.Object obj)
 		{
-			SegmentInfo other;
-			try
-			{
-				other = (SegmentInfo) obj;
-			}
-			catch (System.InvalidCastException cce)
-			{
-				return false;
-			}
-			return other.dir == dir && other.name.Equals(name);
+            if (this == obj) return true;
+
+            if (obj is SegmentInfo)
+            {
+                SegmentInfo other = (SegmentInfo) obj;
+                return other.dir == dir && other.name.Equals(name);
+            }
+		    return false;
 		}
 		
 		public override int GetHashCode()

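The Equals rewrite above replaces exception-driven type testing with an "is"
check. The same intent can be written with "as" -- a sketch of the idiomatic
alternative, not the committed code:

    public override bool Equals(object obj)
    {
        if (ReferenceEquals(this, obj)) return true;
        SegmentInfo other = obj as SegmentInfo;  // null when obj is not a SegmentInfo
        return other != null && other.dir == dir && other.name.Equals(name);
    }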
Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/SegmentInfos.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/SegmentInfos.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/SegmentInfos.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/SegmentInfos.cs Wed Nov  9 21:03:47 2011
@@ -34,7 +34,7 @@ namespace Lucene.Net.Index
 	/// (subject to change suddenly in the next release)<p/>
 	/// </summary>
 	[Serializable]
-	public sealed class SegmentInfos:System.Collections.ArrayList
+	public sealed class SegmentInfos : System.Collections.ArrayList
 	{
 		private class AnonymousClassFindSegmentsFile:FindSegmentsFile
 		{
@@ -314,6 +314,9 @@ namespace Lucene.Net.Index
 					}
 					else if (0 != input.ReadByte())
 					{
+                        // TODO: In Java, this is a read-only dictionary, probably for a
+                        //       reason, though it isn't immediately clear why.  We'd have
+                        //       to roll our own; there is no equivalent in the BCL.
                         userData = new System.Collections.Generic.Dictionary<string,string>();
 						userData.Add("userData", input.ReadString());
 					}
@@ -443,7 +446,7 @@ namespace Lucene.Net.Index
             sis.generation = this.generation;
             sis.lastGeneration = this.lastGeneration;
             // sis.pendingSegnOutput = this.pendingSegnOutput; // {{Aroush-2.9}} needed?
-            sis.userData = new System.Collections.Generic.Dictionary<string, string>(userData);
+            sis.userData = new SupportClass.HashMap<string, string>(userData);
             sis.version = this.version;
             return sis;
 		}
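Clone() now copies userData into a SupportClass.HashMap rather than a
Dictionary. Assuming SupportClass.HashMap mirrors java.util.HashMap's lookup
semantics (an assumption, not verified against SupportClass here), the
practical difference is that a missing key yields null instead of throwing:

    var h = new SupportClass.HashMap<string, string>();
    string v = h["missing"];  // null, as with java.util.HashMap.get()

    var d = new System.Collections.Generic.Dictionary<string, string>();
    // string v2 = d["missing"];  // would throw KeyNotFoundException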
@@ -701,10 +704,11 @@ namespace Lucene.Net.Index
 							}
 							catch (System.Threading.ThreadInterruptedException ie)
 							{
-								// In 3.0 we will change this to throw
-								// InterruptedException instead
-								SupportClass.ThreadClass.Current().Interrupt();
-								throw new System.SystemException(ie.Message, ie);
+                                // As of 3.0 the interruption propagates as-is: rethrowing
+                                // keeps the original exception type and stack trace instead
+                                // of restoring the interrupted state and throwing a
+                                // SystemException wrapper as the old code did.
+							    throw;
 							}
 						}
 						
@@ -718,17 +722,7 @@ namespace Lucene.Net.Index
 						
 						if (gen == - 1)
 						{
-							// Neither approach found a generation
-							System.String s;
-							if (files != null)
-							{
-								s = "";
-								for (int i = 0; i < files.Length; i++)
-									s += (" " + files[i]);
-							}
-							else
-								s = " null";
-							throw new System.IO.FileNotFoundException("no segments* file found in " + directory + ": files:" + s);
+							throw new System.IO.FileNotFoundException("no segments* file found in " + directory + ": files:" + (files == null ? " null" : " " + string.Join(" ", files)));
 						}
 					}
 					
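Note the null guard around string.Join above: unlike the hand-rolled loop it
replaces, string.Join throws ArgumentNullException when handed a null array, so
the files == null case still needs explicit handling:

    string[] files = null;
    string s = files == null ? "null" : string.Join(" ", files);  // "null", no exception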
@@ -913,11 +907,10 @@ namespace Lucene.Net.Index
 		/// </summary>
         public System.Collections.Generic.ICollection<string> Files(Directory dir, bool includeSegmentsFile)
 		{
-            System.Collections.Generic.Dictionary<string, string> files = new System.Collections.Generic.Dictionary<string, string>();
+            System.Collections.Generic.HashSet<string> files = new System.Collections.Generic.HashSet<string>();
 			if (includeSegmentsFile)
 			{
-                string tmp = GetCurrentSegmentFileName();
-                files.Add(tmp, tmp);
+                files.Add(GetCurrentSegmentFileName());
 			}
 			int size = Count;
 			for (int i = 0; i < size; i++)
@@ -925,10 +918,10 @@ namespace Lucene.Net.Index
 				SegmentInfo info = Info(i);
 				if (info.dir == dir)
 				{
-					SupportClass.CollectionsHelper.AddAllIfNotContains(files, Info(i).Files());
+                    files.UnionWith(Info(i).Files());
 				}
 			}
-			return files.Keys;
+			return files;
 		}
 		
 		internal void  FinishCommit(Directory dir)

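Switching Files() to a HashSet<string> makes the de-duplication implicit:
UnionWith only adds unseen entries, which is what
CollectionsHelper.AddAllIfNotContains emulated over a Dictionary. A quick
illustration:

    var files = new System.Collections.Generic.HashSet<string>();
    files.Add("segments_2");
    files.UnionWith(new[] { "_0.cfs", "_0.cfs", "_1.cfs" });
    // files now holds exactly: segments_2, _0.cfs, _1.cfs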
Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/SegmentMergeQueue.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/SegmentMergeQueue.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/SegmentMergeQueue.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/SegmentMergeQueue.cs Wed Nov  9 21:03:47 2011
@@ -16,23 +16,20 @@
  */
 
 using System;
-
-using PriorityQueue = Lucene.Net.Util.PriorityQueue;
+using Lucene.Net.Util;
 
 namespace Lucene.Net.Index
 {
 	
-	sealed class SegmentMergeQueue:PriorityQueue
+	sealed class SegmentMergeQueue:PriorityQueue<SegmentMergeInfo>
 	{
 		internal SegmentMergeQueue(int size)
 		{
 			Initialize(size);
 		}
-		
-		public override bool LessThan(System.Object a, System.Object b)
+
+        public override bool LessThan(SegmentMergeInfo stiA, SegmentMergeInfo stiB)
 		{
-			SegmentMergeInfo stiA = (SegmentMergeInfo) a;
-			SegmentMergeInfo stiB = (SegmentMergeInfo) b;
 			int comparison = stiA.term.CompareTo(stiB.term);
 			if (comparison == 0)
 				return stiA.base_Renamed < stiB.base_Renamed;
@@ -43,7 +40,7 @@ namespace Lucene.Net.Index
 		internal void  Close()
 		{
 			while (Top() != null)
-				((SegmentMergeInfo) Pop()).Close();
+				Pop().Close();
 		}
 	}
 }
\ No newline at end of file
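
With PriorityQueue<T> generic over SegmentMergeInfo, both LessThan and the
Pop() call in Close() lose their casts. The same contract in miniature -- a toy
subclass assuming only the Initialize/LessThan members visible in this diff:

    sealed class IntQueue : Lucene.Net.Util.PriorityQueue<int>
    {
        internal IntQueue(int size)
        {
            Initialize(size);  // sizes the heap, as in SegmentMergeQueue above
        }

        public override bool LessThan(int a, int b)
        {
            return a < b;  // smallest element surfaces at Top()
        }
    }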