Posted to commits@lucenenet.apache.org by cc...@apache.org on 2013/04/03 19:39:44 UTC

[01/51] [partial] Mass convert mixed tabs to spaces

Updated Branches:
  refs/heads/master 1d08baedd -> 62f018abd


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/SegmentTermDocs.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/SegmentTermDocs.cs b/src/core/Index/SegmentTermDocs.cs
index f7efde6..1f482d9 100644
--- a/src/core/Index/SegmentTermDocs.cs
+++ b/src/core/Index/SegmentTermDocs.cs
@@ -22,94 +22,94 @@ using BitVector = Lucene.Net.Util.BitVector;
 
 namespace Lucene.Net.Index
 {
-	
-	internal class SegmentTermDocs : TermDocs
-	{
-		protected internal SegmentReader parent;
-		protected internal IndexInput freqStream;
-		protected internal int count;
-		protected internal int df;
-		protected internal BitVector deletedDocs;
-		internal int doc = 0;
-		internal int freq;
-		
-		private readonly int skipInterval;
-		private readonly int maxSkipLevels;
-		private DefaultSkipListReader skipListReader;
-		
-		private long freqBasePointer;
-		private long proxBasePointer;
-		
-		private long skipPointer;
-		private bool haveSkipped;
-		
-		protected internal bool currentFieldStoresPayloads;
-		protected internal bool currentFieldOmitTermFreqAndPositions;
+    
+    internal class SegmentTermDocs : TermDocs
+    {
+        protected internal SegmentReader parent;
+        protected internal IndexInput freqStream;
+        protected internal int count;
+        protected internal int df;
+        protected internal BitVector deletedDocs;
+        internal int doc = 0;
+        internal int freq;
+        
+        private readonly int skipInterval;
+        private readonly int maxSkipLevels;
+        private DefaultSkipListReader skipListReader;
+        
+        private long freqBasePointer;
+        private long proxBasePointer;
+        
+        private long skipPointer;
+        private bool haveSkipped;
+        
+        protected internal bool currentFieldStoresPayloads;
+        protected internal bool currentFieldOmitTermFreqAndPositions;
 
-	    private bool isDisposed;
-		
-		public /*protected internal*/ SegmentTermDocs(SegmentReader parent)
-		{
-			this.parent = parent;
-			this.freqStream = (IndexInput) parent.core.freqStream.Clone();
-			lock (parent)
-			{
-				this.deletedDocs = parent.deletedDocs;
-			}
-			this.skipInterval = parent.core.GetTermsReader().SkipInterval;
-			this.maxSkipLevels = parent.core.GetTermsReader().MaxSkipLevels;
-		}
-		
-		public virtual void  Seek(Term term)
-		{
-			TermInfo ti = parent.core.GetTermsReader().Get(term);
-			Seek(ti, term);
-		}
-		
-		public virtual void  Seek(TermEnum termEnum)
-		{
-			TermInfo ti;
-			Term term;
-			
-			// use comparison of fieldinfos to verify that termEnum belongs to the same segment as this SegmentTermDocs
-			if (termEnum is SegmentTermEnum && ((SegmentTermEnum) termEnum).fieldInfos == parent.core.fieldInfos)
-			{
-				// optimized case
-				var segmentTermEnum = ((SegmentTermEnum) termEnum);
-				term = segmentTermEnum.Term;
-				ti = segmentTermEnum.TermInfo();
-			}
-			else
-			{
-				// punt case
-				term = termEnum.Term;
-				ti = parent.core.GetTermsReader().Get(term);
-			}
-			
-			Seek(ti, term);
-		}
-		
-		internal virtual void  Seek(TermInfo ti, Term term)
-		{
-			count = 0;
-			FieldInfo fi = parent.core.fieldInfos.FieldInfo(term.Field);
-			currentFieldOmitTermFreqAndPositions = (fi != null) && fi.omitTermFreqAndPositions;
-			currentFieldStoresPayloads = (fi != null) && fi.storePayloads;
-			if (ti == null)
-			{
-				df = 0;
-			}
-			else
-			{
-				df = ti.docFreq;
-				doc = 0;
-				freqBasePointer = ti.freqPointer;
-				proxBasePointer = ti.proxPointer;
-				skipPointer = freqBasePointer + ti.skipOffset;
-				freqStream.Seek(freqBasePointer);
-				haveSkipped = false;
-			}
-		}
+        private bool isDisposed;
+        
+        public /*protected internal*/ SegmentTermDocs(SegmentReader parent)
+        {
+            this.parent = parent;
+            this.freqStream = (IndexInput) parent.core.freqStream.Clone();
+            lock (parent)
+            {
+                this.deletedDocs = parent.deletedDocs;
+            }
+            this.skipInterval = parent.core.GetTermsReader().SkipInterval;
+            this.maxSkipLevels = parent.core.GetTermsReader().MaxSkipLevels;
+        }
+        
+        public virtual void  Seek(Term term)
+        {
+            TermInfo ti = parent.core.GetTermsReader().Get(term);
+            Seek(ti, term);
+        }
+        
+        public virtual void  Seek(TermEnum termEnum)
+        {
+            TermInfo ti;
+            Term term;
+            
+            // use comparison of fieldinfos to verify that termEnum belongs to the same segment as this SegmentTermDocs
+            if (termEnum is SegmentTermEnum && ((SegmentTermEnum) termEnum).fieldInfos == parent.core.fieldInfos)
+            {
+                // optimized case
+                var segmentTermEnum = ((SegmentTermEnum) termEnum);
+                term = segmentTermEnum.Term;
+                ti = segmentTermEnum.TermInfo();
+            }
+            else
+            {
+                // punt case
+                term = termEnum.Term;
+                ti = parent.core.GetTermsReader().Get(term);
+            }
+            
+            Seek(ti, term);
+        }
+        
+        internal virtual void  Seek(TermInfo ti, Term term)
+        {
+            count = 0;
+            FieldInfo fi = parent.core.fieldInfos.FieldInfo(term.Field);
+            currentFieldOmitTermFreqAndPositions = (fi != null) && fi.omitTermFreqAndPositions;
+            currentFieldStoresPayloads = (fi != null) && fi.storePayloads;
+            if (ti == null)
+            {
+                df = 0;
+            }
+            else
+            {
+                df = ti.docFreq;
+                doc = 0;
+                freqBasePointer = ti.freqPointer;
+                proxBasePointer = ti.proxPointer;
+                skipPointer = freqBasePointer + ti.skipOffset;
+                freqStream.Seek(freqBasePointer);
+                haveSkipped = false;
+            }
+        }
 
         public void Dispose()
         {
@@ -133,150 +133,150 @@ namespace Lucene.Net.Index
             isDisposed = true;
         }
 
-	    public int Doc
-	    {
-	        get { return doc; }
-	    }
+        public int Doc
+        {
+            get { return doc; }
+        }
 
-	    public int Freq
-	    {
-	        get { return freq; }
-	    }
+        public int Freq
+        {
+            get { return freq; }
+        }
 
-	    protected internal virtual void  SkippingDoc()
-		{
-		}
-		
-		public virtual bool Next()
-		{
-			while (true)
-			{
-				if (count == df)
-					return false;
-				int docCode = freqStream.ReadVInt();
-				
-				if (currentFieldOmitTermFreqAndPositions)
-				{
-					doc += docCode;
-					freq = 1;
-				}
-				else
-				{
-					doc += Number.URShift(docCode, 1); // shift off low bit
-					if ((docCode & 1) != 0)
-					// if low bit is set
-						freq = 1;
-					// freq is one
-					else
-						freq = freqStream.ReadVInt(); // else read freq
-				}
-				
-				count++;
-				
-				if (deletedDocs == null || !deletedDocs.Get(doc))
-					break;
-				SkippingDoc();
-			}
-			return true;
-		}
-		
-		/// <summary>Optimized implementation. </summary>
-		public virtual int Read(int[] docs, int[] freqs)
-		{
-			int length = docs.Length;
-			if (currentFieldOmitTermFreqAndPositions)
-			{
-				return ReadNoTf(docs, freqs, length);
-			}
-			else
-			{
-				int i = 0;
-				while (i < length && count < df)
-				{
-					// manually inlined call to next() for speed
-					int docCode = freqStream.ReadVInt();
-					doc += Number.URShift(docCode, 1); // shift off low bit
-					if ((docCode & 1) != 0)
-					// if low bit is set
-						freq = 1;
-					// freq is one
-					else
-						freq = freqStream.ReadVInt(); // else read freq
-					count++;
-					
-					if (deletedDocs == null || !deletedDocs.Get(doc))
-					{
-						docs[i] = doc;
-						freqs[i] = freq;
-						++i;
-					}
-				}
-				return i;
-			}
-		}
-		
-		private int ReadNoTf(int[] docs, int[] freqs, int length)
-		{
-			int i = 0;
-			while (i < length && count < df)
-			{
-				// manually inlined call to next() for speed
-				doc += freqStream.ReadVInt();
-				count++;
-				
-				if (deletedDocs == null || !deletedDocs.Get(doc))
-				{
-					docs[i] = doc;
-					// Hardwire freq to 1 when term freqs were not
-					// stored in the index
-					freqs[i] = 1;
-					++i;
-				}
-			}
-			return i;
-		}
-		
-		
-		/// <summary>Overridden by SegmentTermPositions to skip in prox stream. </summary>
-		protected internal virtual void  SkipProx(long proxPointer, int payloadLength)
-		{
-		}
-		
-		/// <summary>Optimized implementation. </summary>
-		public virtual bool SkipTo(int target)
-		{
-			if (df >= skipInterval)
-			{
-				// optimized case
-				if (skipListReader == null)
-					skipListReader = new DefaultSkipListReader((IndexInput) freqStream.Clone(), maxSkipLevels, skipInterval); // lazily clone
-				
-				if (!haveSkipped)
-				{
-					// lazily initialize skip stream
-					skipListReader.Init(skipPointer, freqBasePointer, proxBasePointer, df, currentFieldStoresPayloads);
-					haveSkipped = true;
-				}
-				
-				int newCount = skipListReader.SkipTo(target);
-				if (newCount > count)
-				{
-					freqStream.Seek(skipListReader.GetFreqPointer());
-					SkipProx(skipListReader.GetProxPointer(), skipListReader.GetPayloadLength());
-					
-					doc = skipListReader.GetDoc();
-					count = newCount;
-				}
-			}
-			
-			// done skipping, now just scan
-			do 
-			{
-				if (!Next())
-					return false;
-			}
-			while (target > doc);
-			return true;
-		}
+        protected internal virtual void  SkippingDoc()
+        {
+        }
+        
+        public virtual bool Next()
+        {
+            while (true)
+            {
+                if (count == df)
+                    return false;
+                int docCode = freqStream.ReadVInt();
+                
+                if (currentFieldOmitTermFreqAndPositions)
+                {
+                    doc += docCode;
+                    freq = 1;
+                }
+                else
+                {
+                    doc += Number.URShift(docCode, 1); // shift off low bit
+                    if ((docCode & 1) != 0)
+                    // if low bit is set
+                        freq = 1;
+                    // freq is one
+                    else
+                        freq = freqStream.ReadVInt(); // else read freq
+                }
+                
+                count++;
+                
+                if (deletedDocs == null || !deletedDocs.Get(doc))
+                    break;
+                SkippingDoc();
+            }
+            return true;
+        }
+        
+        /// <summary>Optimized implementation. </summary>
+        public virtual int Read(int[] docs, int[] freqs)
+        {
+            int length = docs.Length;
+            if (currentFieldOmitTermFreqAndPositions)
+            {
+                return ReadNoTf(docs, freqs, length);
+            }
+            else
+            {
+                int i = 0;
+                while (i < length && count < df)
+                {
+                    // manually inlined call to next() for speed
+                    int docCode = freqStream.ReadVInt();
+                    doc += Number.URShift(docCode, 1); // shift off low bit
+                    if ((docCode & 1) != 0)
+                    // if low bit is set
+                        freq = 1;
+                    // freq is one
+                    else
+                        freq = freqStream.ReadVInt(); // else read freq
+                    count++;
+                    
+                    if (deletedDocs == null || !deletedDocs.Get(doc))
+                    {
+                        docs[i] = doc;
+                        freqs[i] = freq;
+                        ++i;
+                    }
+                }
+                return i;
+            }
+        }
+        
+        private int ReadNoTf(int[] docs, int[] freqs, int length)
+        {
+            int i = 0;
+            while (i < length && count < df)
+            {
+                // manually inlined call to next() for speed
+                doc += freqStream.ReadVInt();
+                count++;
+                
+                if (deletedDocs == null || !deletedDocs.Get(doc))
+                {
+                    docs[i] = doc;
+                    // Hardwire freq to 1 when term freqs were not
+                    // stored in the index
+                    freqs[i] = 1;
+                    ++i;
+                }
+            }
+            return i;
+        }
+        
+        
+        /// <summary>Overridden by SegmentTermPositions to skip in prox stream. </summary>
+        protected internal virtual void  SkipProx(long proxPointer, int payloadLength)
+        {
+        }
+        
+        /// <summary>Optimized implementation. </summary>
+        public virtual bool SkipTo(int target)
+        {
+            if (df >= skipInterval)
+            {
+                // optimized case
+                if (skipListReader == null)
+                    skipListReader = new DefaultSkipListReader((IndexInput) freqStream.Clone(), maxSkipLevels, skipInterval); // lazily clone
+                
+                if (!haveSkipped)
+                {
+                    // lazily initialize skip stream
+                    skipListReader.Init(skipPointer, freqBasePointer, proxBasePointer, df, currentFieldStoresPayloads);
+                    haveSkipped = true;
+                }
+                
+                int newCount = skipListReader.SkipTo(target);
+                if (newCount > count)
+                {
+                    freqStream.Seek(skipListReader.GetFreqPointer());
+                    SkipProx(skipListReader.GetProxPointer(), skipListReader.GetPayloadLength());
+                    
+                    doc = skipListReader.GetDoc();
+                    count = newCount;
+                }
+            }
+            
+            // done skipping, now just scan
+            do 
+            {
+                if (!Next())
+                    return false;
+            }
+            while (target > doc);
+            return true;
+        }
     }
 }
\ No newline at end of file
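
A note on the code being re-indented above: SegmentTermDocs.Next() decodes the
frequency stream as VInt doc deltas. When term freqs are stored, the low bit of
each docCode flags an implicit freq of 1 (otherwise the freq follows as its own
VInt); when the field omits term freqs, the delta is stored plain and freq is
hardwired to 1. Below is a minimal standalone sketch of that decode step, with
an in-memory queue standing in for IndexInput.ReadVInt(); the names are
illustrative, not part of the Lucene.Net API.

using System;
using System.Collections.Generic;

static class DocCodeDemo
{
    // Decode (doc, freq) pairs from a queue standing in for the freq stream.
    // With term freqs stored, each docCode packs the doc delta in its high
    // bits; a set low bit means freq == 1, otherwise the freq value follows.
    static IEnumerable<(int Doc, int Freq)> Decode(Queue<int> values, bool omitTf)
    {
        int doc = 0;
        while (values.Count > 0)
        {
            int docCode = values.Dequeue();
            int freq;
            if (omitTf)
            {
                doc += docCode;                   // plain delta
                freq = 1;                         // freq hardwired to 1
            }
            else
            {
                doc += (int)((uint)docCode >> 1); // shift off low bit (URShift)
                freq = (docCode & 1) != 0 ? 1 : values.Dequeue();
            }
            yield return (doc, freq);
        }
    }

    static void Main()
    {
        // Encodes doc 3 (freq 1) and doc 7 (freq 2):
        // delta 3 -> (3 << 1) | 1 = 7; delta 4 -> 4 << 1 = 8, then freq 2.
        var stream = new Queue<int>(new[] { 7, 8, 2 });
        foreach (var (doc, freq) in Decode(stream, omitTf: false))
            Console.WriteLine("doc=" + doc + " freq=" + freq);
    }
}

Running this prints doc=3 freq=1 and doc=7 freq=2, recovering the pairs encoded
in Main().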


[04/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/SegmentInfos.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/SegmentInfos.cs b/src/core/Index/SegmentInfos.cs
index ca5297e..798b7d9 100644
--- a/src/core/Index/SegmentInfos.cs
+++ b/src/core/Index/SegmentInfos.cs
@@ -28,417 +28,417 @@ using NoSuchDirectoryException = Lucene.Net.Store.NoSuchDirectoryException;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> A collection of segmentInfo objects with methods for operating on
-	/// those segments in relation to the file system.
-	/// 
-	/// <p/><b>NOTE:</b> This API is new and still experimental
-	/// (subject to change suddenly in the next release)<p/>
-	/// </summary>
-	[Serializable]
-	public sealed class SegmentInfos : List<SegmentInfo>, ICloneable
-	{
-		private class AnonymousClassFindSegmentsFile:FindSegmentsFile
-		{
-			private void  InitBlock(SegmentInfos enclosingInstance)
-			{
-				this.enclosingInstance = enclosingInstance;
-			}
-			private SegmentInfos enclosingInstance;
-			public SegmentInfos Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			internal AnonymousClassFindSegmentsFile(SegmentInfos enclosingInstance, Lucene.Net.Store.Directory Param1):base(Param1)
-			{
-				InitBlock(enclosingInstance);
-			}
-			
-			public /*protected internal*/ override System.Object DoBody(System.String segmentFileName)
-			{
-				Enclosing_Instance.Read(directory, segmentFileName);
-				return null;
-			}
-		}
-		/// <summary>The file format version, a negative number. </summary>
-		/* Works since counter, the old 1st entry, is always >= 0 */
-		public const int FORMAT = - 1;
-		
-		/// <summary>This format adds details used for lockless commits.  It differs
-		/// slightly from the previous format in that file names
-		/// are never re-used (write once).  Instead, each file is
-		/// written to the next generation.  For example,
-		/// segments_1, segments_2, etc.  This allows us to not use
-		/// a commit lock.  See <a
-		/// href="http://lucene.apache.org/java/docs/fileformats.html">file
-		/// formats</a> for details.
-		/// </summary>
-		public const int FORMAT_LOCKLESS = - 2;
-		
-		/// <summary>This format adds a "hasSingleNormFile" flag into each segment info.
-		/// See <a href="http://issues.apache.org/jira/browse/LUCENE-756">LUCENE-756</a>
-		/// for details.
-		/// </summary>
-		public const int FORMAT_SINGLE_NORM_FILE = - 3;
-		
-		/// <summary>This format allows multiple segments to share a single
-		/// vectors and stored fields file. 
-		/// </summary>
-		public const int FORMAT_SHARED_DOC_STORE = - 4;
-		
-		/// <summary>This format adds a checksum at the end of the file to
-		/// ensure all bytes were successfully written. 
-		/// </summary>
-		public const int FORMAT_CHECKSUM = - 5;
-		
-		/// <summary>This format adds the deletion count for each segment.
-		/// This way IndexWriter can efficiently report numDocs(). 
-		/// </summary>
-		public const int FORMAT_DEL_COUNT = - 6;
-		
-		/// <summary>This format adds the boolean hasProx to record if any
-		/// fields in the segment store prox information (ie, have
-		/// omitTermFreqAndPositions==false) 
-		/// </summary>
-		public const int FORMAT_HAS_PROX = - 7;
-		
-		/// <summary>This format adds optional commit userData (String) storage. </summary>
-		public const int FORMAT_USER_DATA = - 8;
-		
-		/// <summary>This format adds optional per-segment String
-		/// diagnostics storage, and switches userData to Map 
-		/// </summary>
-		public const int FORMAT_DIAGNOSTICS = - 9;
-		
-		/* This must always point to the most recent file format. */
-		internal static readonly int CURRENT_FORMAT = FORMAT_DIAGNOSTICS;
-		
-		public int counter = 0; // used to name new segments
-		/// <summary> counts how often the index has been changed by adding or deleting docs.
-		/// starting with the current time in milliseconds forces to create unique version numbers.
-		/// </summary>
-		private long version = (DateTime.UtcNow.Ticks / TimeSpan.TicksPerMillisecond);
-		
-		private long generation = 0; // generation of the "segments_N" for the next commit
-		private long lastGeneration = 0; // generation of the "segments_N" file we last successfully read
-		// or wrote; this is normally the same as generation except if
-		// there was an IOException that had interrupted a commit
+    
+    /// <summary> A collection of segmentInfo objects with methods for operating on
+    /// those segments in relation to the file system.
+    /// 
+    /// <p/><b>NOTE:</b> This API is new and still experimental
+    /// (subject to change suddenly in the next release)<p/>
+    /// </summary>
+    [Serializable]
+    public sealed class SegmentInfos : List<SegmentInfo>, ICloneable
+    {
+        private class AnonymousClassFindSegmentsFile:FindSegmentsFile
+        {
+            private void  InitBlock(SegmentInfos enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
+            private SegmentInfos enclosingInstance;
+            public SegmentInfos Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+                
+            }
+            internal AnonymousClassFindSegmentsFile(SegmentInfos enclosingInstance, Lucene.Net.Store.Directory Param1):base(Param1)
+            {
+                InitBlock(enclosingInstance);
+            }
+            
+            public /*protected internal*/ override System.Object DoBody(System.String segmentFileName)
+            {
+                Enclosing_Instance.Read(directory, segmentFileName);
+                return null;
+            }
+        }
+        /// <summary>The file format version, a negative number. </summary>
+        /* Works since counter, the old 1st entry, is always >= 0 */
+        public const int FORMAT = - 1;
+        
+        /// <summary>This format adds details used for lockless commits.  It differs
+        /// slightly from the previous format in that file names
+        /// are never re-used (write once).  Instead, each file is
+        /// written to the next generation.  For example,
+        /// segments_1, segments_2, etc.  This allows us to not use
+        /// a commit lock.  See <a
+        /// href="http://lucene.apache.org/java/docs/fileformats.html">file
+        /// formats</a> for details.
+        /// </summary>
+        public const int FORMAT_LOCKLESS = - 2;
+        
+        /// <summary>This format adds a "hasSingleNormFile" flag into each segment info.
+        /// See <a href="http://issues.apache.org/jira/browse/LUCENE-756">LUCENE-756</a>
+        /// for details.
+        /// </summary>
+        public const int FORMAT_SINGLE_NORM_FILE = - 3;
+        
+        /// <summary>This format allows multiple segments to share a single
+        /// vectors and stored fields file. 
+        /// </summary>
+        public const int FORMAT_SHARED_DOC_STORE = - 4;
+        
+        /// <summary>This format adds a checksum at the end of the file to
+        /// ensure all bytes were successfully written. 
+        /// </summary>
+        public const int FORMAT_CHECKSUM = - 5;
+        
+        /// <summary>This format adds the deletion count for each segment.
+        /// This way IndexWriter can efficiently report numDocs(). 
+        /// </summary>
+        public const int FORMAT_DEL_COUNT = - 6;
+        
+        /// <summary>This format adds the boolean hasProx to record if any
+        /// fields in the segment store prox information (ie, have
+        /// omitTermFreqAndPositions==false) 
+        /// </summary>
+        public const int FORMAT_HAS_PROX = - 7;
+        
+        /// <summary>This format adds optional commit userData (String) storage. </summary>
+        public const int FORMAT_USER_DATA = - 8;
+        
+        /// <summary>This format adds optional per-segment String
+        /// diagnostics storage, and switches userData to Map 
+        /// </summary>
+        public const int FORMAT_DIAGNOSTICS = - 9;
+        
+        /* This must always point to the most recent file format. */
+        internal static readonly int CURRENT_FORMAT = FORMAT_DIAGNOSTICS;
+        
+        public int counter = 0; // used to name new segments
+        /// <summary> counts how often the index has been changed by adding or deleting docs.
+        /// starting with the current time in milliseconds forces to create unique version numbers.
+        /// </summary>
+        private long version = (DateTime.UtcNow.Ticks / TimeSpan.TicksPerMillisecond);
+        
+        private long generation = 0; // generation of the "segments_N" for the next commit
+        private long lastGeneration = 0; // generation of the "segments_N" file we last successfully read
+        // or wrote; this is normally the same as generation except if
+        // there was an IOException that had interrupted a commit
 
         private IDictionary<string, string> userData = new HashMap<string, string>(); // Opaque Map<String, String> that user can specify during IndexWriter.commit
-		
-		/// <summary> If non-null, information about loading segments_N files</summary>
-		/// <seealso cref="SetInfoStream">
-		/// </seealso>
-		private static System.IO.StreamWriter infoStream;
-		
-		public SegmentInfo Info(int i)
-		{
-			return (SegmentInfo) this[i];
-		}
-		
-		/// <summary> Get the generation (N) of the current segments_N file
-		/// from a list of files.
-		/// 
-		/// </summary>
-		/// <param name="files">-- array of file names to check
-		/// </param>
-		public static long GetCurrentSegmentGeneration(System.String[] files)
-		{
-			if (files == null)
-			{
-				return - 1;
-			}
-			long max = - 1;
-			for (int i = 0; i < files.Length; i++)
-			{
-				System.String file = files[i];
-				if (file.StartsWith(IndexFileNames.SEGMENTS) && !file.Equals(IndexFileNames.SEGMENTS_GEN))
-				{
-					long gen = GenerationFromSegmentsFileName(file);
-					if (gen > max)
-					{
-						max = gen;
-					}
-				}
-			}
-			return max;
-		}
-		
-		/// <summary> Get the generation (N) of the current segments_N file
-		/// in the directory.
-		/// 
-		/// </summary>
-		/// <param name="directory">-- directory to search for the latest segments_N file
-		/// </param>
-		public static long GetCurrentSegmentGeneration(Directory directory)
-		{
-			try
-			{
-				return GetCurrentSegmentGeneration(directory.ListAll());
-			}
-			catch (NoSuchDirectoryException)
-			{
-				return - 1;
-			}
-		}
-		
-		/// <summary> Get the filename of the current segments_N file
-		/// from a list of files.
-		/// 
-		/// </summary>
-		/// <param name="files">-- array of file names to check
-		/// </param>
-		
-		public static System.String GetCurrentSegmentFileName(System.String[] files)
-		{
-			return IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", GetCurrentSegmentGeneration(files));
-		}
-		
-		/// <summary> Get the filename of the current segments_N file
-		/// in the directory.
-		/// 
-		/// </summary>
-		/// <param name="directory">-- directory to search for the latest segments_N file
-		/// </param>
-		public static System.String GetCurrentSegmentFileName(Directory directory)
-		{
-			return IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", GetCurrentSegmentGeneration(directory));
-		}
-		
-		/// <summary> Get the segments_N filename in use by this segment infos.</summary>
-		public System.String GetCurrentSegmentFileName()
-		{
-			return IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", lastGeneration);
-		}
-		
-		/// <summary> Parse the generation off the segments file name and
-		/// return it.
-		/// </summary>
-		public static long GenerationFromSegmentsFileName(System.String fileName)
-		{
-			if (fileName.Equals(IndexFileNames.SEGMENTS))
-			{
-				return 0;
-			}
-			else if (fileName.StartsWith(IndexFileNames.SEGMENTS))
-			{
-				return Number.ToInt64(fileName.Substring(1 + IndexFileNames.SEGMENTS.Length));
-			}
-			else
-			{
-				throw new System.ArgumentException("fileName \"" + fileName + "\" is not a segments file");
-			}
-		}
-		
-		
-		/// <summary> Get the next segments_N filename that will be written.</summary>
+        
+        /// <summary> If non-null, information about loading segments_N files</summary>
+        /// <seealso cref="SetInfoStream">
+        /// </seealso>
+        private static System.IO.StreamWriter infoStream;
+        
+        public SegmentInfo Info(int i)
+        {
+            return (SegmentInfo) this[i];
+        }
+        
+        /// <summary> Get the generation (N) of the current segments_N file
+        /// from a list of files.
+        /// 
+        /// </summary>
+        /// <param name="files">-- array of file names to check
+        /// </param>
+        public static long GetCurrentSegmentGeneration(System.String[] files)
+        {
+            if (files == null)
+            {
+                return - 1;
+            }
+            long max = - 1;
+            for (int i = 0; i < files.Length; i++)
+            {
+                System.String file = files[i];
+                if (file.StartsWith(IndexFileNames.SEGMENTS) && !file.Equals(IndexFileNames.SEGMENTS_GEN))
+                {
+                    long gen = GenerationFromSegmentsFileName(file);
+                    if (gen > max)
+                    {
+                        max = gen;
+                    }
+                }
+            }
+            return max;
+        }
+        
+        /// <summary> Get the generation (N) of the current segments_N file
+        /// in the directory.
+        /// 
+        /// </summary>
+        /// <param name="directory">-- directory to search for the latest segments_N file
+        /// </param>
+        public static long GetCurrentSegmentGeneration(Directory directory)
+        {
+            try
+            {
+                return GetCurrentSegmentGeneration(directory.ListAll());
+            }
+            catch (NoSuchDirectoryException)
+            {
+                return - 1;
+            }
+        }
+        
+        /// <summary> Get the filename of the current segments_N file
+        /// from a list of files.
+        /// 
+        /// </summary>
+        /// <param name="files">-- array of file names to check
+        /// </param>
+        
+        public static System.String GetCurrentSegmentFileName(System.String[] files)
+        {
+            return IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", GetCurrentSegmentGeneration(files));
+        }
+        
+        /// <summary> Get the filename of the current segments_N file
+        /// in the directory.
+        /// 
+        /// </summary>
+        /// <param name="directory">-- directory to search for the latest segments_N file
+        /// </param>
+        public static System.String GetCurrentSegmentFileName(Directory directory)
+        {
+            return IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", GetCurrentSegmentGeneration(directory));
+        }
+        
+        /// <summary> Get the segments_N filename in use by this segment infos.</summary>
+        public System.String GetCurrentSegmentFileName()
+        {
+            return IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", lastGeneration);
+        }
+        
+        /// <summary> Parse the generation off the segments file name and
+        /// return it.
+        /// </summary>
+        public static long GenerationFromSegmentsFileName(System.String fileName)
+        {
+            if (fileName.Equals(IndexFileNames.SEGMENTS))
+            {
+                return 0;
+            }
+            else if (fileName.StartsWith(IndexFileNames.SEGMENTS))
+            {
+                return Number.ToInt64(fileName.Substring(1 + IndexFileNames.SEGMENTS.Length));
+            }
+            else
+            {
+                throw new System.ArgumentException("fileName \"" + fileName + "\" is not a segments file");
+            }
+        }
+        
+        
+        /// <summary> Get the next segments_N filename that will be written.</summary>
         [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1024:UsePropertiesWhereAppropriate")]
         public System.String GetNextSegmentFileName()
-		{
-			long nextGeneration;
-			
-			if (generation == - 1)
-			{
-				nextGeneration = 1;
-			}
-			else
-			{
-				nextGeneration = generation + 1;
-			}
-			return IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", nextGeneration);
-		}
-		
-		/// <summary> Read a particular segmentFileName.  Note that this may
-		/// throw an IOException if a commit is in process.
-		/// 
-		/// </summary>
-		/// <param name="directory">-- directory containing the segments file
-		/// </param>
-		/// <param name="segmentFileName">-- segment file to load
-		/// </param>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  IOException if there is a low-level IO error </throws>
-		public void  Read(Directory directory, System.String segmentFileName)
-		{
-			bool success = false;
-			
-			// Clear any previous segments:
-			Clear();
-			
-			var input = new ChecksumIndexInput(directory.OpenInput(segmentFileName));
-			
-			generation = GenerationFromSegmentsFileName(segmentFileName);
-			
-			lastGeneration = generation;
-			
-			try
-			{
-				int format = input.ReadInt();
-				if (format < 0)
-				{
-					// file contains explicit format info
-					// check that it is a format we can understand
-					if (format < CURRENT_FORMAT)
-						throw new CorruptIndexException("Unknown format version: " + format);
-					version = input.ReadLong(); // read version
-					counter = input.ReadInt(); // read counter
-				}
-				else
-				{
-					// file is in old format without explicit format info
-					counter = format;
-				}
-				
-				for (int i = input.ReadInt(); i > 0; i--)
-				{
-					// read segmentInfos
-					Add(new SegmentInfo(directory, format, input));
-				}
-				
-				if (format >= 0)
-				{
-					// in old format the version number may be at the end of the file
-					if (input.FilePointer >= input.Length())
-						version = (DateTime.UtcNow.Ticks / TimeSpan.TicksPerMillisecond);
-					// old file format without version number
-					else
-						version = input.ReadLong(); // read version
-				}
-				
-				if (format <= FORMAT_USER_DATA)
-				{
-					if (format <= FORMAT_DIAGNOSTICS)
-					{
-						userData = input.ReadStringStringMap();
-					}
-					else if (0 != input.ReadByte())
-					{
+        {
+            long nextGeneration;
+            
+            if (generation == - 1)
+            {
+                nextGeneration = 1;
+            }
+            else
+            {
+                nextGeneration = generation + 1;
+            }
+            return IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", nextGeneration);
+        }
+        
+        /// <summary> Read a particular segmentFileName.  Note that this may
+        /// throw an IOException if a commit is in process.
+        /// 
+        /// </summary>
+        /// <param name="directory">-- directory containing the segments file
+        /// </param>
+        /// <param name="segmentFileName">-- segment file to load
+        /// </param>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  IOException if there is a low-level IO error </throws>
+        public void  Read(Directory directory, System.String segmentFileName)
+        {
+            bool success = false;
+            
+            // Clear any previous segments:
+            Clear();
+            
+            var input = new ChecksumIndexInput(directory.OpenInput(segmentFileName));
+            
+            generation = GenerationFromSegmentsFileName(segmentFileName);
+            
+            lastGeneration = generation;
+            
+            try
+            {
+                int format = input.ReadInt();
+                if (format < 0)
+                {
+                    // file contains explicit format info
+                    // check that it is a format we can understand
+                    if (format < CURRENT_FORMAT)
+                        throw new CorruptIndexException("Unknown format version: " + format);
+                    version = input.ReadLong(); // read version
+                    counter = input.ReadInt(); // read counter
+                }
+                else
+                {
+                    // file is in old format without explicit format info
+                    counter = format;
+                }
+                
+                for (int i = input.ReadInt(); i > 0; i--)
+                {
+                    // read segmentInfos
+                    Add(new SegmentInfo(directory, format, input));
+                }
+                
+                if (format >= 0)
+                {
+                    // in old format the version number may be at the end of the file
+                    if (input.FilePointer >= input.Length())
+                        version = (DateTime.UtcNow.Ticks / TimeSpan.TicksPerMillisecond);
+                    // old file format without version number
+                    else
+                        version = input.ReadLong(); // read version
+                }
+                
+                if (format <= FORMAT_USER_DATA)
+                {
+                    if (format <= FORMAT_DIAGNOSTICS)
+                    {
+                        userData = input.ReadStringStringMap();
+                    }
+                    else if (0 != input.ReadByte())
+                    {
                         // TODO: Should be read-only map
                         userData = new HashMap<string,string> {{"userData", input.ReadString()}};
-					}
-					else
-					{
+                    }
+                    else
+                    {
                         // TODO: Should be empty read-only map
                         userData = new HashMap<string, string>();
-					}
-				}
-				else
-				{
+                    }
+                }
+                else
+                {
                     // TODO: Should be empty read-only map
                     userData = new HashMap<string, string>();
-				}
-				
-				if (format <= FORMAT_CHECKSUM)
-				{
-					long checksumNow = input.Checksum;
-					long checksumThen = input.ReadLong();
-					if (checksumNow != checksumThen)
-						throw new CorruptIndexException("checksum mismatch in segments file");
-				}
-				success = true;
-			}
-			finally
-			{
-				input.Close();
-				if (!success)
-				{
-					// Clear any segment infos we had loaded so we
-					// have a clean slate on retry:
-					Clear();
-				}
-			}
-		}
-		
-		/// <summary> This version of read uses the retry logic (for lock-less
-		/// commits) to find the right segments file to load.
-		/// </summary>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  IOException if there is a low-level IO error </throws>
-		public void  Read(Directory directory)
-		{
-			
-			generation = lastGeneration = - 1;
-			
-			new AnonymousClassFindSegmentsFile(this, directory).Run();
-		}
-		
-		// Only non-null after prepareCommit has been called and
-		// before finishCommit is called
-		internal ChecksumIndexOutput pendingSegnOutput;
-		
-		private void  Write(Directory directory)
-		{
-			
-			System.String segmentFileName = GetNextSegmentFileName();
-			
-			// Always advance the generation on write:
-			if (generation == - 1)
-			{
-				generation = 1;
-			}
-			else
-			{
-				generation++;
-			}
-			
-			var segnOutput = new ChecksumIndexOutput(directory.CreateOutput(segmentFileName));
-			
-			bool success = false;
-			
-			try
-			{
-				segnOutput.WriteInt(CURRENT_FORMAT); // write FORMAT
-				segnOutput.WriteLong(++version); // every write changes
-				// the index
-				segnOutput.WriteInt(counter); // write counter
-				segnOutput.WriteInt(Count); // write infos
-				for (int i = 0; i < Count; i++)
-				{
-					Info(i).Write(segnOutput);
-				}
-				segnOutput.WriteStringStringMap(userData);
-				segnOutput.PrepareCommit();
-				success = true;
-				pendingSegnOutput = segnOutput;
-			}
-			finally
-			{
-				if (!success)
-				{
-					// We hit an exception above; try to close the file
-					// but suppress any exception:
-					try
-					{
-						segnOutput.Close();
-					}
-					catch (System.Exception)
-					{
-						// Suppress so we keep throwing the original exception
-					}
-					try
-					{
-						// Try not to leave a truncated segments_N file in
-						// the index:
-						directory.DeleteFile(segmentFileName);
-					}
-					catch (System.Exception)
-					{
-						// Suppress so we keep throwing the original exception
-					}
-				}
-			}
-		}
-		
-		/// <summary> Returns a copy of this instance, also copying each
-		/// SegmentInfo.
-		/// </summary>
-		
-		public System.Object Clone()
-		{
+                }
+                
+                if (format <= FORMAT_CHECKSUM)
+                {
+                    long checksumNow = input.Checksum;
+                    long checksumThen = input.ReadLong();
+                    if (checksumNow != checksumThen)
+                        throw new CorruptIndexException("checksum mismatch in segments file");
+                }
+                success = true;
+            }
+            finally
+            {
+                input.Close();
+                if (!success)
+                {
+                    // Clear any segment infos we had loaded so we
+                    // have a clean slate on retry:
+                    Clear();
+                }
+            }
+        }
+        
+        /// <summary> This version of read uses the retry logic (for lock-less
+        /// commits) to find the right segments file to load.
+        /// </summary>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  IOException if there is a low-level IO error </throws>
+        public void  Read(Directory directory)
+        {
+            
+            generation = lastGeneration = - 1;
+            
+            new AnonymousClassFindSegmentsFile(this, directory).Run();
+        }
+        
+        // Only non-null after prepareCommit has been called and
+        // before finishCommit is called
+        internal ChecksumIndexOutput pendingSegnOutput;
+        
+        private void  Write(Directory directory)
+        {
+            
+            System.String segmentFileName = GetNextSegmentFileName();
+            
+            // Always advance the generation on write:
+            if (generation == - 1)
+            {
+                generation = 1;
+            }
+            else
+            {
+                generation++;
+            }
+            
+            var segnOutput = new ChecksumIndexOutput(directory.CreateOutput(segmentFileName));
+            
+            bool success = false;
+            
+            try
+            {
+                segnOutput.WriteInt(CURRENT_FORMAT); // write FORMAT
+                segnOutput.WriteLong(++version); // every write changes
+                // the index
+                segnOutput.WriteInt(counter); // write counter
+                segnOutput.WriteInt(Count); // write infos
+                for (int i = 0; i < Count; i++)
+                {
+                    Info(i).Write(segnOutput);
+                }
+                segnOutput.WriteStringStringMap(userData);
+                segnOutput.PrepareCommit();
+                success = true;
+                pendingSegnOutput = segnOutput;
+            }
+            finally
+            {
+                if (!success)
+                {
+                    // We hit an exception above; try to close the file
+                    // but suppress any exception:
+                    try
+                    {
+                        segnOutput.Close();
+                    }
+                    catch (System.Exception)
+                    {
+                        // Suppress so we keep throwing the original exception
+                    }
+                    try
+                    {
+                        // Try not to leave a truncated segments_N file in
+                        // the index:
+                        directory.DeleteFile(segmentFileName);
+                    }
+                    catch (System.Exception)
+                    {
+                        // Suppress so we keep throwing the original exception
+                    }
+                }
+            }
+        }
+        
+        /// <summary> Returns a copy of this instance, also copying each
+        /// SegmentInfo.
+        /// </summary>
+        
+        public System.Object Clone()
+        {
             SegmentInfos sis = new SegmentInfos();
             for (int i = 0; i < this.Count; i++)
             {
@@ -451,29 +451,29 @@ namespace Lucene.Net.Index
             sis.userData = new HashMap<string, string>(userData);
             sis.version = this.version;
             return sis;
-		}
+        }
 
-	    /// <summary> version number when this SegmentInfos was generated.</summary>
-	    public long Version
-	    {
-	        get { return version; }
-	    }
+        /// <summary> version number when this SegmentInfos was generated.</summary>
+        public long Version
+        {
+            get { return version; }
+        }
 
-	    public long Generation
-	    {
-	        get { return generation; }
-	    }
+        public long Generation
+        {
+            get { return generation; }
+        }
 
-	    public long LastGeneration
-	    {
-	        get { return lastGeneration; }
-	    }
+        public long LastGeneration
+        {
+            get { return lastGeneration; }
+        }
 
-	    /// <summary> Current version number from segments file.</summary>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  IOException if there is a low-level IO error </throws>
-		public static long ReadCurrentVersion(Directory directory)
-		{
+        /// <summary> Current version number from segments file.</summary>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  IOException if there is a low-level IO error </throws>
+        public static long ReadCurrentVersion(Directory directory)
+        {
             // Fully read the segments file: this ensures that it's
             // completely written so that if
             // IndexWriter.prepareCommit has been called (but not
@@ -482,552 +482,552 @@ namespace Lucene.Net.Index
             var sis = new SegmentInfos();
             sis.Read(directory);
             return sis.version;
-			//return (long) ((System.Int64) new AnonymousClassFindSegmentsFile1(directory).Run());
+            //return (long) ((System.Int64) new AnonymousClassFindSegmentsFile1(directory).Run());
             //DIGY: AnonymousClassFindSegmentsFile1 can safely be deleted
-		}
-		
-		/// <summary> Returns userData from latest segments file</summary>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  IOException if there is a low-level IO error </throws>
+        }
+        
+        /// <summary> Returns userData from latest segments file</summary>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  IOException if there is a low-level IO error </throws>
         public static System.Collections.Generic.IDictionary<string, string> ReadCurrentUserData(Directory directory)
-		{
-			var sis = new SegmentInfos();
-			sis.Read(directory);
-			return sis.UserData;
-		}
-		
-		/// <summary>If non-null, information about retries when loading
-		/// the segments file will be printed to this.
-		/// </summary>
-		public static void  SetInfoStream(System.IO.StreamWriter infoStream)
-		{
-			SegmentInfos.infoStream = infoStream;
-		}
-		
-		/* Advanced configuration of retry logic in loading
-		segments_N file */
-		private static int defaultGenFileRetryCount = 10;
-		private static int defaultGenFileRetryPauseMsec = 50;
-		private static int defaultGenLookaheadCount = 10;
+        {
+            var sis = new SegmentInfos();
+            sis.Read(directory);
+            return sis.UserData;
+        }
+        
+        /// <summary>If non-null, information about retries when loading
+        /// the segments file will be printed to this.
+        /// </summary>
+        public static void  SetInfoStream(System.IO.StreamWriter infoStream)
+        {
+            SegmentInfos.infoStream = infoStream;
+        }
+        
+        /* Advanced configuration of retry logic in loading
+        segments_N file */
+        private static int defaultGenFileRetryCount = 10;
+        private static int defaultGenFileRetryPauseMsec = 50;
+        private static int defaultGenLookaheadCount = 10;
 
-	    /// <summary> Advanced: Gets or sets how many times to try loading the
-	    /// segments.gen file contents to determine current segment
-	    /// generation.  This file is only referenced when the
-	    /// primary method (listing the directory) fails.
-	    /// </summary>
-	    public static int DefaultGenFileRetryCount
-	    {
-	        get { return defaultGenFileRetryCount; }
-	        set { defaultGenFileRetryCount = value; }
-	    }
+        /// <summary> Advanced: Gets or sets how many times to try loading the
+        /// segments.gen file contents to determine current segment
+        /// generation.  This file is only referenced when the
+        /// primary method (listing the directory) fails.
+        /// </summary>
+        public static int DefaultGenFileRetryCount
+        {
+            get { return defaultGenFileRetryCount; }
+            set { defaultGenFileRetryCount = value; }
+        }
 
-	    public static int DefaultGenFileRetryPauseMsec
-	    {
-	        set { defaultGenFileRetryPauseMsec = value; }
-	        get { return defaultGenFileRetryPauseMsec; }
-	    }
+        public static int DefaultGenFileRetryPauseMsec
+        {
+            set { defaultGenFileRetryPauseMsec = value; }
+            get { return defaultGenFileRetryPauseMsec; }
+        }
 
-	    /// <summary> Advanced: set how many times to try incrementing the
-	    /// gen when loading the segments file.  This only runs if
-	    /// the primary (listing directory) and secondary (opening
-	    /// segments.gen file) methods fail to find the segments
-	    /// file.
-	    /// </summary>
-	    public static int DefaultGenLookaheadCount
-	    {
+        /// <summary> Advanced: set how many times to try incrementing the
+        /// gen when loading the segments file.  This only runs if
+        /// the primary (listing directory) and secondary (opening
+        /// segments.gen file) methods fail to find the segments
+        /// file.
+        /// </summary>
+        public static int DefaultGenLookaheadCount
+        {
             set { defaultGenLookaheadCount = value; }
             get { return defaultGenLookaheadCount; }
-	    }
+        }
 
-	    /// <seealso cref="SetInfoStream">
-	    /// </seealso>
-	    public static StreamWriter InfoStream
-	    {
-	        get { return infoStream; }
-	    }
+        /// <seealso cref="SetInfoStream">
+        /// </seealso>
+        public static StreamWriter InfoStream
+        {
+            get { return infoStream; }
+        }
 
-	    private static void  Message(System.String message)
-		{
-			if (infoStream != null)
-			{
-				infoStream.WriteLine("SIS [" + ThreadClass.Current().Name + "]: " + message);
-			}
-		}
-		
-		/// <summary> Utility class for executing code that needs to do
-		/// something with the current segments file.  This is
-		/// necessary with lock-less commits because from the time
-		/// you locate the current segments file name, until you
-		/// actually open it, read its contents, or check modified
-		/// time, etc., it could have been deleted due to a writer
-		/// commit finishing.
-		/// </summary>
-		public abstract class FindSegmentsFile
-		{
-			
-			internal Directory directory;
+        private static void  Message(System.String message)
+        {
+            if (infoStream != null)
+            {
+                infoStream.WriteLine("SIS [" + ThreadClass.Current().Name + "]: " + message);
+            }
+        }
+        
+        /// <summary> Utility class for executing code that needs to do
+        /// something with the current segments file.  This is
+        /// necessary with lock-less commits because from the time
+        /// you locate the current segments file name, until you
+        /// actually open it, read its contents, or check modified
+        /// time, etc., it could have been deleted due to a writer
+        /// commit finishing.
+        /// </summary>
+        public abstract class FindSegmentsFile
+        {
+            
+            internal Directory directory;
 
-		    protected FindSegmentsFile(Directory directory)
-			{
-				this.directory = directory;
-			}
-			
-			public System.Object Run()
-			{
-				return Run(null);
-			}
-			
-			public System.Object Run(IndexCommit commit)
-			{
-				if (commit != null)
-				{
-					if (directory != commit.Directory)
-						throw new System.IO.IOException("the specified commit does not match the specified Directory");
-					return DoBody(commit.SegmentsFileName);
-				}
-				
-				System.String segmentFileName = null;
-				long lastGen = - 1;
-				long gen = 0;
-				int genLookaheadCount = 0;
-				System.IO.IOException exc = null;
-				bool retry = false;
-				
-				int method = 0;
-				
-				// Loop until we succeed in calling doBody() without
-				// hitting an IOException.  An IOException most likely
-				// means a commit was in process and has finished, in
-				// the time it took us to load the now-old infos files
-				// (and segments files).  It's also possible it's a
-				// true error (corrupt index).  To distinguish these,
-				// on each retry we must see "forward progress" on
-				// which generation we are trying to load.  If we
-				// don't, then the original error is real and we throw
-				// it.
-				
-				// We have three methods for determining the current
-				// generation.  We try the first two in parallel, and
-				// fall back to the third when necessary.
-				
-				while (true)
-				{
-					
-					if (0 == method)
-					{
-						
-						// Method 1: list the directory and use the highest
-						// segments_N file.  This method works well as long
-						// as there is no stale caching on the directory
-						// contents (NOTE: NFS clients often have such stale
-						// caching):
-						System.String[] files = null;
-						
-						long genA = - 1;
-						
-						files = directory.ListAll();
-						
-						if (files != null)
-							genA = Lucene.Net.Index.SegmentInfos.GetCurrentSegmentGeneration(files);
-						
-						Lucene.Net.Index.SegmentInfos.Message("directory listing genA=" + genA);
-						
-						// Method 2: open segments.gen and read its
-						// contents.  Then we take the larger of the two
-						// gens.  This way, if either approach is hitting
-						// a stale cache (NFS) we have a better chance of
-						// getting the right generation.
-						long genB = - 1;
-						for (int i = 0; i < Lucene.Net.Index.SegmentInfos.defaultGenFileRetryCount; i++)
-						{
-							IndexInput genInput = null;
-							try
-							{
-								genInput = directory.OpenInput(IndexFileNames.SEGMENTS_GEN);
-							}
-							catch (System.IO.FileNotFoundException e)
-							{
-								Lucene.Net.Index.SegmentInfos.Message("segments.gen open: FileNotFoundException " + e);
-								break;
-							}
-							catch (System.IO.IOException e)
-							{
-								Lucene.Net.Index.SegmentInfos.Message("segments.gen open: IOException " + e);
-							}
-							
-							if (genInput != null)
-							{
-								try
-								{
-									int version = genInput.ReadInt();
-									if (version == Lucene.Net.Index.SegmentInfos.FORMAT_LOCKLESS)
-									{
-										long gen0 = genInput.ReadLong();
-										long gen1 = genInput.ReadLong();
-										Lucene.Net.Index.SegmentInfos.Message("fallback check: " + gen0 + "; " + gen1);
-										if (gen0 == gen1)
-										{
-											// The file is consistent.
-											genB = gen0;
-											break;
-										}
-									}
-								}
-								catch (System.IO.IOException)
-								{
-									// will retry
-								}
-								finally
-								{
-									genInput.Close();
-								}
-							}
-							
+            protected FindSegmentsFile(Directory directory)
+            {
+                this.directory = directory;
+            }
+            
+            public System.Object Run()
+            {
+                return Run(null);
+            }
+            
+            public System.Object Run(IndexCommit commit)
+            {
+                if (commit != null)
+                {
+                    if (directory != commit.Directory)
+                        throw new System.IO.IOException("the specified commit does not match the specified Directory");
+                    return DoBody(commit.SegmentsFileName);
+                }
+                
+                System.String segmentFileName = null;
+                long lastGen = - 1;
+                long gen = 0;
+                int genLookaheadCount = 0;
+                System.IO.IOException exc = null;
+                bool retry = false;
+                
+                int method = 0;
+                
+                // Loop until we succeed in calling doBody() without
+                // hitting an IOException.  An IOException most likely
+                // means a commit was in process and has finished, in
+                // the time it took us to load the now-old infos files
+                // (and segments files).  It's also possible it's a
+                // true error (corrupt index).  To distinguish these,
+                // on each retry we must see "forward progress" on
+                // which generation we are trying to load.  If we
+                // don't, then the original error is real and we throw
+                // it.
+                
+                // We have three methods for determining the current
+                // generation.  We try the first two in parallel, and
+                // fall back to the third when necessary.
+                
+                while (true)
+                {
+                    
+                    if (0 == method)
+                    {
+                        
+                        // Method 1: list the directory and use the highest
+                        // segments_N file.  This method works well as long
+                        // as there is no stale caching on the directory
+                        // contents (NOTE: NFS clients often have such stale
+                        // caching):
+                        System.String[] files = null;
+                        
+                        long genA = - 1;
+                        
+                        files = directory.ListAll();
+                        
+                        if (files != null)
+                            genA = Lucene.Net.Index.SegmentInfos.GetCurrentSegmentGeneration(files);
+                        
+                        Lucene.Net.Index.SegmentInfos.Message("directory listing genA=" + genA);
+                        
+                        // Method 2: open segments.gen and read its
+                        // contents.  Then we take the larger of the two
+                        // gens.  This way, if either approach is hitting
+                        // a stale cache (NFS) we have a better chance of
+                        // getting the right generation.
+                        long genB = - 1;
+                        for (int i = 0; i < Lucene.Net.Index.SegmentInfos.defaultGenFileRetryCount; i++)
+                        {
+                            IndexInput genInput = null;
+                            try
+                            {
+                                genInput = directory.OpenInput(IndexFileNames.SEGMENTS_GEN);
+                            }
+                            catch (System.IO.FileNotFoundException e)
+                            {
+                                Lucene.Net.Index.SegmentInfos.Message("segments.gen open: FileNotFoundException " + e);
+                                break;
+                            }
+                            catch (System.IO.IOException e)
+                            {
+                                Lucene.Net.Index.SegmentInfos.Message("segments.gen open: IOException " + e);
+                            }
+                            
+                            if (genInput != null)
+                            {
+                                try
+                                {
+                                    int version = genInput.ReadInt();
+                                    if (version == Lucene.Net.Index.SegmentInfos.FORMAT_LOCKLESS)
+                                    {
+                                        long gen0 = genInput.ReadLong();
+                                        long gen1 = genInput.ReadLong();
+                                        Lucene.Net.Index.SegmentInfos.Message("fallback check: " + gen0 + "; " + gen1);
+                                        if (gen0 == gen1)
+                                        {
+                                            // The file is consistent.
+                                            genB = gen0;
+                                            break;
+                                        }
+                                    }
+                                }
+                                catch (System.IO.IOException)
+                                {
+                                    // will retry
+                                }
+                                finally
+                                {
+                                    genInput.Close();
+                                }
+                            }
+                            
                             System.Threading.Thread.Sleep(new TimeSpan((System.Int64) 10000 * Lucene.Net.Index.SegmentInfos.defaultGenFileRetryPauseMsec));
-							
-							
-						}
-						
-						Lucene.Net.Index.SegmentInfos.Message(IndexFileNames.SEGMENTS_GEN + " check: genB=" + genB);
-						
-						// Pick the larger of the two gen's:
-						if (genA > genB)
-							gen = genA;
-						else
-							gen = genB;
-						
-						if (gen == - 1)
-						{
-							throw new System.IO.FileNotFoundException("no segments* file found in " + directory + ": files:" + string.Join(" ", files));
-						}
-					}
-					
-					// Third method (fallback if first & second methods
-					// are not reliable): since both directory cache and
-					// file contents cache seem to be stale, just
-					// advance the generation.
-					if (1 == method || (0 == method && lastGen == gen && retry))
-					{
-						
-						method = 1;
-						
-						if (genLookaheadCount < Lucene.Net.Index.SegmentInfos.defaultGenLookaheadCount)
-						{
-							gen++;
-							genLookaheadCount++;
-							Lucene.Net.Index.SegmentInfos.Message("look ahead increment gen to " + gen);
-						}
-					}
-					
-					if (lastGen == gen)
-					{
-						
-						// This means we're about to try the same
-						// segments_N last tried.  This is allowed,
-						// exactly once, because writer could have been in
-						// the process of writing segments_N last time.
-						
-						if (retry)
-						{
-							// OK, we've tried the same segments_N file
-							// twice in a row, so this must be a real
-							// error.  We throw the original exception we
-							// got.
-							throw exc;
-						}
+                            
+                            
+                        }
+                        
+                        Lucene.Net.Index.SegmentInfos.Message(IndexFileNames.SEGMENTS_GEN + " check: genB=" + genB);
+                        
+                        // Pick the larger of the two gen's:
+                        if (genA > genB)
+                            gen = genA;
+                        else
+                            gen = genB;
+                        
+                        if (gen == - 1)
+                        {
+                            throw new System.IO.FileNotFoundException("no segments* file found in " + directory + ": files:" + string.Join(" ", files));
+                        }
+                    }
+                    
+                    // Third method (fallback if first & second methods
+                    // are not reliable): since both directory cache and
+                    // file contents cache seem to be stale, just
+                    // advance the generation.
+                    if (1 == method || (0 == method && lastGen == gen && retry))
+                    {
+                        
+                        method = 1;
+                        
+                        if (genLookaheadCount < Lucene.Net.Index.SegmentInfos.defaultGenLookaheadCount)
+                        {
+                            gen++;
+                            genLookaheadCount++;
+                            Lucene.Net.Index.SegmentInfos.Message("look ahead increment gen to " + gen);
+                        }
+                    }
+                    
+                    if (lastGen == gen)
+                    {
+                        
+                        // This means we're about to try the same
+                        // segments_N last tried.  This is allowed,
+                        // exactly once, because writer could have been in
+                        // the process of writing segments_N last time.
+                        
+                        if (retry)
+                        {
+                            // OK, we've tried the same segments_N file
+                            // twice in a row, so this must be a real
+                            // error.  We throw the original exception we
+                            // got.
+                            throw exc;
+                        }
 
-						retry = true;
-					}
-					else if (0 == method)
-					{
-						// Segment file has advanced since our last loop, so
-						// reset retry:
-						retry = false;
-					}
-					
-					lastGen = gen;
-					
-					segmentFileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen);
-					
-					try
-					{
-						System.Object v = DoBody(segmentFileName);
-						Lucene.Net.Index.SegmentInfos.Message("success on " + segmentFileName);
-						
-						return v;
-					}
-					catch (System.IO.IOException err)
-					{
-						
-						// Save the original root cause:
-						if (exc == null)
-						{
-							exc = err;
-						}
-						
-						Lucene.Net.Index.SegmentInfos.Message("primary Exception on '" + segmentFileName + "': " + err + "'; will retry: retry=" + retry + "; gen = " + gen);
-						
-						if (!retry && gen > 1)
-						{
-							
-							// This is our first time trying this segments
-							// file (because retry is false), and, there is
-							// possibly a segments_(N-1) (because gen > 1).
-							// So, check if the segments_(N-1) exists and
-							// try it if so:
-							System.String prevSegmentFileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen - 1);
-							
-							bool prevExists;
-							prevExists = directory.FileExists(prevSegmentFileName);
-							
-							if (prevExists)
-							{
-								Lucene.Net.Index.SegmentInfos.Message("fallback to prior segment file '" + prevSegmentFileName + "'");
-								try
-								{
-									System.Object v = DoBody(prevSegmentFileName);
-									if (exc != null)
-									{
-										Lucene.Net.Index.SegmentInfos.Message("success on fallback " + prevSegmentFileName);
-									}
-									return v;
-								}
-								catch (System.IO.IOException err2)
-								{
-									Lucene.Net.Index.SegmentInfos.Message("secondary Exception on '" + prevSegmentFileName + "': " + err2 + "'; will retry");
-								}
-							}
-						}
-					}
-				}
-			}
-			
-			/// <summary> Subclass must implement this.  The assumption is an
-			/// IOException will be thrown if something goes wrong
-			/// during the processing that could have been caused by
-			/// a writer committing.
-			/// </summary>
-			public /*internal*/ abstract System.Object DoBody(System.String segmentFileName);
-		}
-		
-		/// <summary> Returns a new SegmentInfos containg the SegmentInfo
-		/// instances in the specified range first (inclusive) to
-		/// last (exclusive), so total number of segments returned
-		/// is last-first.
-		/// </summary>
-		public SegmentInfos Range(int first, int last)
-		{
-			SegmentInfos infos = new SegmentInfos();
-			infos.AddRange(this.GetRange(first, last - first));
-			return infos;
-		}
-		
-		// Carry over generation numbers from another SegmentInfos
-		internal void  UpdateGeneration(SegmentInfos other)
-		{
-			lastGeneration = other.lastGeneration;
-			generation = other.generation;
-			version = other.version;
-		}
-		
-		internal void  RollbackCommit(Directory dir)
-		{
-			if (pendingSegnOutput != null)
-			{
-				try
-				{
-					pendingSegnOutput.Close();
-				}
-				catch (System.Exception)
-				{
-					// Suppress so we keep throwing the original exception
-					// in our caller
-				}
-				
-				// Must carefully compute fileName from "generation"
-				// since lastGeneration isn't incremented:
-				try
-				{
-					System.String segmentFileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", generation);
-					dir.DeleteFile(segmentFileName);
-				}
-				catch (System.Exception)
-				{
-					// Suppress so we keep throwing the original exception
-					// in our caller
-				}
-				pendingSegnOutput = null;
-			}
-		}
-		
-		/// <summary>Call this to start a commit.  This writes the new
-		/// segments file, but writes an invalid checksum at the
-		/// end, so that it is not visible to readers.  Once this
-		/// is called you must call <see cref="FinishCommit" /> to complete
-		/// the commit or <see cref="RollbackCommit" /> to abort it. 
-		/// </summary>
-		internal void  PrepareCommit(Directory dir)
-		{
-			if (pendingSegnOutput != null)
-				throw new System.SystemException("prepareCommit was already called");
-			Write(dir);
-		}
-		
-		/// <summary>Returns all file names referenced by SegmentInfo
-		/// instances matching the provided Directory (ie files
-		/// associated with any "external" segments are skipped).
-		/// The returned collection is recomputed on each
-		/// invocation.  
-		/// </summary>
+                        retry = true;
+                    }
+                    else if (0 == method)
+                    {
+                        // Segment file has advanced since our last loop, so
+                        // reset retry:
+                        retry = false;
+                    }
+                    
+                    lastGen = gen;
+                    
+                    segmentFileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen);
+                    
+                    try
+                    {
+                        System.Object v = DoBody(segmentFileName);
+                        Lucene.Net.Index.SegmentInfos.Message("success on " + segmentFileName);
+                        
+                        return v;
+                    }
+                    catch (System.IO.IOException err)
+                    {
+                        
+                        // Save the original root cause:
+                        if (exc == null)
+                        {
+                            exc = err;
+                        }
+                        
+                        Lucene.Net.Index.SegmentInfos.Message("primary Exception on '" + segmentFileName + "': " + err + "'; will retry: retry=" + retry + "; gen = " + gen);
+                        
+                        if (!retry && gen > 1)
+                        {
+                            
+                            // This is our first time trying this segments
+                            // file (because retry is false), and, there is
+                            // possibly a segments_(N-1) (because gen > 1).
+                            // So, check if the segments_(N-1) exists and
+                            // try it if so:
+                            System.String prevSegmentFileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen - 1);
+                            
+                            bool prevExists;
+                            prevExists = directory.FileExists(prevSegmentFileName);
+                            
+                            if (prevExists)
+                            {
+                                Lucene.Net.Index.SegmentInfos.Message("fallback to prior segment file '" + prevSegmentFileName + "'");
+                                try
+                                {
+                                    System.Object v = DoBody(prevSegmentFileName);
+                                    if (exc != null)
+                                    {
+                                        Lucene.Net.Index.SegmentInfos.Message("success on fallback " + prevSegmentFileName);
+                                    }
+                                    return v;
+                                }
+                                catch (System.IO.IOException err2)
+                                {
+                                    Lucene.Net.Index.SegmentInfos.Message("secondary Exception on '" + prevSegmentFileName + "': " + err2 + "'; will retry");
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+            
+            /// <summary> Subclass must implement this.  The assumption is an
+            /// IOException will be thrown if something goes wrong
+            /// during the processing that could have been caused by
+            /// a writer committing.
+            /// </summary>
+            public /*internal*/ abstract System.Object DoBody(System.String segmentFileName);
+        }
+        
+        /// <summary> Returns a new SegmentInfos containing the SegmentInfo
+        /// instances in the specified range first (inclusive) to
+        /// last (exclusive), so total number of segments returned
+        /// is last-first.
+        /// </summary>
+        public SegmentInfos Range(int first, int last)
+        {
+            SegmentInfos infos = new SegmentInfos();
+            infos.AddRange(this.GetRange(first, last - first));
+            return infos;
+        }
+        
+        // Carry over generation numbers from another SegmentInfos
+        internal void  UpdateGeneration(SegmentInfos other)
+        {
+            lastGeneration = other.lastGeneration;
+            generation = other.generation;
+            version = other.version;
+        }
+        
+        internal void  RollbackCommit(Directory dir)
+        {
+            if (pendingSegnOutput != null)
+            {
+                try
+                {
+                    pendingSegnOutput.Close();
+                }
+                catch (System.Exception)
+                {
+                    // Suppress so we keep throwing the original exception
+                    // in our caller
+                }
+                
+                // Must carefully compute fileName from "generation"
+                // since lastGeneration isn't incremented:
+                try
+                {
+                    System.String segmentFileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", generation);
+                    dir.DeleteFile(segmentFileName);
+                }
+                catch (System.Exception)
+                {
+                    // Suppress so we keep throwing the original exception
+                    // in our caller
+                }
+                pendingSegnOutput = null;
+            }
+        }
+        
+        /// <summary>Call this to start a commit.  This writes the new
+        /// segments file, but writes an invalid checksum at the
+        /// end, so that it is not visible to readers.  Once this
+        /// is called you must call <see cref="FinishCommit" /> to complete
+        /// the commit or <see cref="RollbackCommit" /> to abort it. 
+        /// </summary>
+        internal void  PrepareCommit(Directory dir)
+        {
+            if (pendingSegnOutput != null)
+                throw new System.SystemException("prepareCommit was already called");
+            Write(dir);
+        }
+        
+        /// <summary>Returns all file names referenced by SegmentInfo
+        /// instances matching the provided Directory (ie files
+        /// associated with any "external" segments are skipped).
+        /// The returned collection is recomputed on each
+        /// invocation.  
+        /// </summary>
         public System.Collections.Generic.ICollection<string> Files(Directory dir, bool includeSegmentsFile)
-		{
+        {
             System.Collections.Generic.HashSet<string> files = new System.Collections.Generic.HashSet<string>();
-			if (includeSegmentsFile)
-			{
+            if (includeSegmentsFile)
+            {
                 files.Add(GetCurrentSegmentFileName());
-			}
-			int size = Count;
-			for (int i = 0; i < size; i++)
-			{
-				SegmentInfo info = Info(i);
-				if (info.dir == dir)
-				{
+            }
+            int size = Count;
+            for (int i = 0; i < size; i++)
+            {
+                SegmentInfo info = Info(i);
+                if (info.dir == dir)
+                {
                     files.UnionWith(Info(i).Files());
-				}
-			}
-			return files;
-		}
-		
-		internal void  FinishCommit(Directory dir)
-		{
-			if (pendingSegnOutput == null)
-				throw new System.SystemException("prepareCommit was not called");
-			bool success = false;
-			try
-			{
-				pendingSegnOutput.FinishCommit();
-				pendingSegnOutput.Close();
-				pendingSegnOutput = null;
-				success = true;
-			}
-			finally
-			{
-				if (!success)
-					RollbackCommit(dir);
-			}
-			
-			// NOTE: if we crash here, we have left a segments_N
-			// file in the directory in a possibly corrupt state (if
-			// some bytes made it to stable storage and others
-			// didn't).  But, the segments_N file includes checksum
-			// at the end, which should catch this case.  So when a
-			// reader tries to read it, it will throw a
-			// CorruptIndexException, which should cause the retry
-			// logic in SegmentInfos to kick in and load the last
-			// good (previous) segments_N-1 file.
-			
-			System.String fileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", generation);
-			success = false;
-			try
-			{
-				dir.Sync(fileName);
-				success = true;
-			}
-			finally
-			{
-				if (!success)
-				{
-					try
-					{
-						dir.DeleteFile(fileName);
-					}
-					catch (System.Exception)
-					{
-						// Suppress so we keep throwing the original exception
-					}
-				}
-			}
-			
-			lastGeneration = generation;
-			
-			try
-			{
-				IndexOutput genOutput = dir.CreateOutput(IndexFileNames.SEGMENTS_GEN);
-				try
-				{
-					genOutput.WriteInt(FORMAT_LOCKLESS);
-					genOutput.WriteLong(generation);
-					genOutput.WriteLong(generation);
-				}
-				finally
-				{
-					genOutput.Close();
-				}
-			}
-			catch (System.Exception)
-			{
-				// It's OK if we fail to write this file since it's
-				// used only as one of the retry fallbacks.
-			}
-		}
-		
-		/// <summary>Writes &amp; syncs to the Directory dir, taking care to
-		/// remove the segments file on exception 
-		/// </summary>
-		public /*internal*/ void  Commit(Directory dir)
-		{
-			PrepareCommit(dir);
-			FinishCommit(dir);
-		}
-		
-		public System.String SegString(Directory directory)
-		{
-			lock (this)
-			{
-				var buffer = new System.Text.StringBuilder();
-				int count = Count;
-				for (int i = 0; i < count; i++)
-				{
-					if (i > 0)
-					{
-						buffer.Append(' ');
-					}
-					SegmentInfo info = Info(i);
-					buffer.Append(info.SegString(directory));
-					if (info.dir != directory)
-						buffer.Append("**");
-				}
-				return buffer.ToString();
-			}
-		}
+                }
+            }
+            return files;
+        }
+        
+        internal void  FinishCommit(Directory dir)
+        {
+            if (pendingSegnOutput == null)
+                throw new System.SystemException("prepareCommit was not called");
+            bool success = false;
+            try
+            {
+                pendingSegnOutput.FinishCommit();
+                pendingSegnOutput.Close();
+                pendingSegnOutput = null;
+                success = true;
+            }
+            finally
+            {
+                if (!success)
+                    RollbackCommit(dir);
+            }
+            
+            // NOTE: if we crash here, we have left a segments_N
+            // file in the directory in a possibly corrupt state (if
+            // some bytes made it to stable storage and others
+            // didn't).  But, the segments_N file includes checksum
+            // at the end, which should catch this case.  So when a
+            // reader tries to read it, it will throw a
+            // CorruptIndexException, which should cause the retry
+            // logic in SegmentInfos to kick in and load the last
+            // good (previous) segments_N-1 file.
+            
+            System.String fileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", generation);
+            success = false;
+            try
+            {
+                dir.Sync(fileName);
+                success = true;
+            }
+            finally
+            {
+                if (!success)
+                {
+                    try
+                    {
+                        dir.DeleteFile(fileName);
+                    }
+                    catch (System.Exception)
+                    {
+                        // Suppress so we keep throwing the original exception
+                    }
+                }
+            }
+            
+            lastGeneration = generation;
+            
+            try
+            {
+                IndexOutput genOutput = dir.CreateOutput(IndexFileNames.SEGMENTS_GEN);
+                try
+                {
+                    genOutput.WriteInt(FORMAT_LOCKLESS);
+                    genOutput.WriteLong(generation);
+                    genOutput.WriteLong(generation);
+                }
+                finally
+                {
+                    genOutput.Close();
+                }
+            }
+            catch (System.Exception)
+            {
+                // It's OK if we fail to write this file since it's
+                // used only as one of the retry fallbacks.
+            }
+        }
+        
+        /// <summary>Writes &amp; syncs to the Directory dir, taking care to
+        /// remove the segments file on exception 
+        /// </summary>
+        public /*internal*/ void  Commit(Directory dir)
+        {
+            PrepareCommit(dir);
+            FinishCommit(dir);
+        }
+        
+        public System.String SegString(Directory directory)
+        {
+            lock (this)
+            {
+                var buffer = new System.Text.StringBuilder();
+                int count = Count;
+                for (int i = 0; i < count; i++)
+                {
+                    if (i > 0)
+                    {
+                        buffer.Append(' ');
+                    }
+                    SegmentInfo info = Info(i);
+                    buffer.Append(info.SegString(directory));
+                    if (info.dir != directory)
+                        buffer.Append("**");
+                }
+                return buffer.ToString();
+            }
+        }
 
-	    public IDictionary<string, string> UserData
-	    {
-	        get { return userData; }
-	        internal set {
-	            userData = value ?? new HashMap<string, string>();
-	        }
-	    }
+        public IDictionary<string, string> UserData
+        {
+            get { return userData; }
+            internal set {
+                userData = value ?? new HashMap<string, string>();
+            }
+        }
 
-	    /// <summary>Replaces all segments in this instance, but keeps
-		/// generation, version, counter so that future commits
-		/// remain write once.
-		/// </summary>
-		internal void  Replace(SegmentInfos other)
-		{
-			Clear();
-			AddRange(other);
-			lastGeneration = other.lastGeneration;
-		}
-		
-		// Used only for testing
-		public bool HasExternalSegments(Directory dir)
-		{
-			int numSegments = Count;
-			for (int i = 0; i < numSegments; i++)
-				if (Info(i).dir != dir)
-					return true;
-			return false;
+        /// <summary>Replaces all segments in this instance, but keeps
+        /// generation, version, counter so that future commits
+        /// remain write once.
+        /// </summary>
+        internal void  Replace(SegmentInfos other)
+        {
+            Clear();
+            AddRange(other);
+            lastGeneration = other.lastGeneration;
+        }
+        
+        // Used only for testing
+        public bool HasExternalSegments(Directory dir)
+        {
+            int numSegments = Count;
+            for (int i = 0; i < numSegments; i++)
+                if (Info(i).dir != dir)
+                    return true;
+            return false;
         }
 
         #region Lucene.NET (Equals & GetHashCode )
@@ -1070,5 +1070,5 @@ namespace Lucene.Net.Index
             return h;
         }
         #endregion
-	}
+    }
 }
\ No newline at end of file
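
The FindSegmentsFile loop reformatted above trusts segments.gen only when the
generation it records is internally consistent: the writer stores the value
twice, and a mismatch means the file was caught mid-write. A minimal sketch of
that consistency check, assuming SegmentInfos.FORMAT_LOCKLESS is visible to
the caller; the ReadSegmentsGen helper itself is illustrative, not part of
this patch:

    // Illustrative helper: read segments.gen (IndexFileNames.SEGMENTS_GEN in
    // the patch) and return the generation it records, or -1 when the file is
    // missing, has an unexpected format, or its two copies of the generation
    // disagree (a writer was mid-commit).
    private static long ReadSegmentsGen(Lucene.Net.Store.Directory dir)
    {
        Lucene.Net.Store.IndexInput genInput;
        try
        {
            genInput = dir.OpenInput("segments.gen");
        }
        catch (System.IO.IOException)
        {
            return -1; // missing or unreadable: fall back to a directory listing
        }
        try
        {
            if (genInput.ReadInt() != Lucene.Net.Index.SegmentInfos.FORMAT_LOCKLESS)
                return -1; // unknown format: do not trust the contents
            long gen0 = genInput.ReadLong();
            long gen1 = genInput.ReadLong();
            return gen0 == gen1 ? gen0 : -1; // both copies must agree
        }
        catch (System.IO.IOException)
        {
            return -1; // torn read: the caller may retry
        }
        finally
        {
            genInput.Close();
        }
    }

A -1 from this check sends callers to the loop's other two strategies: taking
the highest segments_N from a directory listing, or, when both caches look
stale, advancing the generation speculatively as the lookahead fallback does.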

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/SegmentMergeInfo.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/SegmentMergeInfo.cs b/src/core/Index/SegmentMergeInfo.cs
index bad0aad..9bfa92a 100644
--- a/src/core/Index/SegmentMergeInfo.cs
+++ b/src/core/Index/SegmentMergeInfo.cs
@@ -19,77 +19,77 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	sealed class SegmentMergeInfo : IDisposable
-	{
-		internal Term term;
-		internal int base_Renamed;
-		internal int ord; // the position of the segment in a MultiReader
-		internal TermEnum termEnum;
-		internal IndexReader reader;
-		internal int delCount;
-		private TermPositions postings; // use getPositions()
-		private int[] docMap; // use getDocMap()
+    
+    sealed class SegmentMergeInfo : IDisposable
+    {
+        internal Term term;
+        internal int base_Renamed;
+        internal int ord; // the position of the segment in a MultiReader
+        internal TermEnum termEnum;
+        internal IndexReader reader;
+        internal int delCount;
+        private TermPositions postings; // use getPositions()
+        private int[] docMap; // use getDocMap()
 
-	    private bool isDisposed;
-		
-		internal SegmentMergeInfo(int b, TermEnum te, IndexReader r)
-		{
-			base_Renamed = b;
-			reader = r;
-			termEnum = te;
-			term = te.Term;
-		}
-		
-		// maps around deleted docs
-		internal int[] GetDocMap()
-		{
-			if (docMap == null)
-			{
-				delCount = 0;
-				// build array which maps document numbers around deletions 
-				if (reader.HasDeletions)
-				{
-					int maxDoc = reader.MaxDoc;
-					docMap = new int[maxDoc];
-					int j = 0;
-					for (int i = 0; i < maxDoc; i++)
-					{
-						if (reader.IsDeleted(i))
-						{
-							delCount++;
-							docMap[i] = - 1;
-						}
-						else
-							docMap[i] = j++;
-					}
-				}
-			}
-			return docMap;
-		}
-		
-		internal TermPositions GetPositions()
-		{
-			if (postings == null)
-			{
-				postings = reader.TermPositions();
-			}
-			return postings;
-		}
-		
-		internal bool Next()
-		{
-			if (termEnum.Next())
-			{
-				term = termEnum.Term;
-				return true;
-			}
-			else
-			{
-				term = null;
-				return false;
-			}
-		}
+        private bool isDisposed;
+        
+        internal SegmentMergeInfo(int b, TermEnum te, IndexReader r)
+        {
+            base_Renamed = b;
+            reader = r;
+            termEnum = te;
+            term = te.Term;
+        }
+        
+        // maps around deleted docs
+        internal int[] GetDocMap()
+        {
+            if (docMap == null)
+            {
+                delCount = 0;
+                // build array which maps document numbers around deletions 
+                if (reader.HasDeletions)
+                {
+                    int maxDoc = reader.MaxDoc;
+                    docMap = new int[maxDoc];
+                    int j = 0;
+                    for (int i = 0; i < maxDoc; i++)
+                    {
+                        if (reader.IsDeleted(i))
+                        {
+                            delCount++;
+                            docMap[i] = - 1;
+                        }
+                        else
+                            docMap[i] = j++;
+                    }
+                }
+            }
+            return docMap;
+        }
+        
+        internal TermPositions GetPositions()
+        {
+            if (postings == null)
+            {
+                postings = reader.TermPositions();
+            }
+            return postings;
+        }
+        
+        internal bool Next()
+        {
+            if (termEnum.Next())
+            {
+                term = termEnum.Term;
+                return true;
+            }
+            else
+            {
+                term = null;
+                return false;
+            }
+        }
 
         public void Dispose()
         {
@@ -104,5 +104,5 @@ namespace Lucene.Net.Index
 
             isDisposed = true;
         }
-	}
+    }
 }
\ No newline at end of file
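
GetDocMap above compacts document numbers around deletions: surviving docs are
renumbered consecutively and each deleted slot maps to -1. A short sketch of
how a consumer would apply such a map when translating segment-local doc ids
into a merged numbering; RemapDoc is a hypothetical helper, not part of this
patch:

    // Hypothetical helper: translate a segment-local doc id into the merged
    // index's numbering. A null docMap means the segment has no deletions.
    internal static int RemapDoc(int[] docMap, int docBase, int segmentDoc)
    {
        if (docMap == null)
            return docBase + segmentDoc; // no deletions: ids pass straight through
        int mapped = docMap[segmentDoc];
        return mapped == -1 ? -1 : docBase + mapped; // -1 marks a deleted doc
    }

base_Renamed plays the role of docBase here: it is the offset of the segment's
first document within the enclosing MultiReader.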

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/SegmentMergeQueue.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/SegmentMergeQueue.cs b/src/core/Index/SegmentMergeQueue.cs
index 1b48584..25cbf13 100644
--- a/src/core/Index/SegmentMergeQueue.cs
+++ b/src/core/Index/SegmentMergeQueue.cs
@@ -20,28 +20,28 @@ using Lucene.Net.Util;
 
 namespace Lucene.Net.Index
 {
-	
-	sealed class SegmentMergeQueue : PriorityQueue<SegmentMergeInfo>, IDisposable
-	{
-		internal SegmentMergeQueue(int size)
-		{
-			Initialize(size);
-		}
+    
+    sealed class SegmentMergeQueue : PriorityQueue<SegmentMergeInfo>, IDisposable
+    {
+        internal SegmentMergeQueue(int size)
+        {
+            Initialize(size);
+        }
 
         public override bool LessThan(SegmentMergeInfo stiA, SegmentMergeInfo stiB)
-		{
-			int comparison = stiA.term.CompareTo(stiB.term);
-			if (comparison == 0)
-				return stiA.base_Renamed < stiB.base_Renamed;
-			else
-				return comparison < 0;
-		}
+        {
+            int comparison = stiA.term.CompareTo(stiB.term);
+            if (comparison == 0)
+                return stiA.base_Renamed < stiB.base_Renamed;
+            else
+                return comparison < 0;
+        }
 
-	    public void Dispose()
-	    {
+        public void Dispose()
+        {
             // Move to protected method if class becomes unsealed
             while (Top() != null)
                 Pop().Dispose();
-	    }
-	}
+        }
+    }
 }
\ No newline at end of file
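
LessThan above orders the queue by term, falling back to base_Renamed so that
segments positioned on equal terms pop in segment order. That invariant is
what lets a merger drain every segment sitting on the smallest term in one
pass. A sketch of that k-way merge pattern, with queue method names assumed
from Lucene.Net's PriorityQueue and MergeTermInfo standing in for the real
per-term work:

    // Sketch only: pop the smallest term, gather every other segment
    // positioned on the same term, process them together, then advance and
    // re-queue the survivors.
    internal static void MergeAllTerms(SegmentMergeQueue queue)
    {
        while (queue.Size() > 0)
        {
            SegmentMergeInfo top = queue.Pop();
            Term smallestTerm = top.term;
            var match = new System.Collections.Generic.List<SegmentMergeInfo> { top };
            while (queue.Size() > 0 && smallestTerm.CompareTo(queue.Top().term) == 0)
                match.Add(queue.Pop());

            MergeTermInfo(smallestTerm, match); // hypothetical per-term merge step

            foreach (SegmentMergeInfo smi in match)
            {
                if (smi.Next())      // advance the enumerator; refreshes smi.term
                    queue.Add(smi);  // still has terms: back into the queue
                else
                    smi.Dispose();   // this segment is exhausted
            }
        }
    }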


[09/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
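
The IndexWriter.cs diff below reindents the class's summary documentation,
which walks through the writer lifecycle: open the writer (optionally creating
the index), add or update documents, commit to publish changes, and close to
release the write lock. A minimal usage sketch of that lifecycle, assuming the
2.9/3.x-era API surface the comments reference; the index path, field layout
and analyzer choice are illustrative:

    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Documents;
    using Lucene.Net.Index;
    using Lucene.Net.Store;

    public static class IndexWriterLifecycleDemo
    {
        public static void Main()
        {
            Directory dir = FSDirectory.Open(new System.IO.DirectoryInfo("demo-index"));

            // create=true starts a fresh index; readers already open keep
            // searching their point-in-time snapshot until they re-open.
            var writer = new IndexWriter(dir,
                new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_29),
                true, IndexWriter.MaxFieldLength.UNLIMITED);

            var doc = new Document();
            doc.Add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.Add(new Field("body", "hello lucene", Field.Store.NO, Field.Index.ANALYZED));
            writer.AddDocument(doc);

            writer.Commit(); // flush buffered docs and publish a new segments_N
            writer.Close();  // release write.lock; Rollback() would discard instead
        }
    }
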
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/IndexWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IndexWriter.cs b/src/core/Index/IndexWriter.cs
index dda1738..049d821 100644
--- a/src/core/Index/IndexWriter.cs
+++ b/src/core/Index/IndexWriter.cs
@@ -33,109 +33,109 @@ using Similarity = Lucene.Net.Search.Similarity;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>An <c>IndexWriter</c> creates and maintains an index.
-	/// <p/>The <c>create</c> argument to the 
+    
+    /// <summary>An <c>IndexWriter</c> creates and maintains an index.
+    /// <p/>The <c>create</c> argument to the 
     /// <see cref="IndexWriter(Directory, Analyzer, bool, MaxFieldLength)">constructor</see> determines 
-	/// whether a new index is created, or whether an existing index is
-	/// opened.  Note that you can open an index with <c>create=true</c>
-	/// even while readers are using the index.  The old readers will 
-	/// continue to search the "point in time" snapshot they had opened, 
-	/// and won't see the newly created index until they re-open.  There are
-	/// also <see cref="IndexWriter(Directory, Analyzer, MaxFieldLength)">constructors</see>
-	/// with no <c>create</c> argument which will create a new index
-	/// if there is not already an index at the provided path and otherwise 
-	/// open the existing index.<p/>
-	/// <p/>In either case, documents are added with <see cref="AddDocument(Document)" />
-	/// and removed with <see cref="DeleteDocuments(Term)" /> or
-	/// <see cref="DeleteDocuments(Query)" />. A document can be updated with
-	/// <see cref="UpdateDocument(Term, Document)" /> (which just deletes
-	/// and then adds the entire document). When finished adding, deleting 
-	/// and updating documents, <see cref="Close()" /> should be called.<p/>
-	/// <a name="flush"></a>
-	/// <p/>These changes are buffered in memory and periodically
-	/// flushed to the <see cref="Directory" /> (during the above method
-	/// calls).  A flush is triggered when there are enough
-	/// buffered deletes (see <see cref="SetMaxBufferedDeleteTerms" />)
-	/// or enough added documents since the last flush, whichever
-	/// is sooner.  For the added documents, flushing is triggered
-	/// either by RAM usage of the documents (see 
-	/// <see cref="SetRAMBufferSizeMB" />) or the number of added documents.
-	/// The default is to flush when RAM usage hits 16 MB.  For
-	/// best indexing speed you should flush by RAM usage with a
-	/// large RAM buffer.  Note that flushing just moves the
-	/// internal buffered state in IndexWriter into the index, but
-	/// these changes are not visible to IndexReader until either
-	/// <see cref="Commit()" /> or <see cref="Close()" /> is called.  A flush may
-	/// also trigger one or more segment merges which by default
-	/// run with a background thread so as not to block the
-	/// addDocument calls (see <a href="#mergePolicy">below</a>
-	/// for changing the <see cref="MergeScheduler" />).
-	/// <p/>
-	/// If an index will not have more documents added for a while and optimal search
-	/// performance is desired, then either the full <see cref="Optimize()" />
-	/// method or partial <see cref="Optimize(int)" /> method should be
-	/// called before the index is closed.
-	/// <p/>
-	/// Opening an <c>IndexWriter</c> creates a lock file for the directory in use. Trying to open
-	/// another <c>IndexWriter</c> on the same directory will lead to a
-	/// <see cref="LockObtainFailedException" />. The <see cref="LockObtainFailedException" />
-	/// is also thrown if an IndexReader on the same directory is used to delete documents
-	/// from the index.<p/>
-	/// </summary>
-	/// <summary><a name="deletionPolicy"></a>
-	/// <p/>Expert: <c>IndexWriter</c> allows an optional
-	/// <see cref="IndexDeletionPolicy" /> implementation to be
-	/// specified.  You can use this to control when prior commits
-	/// are deleted from the index.  The default policy is <see cref="KeepOnlyLastCommitDeletionPolicy" />
-	/// which removes all prior
-	/// commits as soon as a new commit is done (this matches
-	/// behavior before 2.2).  Creating your own policy can allow
-	/// you to explicitly keep previous "point in time" commits
-	/// alive in the index for some time, to allow readers to
-	/// refresh to the new commit without having the old commit
-	/// deleted out from under them.  This is necessary on
-	/// filesystems like NFS that do not support "delete on last
-	/// close" semantics, which Lucene's "point in time" search
-	/// normally relies on. <p/>
-	/// <a name="mergePolicy"></a> <p/>Expert:
-	/// <c>IndexWriter</c> allows you to separately change
-	/// the <see cref="MergePolicy" /> and the <see cref="MergeScheduler" />.
-	/// The <see cref="MergePolicy" /> is invoked whenever there are
-	/// changes to the segments in the index.  Its role is to
-	/// select which merges to do, if any, and return a <see cref="Index.MergePolicy.MergeSpecification" />
-	/// describing the merges.  It
-	/// also selects merges to do for optimize().  (The default is
-	/// <see cref="LogByteSizeMergePolicy" />.  Then, the <see cref="MergeScheduler" />
-	/// is invoked with the requested merges and
-	/// it decides when and how to run the merges.  The default is
-	/// <see cref="ConcurrentMergeScheduler" />. <p/>
-	/// <a name="OOME"></a><p/><b>NOTE</b>: if you hit an
-	/// OutOfMemoryError then IndexWriter will quietly record this
-	/// fact and block all future segment commits.  This is a
-	/// defensive measure in case any internal state (buffered
-	/// documents and deletions) were corrupted.  Any subsequent
-	/// calls to <see cref="Commit()" /> will throw an
-	/// IllegalStateException.  The only course of action is to
-	/// call <see cref="Close()" />, which internally will call <see cref="Rollback()" />
-	///, to undo any changes to the index since the
-	/// last commit.  You can also just call <see cref="Rollback()" />
-	/// directly.<p/>
-	/// <a name="thread-safety"></a><p/><b>NOTE</b>: 
+    /// whether a new index is created, or whether an existing index is
+    /// opened.  Note that you can open an index with <c>create=true</c>
+    /// even while readers are using the index.  The old readers will 
+    /// continue to search the "point in time" snapshot they had opened, 
+    /// and won't see the newly created index until they re-open.  There are
+    /// also <see cref="IndexWriter(Directory, Analyzer, MaxFieldLength)">constructors</see>
+    /// with no <c>create</c> argument which will create a new index
+    /// if there is not already an index at the provided path and otherwise 
+    /// open the existing index.<p/>
+    /// <p/>In either case, documents are added with <see cref="AddDocument(Document)" />
+    /// and removed with <see cref="DeleteDocuments(Term)" /> or
+    /// <see cref="DeleteDocuments(Query)" />. A document can be updated with
+    /// <see cref="UpdateDocument(Term, Document)" /> (which just deletes
+    /// and then adds the entire document). When finished adding, deleting 
+    /// and updating documents, <see cref="Close()" /> should be called.<p/>
+    /// <a name="flush"></a>
+    /// <p/>These changes are buffered in memory and periodically
+    /// flushed to the <see cref="Directory" /> (during the above method
+    /// calls).  A flush is triggered when there are enough
+    /// buffered deletes (see <see cref="SetMaxBufferedDeleteTerms" />)
+    /// or enough added documents since the last flush, whichever
+    /// is sooner.  For the added documents, flushing is triggered
+    /// either by RAM usage of the documents (see 
+    /// <see cref="SetRAMBufferSizeMB" />) or the number of added documents.
+    /// The default is to flush when RAM usage hits 16 MB.  For
+    /// best indexing speed you should flush by RAM usage with a
+    /// large RAM buffer.  Note that flushing just moves the
+    /// internal buffered state in IndexWriter into the index, but
+    /// these changes are not visible to IndexReader until either
+    /// <see cref="Commit()" /> or <see cref="Close()" /> is called.  A flush may
+    /// also trigger one or more segment merges which by default
+    /// run with a background thread so as not to block the
+    /// addDocument calls (see <a href="#mergePolicy">below</a>
+    /// for changing the <see cref="MergeScheduler" />).
+    /// <p/>
+    /// If an index will not have more documents added for a while and optimal search
+    /// performance is desired, then either the full <see cref="Optimize()" />
+    /// method or partial <see cref="Optimize(int)" /> method should be
+    /// called before the index is closed.
+    /// <p/>
+    /// Opening an <c>IndexWriter</c> creates a lock file for the directory in use. Trying to open
+    /// another <c>IndexWriter</c> on the same directory will lead to a
+    /// <see cref="LockObtainFailedException" />. The <see cref="LockObtainFailedException" />
+    /// is also thrown if an IndexReader on the same directory is used to delete documents
+    /// from the index.<p/>
+    /// </summary>
+    /// <summary><a name="deletionPolicy"></a>
+    /// <p/>Expert: <c>IndexWriter</c> allows an optional
+    /// <see cref="IndexDeletionPolicy" /> implementation to be
+    /// specified.  You can use this to control when prior commits
+    /// are deleted from the index.  The default policy is <see cref="KeepOnlyLastCommitDeletionPolicy" />
+    /// which removes all prior
+    /// commits as soon as a new commit is done (this matches
+    /// behavior before 2.2).  Creating your own policy can allow
+    /// you to explicitly keep previous "point in time" commits
+    /// alive in the index for some time, to allow readers to
+    /// refresh to the new commit without having the old commit
+    /// deleted out from under them.  This is necessary on
+    /// filesystems like NFS that do not support "delete on last
+    /// close" semantics, which Lucene's "point in time" search
+    /// normally relies on. <p/>
+    /// <a name="mergePolicy"></a> <p/>Expert:
+    /// <c>IndexWriter</c> allows you to separately change
+    /// the <see cref="MergePolicy" /> and the <see cref="MergeScheduler" />.
+    /// The <see cref="MergePolicy" /> is invoked whenever there are
+    /// changes to the segments in the index.  Its role is to
+    /// select which merges to do, if any, and return a <see cref="Index.MergePolicy.MergeSpecification" />
+    /// describing the merges.  It
+    /// also selects merges to do for optimize().  (The default is
+    /// <see cref="LogByteSizeMergePolicy" />.  Then, the <see cref="MergeScheduler" />
+    /// is invoked with the requested merges and
+    /// it decides when and how to run the merges.  The default is
+    /// <see cref="ConcurrentMergeScheduler" />. <p/>
+    /// <a name="OOME"></a><p/><b>NOTE</b>: if you hit an
+    /// OutOfMemoryError then IndexWriter will quietly record this
+    /// fact and block all future segment commits.  This is a
+    /// defensive measure in case any internal state (buffered
+    /// documents and deletions) were corrupted.  Any subsequent
+    /// calls to <see cref="Commit()" /> will throw an
+    /// IllegalStateException.  The only course of action is to
+    /// call <see cref="Close()" />, which internally will call <see cref="Rollback()" />
+    ///, to undo any changes to the index since the
+    /// last commit.  You can also just call <see cref="Rollback()" />
+    /// directly.<p/>
+    /// <a name="thread-safety"></a><p/><b>NOTE</b>: 
     /// <see cref="IndexWriter" /> instances are completely thread
-	/// safe, meaning multiple threads can call any of its
-	/// methods, concurrently.  If your application requires
-	/// external synchronization, you should <b>not</b>
-	/// synchronize on the <c>IndexWriter</c> instance as
-	/// this may cause deadlock; use your own (non-Lucene) objects
-	/// instead. <p/>
-	/// <b>NOTE:</b> if you call
-	/// <c>Thread.Interrupt()</c> on a thread that's within
-	/// IndexWriter, IndexWriter will try to catch this (eg, if
-	/// it's in a Wait() or Thread.Sleep()), and will then throw
-	/// the unchecked exception <see cref="System.Threading.ThreadInterruptedException"/>
-	/// and <b>clear</b> the interrupt status on the thread<p/>
-	/// </summary>
+    /// safe, meaning multiple threads can call any of its
+    /// methods, concurrently.  If your application requires
+    /// external synchronization, you should <b>not</b>
+    /// synchronize on the <c>IndexWriter</c> instance as
+    /// this may cause deadlock; use your own (non-Lucene) objects
+    /// instead. <p/>
+    /// <b>NOTE:</b> if you call
+    /// <c>Thread.Interrupt()</c> on a thread that's within
+    /// IndexWriter, IndexWriter will try to catch this (eg, if
+    /// it's in a Wait() or Thread.Sleep()), and will then throw
+    /// the unchecked exception <see cref="System.Threading.ThreadInterruptedException"/>
+    /// and <b>clear</b> the interrupt status on the thread<p/>
+    /// </summary>
 
     /*
     * Clarification: Check Points (and commits)
@@ -158,367 +158,367 @@ namespace Lucene.Net.Index
     * keeps track of the last non commit checkpoint.
     */
     public class IndexWriter : System.IDisposable
-	{
-		private void  InitBlock()
-		{
-			similarity = Search.Similarity.Default;
-			mergePolicy = new LogByteSizeMergePolicy(this);
-			readerPool = new ReaderPool(this);
-		}
-		
-		/// <summary> Default value for the write lock timeout (1,000).</summary>
-		/// <seealso cref="DefaultWriteLockTimeout">
-		/// </seealso>
-		public static long WRITE_LOCK_TIMEOUT = 1000;
-		
-		private long writeLockTimeout = WRITE_LOCK_TIMEOUT;
-		
-		/// <summary> Name of the write lock in the index.</summary>
-		public const System.String WRITE_LOCK_NAME = "write.lock";
-		
-		/// <summary> Value to denote a flush trigger is disabled</summary>
-		public const int DISABLE_AUTO_FLUSH = - 1;
-		
-		/// <summary> Disabled by default (because IndexWriter flushes by RAM usage
-		/// by default). Change using <see cref="SetMaxBufferedDocs(int)" />.
-		/// </summary>
-		public static readonly int DEFAULT_MAX_BUFFERED_DOCS = DISABLE_AUTO_FLUSH;
-		
-		/// <summary> Default value is 16 MB (which means flush when buffered
-		/// docs consume 16 MB RAM).  Change using <see cref="SetRAMBufferSizeMB" />.
-		/// </summary>
-		public const double DEFAULT_RAM_BUFFER_SIZE_MB = 16.0;
-		
-		/// <summary> Disabled by default (because IndexWriter flushes by RAM usage
-		/// by default). Change using <see cref="SetMaxBufferedDeleteTerms(int)" />.
-		/// </summary>
-		public static readonly int DEFAULT_MAX_BUFFERED_DELETE_TERMS = DISABLE_AUTO_FLUSH;
-		
-		/// <summary> Default value is 10,000. Change using <see cref="SetMaxFieldLength(int)" />.</summary>
-		public const int DEFAULT_MAX_FIELD_LENGTH = 10000;
-		
-		/// <summary> Default value is 128. Change using <see cref="TermIndexInterval" />.</summary>
-		public const int DEFAULT_TERM_INDEX_INTERVAL = 128;
-		
-		/// <summary> Absolute hard maximum length for a term.  If a term
-		/// arrives from the analyzer longer than this length, it
-		/// is skipped and a message is printed to infoStream, if
-		/// set (see <see cref="SetInfoStream" />).
-		/// </summary>
-		public static readonly int MAX_TERM_LENGTH;
-		
-		// The normal read buffer size defaults to 1024, but
-		// increasing this during merging seems to yield
-		// performance gains.  However we don't want to increase
-		// it too much because there are quite a few
-		// BufferedIndexInputs created during merging.  See
-		// LUCENE-888 for details.
-		private const int MERGE_READ_BUFFER_SIZE = 4096;
-		
-		// Used for printing messages
-		private static System.Object MESSAGE_ID_LOCK = new System.Object();
-		private static int MESSAGE_ID = 0;
-		private int messageID = - 1;
-		private volatile bool hitOOM;
-		
-		private Directory directory; // where this index resides
-		private Analyzer analyzer; // how to analyze text
-		
-		private Similarity similarity; // how to normalize
-		
-		private volatile uint changeCount; // increments every time a change is completed
-		private long lastCommitChangeCount; // last changeCount that was committed
-		
-		private SegmentInfos rollbackSegmentInfos; // segmentInfos we will fallback to if the commit fails
-		private HashMap<SegmentInfo, int?> rollbackSegments;
-		
-		internal volatile SegmentInfos pendingCommit; // set when a commit is pending (after prepareCommit() & before commit())
-		internal volatile uint pendingCommitChangeCount;
-		
-		private SegmentInfos localRollbackSegmentInfos; // segmentInfos we will fallback to if the commit fails
-		private int localFlushedDocCount; // saved docWriter.getFlushedDocCount during local transaction
-		
-		private SegmentInfos segmentInfos = new SegmentInfos(); // the segments
+    {
+        private void  InitBlock()
+        {
+            similarity = Search.Similarity.Default;
+            mergePolicy = new LogByteSizeMergePolicy(this);
+            readerPool = new ReaderPool(this);
+        }
+        
+        /// <summary> Default value for the write lock timeout (1,000).</summary>
+        /// <seealso cref="DefaultWriteLockTimeout">
+        /// </seealso>
+        public static long WRITE_LOCK_TIMEOUT = 1000;
+        
+        private long writeLockTimeout = WRITE_LOCK_TIMEOUT;
+        
+        /// <summary> Name of the write lock in the index.</summary>
+        public const System.String WRITE_LOCK_NAME = "write.lock";
+        
+        /// <summary> Value to denote a flush trigger is disabled</summary>
+        public const int DISABLE_AUTO_FLUSH = - 1;
+        
+        /// <summary> Disabled by default (because IndexWriter flushes by RAM usage
+        /// by default). Change using <see cref="SetMaxBufferedDocs(int)" />.
+        /// </summary>
+        public static readonly int DEFAULT_MAX_BUFFERED_DOCS = DISABLE_AUTO_FLUSH;
+        
+        /// <summary> Default value is 16 MB (which means flush when buffered
+        /// docs consume 16 MB RAM).  Change using <see cref="SetRAMBufferSizeMB" />.
+        /// </summary>
+        public const double DEFAULT_RAM_BUFFER_SIZE_MB = 16.0;
+        
+        /// <summary> Disabled by default (because IndexWriter flushes by RAM usage
+        /// by default). Change using <see cref="SetMaxBufferedDeleteTerms(int)" />.
+        /// </summary>
+        public static readonly int DEFAULT_MAX_BUFFERED_DELETE_TERMS = DISABLE_AUTO_FLUSH;
+        
+        /// <summary> Default value is 10,000. Change using <see cref="SetMaxFieldLength(int)" />.</summary>
+        public const int DEFAULT_MAX_FIELD_LENGTH = 10000;
+        
+        /// <summary> Default value is 128. Change using <see cref="TermIndexInterval" />.</summary>
+        public const int DEFAULT_TERM_INDEX_INTERVAL = 128;
+        
+        /// <summary> Absolute hard maximum length for a term.  If a term
+        /// arrives from the analyzer longer than this length, it
+        /// is skipped and a message is printed to infoStream, if
+        /// set (see <see cref="SetInfoStream" />).
+        /// </summary>
+        public static readonly int MAX_TERM_LENGTH;
+        
+        // The normal read buffer size defaults to 1024, but
+        // increasing this during merging seems to yield
+        // performance gains.  However we don't want to increase
+        // it too much because there are quite a few
+        // BufferedIndexInputs created during merging.  See
+        // LUCENE-888 for details.
+        private const int MERGE_READ_BUFFER_SIZE = 4096;
+        
+        // Used for printing messages
+        private static System.Object MESSAGE_ID_LOCK = new System.Object();
+        private static int MESSAGE_ID = 0;
+        private int messageID = - 1;
+        private volatile bool hitOOM;
+        
+        private Directory directory; // where this index resides
+        private Analyzer analyzer; // how to analyze text
+        
+        private Similarity similarity; // how to normalize
+        
+        private volatile uint changeCount; // increments every time a change is completed
+        private long lastCommitChangeCount; // last changeCount that was committed
+        
+        private SegmentInfos rollbackSegmentInfos; // segmentInfos we will fallback to if the commit fails
+        private HashMap<SegmentInfo, int?> rollbackSegments;
+        
+        internal volatile SegmentInfos pendingCommit; // set when a commit is pending (after prepareCommit() & before commit())
+        internal volatile uint pendingCommitChangeCount;
+        
+        private SegmentInfos localRollbackSegmentInfos; // segmentInfos we will fallback to if the commit fails
+        private int localFlushedDocCount; // saved docWriter.getFlushedDocCount during local transaction
+        
+        private SegmentInfos segmentInfos = new SegmentInfos(); // the segments
         private int optimizeMaxNumSegments;
 
-		private DocumentsWriter docWriter;
-		private IndexFileDeleter deleter;
+        private DocumentsWriter docWriter;
+        private IndexFileDeleter deleter;
 
         private ISet<SegmentInfo> segmentsToOptimize = Lucene.Net.Support.Compatibility.SetFactory.CreateHashSet<SegmentInfo>(); // used by optimize to note those needing optimization
-		
-		private Lock writeLock;
-		
-		private int termIndexInterval = DEFAULT_TERM_INDEX_INTERVAL;
-		
-		private bool closed;
-		private bool closing;
-		
-		// Holds all SegmentInfo instances currently involved in
-		// merges
+        
+        private Lock writeLock;
+        
+        private int termIndexInterval = DEFAULT_TERM_INDEX_INTERVAL;
+        
+        private bool closed;
+        private bool closing;
+        
+        // Holds all SegmentInfo instances currently involved in
+        // merges
         private HashSet<SegmentInfo> mergingSegments = new HashSet<SegmentInfo>();
-		
-		private MergePolicy mergePolicy;
-		private MergeScheduler mergeScheduler = new ConcurrentMergeScheduler();
+        
+        private MergePolicy mergePolicy;
+        private MergeScheduler mergeScheduler = new ConcurrentMergeScheduler();
         private LinkedList<MergePolicy.OneMerge> pendingMerges = new LinkedList<MergePolicy.OneMerge>();
-		private ISet<MergePolicy.OneMerge> runningMerges = Lucene.Net.Support.Compatibility.SetFactory.CreateHashSet<MergePolicy.OneMerge>();
-		private IList<MergePolicy.OneMerge> mergeExceptions = new List<MergePolicy.OneMerge>();
-		private long mergeGen;
-		private bool stopMerges;
-		
-		private int flushCount;
-		private int flushDeletesCount;
-		
-		// Used to only allow one addIndexes to proceed at once
-		// TODO: use ReadWriteLock once we are on 5.0
-		private int readCount; // count of how many threads are holding read lock
-		private ThreadClass writeThread; // non-null if any thread holds write lock
-		internal ReaderPool readerPool;
-		private int upgradeCount;
+        private ISet<MergePolicy.OneMerge> runningMerges = Lucene.Net.Support.Compatibility.SetFactory.CreateHashSet<MergePolicy.OneMerge>();
+        private IList<MergePolicy.OneMerge> mergeExceptions = new List<MergePolicy.OneMerge>();
+        private long mergeGen;
+        private bool stopMerges;
+        
+        private int flushCount;
+        private int flushDeletesCount;
+        
+        // Used to only allow one addIndexes to proceed at once
+        // TODO: use ReadWriteLock once we are on 5.0
+        private int readCount; // count of how many threads are holding read lock
+        private ThreadClass writeThread; // non-null if any thread holds write lock
+        internal ReaderPool readerPool;
+        private int upgradeCount;
 
         private int readerTermsIndexDivisor = IndexReader.DEFAULT_TERMS_INDEX_DIVISOR;
-		
-		// This is a "write once" variable (like the organic dye
-		// on a DVD-R that may or may not be heated by a laser and
-		// then cooled to permanently record the event): it's
-		// false, until getReader() is called for the first time,
-		// at which point it's switched to true and never changes
-		// back to false.  Once this is true, we hold open and
-		// reuse SegmentReader instances internally for applying
-		// deletes, doing merges, and reopening near real-time
-		// readers.
-		private volatile bool poolReaders;
-		
-		/// <summary> Expert: returns a readonly reader, covering all committed as well as
-		/// un-committed changes to the index. This provides "near real-time"
-		/// searching, in that changes made during an IndexWriter session can be
-		/// quickly made available for searching without closing the writer nor
-		/// calling <see cref="Commit()" />.
-		/// 
-		/// <p/>
-		/// Note that this is functionally equivalent to calling {#commit} and then
-		/// using <see cref="IndexReader.Open(Lucene.Net.Store.Directory, bool)" /> to open a new reader. But the turarnound
-		/// time of this method should be faster since it avoids the potentially
-		/// costly <see cref="Commit()" />.
-		/// <p/>
-		/// 
+        
+        // This is a "write once" variable (like the organic dye
+        // on a DVD-R that may or may not be heated by a laser and
+        // then cooled to permanently record the event): it's
+        // false, until getReader() is called for the first time,
+        // at which point it's switched to true and never changes
+        // back to false.  Once this is true, we hold open and
+        // reuse SegmentReader instances internally for applying
+        // deletes, doing merges, and reopening near real-time
+        // readers.
+        private volatile bool poolReaders;
+        
+        /// <summary> Expert: returns a readonly reader, covering all committed as well as
+        /// un-committed changes to the index. This provides "near real-time"
+        /// searching, in that changes made during an IndexWriter session can be
+        /// quickly made available for searching without closing the writer or
+        /// calling <see cref="Commit()" />.
+        /// 
+        /// <p/>
+        /// Note that this is functionally equivalent to calling <see cref="Commit()" /> and then
+        /// using <see cref="IndexReader.Open(Lucene.Net.Store.Directory, bool)" /> to open a new reader. But the turnaround
+        /// time of this method should be faster since it avoids the potentially
+        /// costly <see cref="Commit()" />.
+        /// <p/>
+        /// 
         /// You must close the <see cref="IndexReader" /> returned by  this method once you are done using it.
         /// 
-		/// <p/>
-		/// It's <i>near</i> real-time because there is no hard
-		/// guarantee on how quickly you can get a new reader after
-		/// making changes with IndexWriter.  You'll have to
-		/// experiment in your situation to determine if it's
-		/// faster enough.  As this is a new and experimental
-		/// feature, please report back on your findings so we can
-		/// learn, improve and iterate.<p/>
-		/// 
-		/// <p/>The resulting reader suppports <see cref="IndexReader.Reopen()" />
-		///, but that call will simply forward
-		/// back to this method (though this may change in the
-		/// future).<p/>
-		/// 
-		/// <p/>The very first time this method is called, this
-		/// writer instance will make every effort to pool the
-		/// readers that it opens for doing merges, applying
-		/// deletes, etc.  This means additional resources (RAM,
-		/// file descriptors, CPU time) will be consumed.<p/>
-		/// 
-		/// <p/>For lower latency on reopening a reader, you should call <see cref="MergedSegmentWarmer" /> 
+        /// <p/>
+        /// It's <i>near</i> real-time because there is no hard
+        /// guarantee on how quickly you can get a new reader after
+        /// making changes with IndexWriter.  You'll have to
+        /// experiment in your situation to determine if it's
+        /// fast enough.  As this is a new and experimental
+        /// feature, please report back on your findings so we can
+        /// learn, improve and iterate.<p/>
+        /// 
+        /// <p/>The resulting reader supports <see cref="IndexReader.Reopen()" />,
+        /// but that call will simply forward
+        /// back to this method (though this may change in the
+        /// future).<p/>
+        /// 
+        /// <p/>The very first time this method is called, this
+        /// writer instance will make every effort to pool the
+        /// readers that it opens for doing merges, applying
+        /// deletes, etc.  This means additional resources (RAM,
+        /// file descriptors, CPU time) will be consumed.<p/>
+        /// 
+        /// <p/>For lower latency on reopening a reader, you should set <see cref="MergedSegmentWarmer" />
         /// to
-		/// pre-warm a newly merged segment before it's committed
-		/// to the index. This is important for minimizing index-to-search 
+        /// pre-warm a newly merged segment before it's committed
+        /// to the index. This is important for minimizing index-to-search 
         /// delay after a large merge.
-		/// 
-		/// <p/>If an addIndexes* call is running in another thread,
-		/// then this reader will only search those segments from
-		/// the foreign index that have been successfully copied
-		/// over, so far<p/>.
-		/// 
-		/// <p/><b>NOTE</b>: Once the writer is closed, any
-		/// outstanding readers may continue to be used.  However,
-		/// if you attempt to reopen any of those readers, you'll
-		/// hit an <see cref="AlreadyClosedException" />.<p/>
-		/// 
-		/// <p/><b>NOTE:</b> This API is experimental and might
-		/// change in incompatible ways in the next release.<p/>
-		/// 
-		/// </summary>
-		/// <returns> IndexReader that covers entire index plus all
-		/// changes made so far by this IndexWriter instance
-		/// 
-		/// </returns>
-		/// <throws>  IOException </throws>
-		public virtual IndexReader GetReader()
-		{
+        /// 
+        /// <p/>If an addIndexes* call is running in another thread,
+        /// then this reader will only search those segments from
+        /// the foreign index that have been successfully copied
+        /// over so far.<p/>
+        /// 
+        /// <p/><b>NOTE</b>: Once the writer is closed, any
+        /// outstanding readers may continue to be used.  However,
+        /// if you attempt to reopen any of those readers, you'll
+        /// hit an <see cref="AlreadyClosedException" />.<p/>
+        /// 
+        /// <p/><b>NOTE:</b> This API is experimental and might
+        /// change in incompatible ways in the next release.<p/>
+        /// 
+        /// </summary>
+        /// <returns> IndexReader that covers entire index plus all
+        /// changes made so far by this IndexWriter instance
+        /// 
+        /// </returns>
+        /// <throws>  IOException </throws>
+        public virtual IndexReader GetReader()
+        {
             return GetReader(readerTermsIndexDivisor);
-		}
-		
-		/// <summary>Expert: like <see cref="GetReader()" />, except you can
-		/// specify which termInfosIndexDivisor should be used for
-		/// any newly opened readers.
-		/// </summary>
-		/// <param name="termInfosIndexDivisor">Subsambles which indexed
-		/// terms are loaded into RAM. This has the same effect as <see cref="IndexWriter.TermIndexInterval" />
-		/// except that setting
-		/// must be done at indexing time while this setting can be
-		/// set per reader.  When set to N, then one in every
-		/// N*termIndexInterval terms in the index is loaded into
-		/// memory.  By setting this to a value > 1 you can reduce
-		/// memory usage, at the expense of higher latency when
-		/// loading a TermInfo.  The default value is 1.  Set this
-		/// to -1 to skip loading the terms index entirely. 
-		/// </param>
-		public virtual IndexReader GetReader(int termInfosIndexDivisor)
-		{
+        }
+        
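
A minimal sketch of the near-real-time pattern the summary above describes
(writer, dir and doc are assumed to already exist; Dispose() stands in for
the "close" the contract requires):

    writer.AddDocument(doc);                       // change not yet committed
    using (IndexReader reader = writer.GetReader())        // sees the new doc
    using (var searcher = new Lucene.Net.Search.IndexSearcher(reader))
    {
        // queries here observe the uncommitted state of the index
    }
    // No Commit() was needed to make the change searchable.
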
+        /// <summary>Expert: like <see cref="GetReader()" />, except you can
+        /// specify which termInfosIndexDivisor should be used for
+        /// any newly opened readers.
+        /// </summary>
+        /// <param name="termInfosIndexDivisor">Subsambles which indexed
+        /// terms are loaded into RAM. This has the same effect as <see cref="IndexWriter.TermIndexInterval" />
+        /// except that setting
+        /// must be done at indexing time while this setting can be
+        /// set per reader.  When set to N, then one in every
+        /// N*termIndexInterval terms in the index is loaded into
+        /// memory.  By setting this to a value > 1 you can reduce
+        /// memory usage, at the expense of higher latency when
+        /// loading a TermInfo.  The default value is 1.  Set this
+        /// to -1 to skip loading the terms index entirely. 
+        /// </param>
+        public virtual IndexReader GetReader(int termInfosIndexDivisor)
+        {
             EnsureOpen();
 
-			if (infoStream != null)
-			{
-				Message("flush at getReader");
-			}
-			
-			// Do this up front before flushing so that the readers
-			// obtained during this flush are pooled, the first time
-			// this method is called:
-			poolReaders = true;
-			
-			// Prevent segmentInfos from changing while opening the
-			// reader; in theory we could do similar retry logic,
-			// just like we do when loading segments_N
+            if (infoStream != null)
+            {
+                Message("flush at getReader");
+            }
+            
+            // Do this up front before flushing so that the readers
+            // obtained during this flush are pooled, the first time
+            // this method is called:
+            poolReaders = true;
+            
+            // Prevent segmentInfos from changing while opening the
+            // reader; in theory we could do similar retry logic,
+            // just like we do when loading segments_N
             IndexReader r;
-			lock (this)
-			{
+            lock (this)
+            {
                 Flush(false, true, true);
                 r = new ReadOnlyDirectoryReader(this, segmentInfos, termInfosIndexDivisor);
-			}
+            }
             MaybeMerge();
             return r;
-		}
-		
-		/// <summary>Holds shared SegmentReader instances. IndexWriter uses
-		/// SegmentReaders for 1) applying deletes, 2) doing
-		/// merges, 3) handing out a real-time reader.  This pool
-		/// reuses instances of the SegmentReaders in all these
-		/// places if it is in "near real-time mode" (getReader()
-		/// has been called on this instance). 
-		/// </summary>
-		
-		internal class ReaderPool : IDisposable
-		{
-			public ReaderPool(IndexWriter enclosingInstance)
-			{
-				InitBlock(enclosingInstance);
-			}
-			private void  InitBlock(IndexWriter enclosingInstance)
-			{
-				this.enclosingInstance = enclosingInstance;
-			}
-			private IndexWriter enclosingInstance;
-			public IndexWriter Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
+        }
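
To make the divisor arithmetic concrete: with the usual termIndexInterval
default of 128, passing 1 keeps every 128th indexed term in RAM, while
passing 4 keeps only every 512th, roughly quartering terms-index memory at
the cost of longer TermInfo lookups. A sketch, with the value 4 purely
illustrative:

    using (IndexReader reader = writer.GetReader(4))  // 1 in 4*termIndexInterval terms in RAM
    {
        // search with a smaller terms index, at the price of slower term seeks
    }
    // writer.GetReader(-1) would skip loading the terms index entirely.
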
+        
+        /// <summary>Holds shared SegmentReader instances. IndexWriter uses
+        /// SegmentReaders for 1) applying deletes, 2) doing
+        /// merges, 3) handing out a real-time reader.  This pool
+        /// reuses instances of the SegmentReaders in all these
+        /// places if it is in "near real-time mode" (getReader()
+        /// has been called on this instance). 
+        /// </summary>
+        
+        internal class ReaderPool : IDisposable
+        {
+            public ReaderPool(IndexWriter enclosingInstance)
+            {
+                InitBlock(enclosingInstance);
+            }
+            private void  InitBlock(IndexWriter enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
+            private IndexWriter enclosingInstance;
+            public IndexWriter Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+                
+            }
 
             private IDictionary<SegmentInfo, SegmentReader> readerMap = new HashMap<SegmentInfo, SegmentReader>();
-			
-			/// <summary>Forcefully clear changes for the specifed segments,
-			/// and remove from the pool.   This is called on succesful merge. 
-			/// </summary>
-			internal virtual void  Clear(SegmentInfos infos)
-			{
-				lock (this)
-				{
-					if (infos == null)
-					{
+            
+            /// <summary>Forcefully clear changes for the specified segments,
+            /// and remove them from the pool.  This is called on a successful merge. 
+            /// </summary>
+            internal virtual void  Clear(SegmentInfos infos)
+            {
+                lock (this)
+                {
+                    if (infos == null)
+                    {
                         foreach(KeyValuePair<SegmentInfo, SegmentReader> ent in readerMap)
-						{
-							ent.Value.hasChanges = false;
-						}
-					}
-					else
-					{
+                        {
+                            ent.Value.hasChanges = false;
+                        }
+                    }
+                    else
+                    {
                         foreach(SegmentInfo info in infos)
-						{
-							if (readerMap.ContainsKey(info))
-							{
-								readerMap[info].hasChanges = false;
-							}
-						}
-					}
-				}
-			}
-			
-			// used only by asserts
-			public virtual bool InfoIsLive(SegmentInfo info)
-			{
-				lock (this)
-				{
-					int idx = Enclosing_Instance.segmentInfos.IndexOf(info);
-					System.Diagnostics.Debug.Assert(idx != -1);
+                        {
+                            if (readerMap.ContainsKey(info))
+                            {
+                                readerMap[info].hasChanges = false;
+                            }
+                        }
+                    }
+                }
+            }
+            
+            // used only by asserts
+            public virtual bool InfoIsLive(SegmentInfo info)
+            {
+                lock (this)
+                {
+                    int idx = Enclosing_Instance.segmentInfos.IndexOf(info);
+                    System.Diagnostics.Debug.Assert(idx != -1);
                     System.Diagnostics.Debug.Assert(Enclosing_Instance.segmentInfos[idx] == info);
-					return true;
-				}
-			}
-			
-			public virtual SegmentInfo MapToLive(SegmentInfo info)
-			{
-				lock (this)
-				{
-					int idx = Enclosing_Instance.segmentInfos.IndexOf(info);
-					if (idx != - 1)
-					{
-						info = Enclosing_Instance.segmentInfos[idx];
-					}
-					return info;
-				}
-			}
-			
-			/// <summary> Release the segment reader (i.e. decRef it and close if there
-			/// are no more references.
-			/// </summary>
-			/// <param name="sr">
-			/// </param>
-			/// <throws>  IOException </throws>
-			public virtual void  Release(SegmentReader sr)
-			{
-				lock (this)
-				{
-					Release(sr, false);
-				}
-			}
-
-		    /// <summary> Release the segment reader (i.e. decRef it and close if there
-		    /// are no more references.
-		    /// </summary>
-		    /// <param name="sr">
-		    /// </param>
-		    /// <param name="drop"></param>
-		    /// <throws>  IOException </throws>
-		    public virtual void  Release(SegmentReader sr, bool drop)
-			{
-				lock (this)
-				{
-					
-					bool pooled = readerMap.ContainsKey(sr.SegmentInfo);
+                    return true;
+                }
+            }
+            
+            public virtual SegmentInfo MapToLive(SegmentInfo info)
+            {
+                lock (this)
+                {
+                    int idx = Enclosing_Instance.segmentInfos.IndexOf(info);
+                    if (idx != - 1)
+                    {
+                        info = Enclosing_Instance.segmentInfos[idx];
+                    }
+                    return info;
+                }
+            }
+            
+            /// <summary> Release the segment reader (i.e., decRef it and close it if there
+            /// are no more references).
+            /// </summary>
+            /// <param name="sr">
+            /// </param>
+            /// <throws>  IOException </throws>
+            public virtual void  Release(SegmentReader sr)
+            {
+                lock (this)
+                {
+                    Release(sr, false);
+                }
+            }
+
+            /// <summary> Release the segment reader (i.e., decRef it and close it if there
+            /// are no more references).
+            /// </summary>
+            /// <param name="sr">
+            /// </param>
+            /// <param name="drop"></param>
+            /// <throws>  IOException </throws>
+            public virtual void  Release(SegmentReader sr, bool drop)
+            {
+                lock (this)
+                {
+                    
+                    bool pooled = readerMap.ContainsKey(sr.SegmentInfo);
 
                     System.Diagnostics.Debug.Assert(!pooled || readerMap[sr.SegmentInfo] == sr);
 
                     // Drop caller's ref; for an external reader (not
                     // pooled), this decRef will close it
-					sr.DecRef();
-					
-					if (pooled && (drop || (!Enclosing_Instance.poolReaders && sr.RefCount == 1)))
-					{
+                    sr.DecRef();
+                    
+                    if (pooled && (drop || (!Enclosing_Instance.poolReaders && sr.RefCount == 1)))
+                    {
 
                         // We invoke deleter.checkpoint below, so we must be
                         // sync'd on IW if there are changes:
-						
+                        
                         // TODO: Java 1.5 has this, .NET can't.
-						// System.Diagnostics.Debug.Assert(!sr.hasChanges || Thread.holdsLock(enclosingInstance));
+                        // System.Diagnostics.Debug.Assert(!sr.hasChanges || Thread.holdsLock(enclosingInstance));
 
                         // Discard (don't save) changes when we are dropping
                         // the reader; this is used only on the sub-readers
@@ -526,9 +526,9 @@ namespace Lucene.Net.Index
                         sr.hasChanges &= !drop;
 
                         bool hasChanges = sr.hasChanges;
-						
-						// Drop our ref -- this will commit any pending
-						// changes to the dir
+                        
+                        // Drop our ref -- this will commit any pending
+                        // changes to the dir
                         sr.Close();
 
                         // We are the last ref to this reader; since we're
@@ -542,17 +542,17 @@ namespace Lucene.Net.Index
                             // file.
                             enclosingInstance.deleter.Checkpoint(enclosingInstance.segmentInfos, false);
                         }
-					}
-				}
-			}
+                    }
+                }
+            }
 
             /// <summary>Removes all our references to readers, and commits
             /// any pending changes. 
             /// </summary>
-		    public void Dispose()
-		    {
-		        Dispose(true);
-		    }
+            public void Dispose()
+            {
+                Dispose(true);
+            }
 
             protected void Dispose(bool disposing)
             {
@@ -593,359 +593,359 @@ namespace Lucene.Net.Index
                     }
                 }
             }
-			
-			/// <summary> Commit all segment reader in the pool.</summary>
-			/// <throws>  IOException </throws>
-			internal virtual void  Commit()
-			{
+            
+            /// <summary> Commit all segment readers in the pool.</summary>
+            /// <throws>  IOException </throws>
+            internal virtual void  Commit()
+            {
                 // We invoke deleter.checkpoint below, so we must be
                 // sync'd on IW:
                 // TODO: assert Thread.holdsLock(IndexWriter.this);
                 lock (this)
-				{
+                {
                     foreach(KeyValuePair<SegmentInfo,SegmentReader> ent in readerMap)
-					{
-						SegmentReader sr = ent.Value;
-						if (sr.hasChanges)
-						{
-							System.Diagnostics.Debug.Assert(InfoIsLive(sr.SegmentInfo));
-							sr.DoCommit(null);
+                    {
+                        SegmentReader sr = ent.Value;
+                        if (sr.hasChanges)
+                        {
+                            System.Diagnostics.Debug.Assert(InfoIsLive(sr.SegmentInfo));
+                            sr.DoCommit(null);
                             // Must checkpoint w/ deleter, because this
                             // segment reader will have created new _X_N.del
                             // file.
                             enclosingInstance.deleter.Checkpoint(enclosingInstance.segmentInfos, false);
-						}
-					}
-				}
-			}
-			
-			/// <summary> Returns a ref to a clone.  NOTE: this clone is not
-			/// enrolled in the pool, so you should simply close()
-			/// it when you're done (ie, do not call release()).
-			/// </summary>
-			public virtual SegmentReader GetReadOnlyClone(SegmentInfo info, bool doOpenStores, int termInfosIndexDivisor)
-			{
-				lock (this)
-				{
-					SegmentReader sr = Get(info, doOpenStores, BufferedIndexInput.BUFFER_SIZE, termInfosIndexDivisor);
-					try
-					{
-						return (SegmentReader) sr.Clone(true);
-					}
-					finally
-					{
-						sr.DecRef();
-					}
-				}
-			}
-			
-			/// <summary> Obtain a SegmentReader from the readerPool.  The reader
-			/// must be returned by calling <see cref="Release(SegmentReader)" />
-			/// </summary>
-			/// <seealso cref="Release(SegmentReader)">
-			/// </seealso>
-			/// <param name="info">
-			/// </param>
-			/// <param name="doOpenStores">
-			/// </param>
-			/// <throws>  IOException </throws>
-			public virtual SegmentReader Get(SegmentInfo info, bool doOpenStores)
-			{
-				lock (this)
-				{
+                        }
+                    }
+                }
+            }
+            
+            /// <summary> Returns a ref to a clone.  NOTE: this clone is not
+            /// enrolled in the pool, so you should simply close()
+            /// it when you're done (i.e., do not call release()).
+            /// </summary>
+            public virtual SegmentReader GetReadOnlyClone(SegmentInfo info, bool doOpenStores, int termInfosIndexDivisor)
+            {
+                lock (this)
+                {
+                    SegmentReader sr = Get(info, doOpenStores, BufferedIndexInput.BUFFER_SIZE, termInfosIndexDivisor);
+                    try
+                    {
+                        return (SegmentReader) sr.Clone(true);
+                    }
+                    finally
+                    {
+                        sr.DecRef();
+                    }
+                }
+            }
+            
+            /// <summary> Obtain a SegmentReader from the readerPool.  The reader
+            /// must be returned by calling <see cref="Release(SegmentReader)" />
+            /// </summary>
+            /// <seealso cref="Release(SegmentReader)">
+            /// </seealso>
+            /// <param name="info">
+            /// </param>
+            /// <param name="doOpenStores">
+            /// </param>
+            /// <throws>  IOException </throws>
+            public virtual SegmentReader Get(SegmentInfo info, bool doOpenStores)
+            {
+                lock (this)
+                {
                     return Get(info, doOpenStores, BufferedIndexInput.BUFFER_SIZE, enclosingInstance.readerTermsIndexDivisor);
-				}
-			}
-			/// <summary> Obtain a SegmentReader from the readerPool.  The reader
-			/// must be returned by calling <see cref="Release(SegmentReader)" />
-			/// 
-			/// </summary>
-			/// <seealso cref="Release(SegmentReader)">
-			/// </seealso>
-			/// <param name="info">
-			/// </param>
-			/// <param name="doOpenStores">
-			/// </param>
-			/// <param name="readBufferSize">
-			/// </param>
-			/// <param name="termsIndexDivisor">
-			/// </param>
-			/// <throws>  IOException </throws>
-			public virtual SegmentReader Get(SegmentInfo info, bool doOpenStores, int readBufferSize, int termsIndexDivisor)
-			{
-				lock (this)
-				{
-					if (Enclosing_Instance.poolReaders)
-					{
-						readBufferSize = BufferedIndexInput.BUFFER_SIZE;
-					}
-					
-					SegmentReader sr = readerMap[info];
-					if (sr == null)
-					{
-						// TODO: we may want to avoid doing this while
-						// synchronized
-						// Returns a ref, which we xfer to readerMap:
-						sr = SegmentReader.Get(false, info.dir, info, readBufferSize, doOpenStores, termsIndexDivisor);
+                }
+            }
+            /// <summary> Obtain a SegmentReader from the readerPool.  The reader
+            /// must be returned by calling <see cref="Release(SegmentReader)" />
+            /// 
+            /// </summary>
+            /// <seealso cref="Release(SegmentReader)">
+            /// </seealso>
+            /// <param name="info">
+            /// </param>
+            /// <param name="doOpenStores">
+            /// </param>
+            /// <param name="readBufferSize">
+            /// </param>
+            /// <param name="termsIndexDivisor">
+            /// </param>
+            /// <throws>  IOException </throws>
+            public virtual SegmentReader Get(SegmentInfo info, bool doOpenStores, int readBufferSize, int termsIndexDivisor)
+            {
+                lock (this)
+                {
+                    if (Enclosing_Instance.poolReaders)
+                    {
+                        readBufferSize = BufferedIndexInput.BUFFER_SIZE;
+                    }
+                    
+                    SegmentReader sr = readerMap[info];
+                    if (sr == null)
+                    {
+                        // TODO: we may want to avoid doing this while
+                        // synchronized
+                        // Returns a ref, which we xfer to readerMap:
+                        sr = SegmentReader.Get(false, info.dir, info, readBufferSize, doOpenStores, termsIndexDivisor);
                         if (info.dir == enclosingInstance.directory)
                         {
                             // Only pool if reader is not external
                             readerMap[info]=sr;
                         }
-					}
-					else
-					{
-						if (doOpenStores)
-						{
-							sr.OpenDocStores();
-						}
-						if (termsIndexDivisor != - 1 && !sr.TermsIndexLoaded())
-						{
-							// If this reader was originally opened because we
-							// needed to merge it, we didn't load the terms
-							// index.  But now, if the caller wants the terms
-							// index (eg because it's doing deletes, or an NRT
-							// reader is being opened) we ask the reader to
-							// load its terms index.
-							sr.LoadTermsIndex(termsIndexDivisor);
-						}
-					}
-					
-					// Return a ref to our caller
+                    }
+                    else
+                    {
+                        if (doOpenStores)
+                        {
+                            sr.OpenDocStores();
+                        }
+                        if (termsIndexDivisor != - 1 && !sr.TermsIndexLoaded())
+                        {
+                            // If this reader was originally opened because we
+                            // needed to merge it, we didn't load the terms
+                            // index.  But now, if the caller wants the terms
+                            // index (eg because it's doing deletes, or an NRT
+                            // reader is being opened) we ask the reader to
+                            // load its terms index.
+                            sr.LoadTermsIndex(termsIndexDivisor);
+                        }
+                    }
+                    
+                    // Return a ref to our caller
                     if (info.dir == enclosingInstance.directory)
                     {
                         // Only incRef if we pooled (reader is not external)
                         sr.IncRef();
                     }
-					return sr;
-				}
-			}
-			
-			// Returns a ref
-			public virtual SegmentReader GetIfExists(SegmentInfo info)
-			{
-				lock (this)
-				{
-					SegmentReader sr = readerMap[info];
-					if (sr != null)
-					{
-						sr.IncRef();
-					}
-					return sr;
-				}
-			}
-		}
-		
-		/// <summary> Obtain the number of deleted docs for a pooled reader.
-		/// If the reader isn't being pooled, the segmentInfo's 
-		/// delCount is returned.
-		/// </summary>
-		public virtual int NumDeletedDocs(SegmentInfo info)
-		{
-			SegmentReader reader = readerPool.GetIfExists(info);
-			try
-			{
-				if (reader != null)
-				{
-					return reader.NumDeletedDocs;
-				}
-				else
-				{
-					return info.GetDelCount();
-				}
-			}
-			finally
-			{
-				if (reader != null)
-				{
-					readerPool.Release(reader);
-				}
-			}
-		}
-		
-		internal virtual void  AcquireWrite()
-		{
-			lock (this)
-			{
-				System.Diagnostics.Debug.Assert(writeThread != ThreadClass.Current());
-				while (writeThread != null || readCount > 0)
-					DoWait();
-				
-				// We could have been closed while we were waiting:
-				EnsureOpen();
-				
-				writeThread = ThreadClass.Current();
-			}
-		}
-		
-		internal virtual void  ReleaseWrite()
-		{
-			lock (this)
-			{
-				System.Diagnostics.Debug.Assert(ThreadClass.Current() == writeThread);
-				writeThread = null;
-				System.Threading.Monitor.PulseAll(this);
-			}
-		}
-		
-		internal virtual void  AcquireRead()
-		{
-			lock (this)
-			{
-				ThreadClass current = ThreadClass.Current();
-				while (writeThread != null && writeThread != current)
-					DoWait();
-				
-				readCount++;
-			}
-		}
-		
-		// Allows one readLock to upgrade to a writeLock even if
-		// there are other readLocks as long as all other
-		// readLocks are also blocked in this method:
-		internal virtual void  UpgradeReadToWrite()
-		{
-			lock (this)
-			{
-				System.Diagnostics.Debug.Assert(readCount > 0);
-				upgradeCount++;
-				while (readCount > upgradeCount || writeThread != null)
-				{
-					DoWait();
-				}
-				
-				writeThread = ThreadClass.Current();
-				readCount--;
-				upgradeCount--;
-			}
-		}
-		
-		internal virtual void  ReleaseRead()
-		{
-			lock (this)
-			{
-				readCount--;
-				System.Diagnostics.Debug.Assert(readCount >= 0);
-				System.Threading.Monitor.PulseAll(this);
-			}
-		}
-		
-		internal bool IsOpen(bool includePendingClose)
-		{
-			lock (this)
-			{
-				return !(closed || (includePendingClose && closing));
-			}
-		}
-		
-		/// <summary> Used internally to throw an <see cref="AlreadyClosedException" />
-		/// if this IndexWriter has been
-		/// closed.
-		/// </summary>
-		/// <throws>  AlreadyClosedException if this IndexWriter is </throws>
-		protected internal void  EnsureOpen(bool includePendingClose)
-		{
-			lock (this)
-			{
-				if (!IsOpen(includePendingClose))
-				{
-					throw new AlreadyClosedException("this IndexWriter is closed");
-				}
-			}
-		}
-		
-		protected internal void  EnsureOpen()
-		{
-			lock (this)
-			{
-				EnsureOpen(true);
-			}
-		}
-		
-		/// <summary> Prints a message to the infoStream (if non-null),
-		/// prefixed with the identifying information for this
-		/// writer and the thread that's calling it.
-		/// </summary>
-		public virtual void  Message(System.String message)
-		{
-			if (infoStream != null)
+                    return sr;
+                }
+            }
+            
+            // Returns a ref
+            public virtual SegmentReader GetIfExists(SegmentInfo info)
+            {
+                lock (this)
+                {
+                    SegmentReader sr = readerMap[info];
+                    if (sr != null)
+                    {
+                        sr.IncRef();
+                    }
+                    return sr;
+                }
+            }
+        }
+        
+        /// <summary> Obtain the number of deleted docs for a pooled reader.
+        /// If the reader isn't being pooled, the segmentInfo's 
+        /// delCount is returned.
+        /// </summary>
+        public virtual int NumDeletedDocs(SegmentInfo info)
+        {
+            SegmentReader reader = readerPool.GetIfExists(info);
+            try
+            {
+                if (reader != null)
+                {
+                    return reader.NumDeletedDocs;
+                }
+                else
+                {
+                    return info.GetDelCount();
+                }
+            }
+            finally
+            {
+                if (reader != null)
+                {
+                    readerPool.Release(reader);
+                }
+            }
+        }
+        
+        internal virtual void  AcquireWrite()
+        {
+            lock (this)
+            {
+                System.Diagnostics.Debug.Assert(writeThread != ThreadClass.Current());
+                while (writeThread != null || readCount > 0)
+                    DoWait();
+                
+                // We could have been closed while we were waiting:
+                EnsureOpen();
+                
+                writeThread = ThreadClass.Current();
+            }
+        }
+        
+        internal virtual void  ReleaseWrite()
+        {
+            lock (this)
+            {
+                System.Diagnostics.Debug.Assert(ThreadClass.Current() == writeThread);
+                writeThread = null;
+                System.Threading.Monitor.PulseAll(this);
+            }
+        }
+        
+        internal virtual void  AcquireRead()
+        {
+            lock (this)
+            {
+                ThreadClass current = ThreadClass.Current();
+                while (writeThread != null && writeThread != current)
+                    DoWait();
+                
+                readCount++;
+            }
+        }
+        
+        // Allows one readLock to upgrade to a writeLock even if
+        // there are other readLocks as long as all other
+        // readLocks are also blocked in this method:
+        internal virtual void  UpgradeReadToWrite()
+        {
+            lock (this)
+            {
+                System.Diagnostics.Debug.Assert(readCount > 0);
+                upgradeCount++;
+                while (readCount > upgradeCount || writeThread != null)
+                {
+                    DoWait();
+                }
+                
+                writeThread = ThreadClass.Current();
+                readCount--;
+                upgradeCount--;
+            }
+        }
+        
+        internal virtual void  ReleaseRead()
+        {
+            lock (this)
+            {
+                readCount--;
+                System.Diagnostics.Debug.Assert(readCount >= 0);
+                System.Threading.Monitor.PulseAll(this);
+            }
+        }
+        
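
The four methods above implement a monitor-based reader/writer protocol in
which a reader may upgrade in place, provided every other reader is itself
parked in UpgradeReadToWrite. A standalone sketch of the same idea (a
simplified illustration, not IndexWriter's actual type; the writer-thread
reentrancy and closed-check details are omitted):

    class UpgradableLock
    {
        private readonly object gate = new object();
        private int readers, upgraders;
        private System.Threading.Thread writer;

        public void AcquireRead()
        {
            lock (gate)
            {
                while (writer != null) System.Threading.Monitor.Wait(gate);
                readers++;
            }
        }

        public void UpgradeReadToWrite()
        {
            lock (gate)
            {
                upgraders++;
                // Proceed only once every remaining reader is also upgrading:
                while (readers > upgraders || writer != null)
                    System.Threading.Monitor.Wait(gate);
                writer = System.Threading.Thread.CurrentThread;
                readers--; upgraders--;
            }
        }

        public void ReleaseWrite()
        {
            lock (gate) { writer = null; System.Threading.Monitor.PulseAll(gate); }
        }

        public void ReleaseRead()
        {
            lock (gate) { readers--; System.Threading.Monitor.PulseAll(gate); }
        }
    }
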
+        internal bool IsOpen(bool includePendingClose)
+        {
+            lock (this)
+            {
+                return !(closed || (includePendingClose && closing));
+            }
+        }
+        
+        /// <summary> Used internally to throw an <see cref="AlreadyClosedException" />
+        /// if this IndexWriter has been
+        /// closed.
+        /// </summary>
+        /// <throws>  AlreadyClosedException if this IndexWriter is closed </throws>
+        protected internal void  EnsureOpen(bool includePendingClose)
+        {
+            lock (this)
+            {
+                if (!IsOpen(includePendingClose))
+                {
+                    throw new AlreadyClosedException("this IndexWriter is closed");
+                }
+            }
+        }
+        
+        protected internal void  EnsureOpen()
+        {
+            lock (this)
+            {
+                EnsureOpen(true);
+            }
+        }
+        
+        /// <summary> Prints a message to the infoStream (if non-null),
+        /// prefixed with the identifying information for this
+        /// writer and the thread that's calling it.
+        /// </summary>
+        public virtual void  Message(System.String message)
+        {
+            if (infoStream != null)
                 infoStream.WriteLine("IW " + messageID + " [" + DateTime.Now.ToString() + "; " + ThreadClass.Current().Name + "]: " + message);
-		}
-		
-		private void  SetMessageID(System.IO.StreamWriter infoStream)
-		{
-			lock (this)
-			{
-				if (infoStream != null && messageID == - 1)
-				{
-					lock (MESSAGE_ID_LOCK)
-					{
-						messageID = MESSAGE_ID++;
-					}
-				}
-				this.infoStream = infoStream;
-			}
-		}
-
-	    /// <summary> Casts current mergePolicy to LogMergePolicy, and throws
-	    /// an exception if the mergePolicy is not a LogMergePolicy.
-	    /// </summary>
-	    private LogMergePolicy LogMergePolicy
-	    {
-	        get
-	        {
-	            if (mergePolicy is LogMergePolicy)
-	                return (LogMergePolicy) mergePolicy;
-
-	            throw new System.ArgumentException(
-	                "this method can only be called when the merge policy is the default LogMergePolicy");
-	        }
-	    }
-
-	    /// <summary><p/>Gets or sets the current setting of whether newly flushed
-	    /// segments will use the compound file format.  Note that
-	    /// this just returns the value previously set with
-	    /// setUseCompoundFile(boolean), or the default value
-	    /// (true).  You cannot use this to query the status of
-	    /// previously flushed segments.<p/>
-	    /// 
-	    /// <p/>Note that this method is a convenience method: it
-	    /// just calls mergePolicy.getUseCompoundFile as long as
-	    /// mergePolicy is an instance of <see cref="LogMergePolicy" />.
-	    /// Otherwise an IllegalArgumentException is thrown.<p/>
-	    /// 
-	    /// </summary>
-	    public virtual bool UseCompoundFile
-	    {
-	        get { return LogMergePolicy.GetUseCompoundFile(); }
-	        set
-	        {
-	            LogMergePolicy.SetUseCompoundFile(value);
-	            LogMergePolicy.SetUseCompoundDocStore(value);
-	        }
-	    }
-
-	    /// <summary>Expert: Set the Similarity implementation used by this IndexWriter.
-		/// </summary>
-		public virtual void  SetSimilarity(Similarity similarity)
-		{
-			EnsureOpen();
-			this.similarity = similarity;
-			docWriter.SetSimilarity(similarity);
-		}
-
-	    /// <summary>Expert: Return the Similarity implementation used by this IndexWriter.
-	    /// 
-	    /// <p/>This defaults to the current value of <see cref="Search.Similarity.Default" />.
-	    /// </summary>
-	    public virtual Similarity Similarity
-	    {
-	        get
-	        {
-	            EnsureOpen();
-	            return this.similarity;
-	        }
-	    }
+        }
+        
+        private void  SetMessageID(System.IO.StreamWriter infoStream)
+        {
+            lock (this)
+            {
+                if (infoStream != null && messageID == - 1)
+                {
+                    lock (MESSAGE_ID_LOCK)
+                    {
+                        messageID = MESSAGE_ID++;
+                    }
+                }
+                this.infoStream = infoStream;
+            }
+        }
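
Assuming the public SetInfoStream(StreamWriter) entry point (which routes
through SetMessageID above, and which the MAX_TERM_LENGTH docs reference),
wiring diagnostics to the console looks roughly like:

    var log = new System.IO.StreamWriter(System.Console.OpenStandardOutput())
    {
        AutoFlush = true
    };
    writer.SetInfoStream(log);
    // Subsequent operations then emit lines such as:
    //   IW 0 [<timestamp>; <thread name>]: flush at getReader
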
+
+        /// <summary> Casts current mergePolicy to LogMergePolicy, and throws
+        /// an exception if the mergePolicy is not a LogMergePolicy.
+        /// </summary>
+        private LogMergePolicy LogMergePolicy
+        {
+            get
+            {
+                if (mergePolicy is LogMergePolicy)
+                    return (LogMergePolicy) mergePolicy;
+
+                throw new System.ArgumentException(
+                    "this method can only be called when the merge policy is the default LogMergePolicy");
+            }
+        }
+
+        /// <summary><p/>Gets or sets the current setting of whether newly flushed
+        /// segments will use the compound file format.  Note that
+        /// this just returns the value previously assigned to this
+        /// property, or the default value (true).  You cannot use
+        /// this to query the status of
+        /// previously flushed segments.<p/>
+        /// 
+        /// <p/>Note that this property is a convenience: it just
+        /// delegates to the merge policy's compound-file setting, as long as
+        /// the merge policy is an instance of <see cref="LogMergePolicy" />.
+        /// Otherwise a <see cref="System.ArgumentException" /> is thrown.<p/>
+        /// 
+        /// </summary>
+        public virtual bool UseCompoundFile
+        {
+            get { return LogMergePolicy.GetUseCompoundFile(); }
+            set
+            {
+                LogMergePolicy.SetUseCompoundFile(value);
+                LogMergePolicy.SetUseCompoundDocStore(value);
+            }
+        }
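
Because the property delegates to LogMergePolicy, toggling it is only valid
under the default (or any LogMergePolicy-derived) merge policy. A short
sketch, assuming writer is an open IndexWriter:

    writer.UseCompoundFile = false;  // newly flushed segments use the multi-file format
    // Under a non-LogMergePolicy merge policy, the line above throws
    // System.ArgumentException, per the LogMergePolicy accessor above.
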
+
+        /// <summary>Expert: Set the Similarity implementation used by this IndexWriter.
+        /// </summary>
+        public virtual void  SetSimilarity(Similarity similarity)
+        {
+            EnsureOpen();
+            this.similarity = similarity;
+            docWriter.SetSimilarity(similarity);
+        }
+
+        /// <summary>Expert: Return the Similarity implementation used by this IndexWriter.
+        /// 
+        /// <p/>This defaults to the current value of <see cref="Search.Similarity.Default" />.
+        /// </summary>
+        public virtual Similarity Similarity
+        {
+            get
+            {
+                EnsureOpen();
+                return this.similarity;
+            }
+        }
 
 
         /// <summary>Expert: Gets or sets the interval between indexed terms.  Large values cause less
@@ -970,361 +970,361 @@ namespace Lucene.Net.Index
         /// </summary>
         /// <seealso cref="DEFAULT_TERM_INDEX_INTERVAL">
         /// </seealso>
-	    public virtual int TermIndexInterval
-	    {
-	        get
-	        {
-	            // We pass false because this method is called by SegmentMerger while we are in the process of closing
-	            EnsureOpen(false);
-	            return termIndexInterval;
-	        }
-	        set
-	        {
-	            EnsureOpen();
-	            this.termIndexInterval = value;
-	        }
-	    }
-
-	    /// <summary> Constructs an IndexWriter for the index in <c>d</c>.
-		/// Text will be analyzed with <c>a</c>.  If <c>create</c>
-		/// is true, then a new, empty index will be created in
-		/// <c>d</c>, replacing the index already there, if any.
-		/// 
-		/// </summary>
-		/// <param name="d">the index directory
-		/// </param>
-		/// <param name="a">the analyzer to use
-		/// </param>
-		/// <param name="create"><c>true</c> to create the index or overwrite
-		/// the existing one; <c>false</c> to append to the existing
-		/// index
-		/// </param>
-		/// <param name="mfl">Maximum field length in number of terms/tokens: LIMITED, UNLIMITED, or user-specified
-		/// via the MaxFieldLength constructor.
-		/// </param>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  LockObtainFailedException if another writer </throws>
-		/// <summary>  has this index open (<c>write.lock</c> could not
-		/// be obtained)
-		/// </summary>
-		/// <throws>  IOException if the directory cannot be read/written to, or </throws>
-		/// <summary>  if it does not exist and <c>create</c> is
-		/// <c>false</c> or if there is any other low-level
-		/// IO error
-		/// </summary>
-		public IndexWriter(Directory d, Analyzer a, bool create, MaxFieldLength mfl)
-		{
-			InitBlock();
-			Init(d, a, create, null, mfl.Limit, null, null);
-		}
-		
-		/// <summary> Constructs an IndexWriter for the index in
-		/// <c>d</c>, first creating it if it does not
-		/// already exist.  
-		/// 
-		/// </summary>
-		/// <param name="d">the index directory
-		/// </param>
-		/// <param name="a">the analyzer to use
-		/// </param>
-		/// <param name="mfl">Maximum field length in number of terms/tokens: LIMITED, UNLIMITED, or user-specified
-		/// via the MaxFieldLength constructor.
-		/// </param>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  LockObtainFailedException if another writer </throws>
-		/// <summary>  has this index open (<c>write.lock</c> could not
-		/// be obtained)
-		/// </summary>
-		/// <throws>  IOException if the directory cannot be </throws>
-		/// <summary>  read/written to or if there is any other low-level
-		/// IO error
-		/// </summary>
-		public IndexWriter(Directory d, Analyzer a, MaxFieldLength mfl)
-		{
-			InitBlock();
-			Init(d, a, null, mfl.Limit, null, null);
-		}
-		
-		/// <summary> Expert: constructs an IndexWriter with a custom <see cref="IndexDeletionPolicy" />
-		///, for the index in <c>d</c>,
-		/// first creating it if it does not already exist.  Text
-		/// will be analyzed with <c>a</c>.
-		/// 
-		/// </summary>
-		/// <param name="d">the index directory
-		/// </param>
-		/// <param name="a">the analyzer to use
-		/// </param>
-		/// <param name="deletionPolicy">see <a href="#deletionPolicy">above</a>
-		/// </param>
-		/// <param name="mfl">whether or not to limit field lengths
-		/// </param>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  LockObtainFailedException if another writer </throws>
-		/// <summary>  has this index open (<c>write.lock</c> could not
-		/// be obtained)
-		/// </summary>
-		/// <throws>  IOException if the directory cannot be </throws>
-		/// <summary>  read/written to or if there is any other low-level
-		/// IO error
-		/// </summary>
-		public IndexWriter(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
-		{
-			InitBlock();
-			Init(d, a, deletionPolicy, mfl.Limit, null, null);
-		}
-		
-		/// <summary> Expert: constructs an IndexWriter with a custom <see cref="IndexDeletionPolicy" />
-		///, for the index in <c>d</c>.
-		/// Text will be analyzed with <c>a</c>.  If
-		/// <c>create</c> is true, then a new, empty index
-		/// will be created in <c>d</c>, replacing the index
-		/// already there, if any.
-		/// 
-		/// </summary>
-		/// <param name="d">the index directory
-		/// </param>
-		/// <param name="a">the analyzer to use
-		/// </param>
-		/// <param name="create"><c>true</c> to create the index or overwrite
-		/// the existing one; <c>false</c> to append to the existing
-		/// index
-		/// </param>
-		/// <param name="deletionPolicy">see <a href="#deletionPolicy">above</a>
-		/// </param>
-		/// <param name="mfl"><see cref="Lucene.Net.Index.IndexWriter.MaxFieldLength" />, whether or not to limit field lengths.  Value is in number of terms/tokens
-		/// </param>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  LockObtainFailedException if another writer </throws>
-		/// <summary>  has this index open (<c>write.lock</c> could not
-		/// be obtained)
-		/// </summary>
-		/// <throws>  IOException if the directory cannot be read/written to, or </throws>
-		/// <summary>  if it does not exist and <c>create</c> is
-		/// <c>false</c> or if there is any other low-level
-		/// IO error
-		/// </summary>
-		public IndexWriter(Directory d, Analyzer a, bool create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
-		{
-			InitBlock();
-			Init(d, a, create, deletionPolicy, mfl.Limit, null, null);
-		}
-		
-		/// <summary> Expert: constructs an IndexWriter with a custom <see cref="IndexDeletionPolicy" />
-		/// and <see cref="IndexingChain" />, 
-		/// for the index in <c>d</c>.
-		/// Text will be analyzed with <c>a</c>.  If
-		/// <c>create</c> is true, then a new, empty index
-		/// will be created in <c>d</c>, replacing the index
-		/// already there, if any.
-		/// 
-		/// </summary>
-		/// <param name="d">the index directory
-		/// </param>
-		/// <param name="a">the analyzer to use
-		/// </param>
-		/// <param name="create"><c>true</c> to create the index or overwrite
-		/// the existing one; <c>false</c> to append to the existing
-		/// index
-		/// </param>
-		/// <param name="deletionPolicy">see <a href="#deletionPolicy">above</a>
-		/// </param>
-		/// <param name="mfl">whether or not to limit field lengths, value is in number of terms/tokens.  See <see cref="Lucene.Net.Index.IndexWriter.MaxFieldLength" />.
-		/// </param>
-		/// <param name="indexingChain">the <see cref="DocConsumer" /> chain to be used to 
-		/// process documents
-		/// </param>
-		/// <param name="commit">which commit to open
-		/// </param>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  LockObtainFailedException if another writer </throws>
-		/// <summary>  has this index open (<c>write.lock</c> could not
-		/// be obtained)
-		/// </summary>
-		/// <throws>  IOException if the directory cannot be read/written to, or </throws>
-		/// <summary>  if it does not exist and <c>create</c> is
-		/// <c>false</c> or if there is any other low-level
-		/// IO error
-		/// </summary>
-		internal IndexWriter(Directory d, Analyzer a, bool create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl, IndexingChain indexingChain, IndexCommit commit)
-		{
-			InitBlock();
-			Init(d, a, create, deletionPolicy, mfl.Limit, indexingChain, commit);
-		}
-		
-		/// <summary> Expert: constructs an IndexWriter on specific commit
-		/// point, with a custom <see cref="IndexDeletionPolicy" />, for
-		/// the index in <c>d</c>.  Text will be analyzed
-		/// with <c>a</c>.
-		/// 
-		/// <p/> This is only meaningful if you've used a <see cref="IndexDeletionPolicy" />
-		/// in that past that keeps more than
-		/// just the last commit.
-		/// 
-		/// <p/>This operation is similar to <see cref="Rollback()" />,
-		/// except that method can only rollback what's been done
-		/// with the current instance of IndexWriter since its last
-		/// commit, whereas this method can rollback to an
-		/// arbitrary commit point from the past, assuming the
-		/// <see cref="IndexDeletionPolicy" /> has preserved past
-		/// commits.
-		/// 
-		/// </summary>
-		/// <param name="d">the index directory
-		/// </param>
-		/// <param name="a">the analyzer to use
-		/// </param>
-		/// <param name="deletionPolicy">see <a href="#deletionPolicy">above</a>
-		/// </param>
-		/// <param name="mfl">whether or not to limit field lengths, value is in number of terms/tokens.  See <see cref="Lucene.Net.Index.IndexWriter.MaxFieldLength" />.
-		/// </param>
-		/// <param name="commit">which commit to open
-		/// </param>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  LockObtainFailedException if another writer </throws>
-		/// <summary>  has this index open (<c>write.lock</c> could not
-		/// be obtained)
-		/// </summary>
-		/// <throws>  IOException if the directory cannot be read/written to, or </throws>
-		/// <summary>  if it does not exist and <c>create</c> is
-		/// <c>false</c> or if there is any other low-level
-		/// IO error
-		/// </summary>
-		public IndexWriter(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl, IndexCommit commit)
-		{
-			InitBlock();
-			Init(d, a, false, deletionPolicy, mfl.Limit, null, commit);
-		}
-		
-		private void  Init(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, int maxFieldLength, IndexingChain indexingChain, IndexCommit commit)
-		{
-			if (IndexReader.IndexExists(d))
-			{
-				Init(d, a, false, deletionPolicy, maxFieldLength, indexingChain, commit);
-			}
-			else
-			{
-				Init(d, a, true, deletionPolicy, maxFieldLength, indexingChain, commit);
-			}
-		}
-		
-		private void  Init(Directory d, Analyzer a, bool create, IndexDeletionPolicy deletionPolicy, int maxFieldLength, IndexingChain indexingChain, IndexCommit commit)
-		{
-			directory = d;
-			analyzer = a;
-			SetMessageID(defaultInfoStream);
-			this.maxFieldLength = maxFieldLength;
-			
-			if (indexingChain == null)
-				indexingChain = DocumentsWriter.DefaultIndexingChain;
-			
-			if (create)
-			{
-				// Clear the write lock in case it's leftover:
-				directory.ClearLock(WRITE_LOCK_NAME);
-			}
-			
-			Lock writeLock = directory.MakeLock(WRITE_LOCK_NAME);
-			if (!writeLock.Obtain(writeLockTimeout))
-			// obtain write lock
-			{
-				throw new LockObtainFailedException("Index locked for write: " + writeLock);
-			}
-			this.writeLock = writeLock; // save it
-
-            bool success = false;
-			try
-			{
-				if (create)
-				{
-					// Try to read first.  This is to allow create
-					// against an index that's currently open for
-					// searching.  In this case we write the next
-					// segments_N file with no segments:
-					bool doCommit;
-					try
-					{
-						segmentInfos.Read(directory);
-						segmentInfos.Clear();
-						doCommit = false;
-					}
-					catch (System.IO.IOException)
-					{
-						// Likely this means it's a fresh directory
-						doCommit = true;
-					}
-					
-					if (doCommit)
-					{
-						// Only commit if there is no segments file 
-                        // in this dir already.
-						segmentInfos.Commit(directory);
-                        synced.UnionWith(segmentInfos.Files(directory, true));
-					}
-					else
-					{
-						// Record that we have a change (zero out all
-						// segments) pending:
-						changeCount++;
-					}
-				}
-				else
-				{
-					segmentInfos.Read(directory);
-					
-					if (commit != null)
-					{
-						// Swap out all segments, but, keep metadata in
-						// SegmentInfos, like version & generation, to
-						// preserve write-once.  This is important if
-						// readers are open against the future commit
-						// points.
-						if (commit.Directory != directory)
-							throw new System.ArgumentException("IndexCommit's directory doesn't match my directory");
-						SegmentInfos oldInfos = new SegmentInfos();
-						oldInfos.Read(directory, commit.SegmentsFileName);
-						segmentInfos.Replace(oldInfos);
-						changeCount++;
-						if (infoStream != null)
-							Message("init: loaded commit \"" + commit.SegmentsFileName + "\"");
-					}
-					
-					// We assume that this segments_N was previously
-					// properly sync'd:
-                    synced.UnionWith(segmentInfos.Files(directory, true));
-				}
-				
-				SetRollbackSegmentInfos(segmentInfos);
-				
-				docWriter = new DocumentsWriter(directory, this, indexingChain);
-				docWriter.SetInfoStream(infoStream);
-				docWriter.SetMaxFieldLength(maxFieldLength);
-				
-				// Default deleter (for backwards compatibility) is
-				// KeepOnlyLastCommitDeleter:
-				deleter = new IndexFileDeleter(directory, deletionPolicy == null?new KeepOnlyLastCommitDeletionPolicy():deletionPolicy, segmentInfos, infoStream, docWriter, synced);
-				
-				if (deleter.startingCommitDeleted)
-				// Deletion policy deleted the "head" commit point.
-				// We have to mark ourself as changed so that if we
-				// are closed w/o any further changes we write a new
-				// segments_N file.
-					changeCount++;
-				
-				PushMaxBufferedDocs();
-				
-				if (infoStream != null)
-				{
-					Message("init: create=" + create);
-					MessageState();
-				}
+        public virtual int TermIndexInterval
+        {
+            get
+            {
+                // We pass false because this method is called by SegmentMerger while we are in the process of closing
+                EnsureOpen(false);
+                return termIndexInterval;
+            }
+            set
+            {
+                EnsureOpen();
+                this.termIndexInterval = value;
+            }
+        }
 
-                success = true;
-			}
-			finally
-			{
-                if (!success)
-                {
+        /// <summary> Constructs an IndexWriter for the index in <c>d</c>.
+        /// Text will be analyzed with <c>a</c>.  If <c>create</c>
+        /// is true, then a new, empty index will be created in
+        /// <c>d</c>, replacing the index already there, if any.
+        /// 
+        /// </summary>
+        /// <param name="d">the index directory
+        /// </param>
+        /// <param name="a">the analyzer to use
+        /// </param>
+        /// <param name="create"><c>true</c> to create the index or overwrite
+        /// the existing one; <c>false</c> to append to the existing
+        /// index
+        /// </param>
+        /// <param name="mfl">Maximum field length in number of terms/tokens: LIMITED, UNLIMITED, or user-specified
+        /// via the MaxFieldLength constructor.
+        /// </param>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  LockObtainFailedException if another writer
+        /// has this index open (<c>write.lock</c> could not
+        /// be obtained)
+        /// </throws>
+        /// <throws>  IOException if the directory cannot be read/written to, or
+        /// if it does not exist and <c>create</c> is
+        /// <c>false</c> or if there is any other low-level
+        /// IO error
+        /// </throws>
+        public IndexWriter(Directory d, Analyzer a, bool create, MaxFieldLength mfl)
+        {
+            InitBlock();
+            Init(d, a, create, null, mfl.Limit, null, null);
+        }
+        
+        /// <summary> Constructs an IndexWriter for the index in
+        /// <c>d</c>, first creating it if it does not
+        /// already exist.  
+        /// 
+        /// </summary>
+        /// <param name="d">the index directory
+        /// </param>
+        /// <param name="a">the analyzer to use
+        /// </param>
+        /// <param name="mfl">Maximum field length in number of terms/tokens: LIMITED, UNLIMITED, or user-specified
+        /// via the MaxFieldLength constructor.
+        /// </param>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  LockObtainFailedException if another writer
+        /// has this index open (<c>write.lock</c> could not
+        /// be obtained)
+        /// </throws>
+        /// <throws>  IOException if the directory cannot be
+        /// read/written to or if there is any other low-level
+        /// IO error
+        /// </throws>
+        public IndexWriter(Directory d, Analyzer a, MaxFieldLength mfl)
+        {
+            InitBlock();
+            Init(d, a, null, mfl.Limit, null, null);
+        }
+        
+        /// <summary> Expert: constructs an IndexWriter with a custom <see cref="IndexDeletionPolicy" />
+        ///, for the index in <c>d</c>,
+        /// first creating it if it does not already exist.  Text
+        /// will be analyzed with <c>a</c>.
+        /// 
+        /// </summary>
+        /// <param name="d">the index directory
+        /// </param>
+        /// <param name="a">the analyzer to use
+        /// </param>
+        /// <param name="deletionPolicy">see <a href="#deletionPolicy">above</a>
+        /// </param>
+        /// <param name="mfl">whether or not to limit field lengths
+        /// </param>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  LockObtainFailedException if another writer
+        /// has this index open (<c>write.lock</c> could not
+        /// be obtained)
+        /// </throws>
+        /// <throws>  IOException if the directory cannot be
+        /// read/written to or if there is any other low-level
+        /// IO error
+        /// </throws>
+        public IndexWriter(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
+        {
+            InitBlock();
+            Init(d, a, deletionPolicy, mfl.Limit, null, null);
+        }
+        
+        /// <summary> Expert: constructs an IndexWriter with a custom <see cref="IndexDeletionPolicy" />
+        ///, for the index in <c>d</c>.
+        /// Text will be analyzed with <c>a</c>.  If
+        /// <c>create</c> is true, then a new, empty index
+        /// will be created in <c>d</c>, replacing the index
+        /// already there, if any.
+        /// 
+        /// </summary>
+        /// <param name="d">the index directory
+        /// </param>
+        /// <param name="a">the analyzer to use
+        /// </param>
+        /// <param name="create"><c>true</c> to create the index or overwrite
+        /// the existing one; <c>false</c> to append to the existing
+        /// index
+        /// </param>
+        /// <param name="deletionPolicy">see <a href="#deletionPolicy">above</a>
+        /// </param>
+        /// <param name="mfl"><see cref="Lucene.Net.Index.IndexWriter.MaxFieldLength" />, whether or not to limit field lengths.  Value is in number of terms/tokens
+        /// </param>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  LockObtainFailedException if another writer
+        /// has this index open (<c>write.lock</c> could not
+        /// be obtained)
+        /// </throws>
+        /// <throws>  IOException if the directory cannot be read/written to, or
+        /// if it does not exist and <c>create</c> is
+        /// <c>false</c> or if there is any other low-level
+        /// IO error
+        /// </throws>
+        public IndexWriter(Directory d, Analyzer a, bool create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
+        {
+            InitBlock();
+            Init(d, a, create, deletionPolicy, mfl.Limit, null, null);
+        }
+        
+        /// <summary> Expert: constructs an IndexWriter with a custom <see cref="IndexDeletionPolicy" />
+        /// and <see cref="IndexingChain" />, 
+        /// for the index in <c>d</c>.
+        /// Text will be analyzed with <c>a</c>.  If
+        /// <c>create</c> is true, then a new, empty index
+        /// will be created in <c>d</c>, replacing the index
+        /// already there, if any.
+        /// 
+        /// </summary>
+        /// <param name="d">the index directory
+        /// </param>
+        /// <param name="a">the analyzer to use
+        /// </param>
+        /// <param name="create"><c>true</c> to create the index or overwrite
+        /// the existing one; <c>false</c> to append to the existing
+        /// index
+        /// </param>
+        /// <param name="deletionPolicy">see <a href="#deletionPolicy">above</a>
+        /// </param>
+        /// <param name="mfl">whether or not to limit field lengths, value is in number of terms/tokens.  See <see cref="Lucene.Net.Index.IndexWriter.MaxFieldLength" />.
+        /// </param>
+        /// <param name="indexingChain">the <see cref="DocConsumer" /> chain to be used to 
+        /// process documents
+        /// </param>
+        /// <param name="commit">which commit to open
+        /// </param>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  LockObtainFailedException if another writer
+        /// has this index open (<c>write.lock</c> could not
+        /// be obtained)
+        /// </throws>
+        /// <throws>  IOException if the directory cannot be read/written to, or
+        /// if it does not exist and <c>create</c> is
+        /// <c>false</c> or if there is any other low-level
+        /// IO error
+        /// </throws>
+        internal IndexWriter(Directory d, Analyzer a, bool create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl, IndexingChain indexingChain, IndexCommit commit)
+        {
+            InitBlock();
+            Init(d, a, create, deletionPolicy, mfl.Limit, indexingChain, commit);
+        }
+        
+        /// <summary> Expert: constructs an IndexWriter on specific commit
+        /// point, with a custom <see cref="IndexDeletionPolicy" />, for
+        /// the index in <c>d</c>.  Text will be analyzed
+        /// with <c>a</c>.
+        /// 
+        /// <p/> This is only meaningful if you've used an <see cref="IndexDeletionPolicy" />
+        /// in the past that keeps more than
+        /// just the last commit.
+        /// 
+        /// <p/>This operation is similar to <see cref="Rollback()" />,
+        /// except that method can only rollback what's been done
+        /// with the current instance of IndexWriter since its last
+        /// commit, whereas this method can rollback to an
+        /// arbitrary commit point from the past, assuming the
+        /// <see cref="IndexDeletionPolicy" /> has preserved past
+        /// commits.
+        /// 
+        /// </summary>
+        /// <param name="d">the index directory
+        /// </param>
+        /// <param name="a">the analyzer to use
+        /// </param>
+        /// <param name="deletionPolicy">see <a href="#deletionPolicy">above</a>
+        /// </param>
+        /// <param name="mfl">whether or not to limit field lengths, value is in number of terms/tokens.  See <see cref="Lucene.Net.Index.IndexWriter.MaxFieldLength" />.
+        /// </param>
+        /// <param name="commit">which commit to open
+        /// </param>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        ///

<TRUNCATED>
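
For reference, a minimal usage sketch of the constructors documented above, in the style of the 3.x API this commit touches. The directory, analyzer, and field values below are illustrative only; RAMDirectory, StandardAnalyzer, Document, and Field are the stock Lucene.Net core types.

    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Documents;
    using Lucene.Net.Index;
    using Lucene.Net.Store;
    using Version = Lucene.Net.Util.Version;

    class IndexWriterUsage
    {
        static void Main()
        {
            // In-memory directory for the sketch; FSDirectory.Open(...) targets disk.
            Directory dir = new RAMDirectory();
            Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30);

            // create == true overwrites any index already in dir;
            // MaxFieldLength.UNLIMITED disables field-length truncation.
            using (var writer = new IndexWriter(dir, analyzer, true,
                                                IndexWriter.MaxFieldLength.UNLIMITED))
            {
                var doc = new Document();
                doc.Add(new Field("title", "hello world",
                                  Field.Store.YES, Field.Index.ANALYZED));
                writer.AddDocument(doc);
                writer.Commit(); // writes and syncs a new segments_N
            }
        }
    }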

[20/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/ByteSliceReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/ByteSliceReader.cs b/src/core/Index/ByteSliceReader.cs
index 8b672fe..c3d2044 100644
--- a/src/core/Index/ByteSliceReader.cs
+++ b/src/core/Index/ByteSliceReader.cs
@@ -22,164 +22,164 @@ using IndexOutput = Lucene.Net.Store.IndexOutput;
 
 namespace Lucene.Net.Index
 {
-	
-	/* IndexInput that knows how to read the byte slices written
-	* by Posting and PostingVector.  We read the bytes in
-	* each slice until we hit the end of that slice at which
-	* point we read the forwarding address of the next slice
-	* and then jump to it.*/
-	public sealed class ByteSliceReader : IndexInput
-	{
-		internal ByteBlockPool pool;
-		internal int bufferUpto;
-		internal byte[] buffer;
-		public int upto;
-		internal int limit;
-		internal int level;
-		public int bufferOffset;
-		
-		public int endIndex;
-		
-		public void  Init(ByteBlockPool pool, int startIndex, int endIndex)
-		{
-			
-			System.Diagnostics.Debug.Assert(endIndex - startIndex >= 0);
-			System.Diagnostics.Debug.Assert(startIndex >= 0);
-			System.Diagnostics.Debug.Assert(endIndex >= 0);
-			
-			this.pool = pool;
-			this.endIndex = endIndex;
-			
-			level = 0;
-			bufferUpto = startIndex / DocumentsWriter.BYTE_BLOCK_SIZE;
-			bufferOffset = bufferUpto * DocumentsWriter.BYTE_BLOCK_SIZE;
-			buffer = pool.buffers[bufferUpto];
-			upto = startIndex & DocumentsWriter.BYTE_BLOCK_MASK;
-			
-			int firstSize = ByteBlockPool.levelSizeArray[0];
-			
-			if (startIndex + firstSize >= endIndex)
-			{
-				// There is only this one slice to read
-				limit = endIndex & DocumentsWriter.BYTE_BLOCK_MASK;
-			}
-			else
-				limit = upto + firstSize - 4;
-		}
-		
-		public bool Eof()
-		{
-			System.Diagnostics.Debug.Assert(upto + bufferOffset <= endIndex);
-			return upto + bufferOffset == endIndex;
-		}
-		
-		public override byte ReadByte()
-		{
-			System.Diagnostics.Debug.Assert(!Eof());
-			System.Diagnostics.Debug.Assert(upto <= limit);
-			if (upto == limit)
-				NextSlice();
-			return buffer[upto++];
-		}
-		
-		public long WriteTo(IndexOutput @out)
-		{
-			long size = 0;
-			while (true)
-			{
-				if (limit + bufferOffset == endIndex)
-				{
-					System.Diagnostics.Debug.Assert(endIndex - bufferOffset >= upto);
-					@out.WriteBytes(buffer, upto, limit - upto);
-					size += limit - upto;
-					break;
-				}
-				else
-				{
-					@out.WriteBytes(buffer, upto, limit - upto);
-					size += limit - upto;
-					NextSlice();
-				}
-			}
-			
-			return size;
-		}
-		
-		public void  NextSlice()
-		{
-			
-			// Skip to our next slice
-			int nextIndex = ((buffer[limit] & 0xff) << 24) + ((buffer[1 + limit] & 0xff) << 16) + ((buffer[2 + limit] & 0xff) << 8) + (buffer[3 + limit] & 0xff);
-			
-			level = ByteBlockPool.nextLevelArray[level];
-			int newSize = ByteBlockPool.levelSizeArray[level];
-			
-			bufferUpto = nextIndex / DocumentsWriter.BYTE_BLOCK_SIZE;
-			bufferOffset = bufferUpto * DocumentsWriter.BYTE_BLOCK_SIZE;
-			
-			buffer = pool.buffers[bufferUpto];
-			upto = nextIndex & DocumentsWriter.BYTE_BLOCK_MASK;
-			
-			if (nextIndex + newSize >= endIndex)
-			{
-				// We are advancing to the final slice
-				System.Diagnostics.Debug.Assert(endIndex - nextIndex > 0);
-				limit = endIndex - bufferOffset;
-			}
-			else
-			{
-				// This is not the final slice (subtract 4 for the
-				// forwarding address at the end of this new slice)
-				limit = upto + newSize - 4;
-			}
-		}
-		
-		public override void  ReadBytes(byte[] b, int offset, int len)
-		{
-			while (len > 0)
-			{
-				int numLeft = limit - upto;
-				if (numLeft < len)
-				{
-					// Read entire slice
-					Array.Copy(buffer, upto, b, offset, numLeft);
-					offset += numLeft;
-					len -= numLeft;
-					NextSlice();
-				}
-				else
-				{
-					// This slice is the last one
-					Array.Copy(buffer, upto, b, offset, len);
-					upto += len;
-					break;
-				}
-			}
-		}
+    
+    /* IndexInput that knows how to read the byte slices written
+    * by Posting and PostingVector.  We read the bytes in
+    * each slice until we hit the end of that slice at which
+    * point we read the forwarding address of the next slice
+    * and then jump to it.*/
+    public sealed class ByteSliceReader : IndexInput
+    {
+        internal ByteBlockPool pool;
+        internal int bufferUpto;
+        internal byte[] buffer;
+        public int upto;
+        internal int limit;
+        internal int level;
+        public int bufferOffset;
+        
+        public int endIndex;
+        
+        public void  Init(ByteBlockPool pool, int startIndex, int endIndex)
+        {
+            
+            System.Diagnostics.Debug.Assert(endIndex - startIndex >= 0);
+            System.Diagnostics.Debug.Assert(startIndex >= 0);
+            System.Diagnostics.Debug.Assert(endIndex >= 0);
+            
+            this.pool = pool;
+            this.endIndex = endIndex;
+            
+            level = 0;
+            bufferUpto = startIndex / DocumentsWriter.BYTE_BLOCK_SIZE;
+            bufferOffset = bufferUpto * DocumentsWriter.BYTE_BLOCK_SIZE;
+            buffer = pool.buffers[bufferUpto];
+            upto = startIndex & DocumentsWriter.BYTE_BLOCK_MASK;
+            
+            int firstSize = ByteBlockPool.levelSizeArray[0];
+            
+            if (startIndex + firstSize >= endIndex)
+            {
+                // There is only this one slice to read
+                limit = endIndex & DocumentsWriter.BYTE_BLOCK_MASK;
+            }
+            else
+                limit = upto + firstSize - 4;
+        }
+        
+        public bool Eof()
+        {
+            System.Diagnostics.Debug.Assert(upto + bufferOffset <= endIndex);
+            return upto + bufferOffset == endIndex;
+        }
+        
+        public override byte ReadByte()
+        {
+            System.Diagnostics.Debug.Assert(!Eof());
+            System.Diagnostics.Debug.Assert(upto <= limit);
+            if (upto == limit)
+                NextSlice();
+            return buffer[upto++];
+        }
+        
+        public long WriteTo(IndexOutput @out)
+        {
+            long size = 0;
+            while (true)
+            {
+                if (limit + bufferOffset == endIndex)
+                {
+                    System.Diagnostics.Debug.Assert(endIndex - bufferOffset >= upto);
+                    @out.WriteBytes(buffer, upto, limit - upto);
+                    size += limit - upto;
+                    break;
+                }
+                else
+                {
+                    @out.WriteBytes(buffer, upto, limit - upto);
+                    size += limit - upto;
+                    NextSlice();
+                }
+            }
+            
+            return size;
+        }
+        
+        public void  NextSlice()
+        {
+            
+            // Skip to our next slice
+            int nextIndex = ((buffer[limit] & 0xff) << 24) + ((buffer[1 + limit] & 0xff) << 16) + ((buffer[2 + limit] & 0xff) << 8) + (buffer[3 + limit] & 0xff);
+            
+            level = ByteBlockPool.nextLevelArray[level];
+            int newSize = ByteBlockPool.levelSizeArray[level];
+            
+            bufferUpto = nextIndex / DocumentsWriter.BYTE_BLOCK_SIZE;
+            bufferOffset = bufferUpto * DocumentsWriter.BYTE_BLOCK_SIZE;
+            
+            buffer = pool.buffers[bufferUpto];
+            upto = nextIndex & DocumentsWriter.BYTE_BLOCK_MASK;
+            
+            if (nextIndex + newSize >= endIndex)
+            {
+                // We are advancing to the final slice
+                System.Diagnostics.Debug.Assert(endIndex - nextIndex > 0);
+                limit = endIndex - bufferOffset;
+            }
+            else
+            {
+                // This is not the final slice (subtract 4 for the
+                // forwarding address at the end of this new slice)
+                limit = upto + newSize - 4;
+            }
+        }
+        
+        public override void  ReadBytes(byte[] b, int offset, int len)
+        {
+            while (len > 0)
+            {
+                int numLeft = limit - upto;
+                if (numLeft < len)
+                {
+                    // Read entire slice
+                    Array.Copy(buffer, upto, b, offset, numLeft);
+                    offset += numLeft;
+                    len -= numLeft;
+                    NextSlice();
+                }
+                else
+                {
+                    // This slice is the last one
+                    Array.Copy(buffer, upto, b, offset, len);
+                    upto += len;
+                    break;
+                }
+            }
+        }
 
-	    public override long FilePointer
-	    {
-			get { throw new NotImplementedException(); }
-	    }
+        public override long FilePointer
+        {
+            get { throw new NotImplementedException(); }
+        }
 
-	    public override long Length()
-		{
-			throw new NotImplementedException();
-		}
-		public override void  Seek(long pos)
-		{
-			throw new NotImplementedException();
-		}
+        public override long Length()
+        {
+            throw new NotImplementedException();
+        }
+        public override void  Seek(long pos)
+        {
+            throw new NotImplementedException();
+        }
 
         protected override void Dispose(bool disposing)
         {
             // Do nothing...
         }
-		
-		override public Object Clone()
-		{
+        
+        override public Object Clone()
+        {
             System.Diagnostics.Debug.Fail("Port issue:", "Let see if we need this ByteSliceReader.Clone()"); // {{Aroush-2.9}}
-			return null;
-		}
-	}
+            return null;
+        }
+    }
 }
\ No newline at end of file
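
Worth noting for readers of NextSlice() above: every non-final slice ends in a 4-byte, big-endian forwarding address pointing at the next slice in the shared pool, which is why every non-final slice's limit stops 4 bytes short of its end. A self-contained sketch of just that decoding step (the buffer contents below are made up for illustration):

    using System;

    class ForwardingAddressSketch
    {
        // Decodes the big-endian next-slice address at buffer[limit],
        // mirroring the first line of ByteSliceReader.NextSlice().
        static int ReadForwardingAddress(byte[] buffer, int limit)
        {
            return ((buffer[limit] & 0xff) << 24)
                 + ((buffer[limit + 1] & 0xff) << 16)
                 + ((buffer[limit + 2] & 0xff) << 8)
                 + (buffer[limit + 3] & 0xff);
        }

        static void Main()
        {
            var buffer = new byte[4];
            // Pretend this slice forwards to pool address 0x00012345.
            buffer[0] = 0x00; buffer[1] = 0x01; buffer[2] = 0x23; buffer[3] = 0x45;
            int address = ReadForwardingAddress(buffer, 0);
            Console.WriteLine("next slice at 0x{0:X}", address);
            // The reader then splits the address into a block and an offset:
            // bufferUpto = address / BYTE_BLOCK_SIZE, upto = address & BYTE_BLOCK_MASK.
        }
    }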

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/ByteSliceWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/ByteSliceWriter.cs b/src/core/Index/ByteSliceWriter.cs
index 86bbca0..0d9ecd5 100644
--- a/src/core/Index/ByteSliceWriter.cs
+++ b/src/core/Index/ByteSliceWriter.cs
@@ -19,79 +19,79 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Index
 {
-	/// <summary> Class to write byte streams into slices of shared
-	/// byte[].  This is used by DocumentsWriter to hold the
-	/// posting list for many terms in RAM.
-	/// </summary>
-	public sealed class ByteSliceWriter
-	{
-		private byte[] slice;
-		private int upto;
-		private readonly ByteBlockPool pool;
-		
-		internal int offset0;
-		
-		public ByteSliceWriter(ByteBlockPool pool)
-		{
-			this.pool = pool;
-		}
-		
-		/// <summary> Set up the writer to write at address.</summary>
-		public void  Init(int address)
-		{
-			slice = pool.buffers[address >> DocumentsWriter.BYTE_BLOCK_SHIFT];
-			System.Diagnostics.Debug.Assert(slice != null);
-			upto = address & DocumentsWriter.BYTE_BLOCK_MASK;
-			offset0 = address;
-			System.Diagnostics.Debug.Assert(upto < slice.Length);
-		}
-		
-		/// <summary>Write byte into byte slice stream </summary>
-		public void  WriteByte(byte b)
-		{
-			System.Diagnostics.Debug.Assert(slice != null);
-			if (slice[upto] != 0)
-			{
-				upto = pool.AllocSlice(slice, upto);
-				slice = pool.buffer;
-				offset0 = pool.byteOffset;
-				System.Diagnostics.Debug.Assert(slice != null);
-			}
-			slice[upto++] = b;
-			System.Diagnostics.Debug.Assert(upto != slice.Length);
-		}
-		
-		public void  WriteBytes(byte[] b, int offset, int len)
-		{
-			int offsetEnd = offset + len;
-			while (offset < offsetEnd)
-			{
-				if (slice[upto] != 0)
-				{
-					// End marker
-					upto = pool.AllocSlice(slice, upto);
-					slice = pool.buffer;
-					offset0 = pool.byteOffset;
-				}
-				
-				slice[upto++] = b[offset++];
-				System.Diagnostics.Debug.Assert(upto != slice.Length);
-			}
-		}
+    /// <summary> Class to write byte streams into slices of shared
+    /// byte[].  This is used by DocumentsWriter to hold the
+    /// posting list for many terms in RAM.
+    /// </summary>
+    public sealed class ByteSliceWriter
+    {
+        private byte[] slice;
+        private int upto;
+        private readonly ByteBlockPool pool;
+        
+        internal int offset0;
+        
+        public ByteSliceWriter(ByteBlockPool pool)
+        {
+            this.pool = pool;
+        }
+        
+        /// <summary> Set up the writer to write at address.</summary>
+        public void  Init(int address)
+        {
+            slice = pool.buffers[address >> DocumentsWriter.BYTE_BLOCK_SHIFT];
+            System.Diagnostics.Debug.Assert(slice != null);
+            upto = address & DocumentsWriter.BYTE_BLOCK_MASK;
+            offset0 = address;
+            System.Diagnostics.Debug.Assert(upto < slice.Length);
+        }
+        
+        /// <summary>Write byte into byte slice stream </summary>
+        public void  WriteByte(byte b)
+        {
+            System.Diagnostics.Debug.Assert(slice != null);
+            if (slice[upto] != 0)
+            {
+                upto = pool.AllocSlice(slice, upto);
+                slice = pool.buffer;
+                offset0 = pool.byteOffset;
+                System.Diagnostics.Debug.Assert(slice != null);
+            }
+            slice[upto++] = b;
+            System.Diagnostics.Debug.Assert(upto != slice.Length);
+        }
+        
+        public void  WriteBytes(byte[] b, int offset, int len)
+        {
+            int offsetEnd = offset + len;
+            while (offset < offsetEnd)
+            {
+                if (slice[upto] != 0)
+                {
+                    // End marker
+                    upto = pool.AllocSlice(slice, upto);
+                    slice = pool.buffer;
+                    offset0 = pool.byteOffset;
+                }
+                
+                slice[upto++] = b[offset++];
+                System.Diagnostics.Debug.Assert(upto != slice.Length);
+            }
+        }
 
-	    public int Address
-	    {
-	        get { return upto + (offset0 & DocumentsWriter.BYTE_BLOCK_NOT_MASK); }
-	    }
+        public int Address
+        {
+            get { return upto + (offset0 & DocumentsWriter.BYTE_BLOCK_NOT_MASK); }
+        }
 
-	    public void  WriteVInt(int i)
-		{
-			while ((i & ~ 0x7F) != 0)
-			{
-				WriteByte((byte) ((i & 0x7f) | 0x80));
-				i = Number.URShift(i, 7);
-			}
-			WriteByte((byte) i);
-		}
-	}
+        public void  WriteVInt(int i)
+        {
+            while ((i & ~ 0x7F) != 0)
+            {
+                WriteByte((byte) ((i & 0x7f) | 0x80));
+                i = Number.URShift(i, 7);
+            }
+            WriteByte((byte) i);
+        }
+    }
 }
\ No newline at end of file
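
WriteVInt above is Lucene's standard variable-length int encoding: seven payload bits per byte, low-order group first, with the high bit set on every byte except the last. A standalone round-trip sketch of the same scheme (a plain List<byte> stands in for the slice pool):

    using System;
    using System.Collections.Generic;

    class VIntSketch
    {
        // Same loop as ByteSliceWriter.WriteVInt, writing into a list.
        static List<byte> EncodeVInt(int i)
        {
            var bytes = new List<byte>();
            while ((i & ~0x7F) != 0)
            {
                bytes.Add((byte)((i & 0x7f) | 0x80)); // continuation bit set
                i = (int)((uint)i >> 7);              // unsigned shift, as Number.URShift does
            }
            bytes.Add((byte)i);                       // final byte, high bit clear
            return bytes;
        }

        static int DecodeVInt(IList<byte> bytes)
        {
            int value = 0, shift = 0;
            foreach (byte b in bytes)
            {
                value |= (b & 0x7f) << shift;
                if ((b & 0x80) == 0) break;           // last byte reached
                shift += 7;
            }
            return value;
        }

        static void Main()
        {
            int n = 300;                              // encodes as 0xAC 0x02
            var encoded = EncodeVInt(n);
            Console.WriteLine("{0} -> {1} byte(s) -> {2}",
                              n, encoded.Count, DecodeVInt(encoded));
        }
    }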

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/CharBlockPool.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/CharBlockPool.cs b/src/core/Index/CharBlockPool.cs
index 0631fe0..9a35743 100644
--- a/src/core/Index/CharBlockPool.cs
+++ b/src/core/Index/CharBlockPool.cs
@@ -19,51 +19,51 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	sealed class CharBlockPool
-	{
-		private void  InitBlock()
-		{
-			charUpto = DocumentsWriter.CHAR_BLOCK_SIZE;
-		}
-		
-		public char[][] buffers = new char[10][];
-		internal int numBuffer;
-		
-		internal int bufferUpto = - 1; // Which buffer we are upto
-		public int charUpto; // Where we are in head buffer
-		
-		public char[] buffer; // Current head buffer
-		public int charOffset = - DocumentsWriter.CHAR_BLOCK_SIZE; // Current head offset
-		private readonly DocumentsWriter docWriter;
-		
-		public CharBlockPool(DocumentsWriter docWriter)
-		{
-			InitBlock();
-			this.docWriter = docWriter;
-		}
-		
-		public void  Reset()
-		{
-			docWriter.RecycleCharBlocks(buffers, 1 + bufferUpto);
-			bufferUpto = - 1;
-			charUpto = DocumentsWriter.CHAR_BLOCK_SIZE;
-			charOffset = - DocumentsWriter.CHAR_BLOCK_SIZE;
-		}
-		
-		public void  NextBuffer()
-		{
-			if (1 + bufferUpto == buffers.Length)
-			{
-				var newBuffers = new char[(int) (buffers.Length * 1.5)][];
-				Array.Copy(buffers, 0, newBuffers, 0, buffers.Length);
-				buffers = newBuffers;
-			}
-			buffer = buffers[1 + bufferUpto] = docWriter.GetCharBlock();
-			bufferUpto++;
-			
-			charUpto = 0;
-			charOffset += DocumentsWriter.CHAR_BLOCK_SIZE;
-		}
-	}
+    
+    sealed class CharBlockPool
+    {
+        private void  InitBlock()
+        {
+            charUpto = DocumentsWriter.CHAR_BLOCK_SIZE;
+        }
+        
+        public char[][] buffers = new char[10][];
+        internal int numBuffer;
+        
+        internal int bufferUpto = - 1; // Which buffer we are upto
+        public int charUpto; // Where we are in head buffer
+        
+        public char[] buffer; // Current head buffer
+        public int charOffset = - DocumentsWriter.CHAR_BLOCK_SIZE; // Current head offset
+        private readonly DocumentsWriter docWriter;
+        
+        public CharBlockPool(DocumentsWriter docWriter)
+        {
+            InitBlock();
+            this.docWriter = docWriter;
+        }
+        
+        public void  Reset()
+        {
+            docWriter.RecycleCharBlocks(buffers, 1 + bufferUpto);
+            bufferUpto = - 1;
+            charUpto = DocumentsWriter.CHAR_BLOCK_SIZE;
+            charOffset = - DocumentsWriter.CHAR_BLOCK_SIZE;
+        }
+        
+        public void  NextBuffer()
+        {
+            if (1 + bufferUpto == buffers.Length)
+            {
+                var newBuffers = new char[(int) (buffers.Length * 1.5)][];
+                Array.Copy(buffers, 0, newBuffers, 0, buffers.Length);
+                buffers = newBuffers;
+            }
+            buffer = buffers[1 + bufferUpto] = docWriter.GetCharBlock();
+            bufferUpto++;
+            
+            charUpto = 0;
+            charOffset += DocumentsWriter.CHAR_BLOCK_SIZE;
+        }
+    }
 }
\ No newline at end of file
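
The resize in NextBuffer() above is a plain amortized-growth step: when the buffers array is full, allocate a new one 1.5x as long and copy the old references across. A tiny generic sketch of that step on its own (the initial capacity of 10 matches the field above):

    using System;

    class PoolGrowthSketch
    {
        // Same policy as CharBlockPool.NextBuffer: new length = old length * 1.5.
        static T[][] Grow<T>(T[][] buffers)
        {
            var newBuffers = new T[(int)(buffers.Length * 1.5)][];
            Array.Copy(buffers, 0, newBuffers, 0, buffers.Length);
            return newBuffers;
        }

        static void Main()
        {
            var buffers = new char[10][];
            buffers = Grow(buffers);
            Console.WriteLine(buffers.Length); // 15
        }
    }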


[51/51] [partial] git commit: Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
Mass convert mixed tabs to spaces


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/62f018ab
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/62f018ab
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/62f018ab

Branch: refs/heads/master
Commit: 62f018abd9bba75e3d1c3f1a825a71d460d2d329
Parents: 1d08bae
Author: ccurrens <cu...@gmail.com>
Authored: Wed Apr 3 10:32:44 2013 -0700
Committer: ccurrens <cu...@gmail.com>
Committed: Wed Apr 3 10:32:44 2013 -0700

----------------------------------------------------------------------
 src/contrib/Analyzers/AR/ArabicAnalyzer.cs         |    4 +-
 src/contrib/Analyzers/AR/ArabicLetterTokenizer.cs  |    2 +-
 .../Analyzers/AR/ArabicNormalizationFilter.cs      |    2 +-
 src/contrib/Analyzers/AR/ArabicNormalizer.cs       |    2 +-
 src/contrib/Analyzers/AR/ArabicStemFilter.cs       |    2 +-
 src/contrib/Analyzers/AR/ArabicStemmer.cs          |    2 +-
 src/contrib/Analyzers/BR/BrazilianAnalyzer.cs      |    2 +-
 .../Compound/CompoundWordTokenFilterBase.cs        |    2 +-
 .../Analyzers/Compound/Hyphenation/ByteVector.cs   |    2 +-
 .../Analyzers/Compound/Hyphenation/CharVector.cs   |    2 +-
 .../Analyzers/Compound/Hyphenation/Hyphen.cs       |    2 +-
 .../Analyzers/Compound/Hyphenation/Hyphenation.cs  |    2 +-
 .../Compound/Hyphenation/HyphenationException.cs   |    2 +-
 .../Compound/Hyphenation/HyphenationTree.cs        |    2 +-
 .../Compound/Hyphenation/PatternConsumer.cs        |    2 +-
 .../Compound/Hyphenation/PatternParser.cs          |    2 +-
 .../Analyzers/Compound/Hyphenation/TernaryTree.cs  |    2 +-
 .../Compound/HyphenationCompoundWordTokenFilter.cs |    2 +-
 src/contrib/Analyzers/Cz/CzechAnalyzer.cs          |  120 +-
 src/contrib/Analyzers/De/GermanAnalyzer.cs         |   30 +-
 src/contrib/Analyzers/De/GermanStemmer.cs          |  490 +-
 src/contrib/Analyzers/El/GreekAnalyzer.cs          |    2 +-
 src/contrib/Analyzers/Filters/ChainedFilter.cs     |    2 +-
 src/contrib/Analyzers/Fr/ElisionFilter.cs          |    2 +-
 src/contrib/Analyzers/Fr/FrenchAnalyzer.cs         |    2 +-
 src/contrib/Analyzers/Fr/FrenchStemmer.cs          | 1212 +-
 src/contrib/Analyzers/Hunspell/HunspellStem.cs     |    2 +-
 .../Analyzers/Miscellaneous/EmptyTokenStream.cs    |    2 +-
 .../InjectablePrefixAwareTokenFilter.cs            |    2 +-
 .../PrefixAndSuffixAwareTokenFilter.cs             |    2 +-
 .../Miscellaneous/PrefixAwareTokenStream.cs        |    2 +-
 .../Miscellaneous/SingleTokenTokenStream.cs        |    2 +-
 .../Analyzers/NGram/EdgeNGramTokenFilter.cs        |    2 +-
 src/contrib/Analyzers/NGram/EdgeNGramTokenizer.cs  |    2 +-
 src/contrib/Analyzers/NGram/NGramTokenFilter.cs    |    2 +-
 src/contrib/Analyzers/NGram/NGramTokenizer.cs      |    2 +-
 src/contrib/Analyzers/Payloads/AbstractEncoder.cs  |    2 +-
 .../Payloads/DelimitedPayloadTokenFilter.cs        |    2 +-
 src/contrib/Analyzers/Payloads/FloatEncoder.cs     |    2 +-
 src/contrib/Analyzers/Payloads/IdentityEncoder.cs  |    2 +-
 src/contrib/Analyzers/Payloads/IntegerEncoder.cs   |    2 +-
 src/contrib/Analyzers/Payloads/PayloadEncoder.cs   |    2 +-
 src/contrib/Analyzers/Payloads/PayloadHelper.cs    |    2 +-
 src/contrib/Analyzers/Properties/AssemblyInfo.cs   |    2 +-
 .../Analyzers/Query/QueryAutoStopWordAnalyzer.cs   |    2 +-
 src/contrib/Analyzers/Ru/RussianAnalyzer.cs        |    2 +-
 .../OneDimensionalNonWeightedTokenSettingsCodec.cs |    2 +-
 .../SimpleThreeDimensionalTokenSettingsCodec.cs    |    2 +-
 .../Analyzers/Shingle/Codec/TokenSettingsCodec.cs  |    2 +-
 ...ensionalNonWeightedSynonymTokenSettingsCodec.cs |    2 +-
 src/contrib/Analyzers/Shingle/Matrix/Column.cs     |    2 +-
 src/contrib/Analyzers/Shingle/Matrix/Matrix.cs     |    2 +-
 .../Shingle/Matrix/MatrixPermutationIterator.cs    |    2 +-
 src/contrib/Analyzers/Shingle/Matrix/Row.cs        |    2 +-
 .../Analyzers/Shingle/ShingleAnalyzerWrapper.cs    |    2 +-
 src/contrib/Analyzers/Shingle/ShingleFilter.cs     |    2 +-
 .../Analyzers/Shingle/ShingleMatrixFilter.cs       |    2 +-
 src/contrib/Analyzers/Shingle/TokenPositioner.cs   |    2 +-
 src/contrib/Analyzers/WordlistLoader.cs            |    6 +-
 .../Distributed/Configuration/CurrentIndex.cs      |  946 +-
 .../Configuration/DistributedSearcher.cs           |   32 +-
 .../DistributedSearcherConfigurationHandler.cs     |   24 +-
 .../Configuration/DistributedSearchers.cs          |   44 +-
 .../Distributed/Configuration/LuceneServerIndex.cs |   98 +-
 .../LuceneServerIndexConfigurationHandler.cs       |   30 +-
 .../Configuration/LuceneServerIndexes.cs           |   60 +-
 .../DistributedSearch/Distributed/Enumerations.cs  |   40 +-
 .../Distributed/Indexing/DeleteIndexDocument.cs    |   16 +-
 .../Distributed/Indexing/FileNameComparer.cs       |   38 +-
 .../Distributed/Indexing/IndexDocument.cs          |  170 +-
 .../Distributed/Indexing/IndexSet.cs               |  226 +-
 .../Indexing/IndexSetConfigurationHandler.cs       |   20 +-
 .../Distributed/Indexing/IndexSets.cs              |  276 +-
 .../Distributed/Search/DistributedSearchable.cs    |   26 +-
 .../LuceneMonitor/LuceneMonitor.cs                 |  272 +-
 .../LuceneMonitor/ProjectInstaller.cs              |  228 +-
 .../Properties/AssemblyInfo.cs                     |    2 +-
 src/contrib/FastVectorHighlighter/StringUtils.cs   |    2 +-
 .../FastVectorHighlighter/VectorHighlightMapper.cs |    2 +-
 src/contrib/Highlighter/DefaultEncoder.cs          |   16 +-
 src/contrib/Highlighter/GradientFormatter.cs       |  350 +-
 src/contrib/Highlighter/Highlighter.cs             |   12 +-
 src/contrib/Highlighter/IEncoder.cs                |   12 +-
 src/contrib/Highlighter/IFormatter.cs              |   18 +-
 src/contrib/Highlighter/NullFragmenter.cs          |   24 +-
 src/contrib/Highlighter/QueryTermExtractor.cs      |  148 +-
 src/contrib/Highlighter/SimpleHTMLFormatter.cs     |   86 +-
 src/contrib/Highlighter/TokenSources.cs            |    4 +-
 src/contrib/Highlighter/WeightedSpanTerm.cs        |    2 +-
 src/contrib/Queries/BooleanFilter.cs               |    2 +-
 src/contrib/Queries/BoostingQuery.cs               |    2 +-
 src/contrib/Queries/DuplicateFilter.cs             |    2 +-
 src/contrib/Queries/FilterClause.cs                |    2 +-
 src/contrib/Queries/FuzzyLikeThisQuery.cs          |    2 +-
 src/contrib/Queries/Properties/AssemblyInfo.cs     |    2 +-
 src/contrib/Queries/Similar/MoreLikeThis.cs        |   46 +-
 src/contrib/Queries/Similar/MoreLikeThisQuery.cs   |    2 +-
 src/contrib/Queries/Similar/SimilarityQueries.cs   |    2 +-
 src/contrib/Queries/TermsFilter.cs                 |    2 +-
 src/contrib/Regex/CSharpRegexCapabilities.cs       |  124 +-
 src/contrib/Regex/IRegexCapabilities.cs            |   54 +-
 src/contrib/Regex/IRegexQueryCapable.cs            |   18 +-
 src/contrib/Regex/Properties/AssemblyInfo.cs       |    2 +-
 src/contrib/Regex/RegexQuery.cs                    |   96 +-
 src/contrib/Regex/RegexTermEnum.cs                 |  106 +-
 src/contrib/Regex/SpanRegexQuery.cs                |  236 +-
 src/contrib/SimpleFacetedSearch/Extensions.cs      |    2 +-
 src/contrib/SimpleFacetedSearch/FacetName.cs       |    2 +-
 .../SimpleFacetedSearch/FieldValuesBitSets.cs      |    2 +-
 src/contrib/SimpleFacetedSearch/Hits.cs            |    2 +-
 src/contrib/SimpleFacetedSearch/HitsPerFacet.cs    |    2 +-
 .../SimpleFacetedSearch/Properties/AssemblyInfo.cs |    2 +-
 .../SimpleFacetedSearch/SimpleFacetedSearch.cs     |    2 +-
 .../Lucene.Net/Analysis/Snowball/SnowballFilter.cs |   86 +-
 src/contrib/Snowball/SF/Snowball/Among.cs          |   72 +-
 .../Snowball/SF/Snowball/Ext/DanishStemmer.cs      |  790 +-
 .../Snowball/SF/Snowball/Ext/DutchStemmer.cs       | 1830 ++--
 .../Snowball/SF/Snowball/Ext/EnglishStemmer.cs     | 2648 ++--
 .../Snowball/SF/Snowball/Ext/FinnishStemmer.cs     | 2114 ++--
 .../Snowball/SF/Snowball/Ext/FrenchStemmer.cs      | 3120 +++---
 .../Snowball/SF/Snowball/Ext/German2Stemmer.cs     | 1572 ++--
 .../Snowball/SF/Snowball/Ext/GermanStemmer.cs      | 1508 ++--
 .../Snowball/SF/Snowball/Ext/HungarianStemmer.cs   |  392 +-
 .../Snowball/SF/Snowball/Ext/ItalianStemmer.cs     | 2268 ++--
 src/contrib/Snowball/SF/Snowball/Ext/KpStemmer.cs  | 4814 ++++----
 .../Snowball/SF/Snowball/Ext/LovinsStemmer.cs      | 3594 +++---
 .../Snowball/SF/Snowball/Ext/NorwegianStemmer.cs   |  620 +-
 .../Snowball/SF/Snowball/Ext/PorterStemmer.cs      | 1952 ++--
 .../Snowball/SF/Snowball/Ext/PortugueseStemmer.cs  |  406 +-
 .../Snowball/SF/Snowball/Ext/RomanianStemmer.cs    |  464 +-
 .../Snowball/SF/Snowball/Ext/RussianStemmer.cs     | 1390 +-
 .../Snowball/SF/Snowball/Ext/SpanishStemmer.cs     | 2256 ++--
 .../Snowball/SF/Snowball/Ext/SwedishStemmer.cs     |  628 +-
 .../Snowball/SF/Snowball/Ext/TurkishStemmer.cs     |  294 +-
 .../Snowball/SF/Snowball/SnowballProgram.cs        |  944 +-
 src/contrib/Snowball/SF/Snowball/TestApp.cs        |  150 +-
 src/contrib/Spatial/BBox/AreaSimilarity.cs         |  368 +-
 src/contrib/Spatial/BBox/BBoxSimilarity.cs         |   10 +-
 .../Spatial/BBox/BBoxSimilarityValueSource.cs      |  134 +-
 src/contrib/Spatial/BBox/BBoxStrategy.cs           |  700 +-
 src/contrib/Spatial/BBox/DistanceSimilarity.cs     |    2 +-
 .../Prefix/PointPrefixTreeFieldCacheProvider.cs    |   34 +-
 src/contrib/Spatial/Prefix/PrefixTreeStrategy.cs   |  192 +-
 .../Spatial/Prefix/RecursivePrefixTreeFilter.cs    |  286 +-
 .../Spatial/Prefix/RecursivePrefixTreeStrategy.cs  |   64 +-
 .../Spatial/Prefix/TermQueryPrefixTreeStrategy.cs  |   50 +-
 .../Spatial/Prefix/Tree/GeohashPrefixTree.cs       |    2 +-
 src/contrib/Spatial/Prefix/Tree/Node.cs            |  378 +-
 src/contrib/Spatial/Prefix/Tree/QuadPrefixTree.cs  |  562 +-
 .../Spatial/Prefix/Tree/SpatialPrefixTree.cs       |  434 +-
 .../Prefix/Tree/SpatialPrefixTreeFactory.cs        |   82 +-
 src/contrib/Spatial/Properties/AssemblyInfo.cs     |    2 +-
 src/contrib/Spatial/Queries/SpatialArgs.cs         |  106 +-
 src/contrib/Spatial/Queries/SpatialArgsParser.cs   |  132 +-
 src/contrib/Spatial/Queries/SpatialOperation.cs    |  168 +-
 .../Spatial/Queries/UnsupportedSpatialOperation.cs |   16 +-
 src/contrib/Spatial/SpatialStrategy.cs             |  184 +-
 src/contrib/Spatial/Util/Bits.cs                   |  124 +-
 .../Spatial/Util/CachingDoubleValueSource.cs       |  156 +-
 .../Spatial/Util/CompatibilityExtensions.cs        |  322 +-
 src/contrib/Spatial/Util/FixedBitSet.cs            |  844 +-
 src/contrib/Spatial/Util/FunctionQuery.cs          |  380 +-
 .../Spatial/Util/ReciprocalFloatFunction.cs        |    2 +-
 src/contrib/Spatial/Util/ShapeFieldCache.cs        |   48 +-
 .../Util/ShapeFieldCacheDistanceValueSource.cs     |  112 +-
 .../Spatial/Util/ShapeFieldCacheProvider.cs        |  110 +-
 src/contrib/Spatial/Util/TermsEnumCompatibility.cs |  200 +-
 src/contrib/Spatial/Util/TermsFilter.cs            |  164 +-
 src/contrib/Spatial/Util/ValueSourceFilter.cs      |   48 +-
 src/contrib/Spatial/Vector/DistanceValueSource.cs  |  136 +-
 src/contrib/Spatial/Vector/PointVectorStrategy.cs  |  310 +-
 src/contrib/SpellChecker/Spell/IDictionary.cs      |    2 +-
 src/contrib/SpellChecker/Spell/LuceneDictionary.cs |    8 +-
 .../SpellChecker/Spell/PlainTextDictionary.cs      |   10 +-
 src/contrib/SpellChecker/Spell/SpellChecker.cs     |    6 +-
 src/contrib/SpellChecker/Spell/SuggestWord.cs      |   14 +-
 src/contrib/SpellChecker/Spell/SuggestWordQueue.cs |    4 +-
 src/contrib/SpellChecker/Spell/TRStringDistance.cs |   34 +-
 src/contrib/WordNet/SynExpand/SynExpand.cs         |   74 +-
 src/contrib/WordNet/SynLookup/SynLookup.cs         |  188 +-
 src/contrib/WordNet/Syns2Index/Syns2Index.cs       |  510 +-
 src/core/Analysis/ASCIIFoldingFilter.cs            | 6494 +++++-----
 src/core/Analysis/Analyzer.cs                      |  214 +-
 src/core/Analysis/BaseCharFilter.cs                |    2 +-
 src/core/Analysis/CachingTokenFilter.cs            |  128 +-
 src/core/Analysis/CharArraySet.cs                  |    6 +-
 src/core/Analysis/CharFilter.cs                    |  100 +-
 src/core/Analysis/CharReader.cs                    |  104 +-
 src/core/Analysis/CharStream.cs                    |   44 +-
 src/core/Analysis/CharTokenizer.cs                 |  210 +-
 src/core/Analysis/ISOLatin1AccentFilter.cs         |  638 +-
 src/core/Analysis/KeywordAnalyzer.cs               |   66 +-
 src/core/Analysis/KeywordTokenizer.cs              |  148 +-
 src/core/Analysis/LengthFilter.cs                  |   72 +-
 src/core/Analysis/LetterTokenizer.cs               |   70 +-
 src/core/Analysis/LowerCaseFilter.cs               |   52 +-
 src/core/Analysis/LowerCaseTokenizer.cs            |   76 +-
 src/core/Analysis/MappingCharFilter.cs             |  286 +-
 src/core/Analysis/NormalizeCharMap.cs              |   92 +-
 src/core/Analysis/NumericTokenStream.cs            |  466 +-
 src/core/Analysis/PerFieldAnalyzerWrapper.cs       |  200 +-
 src/core/Analysis/PorterStemFilter.cs              |   78 +-
 src/core/Analysis/PorterStemmer.cs                 | 1394 +-
 src/core/Analysis/SimpleAnalyzer.cs                |   50 +-
 src/core/Analysis/Standard/StandardAnalyzer.cs     |  260 +-
 src/core/Analysis/Standard/StandardFilter.cs       |  122 +-
 src/core/Analysis/Standard/StandardTokenizer.cs    |  388 +-
 .../Analysis/Standard/StandardTokenizerImpl.cs     | 1300 +-
 src/core/Analysis/StopAnalyzer.cs                  |  200 +-
 src/core/Analysis/StopFilter.cs                    |  282 +-
 src/core/Analysis/TeeSinkTokenFilter.cs            |  458 +-
 src/core/Analysis/Token.cs                         | 1502 ++--
 src/core/Analysis/TokenFilter.cs                   |   66 +-
 src/core/Analysis/TokenStream.cs                   |  246 +-
 .../Analysis/Tokenattributes/FlagsAttribute.cs     |  114 +-
 .../Analysis/Tokenattributes/IFlagsAttribute.cs    |   34 +-
 .../Analysis/Tokenattributes/IOffsetAttribute.cs   |   38 +-
 .../Analysis/Tokenattributes/IPayloadAttribute.cs  |   14 +-
 .../Tokenattributes/IPositionIncrementAttribute.cs |   72 +-
 .../Analysis/Tokenattributes/ITermAttribute.cs     |  160 +-
 .../Analysis/Tokenattributes/ITypeAttribute.cs     |   14 +-
 .../Analysis/Tokenattributes/OffsetAttribute.cs    |  140 +-
 .../Analysis/Tokenattributes/PayloadAttribute.cs   |  138 +-
 .../Tokenattributes/PositionIncrementAttribute.cs  |  156 +-
 src/core/Analysis/Tokenattributes/TermAttribute.cs |  482 +-
 src/core/Analysis/Tokenattributes/TypeAttribute.cs |  114 +-
 src/core/Analysis/Tokenizer.cs                     |  140 +-
 src/core/Analysis/WhitespaceAnalyzer.cs            |   46 +-
 src/core/Analysis/WhitespaceTokenizer.cs           |   64 +-
 src/core/Analysis/WordlistLoader.cs                |  206 +-
 src/core/Document/AbstractField.cs                 |  528 +-
 src/core/Document/CompressionTools.cs              |  228 +-
 src/core/Document/DateField.cs                     |  178 +-
 src/core/Document/DateTools.cs                     |  436 +-
 src/core/Document/Document.cs                      |  668 +-
 src/core/Document/FieldSelector.cs                 |   26 +-
 src/core/Document/FieldSelectorResult.cs           |    2 +-
 src/core/Document/Fieldable.cs                     |  318 +-
 src/core/Document/LoadFirstFieldSelector.cs        |   28 +-
 src/core/Document/MapFieldSelector.cs              |   80 +-
 src/core/Document/NumberTools.cs                   |  244 +-
 src/core/Document/NumericField.cs                  |  506 +-
 src/core/Document/SetBasedFieldSelector.cs         |   88 +-
 src/core/Index/AllTermDocs.cs                      |   22 +-
 src/core/Index/BufferedDeletes.cs                  |  278 +-
 src/core/Index/ByteBlockPool.cs                    |  248 +-
 src/core/Index/ByteSliceReader.cs                  |  304 +-
 src/core/Index/ByteSliceWriter.cs                  |  146 +-
 src/core/Index/CharBlockPool.cs                    |   94 +-
 src/core/Index/CheckIndex.cs                       | 1924 ++--
 src/core/Index/CompoundFileReader.cs               |  484 +-
 src/core/Index/CompoundFileWriter.cs               |  288 +-
 src/core/Index/ConcurrentMergeScheduler.cs         |  892 +-
 src/core/Index/DefaultSkipListReader.cs            |  206 +-
 src/core/Index/DefaultSkipListWriter.cs            |  238 +-
 src/core/Index/DirectoryReader.cs                  |  164 +-
 src/core/Index/DocConsumer.cs                      |   18 +-
 src/core/Index/DocConsumerPerThread.cs             |   30 +-
 src/core/Index/DocFieldConsumer.cs                 |   64 +-
 src/core/Index/DocFieldConsumerPerField.cs         |   14 +-
 src/core/Index/DocFieldConsumerPerThread.cs        |   16 +-
 src/core/Index/DocFieldConsumers.cs                |  372 +-
 src/core/Index/DocFieldConsumersPerField.cs        |   66 +-
 src/core/Index/DocFieldConsumersPerThread.cs       |  120 +-
 src/core/Index/DocFieldProcessor.cs                |  130 +-
 src/core/Index/DocFieldProcessorPerField.cs        |   52 +-
 src/core/Index/DocFieldProcessorPerThread.cs       |  836 +-
 src/core/Index/DocInverter.cs                      |  128 +-
 src/core/Index/DocInverterPerField.cs              |  408 +-
 src/core/Index/DocInverterPerThread.cs             |  158 +-
 src/core/Index/DocumentsWriter.cs                  | 3644 +++---
 src/core/Index/DocumentsWriterThreadState.cs       |   68 +-
 src/core/Index/FieldInfo.cs                        |  166 +-
 src/core/Index/FieldInfos.cs                       |  862 +-
 src/core/Index/FieldInvertState.cs                 |  160 +-
 src/core/Index/FieldSortedTermVectorMapper.cs      |   88 +-
 src/core/Index/FieldsReader.cs                     | 1110 +-
 src/core/Index/FieldsWriter.cs                     |  508 +-
 src/core/Index/FilterIndexReader.cs                |  598 +-
 src/core/Index/FormatPostingsDocsConsumer.cs       |   28 +-
 src/core/Index/FormatPostingsDocsWriter.cs         |  200 +-
 src/core/Index/FormatPostingsFieldsConsumer.cs     |   34 +-
 src/core/Index/FormatPostingsFieldsWriter.cs       |   94 +-
 src/core/Index/FormatPostingsPositionsConsumer.cs  |   24 +-
 src/core/Index/FormatPostingsPositionsWriter.cs    |  140 +-
 src/core/Index/FormatPostingsTermsConsumer.cs      |   56 +-
 src/core/Index/FormatPostingsTermsWriter.cs        |  102 +-
 src/core/Index/FreqProxFieldMergeState.cs          |  188 +-
 src/core/Index/FreqProxTermsWriter.cs              |  544 +-
 src/core/Index/FreqProxTermsWriterPerField.cs      |  340 +-
 src/core/Index/FreqProxTermsWriterPerThread.cs     |   60 +-
 src/core/Index/IndexCommit.cs                      |  162 +-
 src/core/Index/IndexDeletionPolicy.cs              |  108 +-
 src/core/Index/IndexFileDeleter.cs                 | 1204 +-
 src/core/Index/IndexFileNameFilter.cs              |  158 +-
 src/core/Index/IndexFileNames.cs                   |  276 +-
 src/core/Index/IndexReader.cs                      | 2402 ++--
 src/core/Index/IndexWriter.cs                      |10286 +++++++-------
 src/core/Index/IntBlockPool.cs                     |  114 +-
 src/core/Index/InvertedDocConsumer.cs              |   58 +-
 src/core/Index/InvertedDocConsumerPerField.cs      |   46 +-
 src/core/Index/InvertedDocConsumerPerThread.cs     |   16 +-
 src/core/Index/InvertedDocEndConsumer.cs           |   16 +-
 src/core/Index/InvertedDocEndConsumerPerField.cs   |   12 +-
 src/core/Index/InvertedDocEndConsumerPerThread.cs  |   16 +-
 src/core/Index/KeepOnlyLastCommitDeletionPolicy.cs |   58 +-
 src/core/Index/LogByteSizeMergePolicy.cs           |  132 +-
 src/core/Index/LogDocMergePolicy.cs                |   84 +-
 src/core/Index/LogMergePolicy.cs                   |  982 +-
 src/core/Index/MergeDocIDRemapper.cs               |  208 +-
 src/core/Index/MergePolicy.cs                      |  562 +-
 src/core/Index/MergeScheduler.cs                   |   52 +-
 src/core/Index/MultiLevelSkipListReader.cs         |  546 +-
 src/core/Index/MultiLevelSkipListWriter.cs         |  292 +-
 src/core/Index/MultipleTermPositions.cs            |  410 +-
 src/core/Index/NormsWriter.cs                      |  344 +-
 src/core/Index/NormsWriterPerField.cs              |  130 +-
 src/core/Index/NormsWriterPerThread.cs             |   66 +-
 src/core/Index/ParallelReader.cs                   | 1454 +-
 src/core/Index/Payload.cs                          |  370 +-
 src/core/Index/PositionBasedTermVectorMapper.cs    |  294 +-
 src/core/Index/RawPostingList.cs                   |   48 +-
 src/core/Index/ReadOnlyDirectoryReader.cs          |   30 +-
 src/core/Index/ReadOnlySegmentReader.cs            |   40 +-
 src/core/Index/ReusableStringReader.cs             |   10 +-
 src/core/Index/SegmentInfo.cs                      | 1566 ++--
 src/core/Index/SegmentInfos.cs                     | 1898 ++--
 src/core/Index/SegmentMergeInfo.cs                 |  142 +-
 src/core/Index/SegmentMergeQueue.cs                |   36 +-
 src/core/Index/SegmentMerger.cs                    | 1748 ++--
 src/core/Index/SegmentReader.cs                    | 2794 ++--
 src/core/Index/SegmentTermDocs.cs                  |  460 +-
 src/core/Index/SegmentTermEnum.cs                  |  438 +-
 src/core/Index/SegmentTermPositionVector.cs        |  102 +-
 src/core/Index/SegmentTermPositions.cs             |  386 +-
 src/core/Index/SegmentTermVector.cs                |  150 +-
 src/core/Index/SegmentWriteState.cs                |   56 +-
 src/core/Index/SerialMergeScheduler.cs             |   56 +-
 src/core/Index/SnapshotDeletionPolicy.cs           |  312 +-
 src/core/Index/SortedTermVectorMapper.cs           |  200 +-
 src/core/Index/StoredFieldsWriter.cs               |  464 +-
 src/core/Index/StoredFieldsWriterPerThread.cs      |  138 +-
 src/core/Index/Term.cs                             |  256 +-
 src/core/Index/TermBuffer.cs                       |  276 +-
 src/core/Index/TermDocs.cs                         |  116 +-
 src/core/Index/TermEnum.cs                         |   40 +-
 src/core/Index/TermFreqVector.cs                   |   96 +-
 src/core/Index/TermInfo.cs                         |   94 +-
 src/core/Index/TermInfosReader.cs                  |  544 +-
 src/core/Index/TermInfosWriter.cs                  |  424 +-
 src/core/Index/TermPositionVector.cs               |   56 +-
 src/core/Index/TermPositions.cs                    |  104 +-
 src/core/Index/TermVectorEntry.cs                  |  144 +-
 .../Index/TermVectorEntryFreqSortedComparator.cs   |   42 +-
 src/core/Index/TermVectorMapper.cs                 |  174 +-
 src/core/Index/TermVectorOffsetInfo.cs             |  138 +-
 src/core/Index/TermVectorsReader.cs                | 1196 +-
 src/core/Index/TermVectorsTermsWriter.cs           |  666 +-
 src/core/Index/TermVectorsTermsWriterPerField.cs   |  496 +-
 src/core/Index/TermVectorsTermsWriterPerThread.cs  |  164 +-
 src/core/Index/TermVectorsWriter.cs                |  434 +-
 src/core/Index/TermsHash.cs                        |  416 +-
 src/core/Index/TermsHashConsumer.cs                |   34 +-
 src/core/Index/TermsHashConsumerPerField.cs        |   20 +-
 src/core/Index/TermsHashConsumerPerThread.cs       |   16 +-
 src/core/Index/TermsHashPerField.cs                | 1214 +-
 src/core/Index/TermsHashPerThread.cs               |  236 +-
 src/core/LZOCompressor.cs                          |  202 +-
 src/core/LucenePackage.cs                          |   18 +-
 src/core/Messages/INLSException.cs                 |   28 +-
 src/core/Messages/Message.cs                       |   24 +-
 src/core/Messages/MessageImpl.cs                   |  102 +-
 src/core/Messages/NLS.cs                           |  438 +-
 src/core/QueryParser/CharStream.cs                 |  178 +-
 src/core/QueryParser/FastCharStream.cs             |  254 +-
 src/core/QueryParser/ParseException.cs             |  422 +-
 src/core/QueryParser/QueryParserConstants.cs       |  168 +-
 src/core/QueryParser/QueryParserTokenManager.cs    | 2872 ++--
 src/core/QueryParser/Token.cs                      |  210 +-
 src/core/QueryParser/TokenMgrError.cs              |  294 +-
 src/core/Search/BooleanClause.cs                   |  136 +-
 src/core/Search/BooleanQuery.cs                    | 1040 +-
 src/core/Search/BooleanScorer.cs                   |  752 +-
 src/core/Search/BooleanScorer2.cs                  |  740 +-
 src/core/Search/CachingSpanFilter.cs               |   96 +-
 src/core/Search/CachingWrapperFilter.cs            |  120 +-
 src/core/Search/Collector.cs                       |  292 +-
 src/core/Search/ComplexExplanation.cs              |  100 +-
 src/core/Search/ConjunctionScorer.cs               |  246 +-
 src/core/Search/ConstantScoreQuery.cs              |  398 +-
 src/core/Search/DefaultSimilarity.cs               |  158 +-
 src/core/Search/DisjunctionMaxQuery.cs             |  574 +-
 src/core/Search/DisjunctionMaxScorer.cs            |  386 +-
 src/core/Search/DisjunctionSumScorer.cs            |  502 +-
 src/core/Search/DocIdSet.cs                        |  170 +-
 src/core/Search/DocIdSetIterator.cs                |  134 +-
 src/core/Search/ExactPhraseScorer.cs               |   86 +-
 src/core/Search/Explanation.cs                     |  256 +-
 src/core/Search/FieldCacheRangeFilter.cs           |  488 +-
 src/core/Search/FieldCacheTermsFilter.cs           |  386 +-
 src/core/Search/FieldComparator.cs                 | 1878 ++--
 src/core/Search/FieldComparatorSource.cs           |   46 +-
 src/core/Search/FieldDoc.cs                        |  120 +-
 src/core/Search/FieldDocSortedHitQueue.cs          |  168 +-
 src/core/Search/FieldValueHitQueue.cs              |  402 +-
 src/core/Search/Filter.cs                          |   58 +-
 src/core/Search/FilterManager.cs                   |  298 +-
 src/core/Search/FilteredDocIdSet.cs                |  164 +-
 src/core/Search/FilteredDocIdSetIterator.cs        |  148 +-
 src/core/Search/FilteredQuery.cs                   |  520 +-
 src/core/Search/FilteredTermEnum.cs                |  164 +-
 src/core/Search/Function/ByteFieldSource.cs        |  216 +-
 src/core/Search/Function/CustomScoreQuery.cs       |  850 +-
 src/core/Search/Function/DocValues.cs              |  348 +-
 src/core/Search/Function/FieldCacheSource.cs       |  170 +-
 src/core/Search/Function/FieldScoreQuery.cs        |  234 +-
 src/core/Search/Function/FloatFieldSource.cs       |  206 +-
 src/core/Search/Function/IntFieldSource.cs         |  216 +-
 src/core/Search/Function/OrdFieldSource.cs         |  238 +-
 src/core/Search/Function/ReverseOrdFieldSource.cs  |  260 +-
 src/core/Search/Function/ShortFieldSource.cs       |  216 +-
 src/core/Search/Function/ValueSource.cs            |   90 +-
 src/core/Search/Function/ValueSourceQuery.cs       |  296 +-
 src/core/Search/FuzzyQuery.cs                      |  364 +-
 src/core/Search/FuzzyTermEnum.cs                   |  382 +-
 src/core/Search/HitQueue.cs                        |  132 +-
 src/core/Search/IndexSearcher.cs                   |  512 +-
 src/core/Search/MatchAllDocsQuery.cs               |  336 +-
 src/core/Search/MultiPhraseQuery.cs                |  726 +-
 src/core/Search/MultiSearcher.cs                   |  676 +-
 src/core/Search/MultiTermQuery.cs                  |  840 +-
 src/core/Search/MultiTermQueryWrapperFilter.cs     |  158 +-
 src/core/Search/NumericRangeQuery.cs               |  890 +-
 src/core/Search/ParallelMultiSearcher.cs           |  252 +-
 src/core/Search/Payloads/AveragePayloadFunction.cs |   82 +-
 src/core/Search/Payloads/MaxPayloadFunction.cs     |   78 +-
 src/core/Search/Payloads/MinPayloadFunction.cs     |   74 +-
 src/core/Search/Payloads/PayloadFunction.cs        |  112 +-
 src/core/Search/Payloads/PayloadNearQuery.cs       |  484 +-
 src/core/Search/Payloads/PayloadSpanUtil.cs        |  318 +-
 src/core/Search/Payloads/PayloadTermQuery.cs       |  428 +-
 src/core/Search/PhrasePositions.cs                 |  138 +-
 src/core/Search/PhraseQuery.cs                     |  650 +-
 src/core/Search/PhraseQueue.cs                     |   38 +-
 src/core/Search/PhraseScorer.cs                    |  380 +-
 src/core/Search/PositiveScoresOnlyCollector.cs     |   82 +-
 src/core/Search/PrefixFilter.cs                    |   50 +-
 src/core/Search/PrefixQuery.cs                     |  142 +-
 src/core/Search/PrefixTermEnum.cs                  |   88 +-
 src/core/Search/Query.cs                           |  406 +-
 src/core/Search/QueryTermVector.cs                 |  260 +-
 src/core/Search/QueryWrapperFilter.cs              |  162 +-
 src/core/Search/ReqExclScorer.cs                   |  232 +-
 src/core/Search/ReqOptSumScorer.cs                 |  130 +-
 src/core/Search/ScoreCachingWrappingScorer.cs      |  128 +-
 src/core/Search/ScoreDoc.cs                        |   38 +-
 src/core/Search/Scorer.cs                          |  158 +-
 src/core/Search/Searchable.cs                      |  278 +-
 src/core/Search/Searcher.cs                        |  302 +-
 src/core/Search/Similarity.cs                      | 1278 +-
 src/core/Search/SimilarityDelegator.cs             |  106 +-
 src/core/Search/SingleTermEnum.cs                  |    2 +-
 src/core/Search/SloppyPhraseScorer.cs              |  438 +-
 src/core/Search/Sort.cs                            |  340 +-
 src/core/Search/SortField.cs                       |  918 +-
 src/core/Search/SpanFilter.cs                      |   46 +-
 src/core/Search/SpanFilterResult.cs                |  170 +-
 src/core/Search/SpanQueryFilter.cs                 |  160 +-
 src/core/Search/Spans/FieldMaskingSpanQuery.cs     |  256 +-
 src/core/Search/Spans/NearSpansOrdered.cs          |  784 +-
 src/core/Search/Spans/NearSpansUnordered.cs        |  754 +-
 src/core/Search/Spans/SpanFirstQuery.cs            |  350 +-
 src/core/Search/Spans/SpanNearQuery.cs             |  350 +-
 src/core/Search/Spans/SpanNotQuery.cs              |  448 +-
 src/core/Search/Spans/SpanOrQuery.cs               |  600 +-
 src/core/Search/Spans/SpanQuery.cs                 |   32 +-
 src/core/Search/Spans/SpanScorer.cs                |  198 +-
 src/core/Search/Spans/SpanTermQuery.cs             |  142 +-
 src/core/Search/Spans/SpanWeight.cs                |  206 +-
 src/core/Search/Spans/Spans.cs                     |  126 +-
 src/core/Search/Spans/TermSpans.cs                 |  192 +-
 src/core/Search/TermQuery.cs                       |  346 +-
 src/core/Search/TermRangeFilter.cs                 |  214 +-
 src/core/Search/TermRangeQuery.cs                  |  410 +-
 src/core/Search/TermRangeTermEnum.cs               |  268 +-
 src/core/Search/TermScorer.cs                      |  328 +-
 src/core/Search/TopDocs.cs                         |   40 +-
 src/core/Search/TopDocsCollector.cs                |  252 +-
 src/core/Search/TopFieldCollector.cs               | 2206 ++--
 src/core/Search/TopFieldDocs.cs                    |   50 +-
 src/core/Search/TopScoreDocCollector.cs            |  278 +-
 src/core/Search/Weight.cs                          |  176 +-
 src/core/Search/WildcardQuery.cs                   |  176 +-
 src/core/Search/WildcardTermEnum.cs                |  334 +-
 src/core/Store/BufferedIndexInput.cs               |  428 +-
 src/core/Store/BufferedIndexOutput.cs              |  258 +-
 src/core/Store/CheckSumIndexInput.cs               |   92 +-
 src/core/Store/CheckSumIndexOutput.cs              |  146 +-
 src/core/Store/Directory.cs                        |  418 +-
 src/core/Store/FSDirectory.cs                      |  704 +-
 src/core/Store/FSLockFactory.cs                    |   38 +-
 src/core/Store/FileSwitchDirectory.cs              |  218 +-
 src/core/Store/IndexInput.cs                       |  504 +-
 src/core/Store/IndexOutput.cs                      |  468 +-
 src/core/Store/Lock.cs                             |  274 +-
 src/core/Store/LockFactory.cs                      |   94 +-
 src/core/Store/LockStressTest.cs                   |  212 +-
 src/core/Store/LockVerifyServer.cs                 |  176 +-
 src/core/Store/MMapDirectory.cs                    |  826 +-
 src/core/Store/NIOFSDirectory.cs                   |   40 +-
 src/core/Store/NoLockFactory.cs                    |  104 +-
 src/core/Store/RAMDirectory.cs                     |  392 +-
 src/core/Store/RAMFile.cs                          |  206 +-
 src/core/Store/RAMInputStream.cs                   |  218 +-
 src/core/Store/RAMOutputStream.cs                  |  294 +-
 src/core/Store/SimpleFSDirectory.cs                |  338 +-
 src/core/Store/SimpleFSLockFactory.cs              |  360 +-
 src/core/Store/SingleInstanceLockFactory.cs        |  146 +-
 src/core/Store/VerifyingLockFactory.cs             |  286 +-
 .../Support/Compatibility/ConcurrentDictionary.cs  |    2 +-
 src/core/Support/Compatibility/Func.cs             |    2 +-
 src/core/Support/Compatibility/ISet.cs             |    2 +-
 src/core/Support/Compatibility/SetFactory.cs       |    2 +-
 src/core/Support/Compatibility/SortedSet.cs        |    2 +-
 src/core/Support/Compatibility/ThreadLocal.cs      |    2 +-
 src/core/Support/Compatibility/WrappedHashSet.cs   |    2 +-
 src/core/Support/Cryptography.cs                   |    2 +-
 src/core/Util/ArrayUtil.cs                         |  518 +-
 src/core/Util/Attribute.cs                         |  212 +-
 src/core/Util/AttributeSource.cs                   | 1030 +-
 src/core/Util/AverageGuessMemoryModel.cs           |  112 +-
 src/core/Util/BitUtil.cs                           | 1740 ++--
 src/core/Util/BitVector.cs                         |  544 +-
 src/core/Util/Cache/Cache.cs                       |  180 +-
 src/core/Util/Cache/SimpleLRUCache.cs              |   88 +-
 src/core/Util/Cache/SimpleMapCache.cs              |  192 +-
 src/core/Util/CloseableThreadLocal-old.cs          |   82 +-
 src/core/Util/Constants.cs                         |   74 +-
 src/core/Util/DocIdBitSet.cs                       |  116 +-
 src/core/Util/FieldCacheSanityChecker.cs           |  738 +-
 src/core/Util/IAttribute.cs                        |   10 +-
 src/core/Util/IndexableBinaryStringTools.cs        |  312 +-
 src/core/Util/MapOfSets.cs                         |   70 +-
 src/core/Util/MemoryModel.cs                       |   38 +-
 src/core/Util/NumericUtils.cs                      |  908 +-
 src/core/Util/OpenBitSet.cs                        | 1764 ++--
 src/core/Util/OpenBitSetDISI.cs                    |  176 +-
 src/core/Util/OpenBitSetIterator.cs                |  416 +-
 src/core/Util/PriorityQueue.cs                     |  438 +-
 src/core/Util/RamUsageEstimator.cs                 |  390 +-
 src/core/Util/ReaderUtil.cs                        |  192 +-
 src/core/Util/ScorerDocQueue.cs                    |  490 +-
 src/core/Util/SimpleStringInterner.cs              |  144 +-
 src/core/Util/SmallFloat.cs                        |  260 +-
 src/core/Util/SortedVIntList.cs                    |  520 +-
 src/core/Util/SorterTemplate.cs                    |  404 +-
 src/core/Util/StringHelper.cs                      |  132 +-
 src/core/Util/StringInterner.cs                    |   44 +-
 src/core/Util/ToStringUtils.cs                     |   28 +-
 src/core/Util/UnicodeUtil.cs                       |  966 +-
 src/core/Util/Version.cs                           |   62 +-
 src/demo/DeleteFiles/DeleteFiles.cs                |   48 +-
 src/demo/Demo.Common/FileDocument.cs               |   82 +-
 src/demo/Demo.Common/HTML/Entities.cs              |  648 +-
 src/demo/Demo.Common/HTML/HTMLParser.cs            | 1998 ++--
 src/demo/Demo.Common/HTML/HTMLParserConstants.cs   |   86 +-
 .../Demo.Common/HTML/HTMLParserTokenManager.cs     | 3892 +++---
 src/demo/Demo.Common/HTML/ParseException.cs        |  414 +-
 src/demo/Demo.Common/HTML/ParserThread.cs          |   90 +-
 src/demo/Demo.Common/HTML/SimpleCharStream.cs      |  840 +-
 src/demo/Demo.Common/HTML/Tags.cs                  |   86 +-
 src/demo/Demo.Common/HTML/Test.cs                  |   84 +-
 src/demo/Demo.Common/HTML/Token.cs                 |  140 +-
 src/demo/Demo.Common/HTML/TokenMgrError.cs         |  276 +-
 src/demo/Demo.Common/HTMLDocument.cs               |   74 +-
 src/demo/IndexFiles/IndexFiles.cs                  |  102 +-
 src/demo/IndexHtml/IndexHtml.cs                    |  216 +-
 src/demo/SearchFiles/SearchFiles.cs                |  642 +-
 test/contrib/Analyzers/Cn/TestChineseTokenizer.cs  |    2 +-
 test/contrib/Analyzers/El/GreekAnalyzerTest.cs     |   44 +-
 .../contrib/Analyzers/Filters/ChainedFilterTest.cs |    2 +-
 test/contrib/Analyzers/Fr/TestFrenchAnalyzer.cs    |    2 +-
 .../Analyzers/Hunspell/HunspellDictionaryLoader.cs |    2 +-
 .../Analyzers/Hunspell/TestHunspellStemFilter.cs   |    2 +-
 .../Analyzers/Hunspell/TestHunspellStemmer.cs      |    2 +-
 .../TestPrefixAndSuffixAwareTokenFilter.cs         |    2 +-
 .../Miscellaneous/TestPrefixAwareTokenFilter.cs    |    2 +-
 .../Analyzers/NGram/TestNGramTokenFilter.cs        |    2 +-
 test/contrib/Analyzers/NGram/TestNGramTokenizer.cs |    2 +-
 test/contrib/Analyzers/Properties/AssemblyInfo.cs  |    2 +-
 test/contrib/Analyzers/Ru/TestRussianAnalyzer.cs   |    2 +-
 .../Shingle/ShingleAnalyzerWrapperTest.cs          |    2 +-
 .../contrib/Analyzers/Shingle/ShingleFilterTest.cs |    2 +-
 .../Analyzers/Shingle/TestShingleMatrixFilter.cs   |    2 +-
 .../contrib/Core/Analysis/Ext/Analysis.Ext.Test.cs |    2 +-
 .../FastVectorHighlighter/FieldPhraseListTest.cs   |    2 +-
 .../FastVectorHighlighter/FieldQueryTest.cs        |    2 +-
 .../FastVectorHighlighter/FieldTermStackTest.cs    |    2 +-
 .../FastVectorHighlighter/IndexTimeSynonymTest.cs  |    2 +-
 .../Properties/AssemblyInfo.cs                     |    2 +-
 .../SimpleFragListBuilderTest.cs                   |    2 +-
 .../SimpleFragmentsBuilderTest.cs                  |    2 +-
 .../FastVectorHighlighter/StringUtilsTest.cs       |    2 +-
 test/contrib/FastVectorHighlighter/Support.cs      |    2 +-
 test/contrib/Queries/BooleanFilterTest.cs          |    2 +-
 test/contrib/Queries/BoostingQueryTest.cs          |    2 +-
 test/contrib/Queries/DuplicateFilterTest.cs        |    2 +-
 test/contrib/Queries/FuzzyLikeThisQueryTest.cs     |    2 +-
 test/contrib/Queries/Properties/AssemblyInfo.cs    |    2 +-
 test/contrib/Queries/Similar/TestMoreLikeThis.cs   |    2 +-
 test/contrib/Queries/TermsFilterTest.cs            |    2 +-
 test/contrib/Regex/Properties/AssemblyInfo.cs      |    2 +-
 test/contrib/Regex/TestRegexQuery.cs               |    2 +-
 .../SimpleFacetedSearch/Properties/AssemblyInfo.cs |    2 +-
 .../SimpleFacetedSearch/TestSimpleFacetedSearch.cs |    2 +-
 .../Lucene.Net/Analysis/Snowball/TestSnowball.cs   |  128 +-
 test/contrib/Spatial/BBox/TestBBoxStrategy.cs      |   54 +-
 test/contrib/Spatial/CheckHits.cs                  |    2 +-
 .../Spatial/Compatibility/TermsFilterTest.cs       |  170 +-
 .../Spatial/Compatibility/TestFixedBitSet.cs       |  668 +-
 test/contrib/Spatial/DistanceStrategyTest.cs       |    2 +-
 test/contrib/Spatial/PortedSolr3Test.cs            |    6 +-
 test/contrib/Spatial/Prefix/NtsPolygonTest.cs      |   54 +-
 .../Prefix/TestRecursivePrefixTreeStrategy.cs      |    2 +-
 .../Prefix/TestTermQueryPrefixGridStrategy.cs      |   62 +-
 .../Spatial/Prefix/Tree/SpatialPrefixTreeTest.cs   |   70 +-
 test/contrib/Spatial/Properties/AssemblyInfo.cs    |    2 +-
 .../Spatial/Queries/SpatialArgsParserTest.cs       |   80 +-
 test/contrib/Spatial/SpatialMatchConcern.cs        |   28 +-
 test/contrib/Spatial/SpatialTestCase.cs            |  326 +-
 test/contrib/Spatial/SpatialTestQuery.cs           |  134 +-
 test/contrib/Spatial/StrategyTestCase.cs           |    2 +-
 test/contrib/Spatial/TestCartesian.cs              |  562 +-
 test/contrib/Spatial/TestTestFramework.cs          |   14 +-
 .../Spatial/Vector/TestTwoDoublesStrategy.cs       |   56 +-
 test/contrib/SpellChecker/Util/English.cs          |  260 +-
 test/core/Analysis/BaseTokenStreamTestCase.cs      |   46 +-
 test/core/Analysis/TestASCIIFoldingFilter.cs       |  370 +-
 test/core/Analysis/TestAnalyzers.cs                |  266 +-
 test/core/Analysis/TestCachingTokenFilter.cs       |  208 +-
 test/core/Analysis/TestCharArraySet.cs             |  192 +-
 test/core/Analysis/TestCharFilter.cs               |  104 +-
 test/core/Analysis/TestISOLatin1AccentFilter.cs    |  184 +-
 test/core/Analysis/TestKeywordAnalyzer.cs          |  130 +-
 test/core/Analysis/TestLengthFilter.cs             |   36 +-
 test/core/Analysis/TestMappingCharFilter.cs        |  292 +-
 test/core/Analysis/TestNumericTokenStream.cs       |   66 +-
 test/core/Analysis/TestPerFieldAnalzyerWrapper.cs  |   34 +-
 test/core/Analysis/TestStandardAnalyzer.cs         |  410 +-
 test/core/Analysis/TestStopAnalyzer.cs             |  102 +-
 test/core/Analysis/TestStopFilter.cs               |  240 +-
 test/core/Analysis/TestToken.cs                    |  408 +-
 .../Tokenattributes/TestSimpleAttributeImpls.cs    |  238 +-
 .../Tokenattributes/TestTermAttributeImpl.cs       |  330 +-
 test/core/Document/TestBinaryDocument.cs           |  162 +-
 test/core/Document/TestDateTools.cs                |  292 +-
 test/core/Document/TestDocument.cs                 |  458 +-
 test/core/Document/TestNumberTools.cs              |  152 +-
 test/core/Index/DocHelper.cs                       |  458 +-
 test/core/Index/MockIndexInput.cs                  |   82 +-
 test/core/Index/TestAddIndexesNoOptimize.cs        | 1082 +-
 test/core/Index/TestAtomicUpdate.cs                |  354 +-
 test/core/Index/TestBackwardsCompatibility.cs      |  988 +-
 test/core/Index/TestByteSlices.cs                  |  160 +-
 test/core/Index/TestCheckIndex.cs                  |  154 +-
 test/core/Index/TestCompoundFile.cs                | 1214 +-
 test/core/Index/TestConcurrentMergeScheduler.cs    |  172 +-
 test/core/Index/TestCrash.cs                       |  342 +-
 test/core/Index/TestDeletionPolicy.cs              | 1638 ++--
 test/core/Index/TestDirectoryReader.cs             |  360 +-
 test/core/Index/TestDoc.cs                         |  456 +-
 test/core/Index/TestDocumentWriter.cs              |  744 +-
 test/core/Index/TestFieldInfos.cs                  |  140 +-
 test/core/Index/TestFieldsReader.cs                |  828 +-
 test/core/Index/TestFilterIndexReader.cs           |  242 +-
 test/core/Index/TestIndexFileDeleter.cs            |  424 +-
 test/core/Index/TestIndexInput.cs                  |  132 +-
 test/core/Index/TestIndexReader.cs                 | 3434 +++---
 test/core/Index/TestIndexReaderClone.cs            |  894 +-
 test/core/Index/TestIndexReaderCloneNorms.cs       |  658 +-
 test/core/Index/TestIndexReaderReopen.cs           | 2564 ++--
 test/core/Index/TestIndexWriter.cs                 |    2 +-
 test/core/Index/TestIndexWriterDelete.cs           |  988 +-
 test/core/Index/TestIndexWriterExceptions.cs       |  480 +-
 test/core/Index/TestIndexWriterLockRelease.cs      |  208 +-
 test/core/Index/TestIndexWriterMergePolicy.cs      |  500 +-
 test/core/Index/TestIndexWriterMerging.cs          |  146 +-
 test/core/Index/TestIndexWriterReader.cs           | 2052 ++--
 test/core/Index/TestIsCurrent.cs                   |    2 +-
 test/core/Index/TestLazyBug.cs                     |  226 +-
 test/core/Index/TestLazyProxSkipping.cs            |  412 +-
 test/core/Index/TestMultiLevelSkipList.cs          |  292 +-
 test/core/Index/TestMultiReader.cs                 |   54 +-
 test/core/Index/TestNRTReaderWithThreads.cs        |  226 +-
 test/core/Index/TestNewestSegment.cs               |    2 +-
 test/core/Index/TestNorms.cs                       |  486 +-
 test/core/Index/TestOmitTf.cs                      | 1000 +-
 test/core/Index/TestParallelReader.cs              |  500 +-
 test/core/Index/TestParallelTermEnum.cs            |  310 +-
 test/core/Index/TestPayloads.cs                    | 1250 +-
 .../Index/TestPositionBasedTermVectorMapper.cs     |  162 +-
 test/core/Index/TestRollback.cs                    |    2 +-
 test/core/Index/TestSegmentMerger.cs               |  186 +-
 test/core/Index/TestSegmentReader.cs               |  380 +-
 test/core/Index/TestSegmentTermDocs.cs             |  464 +-
 test/core/Index/TestSegmentTermEnum.cs             |  192 +-
 test/core/Index/TestSnapshotDeletionPolicy.cs      |  450 +-
 test/core/Index/TestStressIndexing.cs              |  372 +-
 test/core/Index/TestStressIndexing2.cs             | 1500 ++--
 test/core/Index/TestTerm.cs                        |   40 +-
 test/core/Index/TestTermVectorsReader.cs           |  988 +-
 test/core/Index/TestTermdocPerf.cs                 |  264 +-
 test/core/Index/TestThreadedOptimize.cs            |  314 +-
 test/core/Index/TestTransactionRollback.cs         |  472 +-
 test/core/Index/TestTransactions.cs                |  510 +-
 test/core/Index/TestWordlistLoader.cs              |   68 +-
 test/core/Messages/MessagesTestBundle.cs           |   52 +-
 test/core/Messages/TestNLS.cs                      |  114 +-
 test/core/QueryParser/TestMultiAnalyzer.cs         |  628 +-
 test/core/QueryParser/TestMultiFieldQueryParser.cs |  524 +-
 test/core/QueryParser/TestQueryParser.cs           | 1906 ++--
 test/core/Search/BaseTestRangeFilter.cs            |  288 +-
 test/core/Search/CheckHits.cs                      |  988 +-
 test/core/Search/Function/FunctionTestSetup.cs     |  240 +-
 .../core/Search/Function/JustCompileSearchSpans.cs |  150 +-
 test/core/Search/Function/TestCustomScoreQuery.cs  |  390 +-
 test/core/Search/Function/TestDocValues.cs         |  174 +-
 test/core/Search/Function/TestFieldScoreQuery.cs   |  454 +-
 test/core/Search/Function/TestOrdValues.cs         |  494 +-
 test/core/Search/JustCompileSearch.cs              |  834 +-
 test/core/Search/MockFilter.cs                     |   44 +-
 test/core/Search/Payloads/PayloadHelper.cs         |  260 +-
 test/core/Search/Payloads/TestPayloadNearQuery.cs  |  542 +-
 test/core/Search/Payloads/TestPayloadTermQuery.cs  |  672 +-
 test/core/Search/QueryUtils.cs                     |  848 +-
 test/core/Search/SingleDocTestFilter.cs            |   36 +-
 test/core/Search/Spans/JustCompileSearchSpans.cs   |  232 +-
 test/core/Search/Spans/TestBasics.cs               |  692 +-
 .../core/Search/Spans/TestFieldMaskingSpanQuery.cs |  540 +-
 test/core/Search/Spans/TestNearSpansOrdered.cs     |  312 +-
 test/core/Search/Spans/TestPayloadSpans.cs         | 1120 +-
 test/core/Search/Spans/TestSpanExplanations.cs     |  428 +-
 .../Spans/TestSpanExplanationsOfNonMatches.cs      |   32 +-
 test/core/Search/Spans/TestSpans.cs                |  972 +-
 test/core/Search/Spans/TestSpansAdvanced.cs        |  290 +-
 test/core/Search/Spans/TestSpansAdvanced2.cs       |  168 +-
 test/core/Search/TestBoolean2.cs                   |  478 +-
 test/core/Search/TestBooleanMinShouldMatch.cs      |  794 +-
 test/core/Search/TestBooleanOr.cs                  |  258 +-
 test/core/Search/TestBooleanPrefixQuery.cs         |  158 +-
 test/core/Search/TestBooleanQuery.cs               |  134 +-
 test/core/Search/TestBooleanScorer.cs              |  198 +-
 test/core/Search/TestCachingSpanFilter.cs          |    2 +-
 test/core/Search/TestComplexExplanations.cs        |  542 +-
 .../Search/TestComplexExplanationsOfNonMatches.cs  |   30 +-
 test/core/Search/TestCustomSearcherSort.cs         |  532 +-
 test/core/Search/TestDateFilter.cs                 |  244 +-
 test/core/Search/TestDateSort.cs                   |  170 +-
 test/core/Search/TestDisjunctionMaxQuery.cs        |  960 +-
 test/core/Search/TestDocBoost.cs                   |  184 +-
 test/core/Search/TestDocIdSet.cs                   |  368 +-
 test/core/Search/TestElevationComparator.cs        |  368 +-
 test/core/Search/TestExplanations.cs               |  454 +-
 test/core/Search/TestFieldCache.cs                 |  230 +-
 test/core/Search/TestFieldCacheRangeFilter.cs      |  980 +-
 test/core/Search/TestFieldCacheTermsFilter.cs      |   90 +-
 test/core/Search/TestFilteredQuery.cs              |  384 +-
 test/core/Search/TestFilteredSearch.cs             |   26 +-
 test/core/Search/TestFuzzyQuery.cs                 |  332 +-
 test/core/Search/TestMatchAllDocsQuery.cs          |  228 +-
 test/core/Search/TestMultiPhraseQuery.cs           |  384 +-
 test/core/Search/TestMultiSearcher.cs              |  798 +-
 test/core/Search/TestMultiSearcherRanking.cs       |  298 +-
 test/core/Search/TestMultiTermConstantScore.cs     | 1314 +-
 test/core/Search/TestMultiThreadTermVectors.cs     |  356 +-
 .../Search/TestMultiValuedNumericRangeQuery.cs     |  100 +-
 test/core/Search/TestNot.cs                        |   50 +-
 test/core/Search/TestNumericRangeQuery32.cs        |  922 +-
 test/core/Search/TestNumericRangeQuery64.cs        | 1052 +-
 test/core/Search/TestParallelMultiSearcher.cs      |   16 +-
 test/core/Search/TestPhrasePrefixQuery.cs          |  132 +-
 test/core/Search/TestPhraseQuery.cs                | 1128 +-
 test/core/Search/TestPositionIncrement.cs          |  556 +-
 .../core/Search/TestPositiveScoresOnlyCollector.cs |  142 +-
 test/core/Search/TestPrefixFilter.cs               |  154 +-
 test/core/Search/TestPrefixInBooleanQuery.cs       |  172 +-
 test/core/Search/TestPrefixQuery.cs                |   62 +-
 test/core/Search/TestQueryTermVector.cs            |   78 +-
 test/core/Search/TestQueryWrapperFilter.cs         |   78 +-
 test/core/Search/TestScoreCachingWrappingScorer.cs |  202 +-
 test/core/Search/TestScorerPerf.cs                 |  710 +-
 test/core/Search/TestSetNorm.cs                    |  168 +-
 test/core/Search/TestSimilarity.cs                 |  454 +-
 test/core/Search/TestSimpleExplanations.cs         |  840 +-
 .../Search/TestSimpleExplanationsOfNonMatches.cs   |   30 +-
 test/core/Search/TestSloppyPhraseQuery.cs          |  274 +-
 test/core/Search/TestSort.cs                       | 2266 ++--
 test/core/Search/TestSpanQueryFilter.cs            |  118 +-
 test/core/Search/TestTermRangeFilter.cs            |  606 +-
 test/core/Search/TestTermRangeQuery.cs             |  640 +-
 test/core/Search/TestTermScorer.cs                 |  358 +-
 test/core/Search/TestTermVectors.cs                |  858 +-
 test/core/Search/TestThreadSafe.cs                 |  364 +-
 test/core/Search/TestTimeLimitingCollector.cs      |  732 +-
 test/core/Search/TestTopDocsCollector.cs           |  366 +-
 test/core/Search/TestTopScoreDocCollector.cs       |   24 +-
 test/core/Search/TestWildcard.cs                   |  474 +-
 test/core/Store/MockRAMInputStream.cs              |   82 +-
 test/core/Store/MockRAMOutputStream.cs             |  164 +-
 test/core/Store/TestBufferedIndexInput.cs          |  678 +-
 test/core/Store/TestDirectory.cs                   |  398 +-
 test/core/Store/TestFileSwitchDirectory.cs         |   92 +-
 test/core/Store/TestHugeRamFile.cs                 |  186 +-
 test/core/Store/TestLock.cs                        |   80 +-
 test/core/Store/TestLockFactory.cs                 |  920 +-
 test/core/Store/TestRAMDirectory.cs                |  342 +-
 test/core/Store/TestWindowsMMap.cs                 |  204 +-
 test/core/Store/_TestHelper.cs                     |  100 +-
 test/core/Support/TestExceptionSerialization.cs    |    2 +-
 test/core/SupportClassException.cs                 |    2 +-
 test/core/TestDemo.cs                              |   86 +-
 test/core/TestMergeSchedulerExternal.cs            |  268 +-
 test/core/TestSearch.cs                            |  186 +-
 test/core/TestSearchForDuplicates.cs               |  252 +-
 test/core/Util/ArrayUtilTest.cs                    |  138 +-
 test/core/Util/Cache/TestSimpleLRUCache.cs         |   96 +-
 test/core/Util/English.cs                          |  260 +-
 test/core/Util/LuceneTestCase.cs                   |  406 +-
 test/core/Util/Paths.cs                            |   14 +-
 test/core/Util/TestBitVector.cs                    |  568 +-
 test/core/Util/TestCloseableThreadLocal.cs         |  102 +-
 test/core/Util/TestFieldCacheSanityChecker.cs      |  314 +-
 test/core/Util/TestIndexableBinaryStringTools.cs   |  176 +-
 test/core/Util/TestNumericUtils.cs                 |  616 +-
 test/core/Util/TestOpenBitSet.cs                   |  370 +-
 test/core/Util/TestPriorityQueue.cs                |  172 +-
 test/core/Util/TestRamUsageEstimator.cs            |   84 +-
 test/core/Util/TestSmallFloat.cs                   |  220 +-
 test/core/Util/TestSortedVIntList.cs               |  434 +-
 test/core/Util/TestStringHelper.cs                 |   48 +-
 test/core/Util/TestStringIntern.cs                 |  196 +-
 test/core/Util/TestVersion.cs                      |    2 +-
 test/core/Util/_TestUtil.cs                        |  198 +-
 843 files changed, 143453 insertions(+), 143453 deletions(-)
----------------------------------------------------------------------


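The matching insertion and deletion counts above (143,453 each) are the signature of a line-for-line whitespace rewrite: every touched line reappears unchanged except for its tabs being expanded to spaces. The commit does not record which tool performed the conversion, so the following is only a minimal C# sketch of the transformation, assuming the four-space indent seen in the hunks below; note that a plain replacement is an approximation, since true tab expansion pads to the next tab stop by column.

    using System.IO;

    // Hypothetical one-off converter, illustrative only; not the tool
    // actually used for this commit.
    static class TabsToSpaces
    {
        static void Main(string[] args)
        {
            foreach (string path in args)
            {
                string[] lines = File.ReadAllLines(path);
                for (int i = 0; i < lines.Length; i++)
                {
                    // Naive expansion: every tab becomes four spaces.
                    lines[i] = lines[i].Replace("\t", "    ");
                }
                File.WriteAllLines(path, lines);
            }
        }
    }
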
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/AR/ArabicAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/AR/ArabicAnalyzer.cs b/src/contrib/Analyzers/AR/ArabicAnalyzer.cs
index 1e043b2..88e9736 100644
--- a/src/contrib/Analyzers/AR/ArabicAnalyzer.cs
+++ b/src/contrib/Analyzers/AR/ArabicAnalyzer.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -153,7 +153,7 @@ namespace Lucene.Net.Analysis.AR
          * Creates a <see cref="TokenStream"/> which tokenizes all the text in the provided <see cref="TextReader"/>.
          *
          * <returns>A <see cref="TokenStream"/> built from an <see cref="ArabicLetterTokenizer"/> filtered with
-         * 			<see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>, <see cref="ArabicNormalizationFilter"/>
+         *             <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>, <see cref="ArabicNormalizationFilter"/>
          *            and <see cref="ArabicStemFilter"/>.</returns>
          */
         public override TokenStream TokenStream(string fieldName, TextReader reader)

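For reference, the filter chain the doc comment in this hunk names corresponds to a method body along these lines. This is a sketch in the context of the ArabicAnalyzer class, not the file's actual code: the constructor signatures follow the 2.9-era Lucene.Net API, and matchVersion and stoptable stand in for the analyzer's version and stop-word fields.

    public override TokenStream TokenStream(string fieldName, TextReader reader)
    {
        // Order per the doc comment: tokenize, lowercase, drop stopwords,
        // normalize the Arabic orthography, then stem.
        TokenStream result = new ArabicLetterTokenizer(reader);
        result = new LowerCaseFilter(result);
        result = new StopFilter(
            StopFilter.GetEnablePositionIncrementsVersionDefault(matchVersion),
            result, stoptable);
        result = new ArabicNormalizationFilter(result);
        result = new ArabicStemFilter(result);
        return result;
    }
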
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/AR/ArabicLetterTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/AR/ArabicLetterTokenizer.cs b/src/contrib/Analyzers/AR/ArabicLetterTokenizer.cs
index eb08d46..780d8aa 100644
--- a/src/contrib/Analyzers/AR/ArabicLetterTokenizer.cs
+++ b/src/contrib/Analyzers/AR/ArabicLetterTokenizer.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/AR/ArabicNormalizationFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/AR/ArabicNormalizationFilter.cs b/src/contrib/Analyzers/AR/ArabicNormalizationFilter.cs
index 2ec7bcf..28b7044 100644
--- a/src/contrib/Analyzers/AR/ArabicNormalizationFilter.cs
+++ b/src/contrib/Analyzers/AR/ArabicNormalizationFilter.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/AR/ArabicNormalizer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/AR/ArabicNormalizer.cs b/src/contrib/Analyzers/AR/ArabicNormalizer.cs
index c038c8d..da8c8af 100644
--- a/src/contrib/Analyzers/AR/ArabicNormalizer.cs
+++ b/src/contrib/Analyzers/AR/ArabicNormalizer.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/AR/ArabicStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/AR/ArabicStemFilter.cs b/src/contrib/Analyzers/AR/ArabicStemFilter.cs
index 563c654..b3e89c1 100644
--- a/src/contrib/Analyzers/AR/ArabicStemFilter.cs
+++ b/src/contrib/Analyzers/AR/ArabicStemFilter.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/AR/ArabicStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/AR/ArabicStemmer.cs b/src/contrib/Analyzers/AR/ArabicStemmer.cs
index 8beef1b..7ee08bb 100644
--- a/src/contrib/Analyzers/AR/ArabicStemmer.cs
+++ b/src/contrib/Analyzers/AR/ArabicStemmer.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/BR/BrazilianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/BR/BrazilianAnalyzer.cs b/src/contrib/Analyzers/BR/BrazilianAnalyzer.cs
index 0c0218b..6e6ee25 100644
--- a/src/contrib/Analyzers/BR/BrazilianAnalyzer.cs
+++ b/src/contrib/Analyzers/BR/BrazilianAnalyzer.cs
@@ -196,7 +196,7 @@ namespace Lucene.Net.Analysis.BR
          * Creates a {@link TokenStream} which tokenizes all the text in the provided {@link Reader}.
          *
          * @return  A {@link TokenStream} built from a {@link StandardTokenizer} filtered with
-         * 			{@link LowerCaseFilter}, {@link StandardFilter}, {@link StopFilter}, and 
+         *             {@link LowerCaseFilter}, {@link StandardFilter}, {@link StopFilter}, and 
          *          {@link BrazilianStemFilter}.
          */
         public override TokenStream TokenStream(String fieldName, TextReader reader)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Compound/CompoundWordTokenFilterBase.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Compound/CompoundWordTokenFilterBase.cs b/src/contrib/Analyzers/Compound/CompoundWordTokenFilterBase.cs
index 6ae819a..af3f702 100644
--- a/src/contrib/Analyzers/Compound/CompoundWordTokenFilterBase.cs
+++ b/src/contrib/Analyzers/Compound/CompoundWordTokenFilterBase.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Compound/Hyphenation/ByteVector.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Compound/Hyphenation/ByteVector.cs b/src/contrib/Analyzers/Compound/Hyphenation/ByteVector.cs
index 4a8f6f1..6ceaaed 100644
--- a/src/contrib/Analyzers/Compound/Hyphenation/ByteVector.cs
+++ b/src/contrib/Analyzers/Compound/Hyphenation/ByteVector.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Compound/Hyphenation/CharVector.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Compound/Hyphenation/CharVector.cs b/src/contrib/Analyzers/Compound/Hyphenation/CharVector.cs
index 522c923..657d299 100644
--- a/src/contrib/Analyzers/Compound/Hyphenation/CharVector.cs
+++ b/src/contrib/Analyzers/Compound/Hyphenation/CharVector.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Compound/Hyphenation/Hyphen.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Compound/Hyphenation/Hyphen.cs b/src/contrib/Analyzers/Compound/Hyphenation/Hyphen.cs
index 373e6d4..6bbdd61 100644
--- a/src/contrib/Analyzers/Compound/Hyphenation/Hyphen.cs
+++ b/src/contrib/Analyzers/Compound/Hyphenation/Hyphen.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Compound/Hyphenation/Hyphenation.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Compound/Hyphenation/Hyphenation.cs b/src/contrib/Analyzers/Compound/Hyphenation/Hyphenation.cs
index 79ba6a3..ef4c699 100644
--- a/src/contrib/Analyzers/Compound/Hyphenation/Hyphenation.cs
+++ b/src/contrib/Analyzers/Compound/Hyphenation/Hyphenation.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Compound/Hyphenation/HyphenationException.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Compound/Hyphenation/HyphenationException.cs b/src/contrib/Analyzers/Compound/Hyphenation/HyphenationException.cs
index 6266d76..bd7ebab 100644
--- a/src/contrib/Analyzers/Compound/Hyphenation/HyphenationException.cs
+++ b/src/contrib/Analyzers/Compound/Hyphenation/HyphenationException.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Compound/Hyphenation/HyphenationTree.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Compound/Hyphenation/HyphenationTree.cs b/src/contrib/Analyzers/Compound/Hyphenation/HyphenationTree.cs
index 1f4b914..ab015ce 100644
--- a/src/contrib/Analyzers/Compound/Hyphenation/HyphenationTree.cs
+++ b/src/contrib/Analyzers/Compound/Hyphenation/HyphenationTree.cs
@@ -1,4 +1,4 @@
-////*
+////*
 // * Licensed to the Apache Software Foundation (ASF) under one or more
 // * contributor license agreements.  See the NOTICE file distributed with
 // * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Compound/Hyphenation/PatternConsumer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Compound/Hyphenation/PatternConsumer.cs b/src/contrib/Analyzers/Compound/Hyphenation/PatternConsumer.cs
index fa6ed1e..1f92e0d 100644
--- a/src/contrib/Analyzers/Compound/Hyphenation/PatternConsumer.cs
+++ b/src/contrib/Analyzers/Compound/Hyphenation/PatternConsumer.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Compound/Hyphenation/PatternParser.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Compound/Hyphenation/PatternParser.cs b/src/contrib/Analyzers/Compound/Hyphenation/PatternParser.cs
index 5c79517..28fa1c0 100644
--- a/src/contrib/Analyzers/Compound/Hyphenation/PatternParser.cs
+++ b/src/contrib/Analyzers/Compound/Hyphenation/PatternParser.cs
@@ -1,4 +1,4 @@
-//using System;
+//using System;
 //using System.Collections;
 //using System.Collections.Generic;
 //using System.IO;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Compound/Hyphenation/TernaryTree.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Compound/Hyphenation/TernaryTree.cs b/src/contrib/Analyzers/Compound/Hyphenation/TernaryTree.cs
index 5ca380c..fbbca07 100644
--- a/src/contrib/Analyzers/Compound/Hyphenation/TernaryTree.cs
+++ b/src/contrib/Analyzers/Compound/Hyphenation/TernaryTree.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Compound/HyphenationCompoundWordTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Compound/HyphenationCompoundWordTokenFilter.cs b/src/contrib/Analyzers/Compound/HyphenationCompoundWordTokenFilter.cs
index 51c49fa..6ca528b 100644
--- a/src/contrib/Analyzers/Compound/HyphenationCompoundWordTokenFilter.cs
+++ b/src/contrib/Analyzers/Compound/HyphenationCompoundWordTokenFilter.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Cz/CzechAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Cz/CzechAnalyzer.cs b/src/contrib/Analyzers/Cz/CzechAnalyzer.cs
index baf3dbf..48f5aa9 100644
--- a/src/contrib/Analyzers/Cz/CzechAnalyzer.cs
+++ b/src/contrib/Analyzers/Cz/CzechAnalyzer.cs
@@ -41,12 +41,12 @@ namespace Lucene.Net.Analysis.Cz
  */
 public sealed class CzechAnalyzer : Analyzer {
 
-	/*
-	 * List of typical stopwords.
-	 * @deprecated use {@link #getDefaultStopSet()} instead
-	 */
+    /*
+     * List of typical stopwords.
+     * @deprecated use {@link #getDefaultStopSet()} instead
+     */
   // TODO make this private in 3.1
-	public static readonly String[] CZECH_STOP_WORDS = {
+    public static readonly String[] CZECH_STOP_WORDS = {
         "a","s","k","o","i","u","v","z","dnes","cz","t\u00edmto","bude\u0161","budem",
         "byli","jse\u0161","m\u016fj","sv\u00fdm","ta","tomto","tohle","tuto","tyto",
         "jej","zda","pro\u010d","m\u00e1te","tato","kam","tohoto","kdo","kte\u0159\u00ed",
@@ -66,37 +66,37 @@ public sealed class CzechAnalyzer : Analyzer {
         "j\u00ed","ji","m\u011b","mne","jemu","tomu","t\u011bm","t\u011bmu","n\u011bmu","n\u011bmu\u017e",
         "jeho\u017e","j\u00ed\u017e","jeliko\u017e","je\u017e","jako\u017e","na\u010de\u017e",
     };
-	
-	/*
-	 * Returns a set of default Czech-stopwords 
-	 * @return a set of default Czech-stopwords 
-	 */
-	public static ISet<string> getDefaultStopSet(){
-	  return DefaultSetHolder.DEFAULT_SET;
-	}
-	
-	private static class DefaultSetHolder {
-	  internal static ISet<string> DEFAULT_SET = CharArraySet.UnmodifiableSet(new CharArraySet(
+    
+    /*
+     * Returns a set of default Czech-stopwords 
+     * @return a set of default Czech-stopwords 
+     */
+    public static ISet<string> getDefaultStopSet(){
+      return DefaultSetHolder.DEFAULT_SET;
+    }
+    
+    private static class DefaultSetHolder {
+      internal static ISet<string> DEFAULT_SET = CharArraySet.UnmodifiableSet(new CharArraySet(
           (IEnumerable<string>)CZECH_STOP_WORDS, false));
-	}
+    }
 
-	/*
-	 * Contains the stopwords used with the {@link StopFilter}.
-	 */
-	// TODO make this final in 3.1
-	private ISet<string> stoptable;
+    /*
+     * Contains the stopwords used with the {@link StopFilter}.
+     */
+    // TODO make this final in 3.1
+    private ISet<string> stoptable;
   private readonly Version matchVersion;
 
-	/*
-	 * Builds an analyzer with the default stop words ({@link #CZECH_STOP_WORDS}).
-	 */
-	public CzechAnalyzer(Version matchVersion) 
+    /*
+     * Builds an analyzer with the default stop words ({@link #CZECH_STOP_WORDS}).
+     */
+    public CzechAnalyzer(Version matchVersion) 
     : this(matchVersion, DefaultSetHolder.DEFAULT_SET)
     {
     
-	}
-	
-	/*
+    }
+    
+    /*
    * Builds an analyzer with the given stop words and stemming exclusion words
    * 
    * @param matchVersion
@@ -110,15 +110,15 @@ public sealed class CzechAnalyzer : Analyzer {
   }
 
 
-	/*
-	 * Builds an analyzer with the given stop words.
-	 * @deprecated use {@link #CzechAnalyzer(Version, Set)} instead
-	 */
+    /*
+     * Builds an analyzer with the given stop words.
+     * @deprecated use {@link #CzechAnalyzer(Version, Set)} instead
+     */
   public CzechAnalyzer(Version matchVersion, params string[] stopwords) 
   : this(matchVersion, StopFilter.MakeStopSet( stopwords ))
   {
     
-	}
+    }
 
   /*
    * Builds an analyzer with the given stop words.
@@ -129,17 +129,17 @@ public sealed class CzechAnalyzer : Analyzer {
   : this(matchVersion, (ISet<string>)stopwords)
   {
     
-	}
+    }
 
-	/*
-	 * Builds an analyzer with the given stop words.
-	 * @deprecated use {@link #CzechAnalyzer(Version, Set)} instead
-	 */
+    /*
+     * Builds an analyzer with the given stop words.
+     * @deprecated use {@link #CzechAnalyzer(Version, Set)} instead
+     */
   public CzechAnalyzer(Version matchVersion, FileInfo stopwords ) 
       : this(matchVersion, WordlistLoader.GetWordSet( stopwords ))
   {
     
-	}
+    }
 
     /*
      * Loads stopwords hash from resource stream (file, database...).
@@ -173,34 +173,34 @@ public sealed class CzechAnalyzer : Analyzer {
         }
     }
 
-	/*
-	 * Creates a {@link TokenStream} which tokenizes all the text in the provided {@link Reader}.
-	 *
-	 * @return  A {@link TokenStream} built from a {@link StandardTokenizer} filtered with
-	 * 			{@link StandardFilter}, {@link LowerCaseFilter}, and {@link StopFilter}
-	 */
-	public override sealed TokenStream TokenStream( String fieldName, TextReader reader ) {
+    /*
+     * Creates a {@link TokenStream} which tokenizes all the text in the provided {@link Reader}.
+     *
+     * @return  A {@link TokenStream} built from a {@link StandardTokenizer} filtered with
+     *             {@link StandardFilter}, {@link LowerCaseFilter}, and {@link StopFilter}
+     */
+    public override sealed TokenStream TokenStream( String fieldName, TextReader reader ) {
                 TokenStream result = new StandardTokenizer( matchVersion, reader );
-		result = new StandardFilter( result );
-		result = new LowerCaseFilter( result );
-		result = new StopFilter( StopFilter.GetEnablePositionIncrementsVersionDefault(matchVersion),
+        result = new StandardFilter( result );
+        result = new LowerCaseFilter( result );
+        result = new StopFilter( StopFilter.GetEnablePositionIncrementsVersionDefault(matchVersion),
                                          result, stoptable );
-		return result;
-	}
-	
-	private class SavedStreams {
-	    protected internal Tokenizer source;
-	    protected internal TokenStream result;
-	};
-	
-	/*
+        return result;
+    }
+    
+    private class SavedStreams {
+        protected internal Tokenizer source;
+        protected internal TokenStream result;
+    };
+    
+    /*
      * Returns a (possibly reused) {@link TokenStream} which tokenizes all the text in 
      * the provided {@link Reader}.
      *
      * @return  A {@link TokenStream} built from a {@link StandardTokenizer} filtered with
      *          {@link StandardFilter}, {@link LowerCaseFilter}, and {@link StopFilter}
      */
-	public override TokenStream ReusableTokenStream(String fieldName, TextReader reader)
+    public override TokenStream ReusableTokenStream(String fieldName, TextReader reader)
     {
       SavedStreams streams = (SavedStreams) PreviousTokenStream;
       if (streams == null) {

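The hunk above ends just as the SavedStreams reuse pattern begins. For context, here is a sketch of how that pattern typically completes in 2.9-era analyzers, in the context of this class: PreviousTokenStream caches per-thread state, and Tokenizer.Reset(TextReader) re-points the cached tokenizer at new input. Treat it as an illustration rather than the file's exact remainder.

    public override TokenStream ReusableTokenStream(String fieldName, TextReader reader)
    {
        SavedStreams streams = (SavedStreams) PreviousTokenStream;
        if (streams == null)
        {
            // First call on this thread: build the chain once and cache it.
            streams = new SavedStreams();
            streams.source = new StandardTokenizer(matchVersion, reader);
            streams.result = new StandardFilter(streams.source);
            streams.result = new LowerCaseFilter(streams.result);
            streams.result = new StopFilter(
                StopFilter.GetEnablePositionIncrementsVersionDefault(matchVersion),
                streams.result, stoptable);
            PreviousTokenStream = streams;
        }
        else
        {
            // Later calls: re-point the cached tokenizer at the new reader
            // instead of allocating a new filter chain.
            streams.source.Reset(reader);
        }
        return streams.result;
    }
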
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/De/GermanAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/De/GermanAnalyzer.cs b/src/contrib/Analyzers/De/GermanAnalyzer.cs
index d6946b6..5f068e1 100644
--- a/src/contrib/Analyzers/De/GermanAnalyzer.cs
+++ b/src/contrib/Analyzers/De/GermanAnalyzer.cs
@@ -1,4 +1,4 @@
-/*
+/*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -44,20 +44,20 @@ namespace Lucene.Net.Analysis.De
         /// </summary>
         //TODO: make this private in 3.1
         private static readonly String[] GERMAN_STOP_WORDS = 
-		{
-			"einer", "eine", "eines", "einem", "einen",
-			"der", "die", "das", "dass", "daß",
-			"du", "er", "sie", "es",
-			"was", "wer", "wie", "wir",
-			"und", "oder", "ohne", "mit",
-			"am", "im", "in", "aus", "auf",
-			"ist", "sein", "war", "wird",
-			"ihr", "ihre", "ihres",
-			"als", "für", "von",
-			"dich", "dir", "mich", "mir",
-			"mein", "kein",
-			"durch", "wegen"
-		};
+        {
+            "einer", "eine", "eines", "einem", "einen",
+            "der", "die", "das", "dass", "daß",
+            "du", "er", "sie", "es",
+            "was", "wer", "wie", "wir",
+            "und", "oder", "ohne", "mit",
+            "am", "im", "in", "aus", "auf",
+            "ist", "sein", "war", "wird",
+            "ihr", "ihre", "ihres",
+            "als", "für", "von",
+            "dich", "dir", "mich", "mir",
+            "mein", "kein",
+            "durch", "wegen"
+        };
 
         /// <summary>
         /// Returns a set of default German-stopwords 


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Document/Fieldable.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/Fieldable.cs b/src/core/Document/Fieldable.cs
index 89d37d1..daa47f2 100644
--- a/src/core/Document/Fieldable.cs
+++ b/src/core/Document/Fieldable.cs
@@ -22,32 +22,32 @@ using FieldInvertState = Lucene.Net.Index.FieldInvertState;
 
 namespace Lucene.Net.Documents
 {
-	/// <summary> Synonymous with <see cref="Field" />.
-	/// 
-	/// <p/><bold>WARNING</bold>: This interface may change within minor versions, despite Lucene's backward compatibility requirements.
-	/// This means new methods may be added from version to version.  This change only affects the Fieldable API; other backwards
-	/// compatibility promises remain intact. For example, Lucene can still
-	/// read and write indices created within the same major version.
-	/// <p/>
-	/// 
-	/// 
-	/// </summary>
-	public interface IFieldable
-	{
+    /// <summary> Synonymous with <see cref="Field" />.
+    /// 
+    /// <p/><bold>WARNING</bold>: This interface may change within minor versions, despite Lucene's backward compatibility requirements.
+    /// This means new methods may be added from version to version.  This change only affects the Fieldable API; other backwards
+    /// compatibility promises remain intact. For example, Lucene can still
+    /// read and write indices created within the same major version.
+    /// <p/>
+    /// 
+    /// 
+    /// </summary>
+    public interface IFieldable
+    {
         /// <summary>Gets or sets the boost factor for hits for this field.  This value will be
         /// multiplied into the score of all hits on this field of this
         /// document.
-	    /// 
-	    /// <p/>The boost is multiplied by <see cref="Lucene.Net.Documents.Document.Boost" /> of the document
-	    /// containing this field.  If a document has multiple fields with the same
-	    /// name, all such values are multiplied together.  This product is then
-	    /// used to compute the norm factor for the field.  By
-	    /// default, in the <see cref="Lucene.Net.Search.Similarity.ComputeNorm(String,Lucene.Net.Index.FieldInvertState)"/>
-	    /// method, the boost value is multiplied
-	    /// by the <see cref="Lucene.Net.Search.Similarity.LengthNorm(String,int)"/>
-	    /// and then rounded by <see cref="Lucene.Net.Search.Similarity.EncodeNorm(float)" /> before it is stored in the
-	    /// index.  One should attempt to ensure that this product does not overflow
-	    /// the range of that encoding.
+        /// 
+        /// <p/>The boost is multiplied by <see cref="Lucene.Net.Documents.Document.Boost" /> of the document
+        /// containing this field.  If a document has multiple fields with the same
+        /// name, all such values are multiplied together.  This product is then
+        /// used to compute the norm factor for the field.  By
+        /// default, in the <see cref="Lucene.Net.Search.Similarity.ComputeNorm(String,Lucene.Net.Index.FieldInvertState)"/>
+        /// method, the boost value is multiplied
+        /// by the <see cref="Lucene.Net.Search.Similarity.LengthNorm(String,int)"/>
+        /// and then rounded by <see cref="Lucene.Net.Search.Similarity.EncodeNorm(float)" /> before it is stored in the
+        /// index.  One should attempt to ensure that this product does not overflow
+        /// the range of that encoding.
         /// 
         /// <p/>The default value is 1.0.
         /// 
@@ -56,78 +56,78 @@ namespace Lucene.Net.Documents
         /// <see cref="Lucene.Net.Search.Searcher.Doc(int)" /> may thus not have the same value present as when
         /// this field was indexed.
         /// 
-	    /// </summary>
-	    /// <seealso cref="Lucene.Net.Documents.Document.Boost">
-	    /// </seealso>
-	    /// <seealso cref="Lucene.Net.Search.Similarity.ComputeNorm(String, FieldInvertState)">
-	    /// </seealso>
-	    /// <seealso cref="Lucene.Net.Search.Similarity.EncodeNorm(float)">
-	    /// </seealso>
-	    float Boost { get; set; }
-
-	    /// <summary>Returns the name of the field as an interned string.
-	    /// For example "date", "title", "body", ...
-	    /// </summary>
-	    string Name { get; }
-
-	    /// <summary>The value of the field as a String, or null.
-	    /// <p/>
-	    /// For indexing, if isStored()==true, the stringValue() will be used as the stored field value
-	    /// unless isBinary()==true, in which case GetBinaryValue() will be used.
-	    /// 
-	    /// If isIndexed()==true and isTokenized()==false, this String value will be indexed as a single token.
-	    /// If isIndexed()==true and isTokenized()==true, then tokenStreamValue() will be used to generate indexed tokens if not null,
-	    /// else readerValue() will be used to generate indexed tokens if not null, else stringValue() will be used to generate tokens.
-	    /// </summary>
-	    string StringValue { get; }
-
-	    /// <summary>The value of the field as a Reader, which can be used at index time to generate indexed tokens.</summary>
-	    /// <seealso cref="StringValue()">
-	    /// </seealso>
-	    TextReader ReaderValue { get; }
-
-	    /// <summary>The TokenStream for this field to be used when indexing, or null.</summary>
-	    /// <seealso cref="StringValue()">
-	    /// </seealso>
-	    TokenStream TokenStreamValue { get; }
-
-	    /// <summary>True if the value of the field is to be stored in the index for return
-	    /// with search hits. 
-	    /// </summary>
-	    bool IsStored { get; }
-
-	    /// <summary>True if the value of the field is to be indexed, so that it may be
-	    /// searched on. 
-	    /// </summary>
-	    bool IsIndexed { get; }
-
-	    /// <summary>True if the value of the field should be tokenized as text prior to
-	    /// indexing.  Un-tokenized fields are indexed as a single word and may not be
-	    /// Reader-valued. 
-	    /// </summary>
-	    bool IsTokenized { get; }
-
-	    /// <summary>True if the term or terms used to index this field are stored as a term
-	    /// vector, available from <see cref="Lucene.Net.Index.IndexReader.GetTermFreqVector(int,String)" />.
-	    /// These methods do not provide access to the original content of the field,
-	    /// only to terms used to index it. If the original content must be
-	    /// preserved, use the <c>stored</c> attribute instead.
-	    /// 
-	    /// </summary>
-	    /// <seealso cref="Lucene.Net.Index.IndexReader.GetTermFreqVector(int, String)">
-	    /// </seealso>
-	    bool IsTermVectorStored { get; }
-
-	    /// <summary> True if terms are stored as term vector together with their offsets 
-	    /// (start and end position in source text).
-	    /// </summary>
-	    bool IsStoreOffsetWithTermVector { get; }
-
-	    /// <summary> True if terms are stored as term vector together with their token positions.</summary>
-	    bool IsStorePositionWithTermVector { get; }
-
-	    /// <summary>True if the value of the field is stored as binary </summary>
-	    bool IsBinary { get; }
+        /// </summary>
+        /// <seealso cref="Lucene.Net.Documents.Document.Boost">
+        /// </seealso>
+        /// <seealso cref="Lucene.Net.Search.Similarity.ComputeNorm(String, FieldInvertState)">
+        /// </seealso>
+        /// <seealso cref="Lucene.Net.Search.Similarity.EncodeNorm(float)">
+        /// </seealso>
+        float Boost { get; set; }
+
+        /// <summary>Returns the name of the field as an interned string.
+        /// For example "date", "title", "body", ...
+        /// </summary>
+        string Name { get; }
+
+        /// <summary>The value of the field as a String, or null.
+        /// <p/>
+        /// For indexing, if isStored()==true, the stringValue() will be used as the stored field value
+        /// unless isBinary()==true, in which case GetBinaryValue() will be used.
+        /// 
+        /// If isIndexed()==true and isTokenized()==false, this String value will be indexed as a single token.
+        /// If isIndexed()==true and isTokenized()==true, then tokenStreamValue() will be used to generate indexed tokens if not null,
+        /// else readerValue() will be used to generate indexed tokens if not null, else stringValue() will be used to generate tokens.
+        /// </summary>
+        string StringValue { get; }
+
+        /// <summary>The value of the field as a Reader, which can be used at index time to generate indexed tokens.</summary>
+        /// <seealso cref="StringValue()">
+        /// </seealso>
+        TextReader ReaderValue { get; }
+
+        /// <summary>The TokenStream for this field to be used when indexing, or null.</summary>
+        /// <seealso cref="StringValue()">
+        /// </seealso>
+        TokenStream TokenStreamValue { get; }
+
+        /// <summary>True if the value of the field is to be stored in the index for return
+        /// with search hits. 
+        /// </summary>
+        bool IsStored { get; }
+
+        /// <summary>True if the value of the field is to be indexed, so that it may be
+        /// searched on. 
+        /// </summary>
+        bool IsIndexed { get; }
+
+        /// <summary>True if the value of the field should be tokenized as text prior to
+        /// indexing.  Un-tokenized fields are indexed as a single word and may not be
+        /// Reader-valued. 
+        /// </summary>
+        bool IsTokenized { get; }
+
+        /// <summary>True if the term or terms used to index this field are stored as a term
+        /// vector, available from <see cref="Lucene.Net.Index.IndexReader.GetTermFreqVector(int,String)" />.
+        /// These methods do not provide access to the original content of the field,
+        /// only to terms used to index it. If the original content must be
+        /// preserved, use the <c>stored</c> attribute instead.
+        /// 
+        /// </summary>
+        /// <seealso cref="Lucene.Net.Index.IndexReader.GetTermFreqVector(int, String)">
+        /// </seealso>
+        bool IsTermVectorStored { get; }
+
+        /// <summary> True if terms are stored as term vector together with their offsets 
+        /// (start and end position in source text).
+        /// </summary>
+        bool IsStoreOffsetWithTermVector { get; }
+
+        /// <summary> True if terms are stored as term vector together with their token positions.</summary>
+        bool IsStorePositionWithTermVector { get; }
+
+        /// <summary>True if the value of the field is stored as binary </summary>
+        bool IsBinary { get; }
 
         /// <summary>
         /// True if norms are omitted for this indexed field.
@@ -137,69 +137,69 @@ namespace Lucene.Net.Documents
         /// This effectively disables indexing boosts and length normalization for this field.
         /// </para>
         /// </summary>
-	    bool OmitNorms { get; set; }
-
-
-	    /// <summary> Indicates whether a Field is Lazy or not.  The semantics of Lazy loading are such that if a Field is lazily loaded, retrieving
-	    /// it's values via <see cref="StringValue()" /> or <see cref="GetBinaryValue()" /> is only valid as long as the <see cref="Lucene.Net.Index.IndexReader" /> that
-	    /// retrieved the <see cref="Document" /> is still open.
-	    /// 
-	    /// </summary>
-	    /// <value> true if this field can be loaded lazily </value>
-	    bool IsLazy { get; }
-
-	    /// <summary> Returns offset into byte[] segment that is used as value, if Field is not binary
-	    /// returned value is undefined
-	    /// </summary>
-	    /// <value> index of the first character in byte[] segment that represents this Field value </value>
-	    int BinaryOffset { get; }
-
-	    /// <summary> Returns length of byte[] segment that is used as value, if Field is not binary
-	    /// returned value is undefined
-	    /// </summary>
-	    /// <value> length of byte[] segment that represents this Field value </value>
-	    int BinaryLength { get; }
-
-	    /// <summary> Return the raw byte[] for the binary field.  Note that
-	    /// you must also call <see cref="BinaryLength" /> and <see cref="BinaryOffset" />
-	    /// to know which range of bytes in this
-	    /// returned array belong to the field.
-	    /// </summary>
-	    /// <returns> reference to the Field value as byte[]. </returns>
-	    byte[] GetBinaryValue();
-
-	    /// <summary> Return the raw byte[] for the binary field.  Note that
+        bool OmitNorms { get; set; }
+
+
+        /// <summary> Indicates whether a Field is Lazy or not.  The semantics of Lazy loading are such that if a Field is lazily loaded, retrieving
+        /// its values via <see cref="StringValue" /> or <see cref="GetBinaryValue()" /> is only valid as long as the <see cref="Lucene.Net.Index.IndexReader" /> that
+        /// retrieved the <see cref="Document" /> is still open.
+        /// 
+        /// </summary>
+        /// <value> true if this field can be loaded lazily </value>
+        bool IsLazy { get; }
+
+        /// <summary> Returns the offset into the byte[] segment that is used as the value; if the Field is not binary,
+        /// the returned value is undefined.
+        /// </summary>
+        /// <value> index of the first character in byte[] segment that represents this Field value </value>
+        int BinaryOffset { get; }
+
+        /// <summary> Returns the length of the byte[] segment that is used as the value; if the Field is not binary,
+        /// the returned value is undefined.
+        /// </summary>
+        /// <value> length of byte[] segment that represents this Field value </value>
+        int BinaryLength { get; }
+
+        /// <summary> Return the raw byte[] for the binary field.  Note that
+        /// you must also call <see cref="BinaryLength" /> and <see cref="BinaryOffset" />
+        /// to know which range of bytes in this
+        /// returned array belong to the field.
+        /// </summary>
+        /// <returns> reference to the Field value as byte[]. </returns>
+        byte[] GetBinaryValue();
+
+        /// <summary> Return the raw byte[] for the binary field.  Note that
         /// you must also call <see cref="BinaryLength" /> and <see cref="BinaryOffset" />
-		/// to know which range of bytes in this
-		/// returned array belong to the field.<p/>
-		/// About reuse: if you pass in the result byte[] and it is
-		/// used, likely the underlying implementation will hold
-		/// onto this byte[] and return it in future calls to
-		/// <see cref="GetBinaryValue()" /> or <see cref="GetBinaryValue()" />.
-		/// So if you subsequently re-use the same byte[] elsewhere
-		/// it will alter this Fieldable's value.
-		/// </summary>
-		/// <param name="result"> User defined buffer that will be used if
-		/// possible.  If this is null or not large enough, a new
-		/// buffer is allocated
-		/// </param>
-		/// <returns> reference to the Field value as byte[].
-		/// </returns>
-		byte[] GetBinaryValue(byte[] result);
-
-	    /// Expert:
-	    /// <para>
-	    /// If set, omit term freq, positions and payloads from
-	    /// postings for this field.
-	    /// </para>
-	    /// <para>
-	    /// <b>NOTE</b>: While this option reduces storage space
-	    /// required in the index, it also means any query
-	    /// requiring positional information, such as
-	    /// <see cref="Lucene.Net.Search.PhraseQuery"/> or 
-	    /// <see cref="Lucene.Net.Search.Spans.SpanQuery"/> 
-	    /// subclasses will silently fail to find results.
-	    /// </para>
-	    bool OmitTermFreqAndPositions { set; get; }
-	}
+        /// to know which range of bytes in this
+        /// returned array belong to the field.<p/>
+        /// About reuse: if you pass in the result byte[] and it is
+        /// used, the underlying implementation will likely hold
+        /// onto this byte[] and return it in future calls to
+        /// <see cref="GetBinaryValue()" /> or <see cref="GetBinaryValue(byte[])" />.
+        /// So if you subsequently re-use the same byte[] elsewhere,
+        /// it will alter this Fieldable's value.
+        /// </summary>
+        /// <param name="result"> User defined buffer that will be used if
+        /// possible.  If this is null or not large enough, a new
+        /// buffer is allocated
+        /// </param>
+        /// <returns> reference to the Field value as byte[].
+        /// </returns>
+        byte[] GetBinaryValue(byte[] result);
+
+        /// <summary>Expert:
+        /// <para>
+        /// If set, omit term freq, positions and payloads from
+        /// postings for this field.
+        /// </para>
+        /// <para>
+        /// <b>NOTE</b>: While this option reduces storage space
+        /// required in the index, it also means any query
+        /// requiring positional information, such as
+        /// <see cref="Lucene.Net.Search.PhraseQuery"/> or 
+        /// <see cref="Lucene.Net.Search.Spans.SpanQuery"/> 
+        /// subclasses will silently fail to find results.
+        /// </para>
+        /// </summary>
+        bool OmitTermFreqAndPositions { set; get; }
+    }
 }
\ No newline at end of file
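
For reference, a minimal sketch of how the binary accessors declared above fit together, assuming an IFieldable obtained from a stored document (names are illustrative):

    using System;
    using Lucene.Net.Documents;

    static class BinaryFieldSketch
    {
        // Copies a field's binary payload into a right-sized array.
        // GetBinaryValue may return a shared or oversized buffer, so the
        // BinaryOffset/BinaryLength pair must be honored.
        public static byte[] ReadBinary(IFieldable field)
        {
            byte[] raw = field.GetBinaryValue(null); // null => a new buffer is allocated
            var exact = new byte[field.BinaryLength];
            Array.Copy(raw, field.BinaryOffset, exact, 0, field.BinaryLength);
            return exact;
        }
    }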

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Document/LoadFirstFieldSelector.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/LoadFirstFieldSelector.cs b/src/core/Document/LoadFirstFieldSelector.cs
index 4f353f6..c6a9dc3 100644
--- a/src/core/Document/LoadFirstFieldSelector.cs
+++ b/src/core/Document/LoadFirstFieldSelector.cs
@@ -18,18 +18,18 @@
 using System;
 namespace Lucene.Net.Documents
 {
-	
-	/// <summary> Load the First field and break.
-	/// <p/>
-	/// See <see cref="FieldSelectorResult.LOAD_AND_BREAK" />
-	/// </summary>
-	[Serializable]
-	public class LoadFirstFieldSelector : FieldSelector
-	{
-		
-		public virtual FieldSelectorResult Accept(System.String fieldName)
-		{
-			return FieldSelectorResult.LOAD_AND_BREAK;
-		}
-	}
+    
+    /// <summary> Load the first field and break.
+    /// <p/>
+    /// See <see cref="FieldSelectorResult.LOAD_AND_BREAK" />
+    /// </summary>
+    [Serializable]
+    public class LoadFirstFieldSelector : FieldSelector
+    {
+        
+        public virtual FieldSelectorResult Accept(System.String fieldName)
+        {
+            return FieldSelectorResult.LOAD_AND_BREAK;
+        }
+    }
 }
\ No newline at end of file
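
A short usage sketch for the selector above, assuming an open IndexReader (reader and docId are placeholders): only the first stored field is loaded before LOAD_AND_BREAK stops the scan.

    using Lucene.Net.Documents;
    using Lucene.Net.Index;

    static class LoadFirstSketch
    {
        // Loads just the first stored field of a document, then stops early.
        public static Document FirstFieldOnly(IndexReader reader, int docId)
        {
            return reader.Document(docId, new LoadFirstFieldSelector());
        }
    }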

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Document/MapFieldSelector.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/MapFieldSelector.cs b/src/core/Document/MapFieldSelector.cs
index 92a8959..87d91ff 100644
--- a/src/core/Document/MapFieldSelector.cs
+++ b/src/core/Document/MapFieldSelector.cs
@@ -22,47 +22,47 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Documents
 {
-	/// <summary>A <see cref="FieldSelector" /> based on a Map of field names to <see cref="FieldSelectorResult" />s</summary>
-	[Serializable]
-	public class MapFieldSelector : FieldSelector
-	{
-		internal IDictionary<string, FieldSelectorResult> fieldSelections;
-		
-		/// <summary>Create a a MapFieldSelector</summary>
-		/// <param name="fieldSelections">maps from field names (String) to <see cref="FieldSelectorResult" />s
-		/// </param>
+    /// <summary>A <see cref="FieldSelector" /> based on a Map of field names to <see cref="FieldSelectorResult" />s</summary>
+    [Serializable]
+    public class MapFieldSelector : FieldSelector
+    {
+        internal IDictionary<string, FieldSelectorResult> fieldSelections;
+        
+        /// <summary>Create a MapFieldSelector</summary>
+        /// <param name="fieldSelections">maps from field names (String) to <see cref="FieldSelectorResult" />s
+        /// </param>
         public MapFieldSelector(IDictionary<string, FieldSelectorResult> fieldSelections)
-		{
-			this.fieldSelections = fieldSelections;
-		}
-		
-		/// <summary>Create a a MapFieldSelector</summary>
-		/// <param name="fields">fields to LOAD.  List of Strings.  All other fields are NO_LOAD.
-		/// </param>
-		public MapFieldSelector(IList<string> fields)
-		{
-			fieldSelections = new HashMap<string, FieldSelectorResult>(fields.Count * 5 / 3);
-			foreach(var field in fields)
-				fieldSelections[field] = FieldSelectorResult.LOAD;
-		}
-		
-		/// <summary>Create a a MapFieldSelector</summary>
-		/// <param name="fields">fields to LOAD.  All other fields are NO_LOAD.
-		/// </param>
-		public MapFieldSelector(params System.String[] fields)
+        {
+            this.fieldSelections = fieldSelections;
+        }
+        
+        /// <summary>Create a MapFieldSelector</summary>
+        /// <param name="fields">fields to LOAD.  List of Strings.  All other fields are NO_LOAD.
+        /// </param>
+        public MapFieldSelector(IList<string> fields)
+        {
+            fieldSelections = new HashMap<string, FieldSelectorResult>(fields.Count * 5 / 3);
+            foreach(var field in fields)
+                fieldSelections[field] = FieldSelectorResult.LOAD;
+        }
+        
+        /// <summary>Create a MapFieldSelector</summary>
+        /// <param name="fields">fields to LOAD.  All other fields are NO_LOAD.
+        /// </param>
+        public MapFieldSelector(params System.String[] fields)
             : this(fields.ToList()) // TODO: this is slow
-		{
-		}
-		
-		/// <summary>Load field according to its associated value in fieldSelections</summary>
-		/// <param name="field">a field name
-		/// </param>
-		/// <returns> the fieldSelections value that field maps to or NO_LOAD if none.
-		/// </returns>
-		public virtual FieldSelectorResult Accept(System.String field)
-		{
-		    FieldSelectorResult selection = fieldSelections[field];
+        {
+        }
+        
+        /// <summary>Load field according to its associated value in fieldSelections</summary>
+        /// <param name="field">a field name
+        /// </param>
+        /// <returns> the fieldSelections value that field maps to or NO_LOAD if none.
+        /// </returns>
+        public virtual FieldSelectorResult Accept(System.String field)
+        {
+            FieldSelectorResult selection = fieldSelections[field];
             return selection != FieldSelectorResult.INVALID ? selection : FieldSelectorResult.NO_LOAD; // TODO: See FieldSelectorResult
-		}
-	}
+        }
+    }
 }
\ No newline at end of file
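
A usage sketch for the map-based selector, assuming stored fields named "title" and "date" (illustrative): any field not present in the map resolves to NO_LOAD, so only the listed fields are materialized when the document is read.

    using Lucene.Net.Documents;
    using Lucene.Net.Index;

    static class MapSelectorSketch
    {
        public static Document TitleAndDateOnly(IndexReader reader, int docId)
        {
            // Fields absent from the map come back as FieldSelectorResult.NO_LOAD.
            var selector = new MapFieldSelector("title", "date");
            return reader.Document(docId, selector);
        }
    }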

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Document/NumberTools.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/NumberTools.cs b/src/core/Document/NumberTools.cs
index f877120..9a467a7 100644
--- a/src/core/Document/NumberTools.cs
+++ b/src/core/Document/NumberTools.cs
@@ -21,145 +21,145 @@ using NumericUtils = Lucene.Net.Util.NumericUtils;
 
 namespace Lucene.Net.Documents
 {
-	
-	// do not remove this class in 3.0, it may be needed to decode old indexes!
-	
-	/// <summary> Provides support for converting longs to Strings, and back again. The strings
-	/// are structured so that lexicographic sorting order is preserved.
-	/// 
-	/// <p/>
-	/// That is, if l1 is less than l2 for any two longs l1 and l2, then
-	/// NumberTools.longToString(l1) is lexicographically less than
-	/// NumberTools.longToString(l2). (Similarly for "greater than" and "equals".)
-	/// 
-	/// <p/>
-	/// This class handles <b>all</b> long values (unlike
-	/// <see cref="Lucene.Net.Documents.DateField" />).
-	/// 
-	/// </summary>
-	/// <deprecated> For new indexes use <see cref="NumericUtils" /> instead, which
-	/// provides a sortable binary representation (prefix encoded) of numeric
-	/// values.
-	/// To index and efficiently query numeric values use <see cref="NumericField" />
-	/// and <see cref="NumericRangeQuery{T}" />.
-	/// This class is included for use with existing
-	/// indices and will be removed in a future release (possibly Lucene 4.0).
-	/// </deprecated>
+    
+    // do not remove this class in 3.0, it may be needed to decode old indexes!
+    
+    /// <summary> Provides support for converting longs to Strings, and back again. The strings
+    /// are structured so that lexicographic sorting order is preserved.
+    /// 
+    /// <p/>
+    /// That is, if l1 is less than l2 for any two longs l1 and l2, then
+    /// NumberTools.longToString(l1) is lexicographically less than
+    /// NumberTools.longToString(l2). (Similarly for "greater than" and "equals".)
+    /// 
+    /// <p/>
+    /// This class handles <b>all</b> long values (unlike
+    /// <see cref="Lucene.Net.Documents.DateField" />).
+    /// 
+    /// </summary>
+    /// <deprecated> For new indexes use <see cref="NumericUtils" /> instead, which
+    /// provides a sortable binary representation (prefix encoded) of numeric
+    /// values.
+    /// To index and efficiently query numeric values use <see cref="NumericField" />
+    /// and <see cref="NumericRangeQuery{T}" />.
+    /// This class is included for use with existing
+    /// indices and will be removed in a future release (possibly Lucene 4.0).
+    /// </deprecated>
     [Obsolete("For new indexes use NumericUtils instead, which provides a sortable binary representation (prefix encoded) of numeric values. To index and efficiently query numeric values use NumericField and NumericRangeQuery. This class is included for use with existing indices and will be removed in a future release (possibly Lucene 4.0).")]
-	public class NumberTools
-	{
-		
-		private const int RADIX = 36;
-		
-		private const char NEGATIVE_PREFIX = '-';
-		
-		// NB: NEGATIVE_PREFIX must be < POSITIVE_PREFIX
-		private const char POSITIVE_PREFIX = '0';
-		
-		//NB: this must be less than
-		/// <summary> Equivalent to longToString(Long.MIN_VALUE)</summary>
+    public class NumberTools
+    {
+        
+        private const int RADIX = 36;
+        
+        private const char NEGATIVE_PREFIX = '-';
+        
+        // NB: NEGATIVE_PREFIX must be < POSITIVE_PREFIX
+        private const char POSITIVE_PREFIX = '0';
+        
+        //NB: this must be less than
+        /// <summary> Equivalent to longToString(Long.MIN_VALUE)</summary>
 #if !PRE_LUCENE_NET_2_0_0_COMPATIBLE
-		public static readonly System.String MIN_STRING_VALUE = NEGATIVE_PREFIX + "0000000000000";
+        public static readonly System.String MIN_STRING_VALUE = NEGATIVE_PREFIX + "0000000000000";
 #else
         public static readonly System.String MIN_STRING_VALUE = NEGATIVE_PREFIX + "0000000000000000";
 #endif
-		
-		/// <summary> Equivalent to longToString(Long.MAX_VALUE)</summary>
+        
+        /// <summary> Equivalent to longToString(Long.MAX_VALUE)</summary>
 #if !PRE_LUCENE_NET_2_0_0_COMPATIBLE
-		public static readonly System.String MAX_STRING_VALUE = POSITIVE_PREFIX + "1y2p0ij32e8e7";
+        public static readonly System.String MAX_STRING_VALUE = POSITIVE_PREFIX + "1y2p0ij32e8e7";
 #else
         public static readonly System.String MAX_STRING_VALUE = POSITIVE_PREFIX + "7fffffffffffffff";
 #endif
-		
-		/// <summary> The length of (all) strings returned by <see cref="LongToString" /></summary>
-		public static readonly int STR_SIZE = MIN_STRING_VALUE.Length;
-		
-		/// <summary> Converts a long to a String suitable for indexing.</summary>
-		public static System.String LongToString(long l)
-		{
-			
-			if (l == System.Int64.MinValue)
-			{
-				// special case, because long is not symmetric around zero
-				return MIN_STRING_VALUE;
-			}
-			
-			System.Text.StringBuilder buf = new System.Text.StringBuilder(STR_SIZE);
-			
-			if (l < 0)
-			{
-				buf.Append(NEGATIVE_PREFIX);
-				l = System.Int64.MaxValue + l + 1;
-			}
-			else
-			{
-				buf.Append(POSITIVE_PREFIX);
-			}
+        
+        /// <summary> The length of (all) strings returned by <see cref="LongToString" /></summary>
+        public static readonly int STR_SIZE = MIN_STRING_VALUE.Length;
+        
+        /// <summary> Converts a long to a String suitable for indexing.</summary>
+        public static System.String LongToString(long l)
+        {
+            
+            if (l == System.Int64.MinValue)
+            {
+                // special case, because long is not symmetric around zero
+                return MIN_STRING_VALUE;
+            }
+            
+            System.Text.StringBuilder buf = new System.Text.StringBuilder(STR_SIZE);
+            
+            if (l < 0)
+            {
+                buf.Append(NEGATIVE_PREFIX);
+                l = System.Int64.MaxValue + l + 1;
+            }
+            else
+            {
+                buf.Append(POSITIVE_PREFIX);
+            }
 #if !PRE_LUCENE_NET_2_0_0_COMPATIBLE
             System.String num = ToString(l);
 #else
             System.String num = System.Convert.ToString(l, RADIX);
 #endif
-			
-			int padLen = STR_SIZE - num.Length - buf.Length;
-			while (padLen-- > 0)
-			{
-				buf.Append('0');
-			}
-			buf.Append(num);
-			
-			return buf.ToString();
-		}
-		
-		/// <summary> Converts a String that was returned by <see cref="LongToString" /> back to a
-		/// long.
-		/// 
-		/// </summary>
-		/// <throws>  IllegalArgumentException </throws>
-		/// <summary>             if the input is null
-		/// </summary>
-		/// <throws>  NumberFormatException </throws>
-		/// <summary>             if the input does not parse (it was not a String returned by
-		/// longToString()).
-		/// </summary>
-		public static long StringToLong(System.String str)
-		{
-			if (str == null)
-			{
-				throw new System.NullReferenceException("string cannot be null");
-			}
-			if (str.Length != STR_SIZE)
-			{
-				throw new System.FormatException("string is the wrong size");
-			}
-			
-			if (str.Equals(MIN_STRING_VALUE))
-			{
-				return System.Int64.MinValue;
-			}
-			
-			char prefix = str[0];
+            
+            int padLen = STR_SIZE - num.Length - buf.Length;
+            while (padLen-- > 0)
+            {
+                buf.Append('0');
+            }
+            buf.Append(num);
+            
+            return buf.ToString();
+        }
+        
+        /// <summary> Converts a String that was returned by <see cref="LongToString" /> back to a
+        /// long.
+        /// 
+        /// </summary>
+        /// <exception cref="System.NullReferenceException">
+        ///             if the input is null
+        /// </exception>
+        /// <exception cref="System.FormatException">
+        ///             if the input does not parse (it was not a String returned by
+        /// <see cref="LongToString" />).
+        /// </exception>
+        public static long StringToLong(System.String str)
+        {
+            if (str == null)
+            {
+                throw new System.NullReferenceException("string cannot be null");
+            }
+            if (str.Length != STR_SIZE)
+            {
+                throw new System.FormatException("string is the wrong size");
+            }
+            
+            if (str.Equals(MIN_STRING_VALUE))
+            {
+                return System.Int64.MinValue;
+            }
+            
+            char prefix = str[0];
 #if !PRE_LUCENE_NET_2_0_0_COMPATIBLE
-			long l = ToLong(str.Substring(1));
+            long l = ToLong(str.Substring(1));
 #else
             long l = System.Convert.ToInt64(str.Substring(1), RADIX);
 #endif
-			
-			if (prefix == POSITIVE_PREFIX)
-			{
-				// nop
-			}
-			else if (prefix == NEGATIVE_PREFIX)
-			{
-				l = l - System.Int64.MaxValue - 1;
-			}
-			else
-			{
-				throw new System.FormatException("string does not begin with the correct prefix");
-			}
-			
-			return l;
-		}
+            
+            if (prefix == POSITIVE_PREFIX)
+            {
+                // nop
+            }
+            else if (prefix == NEGATIVE_PREFIX)
+            {
+                l = l - System.Int64.MaxValue - 1;
+            }
+            else
+            {
+                throw new System.FormatException("string does not begin with the correct prefix");
+            }
+            
+            return l;
+        }
 
 #if !PRE_LUCENE_NET_2_0_0_COMPATIBLE
         #region BASE36 OPS 
@@ -217,5 +217,5 @@ namespace Lucene.Net.Documents
         }
         #endregion
 #endif
-	}
+    }
 }
\ No newline at end of file
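
The order-preserving encoding documented above can be exercised directly; a small sketch (values arbitrary; per the class docs, NumberTools is deprecated in favor of NumericUtils for new indexes):

    using System;
    using Lucene.Net.Documents;

    static class NumberToolsSketch
    {
        public static void Demo()
        {
            // Round trip: StringToLong inverts LongToString.
            string encoded = NumberTools.LongToString(42L);
            long decoded = NumberTools.StringToLong(encoded); // 42

            // Lexicographic order of the encoded strings matches numeric
            // order, even across the sign boundary.
            string a = NumberTools.LongToString(-5L);
            string b = NumberTools.LongToString(3L);
            Console.WriteLine(decoded);
            Console.WriteLine(string.CompareOrdinal(a, b) < 0); // True
        }
    }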

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Document/NumericField.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/NumericField.cs b/src/core/Document/NumericField.cs
index e77dee4..3358670 100644
--- a/src/core/Document/NumericField.cs
+++ b/src/core/Document/NumericField.cs
@@ -26,269 +26,269 @@ using SortField = Lucene.Net.Search.SortField;
 
 namespace Lucene.Net.Documents
 {
-	// javadocs
-	
-	/// <summary> <p/>This class provides a <see cref="Field" /> that enables indexing
-	/// of numeric values for efficient range filtering and
-	/// sorting.  Here's an example usage, adding an int value:
+    // javadocs
+    
+    /// <summary> <p/>This class provides a <see cref="Field" /> that enables indexing
+    /// of numeric values for efficient range filtering and
+    /// sorting.  Here's an example usage, adding an int value:
     /// <code>
-	/// document.add(new NumericField(name).setIntValue(value));
+    /// document.Add(new NumericField(name).SetIntValue(value));
     /// </code>
-	/// 
-	/// For optimal performance, re-use the
-	/// <c>NumericField</c> and <see cref="Document" /> instance for more than
-	/// one document:
-	/// 
+    /// 
+    /// For optimal performance, re-use the
+    /// <c>NumericField</c> and <see cref="Document" /> instance for more than
+    /// one document:
+    /// 
     /// <code>
-	/// NumericField field = new NumericField(name);
-	/// Document document = new Document();
-	/// document.add(field);
-	/// 
-	/// for(all documents) {
-	/// ...
-	/// field.setIntValue(value)
-	/// writer.addDocument(document);
-	/// ...
-	/// }
+    /// NumericField field = new NumericField(name);
+    /// Document document = new Document();
+    /// document.Add(field);
+    /// 
+    /// for(all documents) {
+    /// ...
+    /// field.SetIntValue(value);
+    /// writer.AddDocument(document);
+    /// ...
+    /// }
     /// </code>
-	/// 
-	/// <p/>The .Net native types <c>int</c>, <c>long</c>,
-	/// <c>float</c> and <c>double</c> are
-	/// directly supported.  However, any value that can be
-	/// converted into these native types can also be indexed.
-	/// For example, date/time values represented by a
-	/// <see cref="System.DateTime" /> can be translated into a long
+    /// 
+    /// <p/>The .Net native types <c>int</c>, <c>long</c>,
+    /// <c>float</c> and <c>double</c> are
+    /// directly supported.  However, any value that can be
+    /// converted into these native types can also be indexed.
+    /// For example, date/time values represented by a
+    /// <see cref="System.DateTime" /> can be translated into a long
     /// value using the <c>java.util.Date.getTime</c> method.  If you
-	/// don't need millisecond precision, you can quantize the
-	/// value, either by dividing the result of
-	/// <c>java.util.Date.getTime</c> or using the separate getters
-	/// (for year, month, etc.) to construct an <c>int</c> or
-	/// <c>long</c> value.<p/>
-	/// 
-	/// <p/>To perform range querying or filtering against a
-	/// <c>NumericField</c>, use <see cref="NumericRangeQuery{T}" /> or <see cref="NumericRangeFilter{T}" />
-	///.  To sort according to a
-	/// <c>NumericField</c>, use the normal numeric sort types, eg
-	/// <see cref="SortField.INT" />  <c>NumericField</c> values
-	/// can also be loaded directly from <see cref="FieldCache" />.<p/>
-	/// 
-	/// <p/>By default, a <c>NumericField</c>'s value is not stored but
-	/// is indexed for range filtering and sorting.  You can use
-	/// the <see cref="NumericField(String,Field.Store,bool)" />
-	/// constructor if you need to change these defaults.<p/>
-	/// 
-	/// <p/>You may add the same field name as a <c>NumericField</c> to
-	/// the same document more than once.  Range querying and
-	/// filtering will be the logical OR of all values; so a range query
-	/// will hit all documents that have at least one value in
-	/// the range. However sort behavior is not defined.  If you need to sort,
-	/// you should separately index a single-valued <c>NumericField</c>.<p/>
-	/// 
-	/// <p/>A <c>NumericField</c> will consume somewhat more disk space
-	/// in the index than an ordinary single-valued field.
-	/// However, for a typical index that includes substantial
-	/// textual content per document, this increase will likely
-	/// be in the noise. <p/>
-	/// 
-	/// <p/>Within Lucene, each numeric value is indexed as a
-	/// <em>trie</em> structure, where each term is logically
-	/// assigned to larger and larger pre-defined brackets (which
-	/// are simply lower-precision representations of the value).
-	/// The step size between each successive bracket is called the
-	/// <c>precisionStep</c>, measured in bits.  Smaller
-	/// <c>precisionStep</c> values result in larger number
-	/// of brackets, which consumes more disk space in the index
-	/// but may result in faster range search performance.  The
-	/// default value, 4, was selected for a reasonable tradeoff
-	/// of disk space consumption versus performance.  You can
-	/// use the expert constructor <see cref="NumericField(String,int,Field.Store,bool)" />
-	/// if you'd
-	/// like to change the value.  Note that you must also
-	/// specify a congruent value when creating <see cref="NumericRangeQuery{T}" />
-	/// or <see cref="NumericRangeFilter{T}" />.
-	/// For low cardinality fields larger precision steps are good.
-	/// If the cardinality is &lt; 100, it is fair
-	/// to use <see cref="int.MaxValue" />, which produces one
-	/// term per value.
-	/// 
-	/// <p/>For more information on the internals of numeric trie
-	/// indexing, including the <a
-	/// href="../search/NumericRangeQuery.html#precisionStepDesc"><c>precisionStep</c></a>
-	/// configuration, see <see cref="NumericRangeQuery{T}" />. The format of
-	/// indexed values is described in <see cref="NumericUtils" />.
-	/// 
-	/// <p/>If you only need to sort by numeric value, and never
-	/// run range querying/filtering, you can index using a
+    /// don't need millisecond precision, you can quantize the
+    /// value, either by dividing the result of
+    /// <c>java.util.Date.getTime</c> or using the separate getters
+    /// (for year, month, etc.) to construct an <c>int</c> or
+    /// <c>long</c> value.<p/>
+    /// 
+    /// <p/>To perform range querying or filtering against a
+    /// <c>NumericField</c>, use <see cref="NumericRangeQuery{T}" /> or <see cref="NumericRangeFilter{T}" />
+    ///.  To sort according to a
+    /// <c>NumericField</c>, use the normal numeric sort types, eg
+    /// <see cref="SortField.INT" />  <c>NumericField</c> values
+    /// can also be loaded directly from <see cref="FieldCache" />.<p/>
+    /// 
+    /// <p/>By default, a <c>NumericField</c>'s value is not stored but
+    /// is indexed for range filtering and sorting.  You can use
+    /// the <see cref="NumericField(String,Field.Store,bool)" />
+    /// constructor if you need to change these defaults.<p/>
+    /// 
+    /// <p/>You may add the same field name as a <c>NumericField</c> to
+    /// the same document more than once.  Range querying and
+    /// filtering will be the logical OR of all values; so a range query
+    /// will hit all documents that have at least one value in
+    /// the range. However sort behavior is not defined.  If you need to sort,
+    /// you should separately index a single-valued <c>NumericField</c>.<p/>
+    /// 
+    /// <p/>A <c>NumericField</c> will consume somewhat more disk space
+    /// in the index than an ordinary single-valued field.
+    /// However, for a typical index that includes substantial
+    /// textual content per document, this increase will likely
+    /// be in the noise. <p/>
+    /// 
+    /// <p/>Within Lucene, each numeric value is indexed as a
+    /// <em>trie</em> structure, where each term is logically
+    /// assigned to larger and larger pre-defined brackets (which
+    /// are simply lower-precision representations of the value).
+    /// The step size between each successive bracket is called the
+    /// <c>precisionStep</c>, measured in bits.  Smaller
+    /// <c>precisionStep</c> values result in larger number
+    /// of brackets, which consumes more disk space in the index
+    /// but may result in faster range search performance.  The
+    /// default value, 4, was selected for a reasonable tradeoff
+    /// of disk space consumption versus performance.  You can
+    /// use the expert constructor <see cref="NumericField(String,int,Field.Store,bool)" />
+    /// if you'd
+    /// like to change the value.  Note that you must also
+    /// specify a congruent value when creating <see cref="NumericRangeQuery{T}" />
+    /// or <see cref="NumericRangeFilter{T}" />.
+    /// For low cardinality fields larger precision steps are good.
+    /// If the cardinality is &lt; 100, it is fair
+    /// to use <see cref="int.MaxValue" />, which produces one
+    /// term per value.
+    /// 
+    /// <p/>For more information on the internals of numeric trie
+    /// indexing, including the <a
+    /// href="../search/NumericRangeQuery.html#precisionStepDesc"><c>precisionStep</c></a>
+    /// configuration, see <see cref="NumericRangeQuery{T}" />. The format of
+    /// indexed values is described in <see cref="NumericUtils" />.
+    /// 
+    /// <p/>If you only need to sort by numeric value, and never
+    /// run range querying/filtering, you can index using a
     /// <c>precisionStep</c> of <see cref="int.MaxValue" />.
-	/// This will minimize disk space consumed. <p/>
-	/// 
-	/// <p/>More advanced users can instead use <see cref="NumericTokenStream" />
-	/// directly, when indexing numbers. This
-	/// class is a wrapper around this token stream type for
-	/// easier, more intuitive usage.<p/>
-	/// 
-	/// <p/><b>NOTE:</b> This class is only used during
-	/// indexing. When retrieving the stored field value from a
-	/// <see cref="Document" /> instance after search, you will get a
-	/// conventional <see cref="IFieldable" /> instance where the numeric
-	/// values are returned as <see cref="String" />s (according to
-	/// <c>toString(value)</c> of the used data type).
-	/// 
-	/// <p/><font color="red"><b>NOTE:</b> This API is
-	/// experimental and might change in incompatible ways in the
-	/// next release.</font>
-	/// 
-	/// </summary>
-	/// <since> 2.9
-	/// </since>
-	[Serializable]
-	public sealed class NumericField:AbstractField
-	{
-		
-		new private readonly NumericTokenStream tokenStream;
-		
-		/// <summary> Creates a field for numeric values using the default <c>precisionStep</c>
-		/// <see cref="NumericUtils.PRECISION_STEP_DEFAULT" /> (4). The instance is not yet initialized with
-		/// a numeric value, before indexing a document containing this field,
-		/// set a value using the various set<em>???</em>Value() methods.
-		/// This constructor creates an indexed, but not stored field.
-		/// </summary>
-		/// <param name="name">the field name
-		/// </param>
-		public NumericField(System.String name):this(name, NumericUtils.PRECISION_STEP_DEFAULT, Field.Store.NO, true)
-		{
-		}
-		
-		/// <summary> Creates a field for numeric values using the default <c>precisionStep</c>
-		/// <see cref="NumericUtils.PRECISION_STEP_DEFAULT" /> (4). The instance is not yet initialized with
-		/// a numeric value, before indexing a document containing this field,
-		/// set a value using the various set<em>???</em>Value() methods.
-		/// </summary>
-		/// <param name="name">the field name
-		/// </param>
-		/// <param name="store">if the field should be stored in plain text form
-		/// (according to <c>toString(value)</c> of the used data type)
-		/// </param>
-		/// <param name="index">if the field should be indexed using <see cref="NumericTokenStream" />
-		/// </param>
-		public NumericField(System.String name, Field.Store store, bool index):this(name, NumericUtils.PRECISION_STEP_DEFAULT, store, index)
-		{
-		}
-		
-		/// <summary> Creates a field for numeric values with the specified
-		/// <c>precisionStep</c>. The instance is not yet initialized with
-		/// a numeric value, before indexing a document containing this field,
-		/// set a value using the various set<em>???</em>Value() methods.
-		/// This constructor creates an indexed, but not stored field.
-		/// </summary>
-		/// <param name="name">the field name
-		/// </param>
-		/// <param name="precisionStep">the used <a href="../search/NumericRangeQuery.html#precisionStepDesc">precision step</a>
-		/// </param>
-		public NumericField(System.String name, int precisionStep):this(name, precisionStep, Field.Store.NO, true)
-		{
-		}
-		
-		/// <summary> Creates a field for numeric values with the specified
-		/// <c>precisionStep</c>. The instance is not yet initialized with
-		/// a numeric value, before indexing a document containing this field,
-		/// set a value using the various set<em>???</em>Value() methods.
-		/// </summary>
-		/// <param name="name">the field name
-		/// </param>
-		/// <param name="precisionStep">the used <a href="../search/NumericRangeQuery.html#precisionStepDesc">precision step</a>
-		/// </param>
-		/// <param name="store">if the field should be stored in plain text form
-		/// (according to <c>toString(value)</c> of the used data type)
-		/// </param>
-		/// <param name="index">if the field should be indexed using <see cref="NumericTokenStream" />
-		/// </param>
-		public NumericField(System.String name, int precisionStep, Field.Store store, bool index):base(name, store, index?Field.Index.ANALYZED_NO_NORMS:Field.Index.NO, Field.TermVector.NO)
-		{
-			OmitTermFreqAndPositions = true;
-			tokenStream = new NumericTokenStream(precisionStep);
-		}
+    /// This will minimize disk space consumed. <p/>
+    /// 
+    /// <p/>More advanced users can instead use <see cref="NumericTokenStream" />
+    /// directly, when indexing numbers. This
+    /// class is a wrapper around this token stream type for
+    /// easier, more intuitive usage.<p/>
+    /// 
+    /// <p/><b>NOTE:</b> This class is only used during
+    /// indexing. When retrieving the stored field value from a
+    /// <see cref="Document" /> instance after search, you will get a
+    /// conventional <see cref="IFieldable" /> instance where the numeric
+    /// values are returned as <see cref="String" />s (according to
+    /// <c>toString(value)</c> of the used data type).
+    /// 
+    /// <p/><font color="red"><b>NOTE:</b> This API is
+    /// experimental and might change in incompatible ways in the
+    /// next release.</font>
+    /// 
+    /// </summary>
+    /// <since> 2.9
+    /// </since>
+    [Serializable]
+    public sealed class NumericField:AbstractField
+    {
+        
+        new private readonly NumericTokenStream tokenStream;
+        
+        /// <summary> Creates a field for numeric values using the default <c>precisionStep</c>
+        /// <see cref="NumericUtils.PRECISION_STEP_DEFAULT" /> (4). The instance is not yet initialized with
+        /// a numeric value, before indexing a document containing this field,
+        /// set a value using the various set<em>???</em>Value() methods.
+        /// This constructor creates an indexed, but not stored field.
+        /// </summary>
+        /// <param name="name">the field name
+        /// </param>
+        public NumericField(System.String name):this(name, NumericUtils.PRECISION_STEP_DEFAULT, Field.Store.NO, true)
+        {
+        }
+        
+        /// <summary> Creates a field for numeric values using the default <c>precisionStep</c>
+        /// <see cref="NumericUtils.PRECISION_STEP_DEFAULT" /> (4). The instance is not yet initialized with
+        /// a numeric value, before indexing a document containing this field,
+        /// set a value using the various set<em>???</em>Value() methods.
+        /// </summary>
+        /// <param name="name">the field name
+        /// </param>
+        /// <param name="store">if the field should be stored in plain text form
+        /// (according to <c>toString(value)</c> of the used data type)
+        /// </param>
+        /// <param name="index">if the field should be indexed using <see cref="NumericTokenStream" />
+        /// </param>
+        public NumericField(System.String name, Field.Store store, bool index):this(name, NumericUtils.PRECISION_STEP_DEFAULT, store, index)
+        {
+        }
+        
+        /// <summary> Creates a field for numeric values with the specified
+        /// <c>precisionStep</c>. The instance is not yet initialized with
+        /// a numeric value, before indexing a document containing this field,
+        /// set a value using the various set<em>???</em>Value() methods.
+        /// This constructor creates an indexed, but not stored field.
+        /// </summary>
+        /// <param name="name">the field name
+        /// </param>
+        /// <param name="precisionStep">the used <a href="../search/NumericRangeQuery.html#precisionStepDesc">precision step</a>
+        /// </param>
+        public NumericField(System.String name, int precisionStep):this(name, precisionStep, Field.Store.NO, true)
+        {
+        }
+        
+        /// <summary> Creates a field for numeric values with the specified
+        /// <c>precisionStep</c>. The instance is not yet initialized with
+        /// a numeric value, before indexing a document containing this field,
+        /// set a value using the various set<em>???</em>Value() methods.
+        /// </summary>
+        /// <param name="name">the field name
+        /// </param>
+        /// <param name="precisionStep">the used <a href="../search/NumericRangeQuery.html#precisionStepDesc">precision step</a>
+        /// </param>
+        /// <param name="store">if the field should be stored in plain text form
+        /// (according to <c>toString(value)</c> of the used data type)
+        /// </param>
+        /// <param name="index">if the field should be indexed using <see cref="NumericTokenStream" />
+        /// </param>
+        public NumericField(System.String name, int precisionStep, Field.Store store, bool index):base(name, store, index?Field.Index.ANALYZED_NO_NORMS:Field.Index.NO, Field.TermVector.NO)
+        {
+            OmitTermFreqAndPositions = true;
+            tokenStream = new NumericTokenStream(precisionStep);
+        }
 
-	    /// <summary>Returns a <see cref="NumericTokenStream" /> for indexing the numeric value. </summary>
-	    public override TokenStream TokenStreamValue
-	    {
-	        get { return IsIndexed ? tokenStream : null; }
-	    }
+        /// <summary>Returns a <see cref="NumericTokenStream" /> for indexing the numeric value. </summary>
+        public override TokenStream TokenStreamValue
+        {
+            get { return IsIndexed ? tokenStream : null; }
+        }
 
-	    /// <summary>Returns always <c>null</c> for numeric fields </summary>
-		public override byte[] GetBinaryValue(byte[] result)
-		{
-			return null;
-		}
+        /// <summary>Always returns <c>null</c> for numeric fields. </summary>
+        public override byte[] GetBinaryValue(byte[] result)
+        {
+            return null;
+        }
 
-	    /// <summary>Returns always <c>null</c> for numeric fields </summary>
-	    public override TextReader ReaderValue
-	    {
-	        get { return null; }
-	    }
+        /// <summary>Always returns <c>null</c> for numeric fields. </summary>
+        public override TextReader ReaderValue
+        {
+            get { return null; }
+        }
 
-	    /// <summary>Returns the numeric value as a string (how it is stored, when <see cref="Field.Store.YES" /> is chosen). </summary>
-	    public override string StringValue
-	    {
-	        get { return (fieldsData == null) ? null : fieldsData.ToString(); }
-	    }
+        /// <summary>Returns the numeric value as a string (how it is stored, when <see cref="Field.Store.YES" /> is chosen). </summary>
+        public override string StringValue
+        {
+            get { return (fieldsData == null) ? null : fieldsData.ToString(); }
+        }
 
-	    /// <summary>Returns the current numeric value as a subclass of <see cref="Number" />, <c>null</c> if not yet initialized. </summary>
-	    public ValueType NumericValue
-	    {
-	        get { return (System.ValueType) fieldsData; }
-	    }
+        /// <summary>Returns the current numeric value as a boxed <see cref="ValueType" />, or <c>null</c> if not yet initialized. </summary>
+        public ValueType NumericValue
+        {
+            get { return (System.ValueType) fieldsData; }
+        }
 
-	    /// <summary> Initializes the field with the supplied <c>long</c> value.</summary>
-		/// <param name="value_Renamed">the numeric value
-		/// </param>
-		/// <returns> this instance, because of this you can use it the following way:
-		/// <c>document.add(new NumericField(name, precisionStep).SetLongValue(value))</c>
-		/// </returns>
-		public NumericField SetLongValue(long value_Renamed)
-		{
-			tokenStream.SetLongValue(value_Renamed);
-			fieldsData = value_Renamed;
-			return this;
-		}
-		
-		/// <summary> Initializes the field with the supplied <c>int</c> value.</summary>
-		/// <param name="value_Renamed">the numeric value
-		/// </param>
-		/// <returns> this instance, because of this you can use it the following way:
-		/// <c>document.add(new NumericField(name, precisionStep).setIntValue(value))</c>
-		/// </returns>
-		public NumericField SetIntValue(int value_Renamed)
-		{
-			tokenStream.SetIntValue(value_Renamed);
-			fieldsData = value_Renamed;
-			return this;
-		}
-		
-		/// <summary> Initializes the field with the supplied <c>double</c> value.</summary>
-		/// <param name="value_Renamed">the numeric value
-		/// </param>
-		/// <returns> this instance, because of this you can use it the following way:
-		/// <c>document.add(new NumericField(name, precisionStep).setDoubleValue(value))</c>
-		/// </returns>
-		public NumericField SetDoubleValue(double value_Renamed)
-		{
-			tokenStream.SetDoubleValue(value_Renamed);
-			fieldsData = value_Renamed;
-			return this;
-		}
-		
-		/// <summary> Initializes the field with the supplied <c>float</c> value.</summary>
-		/// <param name="value_Renamed">the numeric value
-		/// </param>
-		/// <returns> this instance, because of this you can use it the following way:
-		/// <c>document.add(new NumericField(name, precisionStep).setFloatValue(value))</c>
-		/// </returns>
-		public NumericField SetFloatValue(float value_Renamed)
-		{
-			tokenStream.SetFloatValue(value_Renamed);
-			fieldsData = value_Renamed;
-			return this;
-		}
-	}
+        /// <summary> Initializes the field with the supplied <c>long</c> value.</summary>
+        /// <param name="value_Renamed">the numeric value
+        /// </param>
+        /// <returns> this instance, because of this you can use it the following way:
+        /// <c>document.Add(new NumericField(name, precisionStep).SetLongValue(value))</c>
+        /// </returns>
+        public NumericField SetLongValue(long value_Renamed)
+        {
+            tokenStream.SetLongValue(value_Renamed);
+            fieldsData = value_Renamed;
+            return this;
+        }
+        
+        /// <summary> Initializes the field with the supplied <c>int</c> value.</summary>
+        /// <param name="value_Renamed">the numeric value
+        /// </param>
+        /// <returns> this instance, because of this you can use it the following way:
+        /// <c>document.Add(new NumericField(name, precisionStep).SetIntValue(value))</c>
+        /// </returns>
+        public NumericField SetIntValue(int value_Renamed)
+        {
+            tokenStream.SetIntValue(value_Renamed);
+            fieldsData = value_Renamed;
+            return this;
+        }
+        
+        /// <summary> Initializes the field with the supplied <c>double</c> value.</summary>
+        /// <param name="value_Renamed">the numeric value
+        /// </param>
+        /// <returns> this instance, because of this you can use it the following way:
+        /// <c>document.Add(new NumericField(name, precisionStep).SetDoubleValue(value))</c>
+        /// </returns>
+        public NumericField SetDoubleValue(double value_Renamed)
+        {
+            tokenStream.SetDoubleValue(value_Renamed);
+            fieldsData = value_Renamed;
+            return this;
+        }
+        
+        /// <summary> Initializes the field with the supplied <c>float</c> value.</summary>
+        /// <param name="value_Renamed">the numeric value
+        /// </param>
+        /// <returns> this instance, because of this you can use it the following way:
+        /// <c>document.Add(new NumericField(name, precisionStep).SetFloatValue(value))</c>
+        /// </returns>
+        public NumericField SetFloatValue(float value_Renamed)
+        {
+            tokenStream.SetFloatValue(value_Renamed);
+            fieldsData = value_Renamed;
+            return this;
+        }
+    }
 }
\ No newline at end of file
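
The reuse pattern recommended in the NumericField class comment, written out as a compilable sketch (the IndexWriter and the "price" field name are assumed for illustration):

    using Lucene.Net.Documents;
    using Lucene.Net.Index;

    static class NumericFieldSketch
    {
        // Indexes a batch of int values, reusing one Document and one
        // NumericField instance across all of them.
        public static void IndexPrices(IndexWriter writer, int[] prices)
        {
            var field = new NumericField("price", Field.Store.YES, true);
            var document = new Document();
            document.Add(field);

            foreach (int price in prices)
            {
                field.SetIntValue(price); // mutate the field, not the document
                writer.AddDocument(document);
            }
        }
    }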

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Document/SetBasedFieldSelector.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/SetBasedFieldSelector.cs b/src/core/Document/SetBasedFieldSelector.cs
index 14e3e02..f7c2055 100644
--- a/src/core/Document/SetBasedFieldSelector.cs
+++ b/src/core/Document/SetBasedFieldSelector.cs
@@ -20,50 +20,50 @@ using System.Collections.Generic;
 
 namespace Lucene.Net.Documents
 {
-	/// <summary> Declare what fields to load normally and what fields to load lazily
-	/// 
-	/// 
-	/// </summary>
-	[Serializable]
-	public class SetBasedFieldSelector : FieldSelector
-	{
-		private ISet<string> fieldsToLoad;
-		private ISet<string> lazyFieldsToLoad;
-		
-		/// <summary> Pass in the Set of <see cref="Field" /> names to load and the Set of <see cref="Field" /> names to load lazily.  If both are null, the
-		/// Document will not have any <see cref="Field" /> on it.  
-		/// </summary>
-		/// <param name="fieldsToLoad">A Set of <see cref="String" /> field names to load.  May be empty, but not null
-		/// </param>
-		/// <param name="lazyFieldsToLoad">A Set of <see cref="String" /> field names to load lazily.  May be empty, but not null  
-		/// </param>
-		public SetBasedFieldSelector(ISet<string> fieldsToLoad, ISet<string> lazyFieldsToLoad)
-		{
-			this.fieldsToLoad = fieldsToLoad;
-			this.lazyFieldsToLoad = lazyFieldsToLoad;
-		}
+    /// <summary> Declare what fields to load normally and what fields to load lazily.
+    /// 
+    /// 
+    /// </summary>
+    [Serializable]
+    public class SetBasedFieldSelector : FieldSelector
+    {
+        private ISet<string> fieldsToLoad;
+        private ISet<string> lazyFieldsToLoad;
+        
+        /// <summary> Pass in the Set of <see cref="Field" /> names to load and the Set of <see cref="Field" /> names to load lazily.  If both are null, the
+        /// Document will not have any <see cref="Field" /> on it.  
+        /// </summary>
+        /// <param name="fieldsToLoad">A Set of <see cref="String" /> field names to load.  May be empty, but not null
+        /// </param>
+        /// <param name="lazyFieldsToLoad">A Set of <see cref="String" /> field names to load lazily.  May be empty, but not null  
+        /// </param>
+        public SetBasedFieldSelector(ISet<string> fieldsToLoad, ISet<string> lazyFieldsToLoad)
+        {
+            this.fieldsToLoad = fieldsToLoad;
+            this.lazyFieldsToLoad = lazyFieldsToLoad;
+        }
 
         /// <summary> Indicate whether to load the field with the given name or not. If the <see cref="AbstractField.Name()" /> is not in either of the 
-		/// initializing Sets, then <see cref="Lucene.Net.Documents.FieldSelectorResult.NO_LOAD" /> is returned.  If a Field name
-		/// is in both <c>fieldsToLoad</c> and <c>lazyFieldsToLoad</c>, lazy has precedence.
-		/// 
-		/// </summary>
-		/// <param name="fieldName">The <see cref="Field" /> name to check
-		/// </param>
-		/// <returns> The <see cref="FieldSelectorResult" />
-		/// </returns>
-		public virtual FieldSelectorResult Accept(System.String fieldName)
-		{
-			FieldSelectorResult result = FieldSelectorResult.NO_LOAD;
-			if (fieldsToLoad.Contains(fieldName) == true)
-			{
-				result = FieldSelectorResult.LOAD;
-			}
-			if (lazyFieldsToLoad.Contains(fieldName) == true)
-			{
-				result = FieldSelectorResult.LAZY_LOAD;
-			}
-			return result;
-		}
-	}
+        /// initializing Sets, then <see cref="Lucene.Net.Documents.FieldSelectorResult.NO_LOAD" /> is returned.  If a Field name
+        /// is in both <c>fieldsToLoad</c> and <c>lazyFieldsToLoad</c>, lazy has precedence.
+        /// 
+        /// </summary>
+        /// <param name="fieldName">The <see cref="Field" /> name to check
+        /// </param>
+        /// <returns> The <see cref="FieldSelectorResult" />
+        /// </returns>
+        public virtual FieldSelectorResult Accept(System.String fieldName)
+        {
+            FieldSelectorResult result = FieldSelectorResult.NO_LOAD;
+            if (fieldsToLoad.Contains(fieldName))
+            {
+                result = FieldSelectorResult.LOAD;
+            }
+            if (lazyFieldsToLoad.Contains(fieldName))
+            {
+                result = FieldSelectorResult.LAZY_LOAD;
+            }
+            return result;
+        }
+    }
 }
\ No newline at end of file
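
A sketch of the precedence rule documented above (a name present in both sets loads lazily); the set contents are illustrative:

    using System.Collections.Generic;
    using Lucene.Net.Documents;

    static class SetSelectorSketch
    {
        public static FieldSelector Build()
        {
            var eager = new HashSet<string> { "title", "body" };
            var lazy = new HashSet<string> { "body" }; // lazy wins over eager
            var selector = new SetBasedFieldSelector(eager, lazy);
            // Accept("title") => LOAD
            // Accept("body")  => LAZY_LOAD
            // Accept("other") => NO_LOAD
            return selector;
        }
    }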

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/AllTermDocs.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/AllTermDocs.cs b/src/core/Index/AllTermDocs.cs
index da5f16d..a3e51ab 100644
--- a/src/core/Index/AllTermDocs.cs
+++ b/src/core/Index/AllTermDocs.cs
@@ -21,16 +21,16 @@ namespace Lucene.Net.Index
 {
 
     class AllTermDocs : AbstractAllTermDocs
-	{
-		protected internal BitVector deletedDocs;
-				
-		protected internal AllTermDocs(SegmentReader parent) : base(parent.MaxDoc)
-		{
-			lock (parent)
-			{
-				this.deletedDocs = parent.deletedDocs;
-			}
-		}
+    {
+        protected internal BitVector deletedDocs;
+                
+        protected internal AllTermDocs(SegmentReader parent) : base(parent.MaxDoc)
+        {
+            lock (parent)
+            {
+                this.deletedDocs = parent.deletedDocs;
+            }
+        }
 
         protected override void Dispose(bool disposing)
         {
@@ -41,5 +41,5 @@ namespace Lucene.Net.Index
         {
             return deletedDocs != null && deletedDocs.Get(doc);
         }
-	}
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/BufferedDeletes.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/BufferedDeletes.cs b/src/core/Index/BufferedDeletes.cs
index 52ef1df..5b10ce7 100644
--- a/src/core/Index/BufferedDeletes.cs
+++ b/src/core/Index/BufferedDeletes.cs
@@ -21,22 +21,22 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>Holds buffered deletes, by docID, term or query.  We
-	/// hold two instances of this class: one for the deletes
-	/// prior to the last flush, the other for deletes after
-	/// the last flush.  This is so if we need to abort
-	/// (discard all buffered docs) we can also discard the
-	/// buffered deletes yet keep the deletes done during
-	/// previously flushed segments. 
-	/// </summary>
-	class BufferedDeletes
-	{
-		internal int numTerms;
+    
+    /// <summary>Holds buffered deletes, by docID, term or query.  We
+    /// hold two instances of this class: one for the deletes
+    /// prior to the last flush, the other for deletes after
+    /// the last flush.  This is so if we need to abort
+    /// (discard all buffered docs) we can also discard the
+    /// buffered deletes yet keep the deletes done during
+    /// previously flushed segments. 
+    /// </summary>
+    class BufferedDeletes
+    {
+        internal int numTerms;
         internal IDictionary<Term,Num> terms = null;
-		internal IDictionary<Query, int> queries = new HashMap<Query, int>();
-		internal List<int> docIDs = new List<int>();
-		internal long bytesUsed;
+        internal IDictionary<Query, int> queries = new HashMap<Query, int>();
+        internal List<int> docIDs = new List<int>();
+        internal long bytesUsed;
         internal  bool doTermSort;
 
         public BufferedDeletes(bool doTermSort)
@@ -53,91 +53,91 @@ namespace Lucene.Net.Index
             }
         }
                 
-		
-		// Number of documents a delete term applies to.
-		internal sealed class Num
-		{
-			internal int num;
-			
-			internal Num(int num)
-			{
-				this.num = num;
-			}
-			
-			internal int GetNum()
-			{
-				return num;
-			}
-			
-			internal void  SetNum(int num)
-			{
-				// Only record the new number if it's greater than the
-				// current one.  This is important because if multiple
-				// threads are replacing the same doc at nearly the
-				// same time, it's possible that one thread that got a
-				// higher docID is scheduled before the other
-				// threads.
-				if (num > this.num)
-					this.num = num;
-			}
-		}
-		
-		internal virtual int Size()
-		{
-			// We use numTerms not terms.size() intentionally, so
-			// that deletes by the same term multiple times "count",
-			// ie if you ask to flush every 1000 deletes then even
-			// dup'd terms are counted towards that 1000
-			return numTerms + queries.Count + docIDs.Count;
-		}
-		
-		internal virtual void  Update(BufferedDeletes @in)
-		{
-			numTerms += @in.numTerms;
-			bytesUsed += @in.bytesUsed;
-		    foreach (KeyValuePair<Term, Num> term in @in.terms)
+        
+        // Number of documents a delete term applies to.
+        internal sealed class Num
+        {
+            internal int num;
+            
+            internal Num(int num)
+            {
+                this.num = num;
+            }
+            
+            internal int GetNum()
+            {
+                return num;
+            }
+            
+            internal void  SetNum(int num)
+            {
+                // Only record the new number if it's greater than the
+                // current one.  This is important because if multiple
+                // threads are replacing the same doc at nearly the
+                // same time, it's possible that one thread that got a
+                // higher docID is scheduled before the other
+                // threads.
+                if (num > this.num)
+                    this.num = num;
+            }
+        }
+        
+        internal virtual int Size()
+        {
+            // We use numTerms not terms.size() intentionally, so
+            // that deletes by the same term multiple times "count",
+            // ie if you ask to flush every 1000 deletes then even
+            // dup'd terms are counted towards that 1000
+            return numTerms + queries.Count + docIDs.Count;
+        }
+        
+        internal virtual void  Update(BufferedDeletes @in)
+        {
+            numTerms += @in.numTerms;
+            bytesUsed += @in.bytesUsed;
+            foreach (KeyValuePair<Term, Num> term in @in.terms)
             {
                 terms[term.Key] = term.Value;
-		    }
+            }
             foreach (KeyValuePair<Query, int> term in @in.queries)
             {
                 queries[term.Key] = term.Value;
             }
 
-			docIDs.AddRange(@in.docIDs);
-			@in.Clear();
-		}
-		
-		internal virtual void  Clear()
-		{
-			terms.Clear();
-			queries.Clear();
-			docIDs.Clear();
-			numTerms = 0;
-			bytesUsed = 0;
-		}
-		
-		internal virtual void  AddBytesUsed(long b)
-		{
-			bytesUsed += b;
-		}
-		
-		internal virtual bool Any()
-		{
-			return terms.Count > 0 || docIDs.Count > 0 || queries.Count > 0;
-		}
-		
-		// Remaps all buffered deletes based on a completed
-		// merge
-		internal virtual void  Remap(MergeDocIDRemapper mapper, SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergeDocCount)
-		{
-			lock (this)
-			{
-				IDictionary<Term, Num> newDeleteTerms;
-				
-				// Remap delete-by-term
-				if (terms.Count > 0)
-				{
+            docIDs.AddRange(@in.docIDs);
+            @in.Clear();
+        }
+        
+        internal virtual void  Clear()
+        {
+            terms.Clear();
+            queries.Clear();
+            docIDs.Clear();
+            numTerms = 0;
+            bytesUsed = 0;
+        }
+        
+        internal virtual void  AddBytesUsed(long b)
+        {
+            bytesUsed += b;
+        }
+        
+        internal virtual bool Any()
+        {
+            return terms.Count > 0 || docIDs.Count > 0 || queries.Count > 0;
+        }
+        
+        // Remaps all buffered deletes based on a completed
+        // merge
+        internal virtual void  Remap(MergeDocIDRemapper mapper, SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergeDocCount)
+        {
+            lock (this)
+            {
+                IDictionary<Term, Num> newDeleteTerms;
+                
+                // Remap delete-by-term
+                if (terms.Count > 0)
+                {
                     if (doTermSort)
                     {
                         newDeleteTerms = new SortedDictionary<Term, Num>();
@@ -146,51 +146,51 @@ namespace Lucene.Net.Index
                     {
                         newDeleteTerms = new HashMap<Term, Num>();
                     }
-					foreach(var entry in terms)
-					{
-						Num num = entry.Value;
-						newDeleteTerms[entry.Key] = new Num(mapper.Remap(num.GetNum()));
-					}
-				}
-				else
-					newDeleteTerms = null;
-				
-				// Remap delete-by-docID
-				List<int> newDeleteDocIDs;
-				
-				if (docIDs.Count > 0)
-				{
-					newDeleteDocIDs = new List<int>(docIDs.Count);
-					foreach(int num in docIDs)
-					{
-						newDeleteDocIDs.Add(mapper.Remap(num));
-					}
-				}
-				else
-					newDeleteDocIDs = null;
-				
-				// Remap delete-by-query
-				HashMap<Query, int> newDeleteQueries;
-				
-				if (queries.Count > 0)
-				{
+                    foreach(var entry in terms)
+                    {
+                        Num num = entry.Value;
+                        newDeleteTerms[entry.Key] = new Num(mapper.Remap(num.GetNum()));
+                    }
+                }
+                else
+                    newDeleteTerms = null;
+                
+                // Remap delete-by-docID
+                List<int> newDeleteDocIDs;
+                
+                if (docIDs.Count > 0)
+                {
+                    newDeleteDocIDs = new List<int>(docIDs.Count);
+                    foreach(int num in docIDs)
+                    {
+                        newDeleteDocIDs.Add(mapper.Remap(num));
+                    }
+                }
+                else
+                    newDeleteDocIDs = null;
+                
+                // Remap delete-by-query
+                HashMap<Query, int> newDeleteQueries;
+                
+                if (queries.Count > 0)
+                {
                     newDeleteQueries = new HashMap<Query, int>(queries.Count);
-					foreach(var entry in queries)
-					{
-						int num = entry.Value;
-						newDeleteQueries[entry.Key] = mapper.Remap(num);
-					}
-				}
-				else
-					newDeleteQueries = null;
-				
-				if (newDeleteTerms != null)
-					terms = newDeleteTerms;
-				if (newDeleteDocIDs != null)
-					docIDs = newDeleteDocIDs;
-				if (newDeleteQueries != null)
-					queries = newDeleteQueries;
-			}
-		}
-	}
+                    foreach(var entry in queries)
+                    {
+                        int num = entry.Value;
+                        newDeleteQueries[entry.Key] = mapper.Remap(num);
+                    }
+                }
+                else
+                    newDeleteQueries = null;
+                
+                if (newDeleteTerms != null)
+                    terms = newDeleteTerms;
+                if (newDeleteDocIDs != null)
+                    docIDs = newDeleteDocIDs;
+                if (newDeleteQueries != null)
+                    queries = newDeleteQueries;
+            }
+        }
+    }
 }
\ No newline at end of file
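
A side note for readers of the converted BufferedDeletes above: Num.SetNum only ever moves the recorded docID forward, so when several threads replace the same document at nearly the same time, a late-scheduled thread holding a smaller docID cannot roll the value back. A minimal, self-contained C# sketch of that invariant (NumDemo and its members are illustrative names, not part of this commit):

    using System;

    internal static class NumDemo
    {
        // Illustrative copy of BufferedDeletes.Num: record a docID, but only
        // if it is greater than the one already stored.
        private sealed class Num
        {
            private int num;
            public Num(int num) { this.num = num; }
            public int Get() { return num; }
            public void Set(int candidate)
            {
                if (candidate > num) num = candidate; // never moves backward
            }
        }

        private static void Main()
        {
            var n = new Num(10);
            n.Set(7);                   // ignored: 7 < 10
            n.Set(42);                  // accepted: 42 > 10
            Console.WriteLine(n.Get()); // prints 42
        }
    }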

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/ByteBlockPool.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/ByteBlockPool.cs b/src/core/Index/ByteBlockPool.cs
index 041c756..50afde0 100644
--- a/src/core/Index/ByteBlockPool.cs
+++ b/src/core/Index/ByteBlockPool.cs
@@ -38,135 +38,135 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Index
 {
-	
-	sealed public class ByteBlockPool
-	{
-		private void  InitBlock()
-		{
-			byteUpto = DocumentsWriter.BYTE_BLOCK_SIZE;
-		}
-		
-		public /*internal*/ abstract class Allocator
-		{
-			public /*internal*/ abstract void  RecycleByteBlocks(byte[][] blocks, int start, int end);
+    
+    sealed public class ByteBlockPool
+    {
+        private void  InitBlock()
+        {
+            byteUpto = DocumentsWriter.BYTE_BLOCK_SIZE;
+        }
+        
+        public /*internal*/ abstract class Allocator
+        {
+            public /*internal*/ abstract void  RecycleByteBlocks(byte[][] blocks, int start, int end);
             public /*internal*/ abstract void RecycleByteBlocks(IList<byte[]> blocks);
-			public /*internal*/ abstract byte[] GetByteBlock(bool trackAllocations);
-		}
-		
-		public byte[][] buffers = new byte[10][];
-		
-		internal int bufferUpto = - 1; // Which buffer we are upto
-		public int byteUpto; // Where we are in head buffer
-		
-		public byte[] buffer; // Current head buffer
-		public int byteOffset = - DocumentsWriter.BYTE_BLOCK_SIZE; // Current head offset
-		
-		private readonly bool trackAllocations;
-		private readonly Allocator allocator;
-		
-		public ByteBlockPool(Allocator allocator, bool trackAllocations)
-		{
-			InitBlock();
-			this.allocator = allocator;
-			this.trackAllocations = trackAllocations;
-		}
-		
-		public void  Reset()
-		{
-			if (bufferUpto != - 1)
-			{
-				// We allocated at least one buffer
-				
-				for (int i = 0; i < bufferUpto; i++)
-				// Fully zero fill buffers that we fully used
-					System.Array.Clear(buffers[i], 0, buffers[i].Length);
-				
-				// Partial zero fill the final buffer
-				System.Array.Clear(buffers[bufferUpto], 0, byteUpto);
-				
-				if (bufferUpto > 0)
-				// Recycle all but the first buffer
-					allocator.RecycleByteBlocks(buffers, 1, 1 + bufferUpto);
-				
-				// Re-use the first buffer
-				bufferUpto = 0;
-				byteUpto = 0;
-				byteOffset = 0;
-				buffer = buffers[0];
-			}
-		}
-		
-		public void  NextBuffer()
-		{
-			if (1 + bufferUpto == buffers.Length)
-			{
-				var newBuffers = new byte[(int) (buffers.Length * 1.5)][];
-				Array.Copy(buffers, 0, newBuffers, 0, buffers.Length);
-				buffers = newBuffers;
-			}
-			buffer = buffers[1 + bufferUpto] = allocator.GetByteBlock(trackAllocations);
-			bufferUpto++;
-			
-			byteUpto = 0;
-			byteOffset += DocumentsWriter.BYTE_BLOCK_SIZE;
-		}
-		
-		public int NewSlice(int size)
-		{
-			if (byteUpto > DocumentsWriter.BYTE_BLOCK_SIZE - size)
-				NextBuffer();
-			int upto = byteUpto;
-			byteUpto += size;
-			buffer[byteUpto - 1] = 16;
-			return upto;
-		}
-		
-		// Size of each slice.  These arrays should be at most 16
-		// elements (index is encoded with 4 bits).  First array
-		// is just a compact way to encode X+1 with a max.  Second
-		// array is the length of each slice, ie first slice is 5
-		// bytes, next slice is 14 bytes, etc.
-		internal static readonly int[] nextLevelArray = new int[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 9};
-		internal static readonly int[] levelSizeArray = new int[]{5, 14, 20, 30, 40, 40, 80, 80, 120, 200};
-		internal static readonly int FIRST_LEVEL_SIZE = levelSizeArray[0];
+            public /*internal*/ abstract byte[] GetByteBlock(bool trackAllocations);
+        }
+        
+        public byte[][] buffers = new byte[10][];
+        
+        internal int bufferUpto = - 1; // Which buffer we are upto
+        public int byteUpto; // Where we are in head buffer
+        
+        public byte[] buffer; // Current head buffer
+        public int byteOffset = - DocumentsWriter.BYTE_BLOCK_SIZE; // Current head offset
+        
+        private readonly bool trackAllocations;
+        private readonly Allocator allocator;
+        
+        public ByteBlockPool(Allocator allocator, bool trackAllocations)
+        {
+            InitBlock();
+            this.allocator = allocator;
+            this.trackAllocations = trackAllocations;
+        }
+        
+        public void  Reset()
+        {
+            if (bufferUpto != - 1)
+            {
+                // We allocated at least one buffer
+                
+                for (int i = 0; i < bufferUpto; i++)
+                // Fully zero fill buffers that we fully used
+                    System.Array.Clear(buffers[i], 0, buffers[i].Length);
+                
+                // Partial zero fill the final buffer
+                System.Array.Clear(buffers[bufferUpto], 0, byteUpto);
+                
+                if (bufferUpto > 0)
+                // Recycle all but the first buffer
+                    allocator.RecycleByteBlocks(buffers, 1, 1 + bufferUpto);
+                
+                // Re-use the first buffer
+                bufferUpto = 0;
+                byteUpto = 0;
+                byteOffset = 0;
+                buffer = buffers[0];
+            }
+        }
+        
+        public void  NextBuffer()
+        {
+            if (1 + bufferUpto == buffers.Length)
+            {
+                var newBuffers = new byte[(int) (buffers.Length * 1.5)][];
+                Array.Copy(buffers, 0, newBuffers, 0, buffers.Length);
+                buffers = newBuffers;
+            }
+            buffer = buffers[1 + bufferUpto] = allocator.GetByteBlock(trackAllocations);
+            bufferUpto++;
+            
+            byteUpto = 0;
+            byteOffset += DocumentsWriter.BYTE_BLOCK_SIZE;
+        }
+        
+        public int NewSlice(int size)
+        {
+            if (byteUpto > DocumentsWriter.BYTE_BLOCK_SIZE - size)
+                NextBuffer();
+            int upto = byteUpto;
+            byteUpto += size;
+            buffer[byteUpto - 1] = 16;
+            return upto;
+        }
+        
+        // Size of each slice.  These arrays should be at most 16
+        // elements (index is encoded with 4 bits).  First array
+        // is just a compact way to encode X+1 with a max.  Second
+        // array is the length of each slice, ie first slice is 5
+        // bytes, next slice is 14 bytes, etc.
+        internal static readonly int[] nextLevelArray = new int[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 9};
+        internal static readonly int[] levelSizeArray = new int[]{5, 14, 20, 30, 40, 40, 80, 80, 120, 200};
+        internal static readonly int FIRST_LEVEL_SIZE = levelSizeArray[0];
         public readonly static int FIRST_LEVEL_SIZE_For_NUnit_Test = levelSizeArray[0];
-		
-		public int AllocSlice(byte[] slice, int upto)
-		{
-			
-			int level = slice[upto] & 15;
-			int newLevel = nextLevelArray[level];
-			int newSize = levelSizeArray[newLevel];
-			
-			// Maybe allocate another block
-			if (byteUpto > DocumentsWriter.BYTE_BLOCK_SIZE - newSize)
-				NextBuffer();
-			
-			int newUpto = byteUpto;
-			int offset = newUpto + byteOffset;
-			byteUpto += newSize;
-			
-			// Copy forward the past 3 bytes (which we are about
-			// to overwrite with the forwarding address):
-			buffer[newUpto] = slice[upto - 3];
-			buffer[newUpto + 1] = slice[upto - 2];
-			buffer[newUpto + 2] = slice[upto - 1];
-			
-			// Write forwarding address at end of last slice:
-			slice[upto - 3] = (byte) (Number.URShift(offset, 24));
-			slice[upto - 2] = (byte) (Number.URShift(offset, 16));
-			slice[upto - 1] = (byte) (Number.URShift(offset, 8));
-			slice[upto] = (byte) offset;
-			
-			// Write new level:
-			buffer[byteUpto - 1] = (byte) (16 | newLevel);
-			
-			return newUpto + 3;
-		}
+        
+        public int AllocSlice(byte[] slice, int upto)
+        {
+            
+            int level = slice[upto] & 15;
+            int newLevel = nextLevelArray[level];
+            int newSize = levelSizeArray[newLevel];
+            
+            // Maybe allocate another block
+            if (byteUpto > DocumentsWriter.BYTE_BLOCK_SIZE - newSize)
+                NextBuffer();
+            
+            int newUpto = byteUpto;
+            int offset = newUpto + byteOffset;
+            byteUpto += newSize;
+            
+            // Copy forward the past 3 bytes (which we are about
+            // to overwrite with the forwarding address):
+            buffer[newUpto] = slice[upto - 3];
+            buffer[newUpto + 1] = slice[upto - 2];
+            buffer[newUpto + 2] = slice[upto - 1];
+            
+            // Write forwarding address at end of last slice:
+            slice[upto - 3] = (byte) (Number.URShift(offset, 24));
+            slice[upto - 2] = (byte) (Number.URShift(offset, 16));
+            slice[upto - 1] = (byte) (Number.URShift(offset, 8));
+            slice[upto] = (byte) offset;
+            
+            // Write new level:
+            buffer[byteUpto - 1] = (byte) (16 | newLevel);
+            
+            return newUpto + 3;
+        }
 
         public static int FIRST_LEVEL_SIZE_ForNUnit
         {
             get { return FIRST_LEVEL_SIZE; }
         }
-	}
+    }
 }
\ No newline at end of file
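
Before the next file: the two level tables near the bottom of ByteBlockPool drive AllocSlice's growth policy. Each reallocation reads the current slice's level, jumps to nextLevelArray[level], and allocates levelSizeArray bytes at the new level; level 9 maps back to itself, so slices stop growing at 200 bytes. A hedged, standalone C# sketch of that chain (table values copied from the diff above; SliceLevelsDemo is an illustrative name):

    using System;

    internal static class SliceLevelsDemo
    {
        // Values copied from nextLevelArray / levelSizeArray above.
        private static readonly int[] NextLevel = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 9 };
        private static readonly int[] LevelSize = { 5, 14, 20, 30, 40, 40, 80, 80, 120, 200 };

        private static void Main()
        {
            // Walk the chain the way AllocSlice does: each reallocation jumps
            // to NextLevel[level] and allocates LevelSize of the new level.
            int level = 0;
            for (int slice = 0; slice < 11; slice++)
            {
                Console.WriteLine("slice {0}: level {1}, {2} bytes", slice, level, LevelSize[level]);
                level = NextLevel[level];
            }
            // Prints 5, 14, 20, 30, 40, 40, 80, 80, 120, 200, 200 bytes:
            // level 9 points back to itself, so slices stop growing there.
        }
    }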


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/BBox/BBoxStrategy.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/BBox/BBoxStrategy.cs b/src/contrib/Spatial/BBox/BBoxStrategy.cs
index b97bb17..f14af08 100644
--- a/src/contrib/Spatial/BBox/BBoxStrategy.cs
+++ b/src/contrib/Spatial/BBox/BBoxStrategy.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -36,9 +36,9 @@ namespace Lucene.Net.Spatial.BBox
         public static String SUFFIX_XDL = "__xdl";
 
         /*
-		 * The Bounding Box gets stored as four fields for x/y min/max and a flag
-		 * that says if the box crosses the dateline (xdl).
-		 */
+         * The Bounding Box gets stored as four fields for x/y min/max and a flag
+         * that says if the box crosses the dateline (xdl).
+         */
         public readonly String field_bbox;
         public readonly String field_minX;
         public readonly String field_minY;
@@ -130,356 +130,356 @@ namespace Lucene.Net.Spatial.BBox
         }
 
         public override Filter MakeFilter(SpatialArgs args)
-		{
+        {
             return new QueryWrapperFilter(MakeSpatialQuery(args));
-		}
+        }
 
-		private Query MakeSpatialQuery(SpatialArgs args)
-		{
+        private Query MakeSpatialQuery(SpatialArgs args)
+        {
             var bbox = args.Shape as Rectangle;
             if (bbox == null)
                 throw new InvalidOperationException("Can only query by Rectangle, not " + args.Shape);
 
-			Query spatial = null;
-
-			// Useful for understanding Relations:
-			// http://edndoc.esri.com/arcsde/9.1/general_topics/understand_spatial_relations.htm
-			SpatialOperation op = args.Operation;
-			if (op == SpatialOperation.BBoxIntersects) spatial = MakeIntersects(bbox);
-			else if (op == SpatialOperation.BBoxWithin) spatial = MakeWithin(bbox);
-			else if (op == SpatialOperation.Contains) spatial = MakeContains(bbox);
-			else if (op == SpatialOperation.Intersects) spatial = MakeIntersects(bbox);
-			else if (op == SpatialOperation.IsEqualTo) spatial = MakeEquals(bbox);
-			else if (op == SpatialOperation.IsDisjointTo) spatial = MakeDisjoint(bbox);
-			else if (op == SpatialOperation.IsWithin) spatial = MakeWithin(bbox);
-			else if (op == SpatialOperation.Overlaps) spatial = MakeIntersects(bbox);
-			else
-			{
-				throw new UnsupportedSpatialOperation(op);
-			}
-			return spatial;
-		}
-
-		//-------------------------------------------------------------------------------
-		//
-		//-------------------------------------------------------------------------------
-
-		/// <summary>
-		/// Constructs a query to retrieve documents that fully contain the input envelope.
-		/// </summary>
-		/// <param name="bbox"></param>
-		/// <returns>The spatial query</returns>
-		protected Query MakeContains(Rectangle bbox)
-		{
-
-			// general case
-			// docMinX <= queryExtent.GetMinX() AND docMinY <= queryExtent.GetMinY() AND docMaxX >= queryExtent.GetMaxX() AND docMaxY >= queryExtent.GetMaxY()
-
-			// Y conditions
-			// docMinY <= queryExtent.GetMinY() AND docMaxY >= queryExtent.GetMaxY()
-			Query qMinY = NumericRangeQuery.NewDoubleRange(field_minY, precisionStep, null, bbox.GetMinY(), false, true);
-			Query qMaxY = NumericRangeQuery.NewDoubleRange(field_maxY, precisionStep, bbox.GetMaxY(), null, true, false);
-			Query yConditions = this.MakeQuery(new Query[] { qMinY, qMaxY }, Occur.MUST);
-
-			// X conditions
-			Query xConditions = null;
-
-			// queries that do not cross the date line
-			if (!bbox.GetCrossesDateLine())
-			{
-
-				// X Conditions for documents that do not cross the date line,
-				// documents that contain the min X and max X of the query envelope,
-				// docMinX <= queryExtent.GetMinX() AND docMaxX >= queryExtent.GetMaxX()
-				Query qMinX = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, null, bbox.GetMinX(), false, true);
-				Query qMaxX = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, bbox.GetMaxX(), null, true, false);
-				Query qMinMax = this.MakeQuery(new Query[] { qMinX, qMaxX }, Occur.MUST);
-				Query qNonXDL = this.MakeXDL(false, qMinMax);
-
-				// X Conditions for documents that cross the date line,
-				// the left portion of the document contains the min X of the query
-				// OR the right portion of the document contains the max X of the query,
-				// docMinXLeft <= queryExtent.GetMinX() OR docMaxXRight >= queryExtent.GetMaxX()
-				Query qXDLLeft = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, null, bbox.GetMinX(), false, true);
-				Query qXDLRight = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, bbox.GetMaxX(), null, true, false);
-				Query qXDLLeftRight = this.MakeQuery(new Query[] { qXDLLeft, qXDLRight }, Occur.SHOULD);
-				Query qXDL = this.MakeXDL(true, qXDLLeftRight);
-
-				// apply the non-XDL and XDL conditions
-				xConditions = this.MakeQuery(new Query[] { qNonXDL, qXDL }, Occur.SHOULD);
-
-				// queries that cross the date line
-			}
-			else
-			{
-
-				// No need to search for documents that do not cross the date line
-
-				// X Conditions for documents that cross the date line,
-				// the left portion of the document contains the min X of the query
-				// AND the right portion of the document contains the max X of the query,
-				// docMinXLeft <= queryExtent.GetMinX() AND docMaxXRight >= queryExtent.GetMaxX()
-				Query qXDLLeft = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, null, bbox.GetMinX(), false, true);
-				Query qXDLRight = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, bbox.GetMaxX(), null, true, false);
-				Query qXDLLeftRight = this.MakeQuery(new Query[] { qXDLLeft, qXDLRight }, Occur.MUST);
-
-				xConditions = this.MakeXDL(true, qXDLLeftRight);
-			}
-
-			// both X and Y conditions must occur
-			return this.MakeQuery(new Query[] { xConditions, yConditions }, Occur.MUST);
-		}
-
-		/// <summary>
-		/// Constructs a query to retrieve documents that are disjoint to the input envelope.
-		/// </summary>
-		/// <param name="bbox"></param>
-		/// <returns>the spatial query</returns>
-		Query MakeDisjoint(Rectangle bbox)
-		{
-
-			// general case
-			// docMinX > queryExtent.GetMaxX() OR docMaxX < queryExtent.GetMinX() OR docMinY > queryExtent.GetMaxY() OR docMaxY < queryExtent.GetMinY()
-
-			// Y conditions
-			// docMinY > queryExtent.GetMaxY() OR docMaxY < queryExtent.GetMinY()
-			Query qMinY = NumericRangeQuery.NewDoubleRange(field_minY, precisionStep, bbox.GetMaxY(), null, false, false);
-			Query qMaxY = NumericRangeQuery.NewDoubleRange(field_maxY, precisionStep, null, bbox.GetMinY(), false, false);
-			Query yConditions = this.MakeQuery(new Query[] { qMinY, qMaxY }, Occur.SHOULD);
-
-			// X conditions
-			Query xConditions = null;
-
-			// queries that do not cross the date line
-			if (!bbox.GetCrossesDateLine())
-			{
-
-				// X Conditions for documents that do not cross the date line,
-				// docMinX > queryExtent.GetMaxX() OR docMaxX < queryExtent.GetMinX()
-				Query qMinX = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, bbox.GetMaxX(), null, false, false);
-				Query qMaxX = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, null, bbox.GetMinX(), false, false);
-				Query qMinMax = this.MakeQuery(new Query[] { qMinX, qMaxX }, Occur.SHOULD);
-				Query qNonXDL = this.MakeXDL(false, qMinMax);
-
-				// X Conditions for documents that cross the date line,
-				// both the left and right portions of the document must be disjoint to the query
-				// (docMinXLeft > queryExtent.GetMaxX() OR docMaxXLeft < queryExtent.GetMinX()) AND
-				// (docMinXRight > queryExtent.GetMaxX() OR docMaxXRight < queryExtent.GetMinX())
-				// where: docMaxXLeft = 180.0, docMinXRight = -180.0
-				// (docMaxXLeft  < queryExtent.GetMinX()) equates to (180.0  < queryExtent.GetMinX()) and is ignored
-				// (docMinXRight > queryExtent.GetMaxX()) equates to (-180.0 > queryExtent.GetMaxX()) and is ignored
-				Query qMinXLeft = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, bbox.GetMaxX(), null, false, false);
-				Query qMaxXRight = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, null, bbox.GetMinX(), false, false);
-				Query qLeftRight = this.MakeQuery(new Query[] { qMinXLeft, qMaxXRight }, Occur.MUST);
-				Query qXDL = this.MakeXDL(true, qLeftRight);
-
-				// apply the non-XDL and XDL conditions
-				xConditions = this.MakeQuery(new Query[] { qNonXDL, qXDL }, Occur.SHOULD);
-
-				// queries that cross the date line
-			}
-			else
-			{
-
-				// X Conditions for documents that do not cross the date line,
-				// the document must be disjoint to both the left and right query portions
-				// (docMinX > queryExtent.GetMaxX()Left OR docMaxX < queryExtent.GetMinX()) AND (docMinX > queryExtent.GetMaxX() OR docMaxX < queryExtent.GetMinX()Left)
-				// where: queryExtent.GetMaxX()Left = 180.0, queryExtent.GetMinX()Left = -180.0
-				Query qMinXLeft = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, 180.0, null, false, false);
-				Query qMaxXLeft = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, null, bbox.GetMinX(), false, false);
-				Query qMinXRight = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, bbox.GetMaxX(), null, false, false);
-				Query qMaxXRight = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, null, -180.0, false, false);
-				Query qLeft = this.MakeQuery(new Query[] { qMinXLeft, qMaxXLeft }, Occur.SHOULD);
-				Query qRight = this.MakeQuery(new Query[] { qMinXRight, qMaxXRight }, Occur.SHOULD);
-				Query qLeftRight = this.MakeQuery(new Query[] { qLeft, qRight }, Occur.MUST);
-
-				// No need to search for documents that do not cross the date line
-
-				xConditions = this.MakeXDL(false, qLeftRight);
-			}
-
-			// either X or Y conditions should occur
-			return this.MakeQuery(new Query[] { xConditions, yConditions }, Occur.SHOULD);
-		}
-
-		/*
-		 * Constructs a query to retrieve documents that equal the input envelope.
-		 *
-		 * @return the spatial query
-		 */
-		public Query MakeEquals(Rectangle bbox)
-		{
-
-			// docMinX = queryExtent.GetMinX() AND docMinY = queryExtent.GetMinY() AND docMaxX = queryExtent.GetMaxX() AND docMaxY = queryExtent.GetMaxY()
-			Query qMinX = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, bbox.GetMinX(), bbox.GetMinX(), true, true);
-			Query qMinY = NumericRangeQuery.NewDoubleRange(field_minY, precisionStep, bbox.GetMinY(), bbox.GetMinY(), true, true);
-			Query qMaxX = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, bbox.GetMaxX(), bbox.GetMaxX(), true, true);
-			Query qMaxY = NumericRangeQuery.NewDoubleRange(field_maxY, precisionStep, bbox.GetMaxY(), bbox.GetMaxY(), true, true);
-			
-			var bq = new BooleanQuery
-			         	{
-			         		{qMinX, Occur.MUST},
-			         		{qMinY, Occur.MUST},
-			         		{qMaxX, Occur.MUST},
-			         		{qMaxY, Occur.MUST}
-			         	};
-			return bq;
-		}
-
-		/// <summary>
-		/// Constructs a query to retrieve documents that intersect the input envelope.
-		/// </summary>
-		/// <param name="bbox"></param>
-		/// <returns>the spatial query</returns>
-		Query MakeIntersects(Rectangle bbox)
-		{
-
-			// the original intersects query does not work for envelopes that cross the date line,
-			// switch to a NOT Disjoint query
-
-			// MUST_NOT causes a problem when it's the only clause type within a BooleanQuery,
-			// to get round it we add all documents as a SHOULD
-
-			// there must be an envelope, it must not be disjoint
-			Query qDisjoint = MakeDisjoint(bbox);
-			Query qIsNonXDL = this.MakeXDL(false);
-			Query qIsXDL = this.MakeXDL(true);
-			Query qHasEnv = this.MakeQuery(new Query[] { qIsNonXDL, qIsXDL }, Occur.SHOULD);
-			var qNotDisjoint = new BooleanQuery {{qHasEnv, Occur.MUST}, {qDisjoint, Occur.MUST_NOT}};
-
-			//Query qDisjoint = makeDisjoint();
-			//BooleanQuery qNotDisjoint = new BooleanQuery();
-			//qNotDisjoint.add(new MatchAllDocsQuery(),BooleanClause.Occur.SHOULD);
-			//qNotDisjoint.add(qDisjoint,BooleanClause.Occur.MUST_NOT);
-			return qNotDisjoint;
-		}
-
-		/*
-		 * Makes a boolean query based upon a collection of queries and a logical operator.
-		 *
-		 * @param queries the query collection
-		 * @param occur the logical operator
-		 * @return the query
-		 */
-		BooleanQuery MakeQuery(Query[] queries, Occur occur)
-		{
-			var bq = new BooleanQuery();
-			foreach (Query query in queries)
-			{
-				bq.Add(query, occur);
-			}
-			return bq;
-		}
-
-		/*
-		 * Constructs a query to retrieve documents that are fully within the input envelope.
-		 *
-		 * @return the spatial query
-		 */
-		Query MakeWithin(Rectangle bbox)
-		{
-
-			// general case
-			// docMinX >= queryExtent.GetMinX() AND docMinY >= queryExtent.GetMinY() AND docMaxX <= queryExtent.GetMaxX() AND docMaxY <= queryExtent.GetMaxY()
-
-			// Y conditions
-			// docMinY >= queryExtent.GetMinY() AND docMaxY <= queryExtent.GetMaxY()
-			Query qMinY = NumericRangeQuery.NewDoubleRange(field_minY, precisionStep, bbox.GetMinY(), null, true, false);
-			Query qMaxY = NumericRangeQuery.NewDoubleRange(field_maxY, precisionStep, null, bbox.GetMaxY(), false, true);
-			Query yConditions = this.MakeQuery(new Query[] { qMinY, qMaxY }, Occur.MUST);
-
-			// X conditions
-			Query xConditions = null;
-
-			// X Conditions for documents that cross the date line,
-			// the left portion of the document must be within the left portion of the query,
-			// AND the right portion of the document must be within the right portion of the query
-			// docMinXLeft >= queryExtent.GetMinX() AND docMaxXLeft <= 180.0
-			// AND docMinXRight >= -180.0 AND docMaxXRight <= queryExtent.GetMaxX()
-			Query qXDLLeft = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, bbox.GetMinX(), null, true, false);
-			Query qXDLRight = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, null, bbox.GetMaxX(), false, true);
-			Query qXDLLeftRight = this.MakeQuery(new Query[] { qXDLLeft, qXDLRight }, Occur.MUST);
-			Query qXDL = this.MakeXDL(true, qXDLLeftRight);
-
-			// queries that do not cross the date line
-			if (!bbox.GetCrossesDateLine())
-			{
-
-				// X Conditions for documents that do not cross the date line,
-				// docMinX >= queryExtent.GetMinX() AND docMaxX <= queryExtent.GetMaxX()
-				Query qMinX = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, bbox.GetMinX(), null, true, false);
-				Query qMaxX = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, null, bbox.GetMaxX(), false, true);
-				Query qMinMax = this.MakeQuery(new Query[] { qMinX, qMaxX }, Occur.MUST);
-				Query qNonXDL = this.MakeXDL(false, qMinMax);
-
-				// apply the non-XDL or XDL X conditions
-				if ((bbox.GetMinX() <= -180.0) && bbox.GetMaxX() >= 180.0)
-				{
-					xConditions = this.MakeQuery(new Query[] { qNonXDL, qXDL }, Occur.SHOULD);
-				}
-				else
-				{
-					xConditions = qNonXDL;
-				}
-
-				// queries that cross the date line
-			}
-			else
-			{
-
-				// X Conditions for documents that do not cross the date line
-
-				// the document should be within the left portion of the query
-				// docMinX >= queryExtent.GetMinX() AND docMaxX <= 180.0
-				Query qMinXLeft = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, bbox.GetMinX(), null, true, false);
-				Query qMaxXLeft = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, null, 180.0, false, true);
-				Query qLeft = this.MakeQuery(new Query[] { qMinXLeft, qMaxXLeft }, Occur.MUST);
-
-				// the document should be within the right portion of the query
-				// docMinX >= -180.0 AND docMaxX <= queryExtent.GetMaxX()
-				Query qMinXRight = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, -180.0, null, true, false);
-				Query qMaxXRight = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, null, bbox.GetMaxX(), false, true);
-				Query qRight = this.MakeQuery(new Query[] { qMinXRight, qMaxXRight }, Occur.MUST);
-
-				// either left or right conditions should occur,
-				// apply the left and right conditions to documents that do not cross the date line
-				Query qLeftRight = this.MakeQuery(new Query[] { qLeft, qRight }, Occur.SHOULD);
-				Query qNonXDL = this.MakeXDL(false, qLeftRight);
-
-				// apply the non-XDL and XDL conditions
-				xConditions = this.MakeQuery(new Query[] { qNonXDL, qXDL }, Occur.SHOULD);
-			}
-
-			// both X and Y conditions must occur
-			return this.MakeQuery(new Query[] { xConditions, yConditions }, Occur.MUST);
-		}
-
-		/*
-		 * Constructs a query to retrieve documents that do or do not cross the date line.
-		 *
-		 *
-		 * @param crossedDateLine <code>true</code> for documents that cross the date line
-		 * @return the query
-		 */
-		public Query MakeXDL(bool crossedDateLine)
-		{
-			// The 'T' and 'F' values match solr fields
-			return new TermQuery(new Term(field_xdl, crossedDateLine ? "T" : "F"));
-		}
-
-		/*
-		 * Constructs a query to retrieve documents that do or do not cross the date line
-		 * and match the supplied spatial query.
-		 *
-		 * @param crossedDateLine <code>true</code> for documents that cross the date line
-		 * @param query the spatial query
-		 * @return the query
-		 */
-		public Query MakeXDL(bool crossedDateLine, Query query)
-		{
-			var bq = new BooleanQuery
-			         	{{this.MakeXDL(crossedDateLine), Occur.MUST}, {query, Occur.MUST}};
-			return bq;
-		}
-	}
+            Query spatial = null;
+
+            // Useful for understanding Relations:
+            // http://edndoc.esri.com/arcsde/9.1/general_topics/understand_spatial_relations.htm
+            SpatialOperation op = args.Operation;
+            if (op == SpatialOperation.BBoxIntersects) spatial = MakeIntersects(bbox);
+            else if (op == SpatialOperation.BBoxWithin) spatial = MakeWithin(bbox);
+            else if (op == SpatialOperation.Contains) spatial = MakeContains(bbox);
+            else if (op == SpatialOperation.Intersects) spatial = MakeIntersects(bbox);
+            else if (op == SpatialOperation.IsEqualTo) spatial = MakeEquals(bbox);
+            else if (op == SpatialOperation.IsDisjointTo) spatial = MakeDisjoint(bbox);
+            else if (op == SpatialOperation.IsWithin) spatial = MakeWithin(bbox);
+            else if (op == SpatialOperation.Overlaps) spatial = MakeIntersects(bbox);
+            else
+            {
+                throw new UnsupportedSpatialOperation(op);
+            }
+            return spatial;
+        }
+
+        //-------------------------------------------------------------------------------
+        //
+        //-------------------------------------------------------------------------------
+
+        /// <summary>
+        /// Constructs a query to retrieve documents that fully contain the input envelope.
+        /// </summary>
+        /// <param name="bbox"></param>
+        /// <returns>The spatial query</returns>
+        protected Query MakeContains(Rectangle bbox)
+        {
+
+            // general case
+            // docMinX <= queryExtent.GetMinX() AND docMinY <= queryExtent.GetMinY() AND docMaxX >= queryExtent.GetMaxX() AND docMaxY >= queryExtent.GetMaxY()
+
+            // Y conditions
+            // docMinY <= queryExtent.GetMinY() AND docMaxY >= queryExtent.GetMaxY()
+            Query qMinY = NumericRangeQuery.NewDoubleRange(field_minY, precisionStep, null, bbox.GetMinY(), false, true);
+            Query qMaxY = NumericRangeQuery.NewDoubleRange(field_maxY, precisionStep, bbox.GetMaxY(), null, true, false);
+            Query yConditions = this.MakeQuery(new Query[] { qMinY, qMaxY }, Occur.MUST);
+
+            // X conditions
+            Query xConditions = null;
+
+            // queries that do not cross the date line
+            if (!bbox.GetCrossesDateLine())
+            {
+
+                // X Conditions for documents that do not cross the date line,
+                // documents that contain the min X and max X of the query envelope,
+                // docMinX <= queryExtent.GetMinX() AND docMaxX >= queryExtent.GetMaxX()
+                Query qMinX = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, null, bbox.GetMinX(), false, true);
+                Query qMaxX = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, bbox.GetMaxX(), null, true, false);
+                Query qMinMax = this.MakeQuery(new Query[] { qMinX, qMaxX }, Occur.MUST);
+                Query qNonXDL = this.MakeXDL(false, qMinMax);
+
+                // X Conditions for documents that cross the date line,
+                // the left portion of the document contains the min X of the query
+                // OR the right portion of the document contains the max X of the query,
+                // docMinXLeft <= queryExtent.GetMinX() OR docMaxXRight >= queryExtent.GetMaxX()
+                Query qXDLLeft = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, null, bbox.GetMinX(), false, true);
+                Query qXDLRight = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, bbox.GetMaxX(), null, true, false);
+                Query qXDLLeftRight = this.MakeQuery(new Query[] { qXDLLeft, qXDLRight }, Occur.SHOULD);
+                Query qXDL = this.MakeXDL(true, qXDLLeftRight);
+
+                // apply the non-XDL and XDL conditions
+                xConditions = this.MakeQuery(new Query[] { qNonXDL, qXDL }, Occur.SHOULD);
+
+                // queries that cross the date line
+            }
+            else
+            {
+
+                // No need to search for documents that do not cross the date line
+
+                // X Conditions for documents that cross the date line,
+                // the left portion of the document contains the min X of the query
+                // AND the right portion of the document contains the max X of the query,
+                // docMinXLeft <= queryExtent.GetMinX() AND docMaxXRight >= queryExtent.GetMaxX()
+                Query qXDLLeft = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, null, bbox.GetMinX(), false, true);
+                Query qXDLRight = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, bbox.GetMaxX(), null, true, false);
+                Query qXDLLeftRight = this.MakeQuery(new Query[] { qXDLLeft, qXDLRight }, Occur.MUST);
+
+                xConditions = this.MakeXDL(true, qXDLLeftRight);
+            }
+
+            // both X and Y conditions must occur
+            return this.MakeQuery(new Query[] { xConditions, yConditions }, Occur.MUST);
+        }
+
+        /// <summary>
+        /// Constructs a query to retrieve documents that are disjoint to the input envelope.
+        /// </summary>
+        /// <param name="bbox"></param>
+        /// <returns>the spatial query</returns>
+        Query MakeDisjoint(Rectangle bbox)
+        {
+
+            // general case
+            // docMinX > queryExtent.GetMaxX() OR docMaxX < queryExtent.GetMinX() OR docMinY > queryExtent.GetMaxY() OR docMaxY < queryExtent.GetMinY()
+
+            // Y conditions
+            // docMinY > queryExtent.GetMaxY() OR docMaxY < queryExtent.GetMinY()
+            Query qMinY = NumericRangeQuery.NewDoubleRange(field_minY, precisionStep, bbox.GetMaxY(), null, false, false);
+            Query qMaxY = NumericRangeQuery.NewDoubleRange(field_maxY, precisionStep, null, bbox.GetMinY(), false, false);
+            Query yConditions = this.MakeQuery(new Query[] { qMinY, qMaxY }, Occur.SHOULD);
+
+            // X conditions
+            Query xConditions = null;
+
+            // queries that do not cross the date line
+            if (!bbox.GetCrossesDateLine())
+            {
+
+                // X Conditions for documents that do not cross the date line,
+                // docMinX > queryExtent.GetMaxX() OR docMaxX < queryExtent.GetMinX()
+                Query qMinX = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, bbox.GetMaxX(), null, false, false);
+                Query qMaxX = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, null, bbox.GetMinX(), false, false);
+                Query qMinMax = this.MakeQuery(new Query[] { qMinX, qMaxX }, Occur.SHOULD);
+                Query qNonXDL = this.MakeXDL(false, qMinMax);
+
+                // X Conditions for documents that cross the date line,
+                // both the left and right portions of the document must be disjoint to the query
+                // (docMinXLeft > queryExtent.GetMaxX() OR docMaxXLeft < queryExtent.GetMinX()) AND
+                // (docMinXRight > queryExtent.GetMaxX() OR docMaxXRight < queryExtent.GetMinX())
+                // where: docMaxXLeft = 180.0, docMinXRight = -180.0
+                // (docMaxXLeft  < queryExtent.GetMinX()) equates to (180.0  < queryExtent.GetMinX()) and is ignored
+                // (docMinXRight > queryExtent.GetMaxX()) equates to (-180.0 > queryExtent.GetMaxX()) and is ignored
+                Query qMinXLeft = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, bbox.GetMaxX(), null, false, false);
+                Query qMaxXRight = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, null, bbox.GetMinX(), false, false);
+                Query qLeftRight = this.MakeQuery(new Query[] { qMinXLeft, qMaxXRight }, Occur.MUST);
+                Query qXDL = this.MakeXDL(true, qLeftRight);
+
+                // apply the non-XDL and XDL conditions
+                xConditions = this.MakeQuery(new Query[] { qNonXDL, qXDL }, Occur.SHOULD);
+
+                // queries that cross the date line
+            }
+            else
+            {
+
+                // X Conditions for documents that do not cross the date line,
+                // the document must be disjoint to both the left and right query portions
+                // (docMinX > queryExtent.GetMaxX()Left OR docMaxX < queryExtent.GetMinX()) AND (docMinX > queryExtent.GetMaxX() OR docMaxX < queryExtent.GetMinX()Left)
+                // where: queryExtent.GetMaxX()Left = 180.0, queryExtent.GetMinX()Left = -180.0
+                Query qMinXLeft = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, 180.0, null, false, false);
+                Query qMaxXLeft = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, null, bbox.GetMinX(), false, false);
+                Query qMinXRight = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, bbox.GetMaxX(), null, false, false);
+                Query qMaxXRight = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, null, -180.0, false, false);
+                Query qLeft = this.MakeQuery(new Query[] { qMinXLeft, qMaxXLeft }, Occur.SHOULD);
+                Query qRight = this.MakeQuery(new Query[] { qMinXRight, qMaxXRight }, Occur.SHOULD);
+                Query qLeftRight = this.MakeQuery(new Query[] { qLeft, qRight }, Occur.MUST);
+
+                // No need to search for documents that do not cross the date line
+
+                xConditions = this.MakeXDL(false, qLeftRight);
+            }
+
+            // either X or Y conditions should occur
+            return this.MakeQuery(new Query[] { xConditions, yConditions }, Occur.SHOULD);
+        }
+
+        /*
+         * Constructs a query to retrieve documents that equal the input envelope.
+         *
+         * @return the spatial query
+         */
+        public Query MakeEquals(Rectangle bbox)
+        {
+
+            // docMinX = queryExtent.GetMinX() AND docMinY = queryExtent.GetMinY() AND docMaxX = queryExtent.GetMaxX() AND docMaxY = queryExtent.GetMaxY()
+            Query qMinX = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, bbox.GetMinX(), bbox.GetMinX(), true, true);
+            Query qMinY = NumericRangeQuery.NewDoubleRange(field_minY, precisionStep, bbox.GetMinY(), bbox.GetMinY(), true, true);
+            Query qMaxX = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, bbox.GetMaxX(), bbox.GetMaxX(), true, true);
+            Query qMaxY = NumericRangeQuery.NewDoubleRange(field_maxY, precisionStep, bbox.GetMaxY(), bbox.GetMaxY(), true, true);
+            
+            var bq = new BooleanQuery
+                         {
+                             {qMinX, Occur.MUST},
+                             {qMinY, Occur.MUST},
+                             {qMaxX, Occur.MUST},
+                             {qMaxY, Occur.MUST}
+                         };
+            return bq;
+        }
+
+        /// <summary>
+        /// Constructs a query to retrieve documents that intersect the input envelope.
+        /// </summary>
+        /// <param name="bbox"></param>
+        /// <returns>the spatial query</returns>
+        Query MakeIntersects(Rectangle bbox)
+        {
+
+            // the original intersects query does not work for envelopes that cross the date line,
+            // switch to a NOT Disjoint query
+
+            // MUST_NOT causes a problem when it's the only clause type within a BooleanQuery,
+            // to get round it we add all documents as a SHOULD
+
+            // there must be an envelope, it must not be disjoint
+            Query qDisjoint = MakeDisjoint(bbox);
+            Query qIsNonXDL = this.MakeXDL(false);
+            Query qIsXDL = this.MakeXDL(true);
+            Query qHasEnv = this.MakeQuery(new Query[] { qIsNonXDL, qIsXDL }, Occur.SHOULD);
+            var qNotDisjoint = new BooleanQuery {{qHasEnv, Occur.MUST}, {qDisjoint, Occur.MUST_NOT}};
+
+            //Query qDisjoint = makeDisjoint();
+            //BooleanQuery qNotDisjoint = new BooleanQuery();
+            //qNotDisjoint.add(new MatchAllDocsQuery(),BooleanClause.Occur.SHOULD);
+            //qNotDisjoint.add(qDisjoint,BooleanClause.Occur.MUST_NOT);
+            return qNotDisjoint;
+        }
+
+        /*
+         * Makes a boolean query based upon a collection of queries and a logical operator.
+         *
+         * @param queries the query collection
+         * @param occur the logical operator
+         * @return the query
+         */
+        BooleanQuery MakeQuery(Query[] queries, Occur occur)
+        {
+            var bq = new BooleanQuery();
+            foreach (Query query in queries)
+            {
+                bq.Add(query, occur);
+            }
+            return bq;
+        }
+
+        /*
+         * Constructs a query to retrieve documents that are fully within the input envelope.
+         *
+         * @return the spatial query
+         */
+        Query MakeWithin(Rectangle bbox)
+        {
+
+            // general case
+            // docMinX >= queryExtent.GetMinX() AND docMinY >= queryExtent.GetMinY() AND docMaxX <= queryExtent.GetMaxX() AND docMaxY <= queryExtent.GetMaxY()
+
+            // Y conditions
+            // docMinY >= queryExtent.GetMinY() AND docMaxY <= queryExtent.GetMaxY()
+            Query qMinY = NumericRangeQuery.NewDoubleRange(field_minY, precisionStep, bbox.GetMinY(), null, true, false);
+            Query qMaxY = NumericRangeQuery.NewDoubleRange(field_maxY, precisionStep, null, bbox.GetMaxY(), false, true);
+            Query yConditions = this.MakeQuery(new Query[] { qMinY, qMaxY }, Occur.MUST);
+
+            // X conditions
+            Query xConditions = null;
+
+            // X Conditions for documents that cross the date line,
+            // the left portion of the document must be within the left portion of the query,
+            // AND the right portion of the document must be within the right portion of the query
+            // docMinXLeft >= queryExtent.GetMinX() AND docMaxXLeft <= 180.0
+            // AND docMinXRight >= -180.0 AND docMaxXRight <= queryExtent.GetMaxX()
+            Query qXDLLeft = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, bbox.GetMinX(), null, true, false);
+            Query qXDLRight = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, null, bbox.GetMaxX(), false, true);
+            Query qXDLLeftRight = this.MakeQuery(new Query[] { qXDLLeft, qXDLRight }, Occur.MUST);
+            Query qXDL = this.MakeXDL(true, qXDLLeftRight);
+
+            // queries that do not cross the date line
+            if (!bbox.GetCrossesDateLine())
+            {
+
+                // X Conditions for documents that do not cross the date line,
+                // docMinX >= queryExtent.GetMinX() AND docMaxX <= queryExtent.GetMaxX()
+                Query qMinX = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, bbox.GetMinX(), null, true, false);
+                Query qMaxX = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, null, bbox.GetMaxX(), false, true);
+                Query qMinMax = this.MakeQuery(new Query[] { qMinX, qMaxX }, Occur.MUST);
+                Query qNonXDL = this.MakeXDL(false, qMinMax);
+
+                // apply the non-XDL or XDL X conditions
+                if ((bbox.GetMinX() <= -180.0) && bbox.GetMaxX() >= 180.0)
+                {
+                    xConditions = this.MakeQuery(new Query[] { qNonXDL, qXDL }, Occur.SHOULD);
+                }
+                else
+                {
+                    xConditions = qNonXDL;
+                }
+
+                // queries that cross the date line
+            }
+            else
+            {
+
+                // X Conditions for documents that do not cross the date line
+
+                // the document should be within the left portion of the query
+                // docMinX >= queryExtent.GetMinX() AND docMaxX <= 180.0
+                Query qMinXLeft = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, bbox.GetMinX(), null, true, false);
+                Query qMaxXLeft = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, null, 180.0, false, true);
+                Query qLeft = this.MakeQuery(new Query[] { qMinXLeft, qMaxXLeft }, Occur.MUST);
+
+                // the document should be within the right portion of the query
+                // docMinX >= -180.0 AND docMaxX <= queryExtent.GetMaxX()
+                Query qMinXRight = NumericRangeQuery.NewDoubleRange(field_minX, precisionStep, -180.0, null, true, false);
+                Query qMaxXRight = NumericRangeQuery.NewDoubleRange(field_maxX, precisionStep, null, bbox.GetMaxX(), false, true);
+                Query qRight = this.MakeQuery(new Query[] { qMinXRight, qMaxXRight }, Occur.MUST);
+
+                // either left or right conditions should occur,
+                // apply the left and right conditions to documents that do not cross the date line
+                Query qLeftRight = this.MakeQuery(new Query[] { qLeft, qRight }, Occur.SHOULD);
+                Query qNonXDL = this.MakeXDL(false, qLeftRight);
+
+                // apply the non-XDL and XDL conditions
+                xConditions = this.MakeQuery(new Query[] { qNonXDL, qXDL }, Occur.SHOULD);
+            }
+
+            // both X and Y conditions must occur
+            return this.MakeQuery(new Query[] { xConditions, yConditions }, Occur.MUST);
+        }
+
+        /*
+         * Constructs a query to retrieve documents that do or do not cross the date line.
+         *
+         *
+         * @param crossedDateLine <code>true</code> for documents that cross the date line
+         * @return the query
+         */
+        public Query MakeXDL(bool crossedDateLine)
+        {
+            // The 'T' and 'F' values match solr fields
+            return new TermQuery(new Term(field_xdl, crossedDateLine ? "T" : "F"));
+        }
+
+        /*
+         * Constructs a query to retrieve documents that do or do not cross the date line
+         * and match the supplied spatial query.
+         *
+         * @param crossedDateLine <code>true</code> for documents that cross the date line
+         * @param query the spatial query
+         * @return the query
+         */
+        public Query MakeXDL(bool crossedDateLine, Query query)
+        {
+            var bq = new BooleanQuery
+                         {{this.MakeXDL(crossedDateLine), Occur.MUST}, {query, Occur.MUST}};
+            return bq;
+        }
+    }
 }
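
One detail of BBoxStrategy worth spelling out: MakeIntersects never tests intersection directly. It requires that a document has an envelope at all (the SHOULD pair over the XDL true/false flags) and is NOT disjoint from the query box, which sidesteps the dateline-crossing cases that break a direct intersects query. The underlying identity is easiest to check on one-dimensional extents; a small Lucene-free C# sketch (helper names are illustrative):

    using System;

    internal static class IntersectsDemo
    {
        // 1-D analogue of MakeDisjoint: extents are disjoint when one starts
        // after the other ends.
        private static bool Disjoint(double minA, double maxA, double minB, double maxB)
        {
            return minA > maxB || maxA < minB;
        }

        // 1-D analogue of MakeIntersects: intersects == NOT disjoint.
        private static bool Intersects(double minA, double maxA, double minB, double maxB)
        {
            return !Disjoint(minA, maxA, minB, maxB);
        }

        private static void Main()
        {
            Console.WriteLine(Intersects(0, 10, 5, 15));  // True: [0,10] overlaps [5,15]
            Console.WriteLine(Intersects(0, 10, 11, 20)); // False: gap between them
        }
    }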

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/BBox/DistanceSimilarity.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/BBox/DistanceSimilarity.cs b/src/contrib/Spatial/BBox/DistanceSimilarity.cs
index 98273f4..9b7c88b 100644
--- a/src/contrib/Spatial/BBox/DistanceSimilarity.cs
+++ b/src/contrib/Spatial/BBox/DistanceSimilarity.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Prefix/PointPrefixTreeFieldCacheProvider.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Prefix/PointPrefixTreeFieldCacheProvider.cs b/src/contrib/Spatial/Prefix/PointPrefixTreeFieldCacheProvider.cs
index 614226c..5a7c554 100644
--- a/src/contrib/Spatial/Prefix/PointPrefixTreeFieldCacheProvider.cs
+++ b/src/contrib/Spatial/Prefix/PointPrefixTreeFieldCacheProvider.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -29,23 +29,23 @@ namespace Lucene.Net.Spatial.Prefix
     /// Note, due to the fragmented representation of Shapes in these Strategies, this implementation
     /// can only retrieve the central {@link Point} of the original Shapes.
     /// </summary>
-	public class PointPrefixTreeFieldCacheProvider : ShapeFieldCacheProvider<Point>
-	{
-		readonly SpatialPrefixTree grid; //
+    public class PointPrefixTreeFieldCacheProvider : ShapeFieldCacheProvider<Point>
+    {
+        readonly SpatialPrefixTree grid; //
 
-		public PointPrefixTreeFieldCacheProvider(SpatialPrefixTree grid, String shapeField, int defaultSize)
-			: base(shapeField, defaultSize)
-		{
-			this.grid = grid;
-		}
+        public PointPrefixTreeFieldCacheProvider(SpatialPrefixTree grid, String shapeField, int defaultSize)
+            : base(shapeField, defaultSize)
+        {
+            this.grid = grid;
+        }
 
-		//A kluge that this is a field
-		private Node scanCell = null;
+        //A kluge that this is a field
+        private Node scanCell = null;
 
-		protected override Point ReadShape(Term term)
-		{
-			scanCell = grid.GetNode(term.Text, scanCell);
-			return scanCell.IsLeaf() ? scanCell.GetShape().GetCenter() : null;
-		}
-	}
+        protected override Point ReadShape(Term term)
+        {
+            scanCell = grid.GetNode(term.Text, scanCell);
+            return scanCell.IsLeaf() ? scanCell.GetShape().GetCenter() : null;
+        }
+    }
 }
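
As the summary above says, only the central Point of each indexed shape is recoverable here, and only from leaf cells: ReadShape returns the cell's center for a leaf and null otherwise. A short illustrative C# sketch of that contract (Cell is a hypothetical stand-in for the real Node type):

    using System;

    internal static class ReadShapeDemo
    {
        // Stand-in for the prefix-tree Node: leaf cells carry a usable center.
        private sealed class Cell
        {
            public bool IsLeaf;
            public double CenterX, CenterY;
        }

        // Mirrors ReadShape: a center point for leaf cells, null for
        // intermediate cells, which the cache provider then skips.
        private static string CenterOrNull(Cell cell)
        {
            return cell.IsLeaf
                ? string.Format("({0}, {1})", cell.CenterX, cell.CenterY)
                : null;
        }

        private static void Main()
        {
            Console.WriteLine(CenterOrNull(new Cell { IsLeaf = true, CenterX = 1.5, CenterY = 2.5 })); // (1.5, 2.5)
            Console.WriteLine(CenterOrNull(new Cell { IsLeaf = false }) ?? "null");                    // null
        }
    }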

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Prefix/PrefixTreeStrategy.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Prefix/PrefixTreeStrategy.cs b/src/contrib/Spatial/Prefix/PrefixTreeStrategy.cs
index e300b4d..e15f6cd 100644
--- a/src/contrib/Spatial/Prefix/PrefixTreeStrategy.cs
+++ b/src/contrib/Spatial/Prefix/PrefixTreeStrategy.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -67,110 +67,110 @@ namespace Lucene.Net.Spatial.Prefix
         /// </summary>
         public double DistErrPct { get; set; }
 
-		public override AbstractField[] CreateIndexableFields(Shape shape)
-		{
-		    double distErr = SpatialArgs.CalcDistanceFromErrPct(shape, distErrPct, ctx);
-		    return CreateIndexableFields(shape, distErr);
-		}
+        public override AbstractField[] CreateIndexableFields(Shape shape)
+        {
+            double distErr = SpatialArgs.CalcDistanceFromErrPct(shape, distErrPct, ctx);
+            return CreateIndexableFields(shape, distErr);
+        }
 
         public AbstractField[] CreateIndexableFields(Shape shape, double distErr)
         {
             int detailLevel = grid.GetLevelForDistance(distErr);
             var cells = grid.GetNodes(shape, detailLevel, true);//true=intermediate cells
-			//If shape isn't a point, add a full-resolution center-point so that
+            //If shape isn't a point, add a full-resolution center-point so that
             // PointPrefixTreeFieldCacheProvider has the center-points.
-			// TODO index each center of a multi-point? Yes/no?
-			if (!(shape is Point))
-			{
-				Point ctr = shape.GetCenter();
+            // TODO index each center of a multi-point? Yes/no?
+            if (!(shape is Point))
+            {
+                Point ctr = shape.GetCenter();
                 //TODO should be smarter; don't index 2 tokens for this in CellTokenStream. Harmless though.
-				cells.Add(grid.GetNodes(ctr, grid.GetMaxLevels(), false)[0]);
-			}
-
-			//TODO is CellTokenStream supposed to be re-used somehow? see Uwe's comments:
-			//  http://code.google.com/p/lucene-spatial-playground/issues/detail?id=4
-
-			return new AbstractField[]
-			       	{
-			       		new Field(GetFieldName(), new CellTokenStream(cells.GetEnumerator()))
-			       			{OmitNorms = true, OmitTermFreqAndPositions = true}
-			       	};
-		}
-
-		/// <summary>
-		/// Outputs the tokenString of a cell, and if its a leaf, outputs it again with the leaf byte.
-		/// </summary>
-		protected class CellTokenStream : TokenStream
-		{
-			private ITermAttribute termAtt;
-			private readonly IEnumerator<Node> iter;
-
-			public CellTokenStream(IEnumerator<Node> tokens)
-			{
-				this.iter = tokens;
-				Init();
-			}
-
-			private void Init()
-			{
-				termAtt = AddAttribute<ITermAttribute>();
-			}
-
-			private string nextTokenStringNeedingLeaf;
-
-			public override bool IncrementToken()
-			{
-				ClearAttributes();
-				if (nextTokenStringNeedingLeaf != null)
-				{
-					termAtt.Append(nextTokenStringNeedingLeaf);
-					termAtt.Append((char)Node.LEAF_BYTE);
-					nextTokenStringNeedingLeaf = null;
-					return true;
-				}
-				if (iter.MoveNext())
-				{
-					Node cell = iter.Current;
-					var token = cell.GetTokenString();
-					termAtt.Append(token);
-					if (cell.IsLeaf())
-						nextTokenStringNeedingLeaf = token;
-					return true;
-				}
-				return false;
-			}
-
-			protected override void Dispose(bool disposing)
-			{
-			}
-		}
-
-		public ShapeFieldCacheProvider<Point> GetCacheProvider()
-		{
-			PointPrefixTreeFieldCacheProvider p;
-			if (!provider.TryGetValue(GetFieldName(), out p) || p == null)
-			{
-				lock (this)
-				{//double checked locking idiom is okay since provider is threadsafe
-					if (!provider.ContainsKey(GetFieldName()))
-					{
-						p = new PointPrefixTreeFieldCacheProvider(grid, GetFieldName(), defaultFieldValuesArrayLen);
-						provider[GetFieldName()] = p;
-					}
-				}
-			}
-			return p;
-		}
+                cells.Add(grid.GetNodes(ctr, grid.GetMaxLevels(), false)[0]);
+            }
+
+            //TODO is CellTokenStream supposed to be re-used somehow? see Uwe's comments:
+            //  http://code.google.com/p/lucene-spatial-playground/issues/detail?id=4
+
+            return new AbstractField[]
+                       {
+                           new Field(GetFieldName(), new CellTokenStream(cells.GetEnumerator()))
+                               {OmitNorms = true, OmitTermFreqAndPositions = true}
+                       };
+        }
+
+        /// <summary>
+        /// Outputs the tokenString of a cell, and if it's a leaf, outputs it again with the leaf byte.
+        /// </summary>
+        protected class CellTokenStream : TokenStream
+        {
+            private ITermAttribute termAtt;
+            private readonly IEnumerator<Node> iter;
+
+            public CellTokenStream(IEnumerator<Node> tokens)
+            {
+                this.iter = tokens;
+                Init();
+            }
+
+            private void Init()
+            {
+                termAtt = AddAttribute<ITermAttribute>();
+            }
+
+            private string nextTokenStringNeedingLeaf;
+
+            public override bool IncrementToken()
+            {
+                ClearAttributes();
+                if (nextTokenStringNeedingLeaf != null)
+                {
+                    termAtt.Append(nextTokenStringNeedingLeaf);
+                    termAtt.Append((char)Node.LEAF_BYTE);
+                    nextTokenStringNeedingLeaf = null;
+                    return true;
+                }
+                if (iter.MoveNext())
+                {
+                    Node cell = iter.Current;
+                    var token = cell.GetTokenString();
+                    termAtt.Append(token);
+                    if (cell.IsLeaf())
+                        nextTokenStringNeedingLeaf = token;
+                    return true;
+                }
+                return false;
+            }
+
+            protected override void Dispose(bool disposing)
+            {
+            }
+        }
+
+        public ShapeFieldCacheProvider<Point> GetCacheProvider()
+        {
+            PointPrefixTreeFieldCacheProvider p;
+            if (!provider.TryGetValue(GetFieldName(), out p) || p == null)
+            {
+                lock (this)
+                {//double-checked locking is okay since provider is threadsafe
+                    if (!provider.TryGetValue(GetFieldName(), out p))
+                    {
+                        p = new PointPrefixTreeFieldCacheProvider(grid, GetFieldName(), defaultFieldValuesArrayLen);
+                        provider[GetFieldName()] = p;
+                    }
+                }
+            }
+            return p;
+        }
 
         public override ValueSource MakeDistanceValueSource(Point queryPoint)
-		{
-			var p = (PointPrefixTreeFieldCacheProvider)GetCacheProvider();
+        {
+            var p = (PointPrefixTreeFieldCacheProvider)GetCacheProvider();
             return new ShapeFieldCacheDistanceValueSource(ctx, p, queryPoint);
-		}
+        }
 
-		public SpatialPrefixTree GetGrid()
-		{
-			return grid;
-		}
-	}
+        public SpatialPrefixTree GetGrid()
+        {
+            return grid;
+        }
+    }
 }

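The CellTokenStream above drives indexing: each cell contributes its token once, and a leaf cell contributes it a second time with the trailing LEAF_BYTE ('+'). A standalone sketch of that emission order, using no Lucene.Net types (the sample geohash tokens are illustrative):

    using System;
    using System.Collections.Generic;

    static class CellTokenSketch
    {
        // Mirrors CellTokenStream: every cell yields its token; a leaf cell then
        // yields the same token again with the '+' (Node.LEAF_BYTE) marker.
        public static IEnumerable<string> Emit(IEnumerable<Tuple<string, bool>> cells)
        {
            foreach (var cell in cells)
            {
                yield return cell.Item1;
                if (cell.Item2)                      // cell is a leaf
                    yield return cell.Item1 + "+";
            }
        }
    }

    // Emit over ("9q8", false), ("9q8y", true) produces: "9q8", "9q8y", "9q8y+".
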
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Prefix/RecursivePrefixTreeFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Prefix/RecursivePrefixTreeFilter.cs b/src/contrib/Spatial/Prefix/RecursivePrefixTreeFilter.cs
index bb12704..ce0d0d9 100644
--- a/src/contrib/Spatial/Prefix/RecursivePrefixTreeFilter.cs
+++ b/src/contrib/Spatial/Prefix/RecursivePrefixTreeFilter.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -26,16 +26,16 @@ using Spatial4n.Core.Shapes;
 
 namespace Lucene.Net.Spatial.Prefix
 {
-	/// <summary>
-	/// Performs a spatial intersection filter against a field indexed with {@link SpatialPrefixTree}, a Trie.
-	/// SPT yields terms (grids) at length 1 and at greater lengths corresponding to greater precisions.
-	/// This filter recursively traverses each grid length and uses methods on {@link Shape} to efficiently know
-	/// that all points at a prefix fit in the shape or not to either short-circuit unnecessary traversals or to efficiently
-	/// load all enclosed points.
-	/// </summary>
-	public class RecursivePrefixTreeFilter : Filter
-	{
-		/* TODOs for future:
+    /// <summary>
+    /// Performs a spatial intersection filter against a field indexed with {@link SpatialPrefixTree}, a Trie.
+    /// SPT yields terms (grids) at length 1 and at greater lengths corresponding to greater precisions.
+    /// This filter recursively traverses each grid length and uses methods on {@link Shape} to determine
+    /// whether all points under a prefix fall inside the shape, either short-circuiting unnecessary
+    /// traversals or efficiently loading all enclosed points.
+    /// </summary>
+    public class RecursivePrefixTreeFilter : Filter
+    {
+        /* TODOs for future:
 
 Can a polygon query shape be optimized / made-simpler at recursive depths (e.g. intersection of shape + cell box)
 
@@ -48,142 +48,142 @@ if (!scan) {
   long termsThreshold = (long) estimateNumberIndexedTerms(cell.length(),queryShape.getDocFreqExpenseThreshold(cell));
   long thisOrd = termsEnum.ord();
   scan = (termsEnum.seek(thisOrd+termsThreshold+1) == TermsEnum.SeekStatus.END
-		  || !cell.contains(termsEnum.term()));
+          || !cell.contains(termsEnum.term()));
   termsEnum.seek(thisOrd);//return to last position
 }
 
 */
 
-		private readonly String fieldName;
-		private readonly SpatialPrefixTree grid;
-		private readonly Shape queryShape;
-		private readonly int prefixGridScanLevel;//at least one less than grid.getMaxLevels()
-		private readonly int detailLevel;
-
-		public RecursivePrefixTreeFilter(String fieldName, SpatialPrefixTree grid, Shape queryShape, int prefixGridScanLevel,
-							 int detailLevel)
-		{
-			this.fieldName = fieldName;
-			this.grid = grid;
-			this.queryShape = queryShape;
-			this.prefixGridScanLevel = Math.Max(1, Math.Min(prefixGridScanLevel, grid.GetMaxLevels() - 1));
-			this.detailLevel = detailLevel;
-			Debug.Assert(detailLevel <= grid.GetMaxLevels());
-		}
-
-		public override DocIdSet GetDocIdSet(Index.IndexReader reader /*, Bits acceptDocs*/)
-		{
-			var bits = new OpenBitSet(reader.MaxDoc);
-			var terms = new TermsEnumCompatibility(reader, fieldName);
-			var term = terms.Next();
-			if (term == null)
-				return null;
-			Node scanCell = null;
-
-			//cells is treated like a stack. LinkedList conveniently has bulk add to beginning. It's in sorted order so that we
-			//  always advance forward through the termsEnum index.
-			var cells = new LinkedList<Node>(
-				grid.GetWorldNode().GetSubCells(queryShape));
-
-			//This is a recursive algorithm that starts with one or more "big" cells, and then recursively dives down into the
-			// first such cell that intersects with the query shape.  It's a depth first traversal because we don't move onto
-			// the next big cell (breadth) until we're completely done considering all smaller cells beneath it. For a given
-			// cell, if it's *within* the query shape then we can conveniently short-circuit the depth traversal and
-			// grab all documents assigned to this cell/term.  For an intersection of the cell and query shape, we either
-			// recursively step down another grid level or we decide heuristically (via prefixGridScanLevel) that there aren't
-			// that many points, and so we scan through all terms within this cell (i.e. the term starts with the cell's term),
-			// seeing which ones are within the query shape.
-			while (cells.Count > 0)
-			{
-				Node cell = cells.First.Value; cells.RemoveFirst();
-				var cellTerm = cell.GetTokenString();
-				var seekStat = terms.Seek(cellTerm);
-				if (seekStat == TermsEnumCompatibility.SeekStatus.END)
-					break;
-				if (seekStat == TermsEnumCompatibility.SeekStatus.NOT_FOUND)
-					continue;
-				if (cell.GetLevel() == detailLevel || cell.IsLeaf())
-				{
-					terms.Docs(bits);
-				}
-				else
-				{//any other intersection
-					//If the next indexed term is the leaf marker, then add all of them
-					var nextCellTerm = terms.Next();
-					Debug.Assert(nextCellTerm.Text.StartsWith(cellTerm));
-					scanCell = grid.GetNode(nextCellTerm.Text, scanCell);
-					if (scanCell.IsLeaf())
-					{
-						terms.Docs(bits);
-						term = terms.Next();//move pointer to avoid potential redundant addDocs() below
-					}
-
-					//Decide whether to continue to divide & conquer, or whether it's time to scan through terms beneath this cell.
-					// Scanning is a performance optimization trade-off.
-					bool scan = cell.GetLevel() >= prefixGridScanLevel;//simple heuristic
-
-					if (!scan)
-					{
-						//Divide & conquer
-						var lst = cell.GetSubCells(queryShape);
-						for (var i = lst.Count - 1; i >= 0; i--) //add to beginning
-						{
-							cells.AddFirst(lst[i]);
-						}
-					}
-					else
-					{
-						//Scan through all terms within this cell to see if they are within the queryShape. No seek()s.
-						for (var t = terms.Term(); t != null && t.Text.StartsWith(cellTerm); t = terms.Next())
-						{
-							scanCell = grid.GetNode(t.Text, scanCell);
-							int termLevel = scanCell.GetLevel();
-							if (termLevel > detailLevel)
-								continue;
-							if (termLevel == detailLevel || scanCell.IsLeaf())
-							{
-								//TODO should put more thought into implications of box vs point
-								Shape cShape = termLevel == grid.GetMaxLevels() ? scanCell.GetCenter() : scanCell.GetShape();
+        private readonly String fieldName;
+        private readonly SpatialPrefixTree grid;
+        private readonly Shape queryShape;
+        private readonly int prefixGridScanLevel;//at least one less than grid.getMaxLevels()
+        private readonly int detailLevel;
+
+        public RecursivePrefixTreeFilter(String fieldName, SpatialPrefixTree grid, Shape queryShape, int prefixGridScanLevel,
+                             int detailLevel)
+        {
+            this.fieldName = fieldName;
+            this.grid = grid;
+            this.queryShape = queryShape;
+            this.prefixGridScanLevel = Math.Max(1, Math.Min(prefixGridScanLevel, grid.GetMaxLevels() - 1));
+            this.detailLevel = detailLevel;
+            Debug.Assert(detailLevel <= grid.GetMaxLevels());
+        }
+
+        public override DocIdSet GetDocIdSet(Index.IndexReader reader /*, Bits acceptDocs*/)
+        {
+            var bits = new OpenBitSet(reader.MaxDoc);
+            var terms = new TermsEnumCompatibility(reader, fieldName);
+            var term = terms.Next();
+            if (term == null)
+                return null;
+            Node scanCell = null;
+
+            //cells is treated like a stack. LinkedList conveniently has bulk add to beginning. It's in sorted order so that we
+            //  always advance forward through the termsEnum index.
+            var cells = new LinkedList<Node>(
+                grid.GetWorldNode().GetSubCells(queryShape));
+
+            //This is a recursive algorithm that starts with one or more "big" cells, and then recursively dives down into the
+            // first such cell that intersects with the query shape.  It's a depth first traversal because we don't move onto
+            // the next big cell (breadth) until we're completely done considering all smaller cells beneath it. For a given
+            // cell, if it's *within* the query shape then we can conveniently short-circuit the depth traversal and
+            // grab all documents assigned to this cell/term.  For an intersection of the cell and query shape, we either
+            // recursively step down another grid level or we decide heuristically (via prefixGridScanLevel) that there aren't
+            // that many points, and so we scan through all terms within this cell (i.e. the term starts with the cell's term),
+            // seeing which ones are within the query shape.
+            while (cells.Count > 0)
+            {
+                Node cell = cells.First.Value; cells.RemoveFirst();
+                var cellTerm = cell.GetTokenString();
+                var seekStat = terms.Seek(cellTerm);
+                if (seekStat == TermsEnumCompatibility.SeekStatus.END)
+                    break;
+                if (seekStat == TermsEnumCompatibility.SeekStatus.NOT_FOUND)
+                    continue;
+                if (cell.GetLevel() == detailLevel || cell.IsLeaf())
+                {
+                    terms.Docs(bits);
+                }
+                else
+                {//any other intersection
+                    //If the next indexed term is the leaf marker, then add all of them
+                    var nextCellTerm = terms.Next();
+                    Debug.Assert(nextCellTerm.Text.StartsWith(cellTerm));
+                    scanCell = grid.GetNode(nextCellTerm.Text, scanCell);
+                    if (scanCell.IsLeaf())
+                    {
+                        terms.Docs(bits);
+                        term = terms.Next();//move pointer to avoid potential redundant addDocs() below
+                    }
+
+                    //Decide whether to continue to divide & conquer, or whether it's time to scan through terms beneath this cell.
+                    // Scanning is a performance optimization trade-off.
+                    bool scan = cell.GetLevel() >= prefixGridScanLevel;//simple heuristic
+
+                    if (!scan)
+                    {
+                        //Divide & conquer
+                        var lst = cell.GetSubCells(queryShape);
+                        for (var i = lst.Count - 1; i >= 0; i--) //add to beginning
+                        {
+                            cells.AddFirst(lst[i]);
+                        }
+                    }
+                    else
+                    {
+                        //Scan through all terms within this cell to see if they are within the queryShape. No seek()s.
+                        for (var t = terms.Term(); t != null && t.Text.StartsWith(cellTerm); t = terms.Next())
+                        {
+                            scanCell = grid.GetNode(t.Text, scanCell);
+                            int termLevel = scanCell.GetLevel();
+                            if (termLevel > detailLevel)
+                                continue;
+                            if (termLevel == detailLevel || scanCell.IsLeaf())
+                            {
+                                //TODO should put more thought into implications of box vs point
+                                Shape cShape = termLevel == grid.GetMaxLevels() ? scanCell.GetCenter() : scanCell.GetShape();
                                 if (queryShape.Relate(cShape) == SpatialRelation.DISJOINT)
-									continue;
-
-								terms.Docs(bits);
-							}
-						}//term loop
-					}
-				}
-			}//cell loop
-
-			return bits;
-		}
-
-		public override string ToString()
-		{
-			return "GeoFilter{fieldName='" + fieldName + '\'' + ", shape=" + queryShape + '}';
-		}
-
-		public override bool Equals(object o)
-		{
-			if (this == o) return true;
-			var that = o as RecursivePrefixTreeFilter;
-
-			if (that == null) return false;
-
-			if (!fieldName.Equals(that.fieldName)) return false;
-			//note that we don't need to look at grid since for the same field it should be the same
-			if (prefixGridScanLevel != that.prefixGridScanLevel) return false;
-			if (detailLevel != that.detailLevel) return false;
-			if (!queryShape.Equals(that.queryShape)) return false;
-
-			return true;
-		}
-
-		public override int GetHashCode()
-		{
-			int result = fieldName.GetHashCode();
-			result = 31 * result + queryShape.GetHashCode();
-			result = 31 * result + detailLevel;
-			return result;
-		}
-	}
+                                    continue;
+
+                                terms.Docs(bits);
+                            }
+                        }//term loop
+                    }
+                }
+            }//cell loop
+
+            return bits;
+        }
+
+        public override string ToString()
+        {
+            return "GeoFilter{fieldName='" + fieldName + '\'' + ", shape=" + queryShape + '}';
+        }
+
+        public override bool Equals(object o)
+        {
+            if (this == o) return true;
+            var that = o as RecursivePrefixTreeFilter;
+
+            if (that == null) return false;
+
+            if (!fieldName.Equals(that.fieldName)) return false;
+            //note that we don't need to look at grid since for the same field it should be the same
+            if (prefixGridScanLevel != that.prefixGridScanLevel) return false;
+            if (detailLevel != that.detailLevel) return false;
+            if (!queryShape.Equals(that.queryShape)) return false;
+
+            return true;
+        }
+
+        public override int GetHashCode()
+        {
+            int result = fieldName.GetHashCode();
+            result = 31 * result + queryShape.GetHashCode();
+            result = 31 * result + detailLevel;
+            return result;
+        }
+    }
 }

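The GetDocIdSet loop above has three outcomes per cell: collect all docs (detail level or leaf), recurse into sub-cells, or scan the terms under the prefix once the cell reaches prefixGridScanLevel. A standalone sketch of just that control flow; seeking, leaf markers and shape relations are omitted, and a cell's level is its token length, as in Node.GetLevel():

    using System;
    using System.Collections.Generic;

    static class TraversalSketch
    {
        public static void Visit(IList<string> startCells, int prefixGridScanLevel, int detailLevel,
                                 Func<string, IList<string>> subCells, Action<string> collect)
        {
            var cells = new LinkedList<string>(startCells); // used as a stack
            while (cells.Count > 0)
            {
                string cell = cells.First.Value; cells.RemoveFirst();
                if (cell.Length == detailLevel)
                    collect(cell);                      // full precision: take every doc here
                else if (cell.Length >= prefixGridScanLevel)
                    collect(cell);                      // heuristic cutoff: scan terms under the prefix
                else
                {
                    IList<string> lst = subCells(cell);
                    for (int i = lst.Count - 1; i >= 0; i--)
                        cells.AddFirst(lst[i]);         // depth-first; keeps term-sorted order
                }
            }
        }
    }
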
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Prefix/RecursivePrefixTreeStrategy.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Prefix/RecursivePrefixTreeStrategy.cs b/src/contrib/Spatial/Prefix/RecursivePrefixTreeStrategy.cs
index e1f5718..d6fa681 100644
--- a/src/contrib/Spatial/Prefix/RecursivePrefixTreeStrategy.cs
+++ b/src/contrib/Spatial/Prefix/RecursivePrefixTreeStrategy.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -22,41 +22,41 @@ using Spatial4n.Core.Shapes;
 
 namespace Lucene.Net.Spatial.Prefix
 {
-	/// <summary>
-	/// Based on {@link RecursivePrefixTreeFilter}.
-	/// </summary>
-	public class RecursivePrefixTreeStrategy : PrefixTreeStrategy
-	{
-		private int prefixGridScanLevel;
-
-		public RecursivePrefixTreeStrategy(SpatialPrefixTree grid, string fieldName)
-			: base(grid, fieldName)
-		{
-			prefixGridScanLevel = grid.GetMaxLevels() - 4;//TODO this default constant is dependent on the prefix grid size
-		}
-
-		public void SetPrefixGridScanLevel(int prefixGridScanLevel)
-		{
-			//TODO if negative then subtract from maxlevels
-			this.prefixGridScanLevel = prefixGridScanLevel;
-		}
-
-		public override Filter MakeFilter(SpatialArgs args)
-		{
-			var op = args.Operation;
+    /// <summary>
+    /// Based on {@link RecursivePrefixTreeFilter}.
+    /// </summary>
+    public class RecursivePrefixTreeStrategy : PrefixTreeStrategy
+    {
+        private int prefixGridScanLevel;
+
+        public RecursivePrefixTreeStrategy(SpatialPrefixTree grid, string fieldName)
+            : base(grid, fieldName)
+        {
+            prefixGridScanLevel = grid.GetMaxLevels() - 4;//TODO this default constant is dependent on the prefix grid size
+        }
+
+        public void SetPrefixGridScanLevel(int prefixGridScanLevel)
+        {
+            //TODO if negative then subtract from maxlevels
+            this.prefixGridScanLevel = prefixGridScanLevel;
+        }
+
+        public override Filter MakeFilter(SpatialArgs args)
+        {
+            var op = args.Operation;
             if (op != SpatialOperation.Intersects)
-				throw new UnsupportedSpatialOperation(op);
+                throw new UnsupportedSpatialOperation(op);
 
-			Shape shape = args.Shape;
+            Shape shape = args.Shape;
 
             int detailLevel = grid.GetLevelForDistance(args.ResolveDistErr(ctx, distErrPct));
 
-			return new RecursivePrefixTreeFilter(GetFieldName(), grid, shape, prefixGridScanLevel, detailLevel);
-		}
+            return new RecursivePrefixTreeFilter(GetFieldName(), grid, shape, prefixGridScanLevel, detailLevel);
+        }
 
-		public override string ToString()
-		{
-			return GetType().Name + "(prefixGridScanLevel:" + prefixGridScanLevel + ",SPG:(" + grid + "))";
-		}
-	}
+        public override string ToString()
+        {
+            return GetType().Name + "(prefixGridScanLevel:" + prefixGridScanLevel + ",SPG:(" + grid + "))";
+        }
+    }
 }

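A hypothetical usage sketch for the strategy above; the "geo" field, the scan-level tweak, the SpatialArgs constructor and the namespaces are assumptions based on the surrounding contrib code:

    using Lucene.Net.Search;
    using Lucene.Net.Spatial.Prefix;
    using Lucene.Net.Spatial.Prefix.Tree;
    using Lucene.Net.Spatial.Queries;
    using Spatial4n.Core.Shapes;

    static class StrategySketch
    {
        public static Filter BuildIntersectsFilter(SpatialPrefixTree grid, Shape shape)
        {
            var strategy = new RecursivePrefixTreeStrategy(grid, "geo");
            strategy.SetPrefixGridScanLevel(grid.GetMaxLevels() - 3); // recurse-vs-scan cutoff
            var args = new SpatialArgs(SpatialOperation.Intersects, shape);
            return strategy.MakeFilter(args); // anything but Intersects throws
        }
    }
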
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Prefix/TermQueryPrefixTreeStrategy.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Prefix/TermQueryPrefixTreeStrategy.cs b/src/contrib/Spatial/Prefix/TermQueryPrefixTreeStrategy.cs
index a658a0b..84a074a 100644
--- a/src/contrib/Spatial/Prefix/TermQueryPrefixTreeStrategy.cs
+++ b/src/contrib/Spatial/Prefix/TermQueryPrefixTreeStrategy.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -25,32 +25,32 @@ using Spatial4n.Core.Shapes;
 
 namespace Lucene.Net.Spatial.Prefix
 {
-	/// <summary>
-	/// A basic implementation using a large {@link TermsFilter} of all the nodes from
-	/// {@link SpatialPrefixTree#getNodes(com.spatial4j.core.shape.Shape, int, boolean)}.
-	/// </summary>
-	public class TermQueryPrefixTreeStrategy : PrefixTreeStrategy
-	{
-		public TermQueryPrefixTreeStrategy(SpatialPrefixTree grid, string fieldName)
-			: base(grid, fieldName)
-		{
-		}
+    /// <summary>
+    /// A basic implementation using a large {@link TermsFilter} of all the nodes from
+    /// {@link SpatialPrefixTree#getNodes(com.spatial4j.core.shape.Shape, int, boolean)}.
+    /// </summary>
+    public class TermQueryPrefixTreeStrategy : PrefixTreeStrategy
+    {
+        public TermQueryPrefixTreeStrategy(SpatialPrefixTree grid, string fieldName)
+            : base(grid, fieldName)
+        {
+        }
 
-		public override Filter MakeFilter(SpatialArgs args)
-		{
-			SpatialOperation op = args.Operation;
+        public override Filter MakeFilter(SpatialArgs args)
+        {
+            SpatialOperation op = args.Operation;
             if (op != SpatialOperation.Intersects)
-				throw new UnsupportedSpatialOperation(op);
+                throw new UnsupportedSpatialOperation(op);
 
-			Shape shape = args.Shape;
+            Shape shape = args.Shape;
             int detailLevel = grid.GetLevelForDistance(args.ResolveDistErr(ctx, distErrPct));
-			var cells = grid.GetNodes(shape, detailLevel, false);
-			var filter = new TermsFilter();
-			foreach (Node cell in cells)
-			{
-				filter.AddTerm(new Term(GetFieldName(), cell.GetTokenString()));
-			}
-			return filter;
-		}
-	}
+            var cells = grid.GetNodes(shape, detailLevel, false);
+            var filter = new TermsFilter();
+            foreach (Node cell in cells)
+            {
+                filter.AddTerm(new Term(GetFieldName(), cell.GetTokenString()));
+            }
+            return filter;
+        }
+    }
 }

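Unlike the recursive filter, MakeFilter above flattens the query into one exact term per covering cell. A sketch of the resulting filter shape; the tokens, the "geo" field and the TermsFilter namespace (contrib Queries) are assumptions:

    using Lucene.Net.Index;
    using Lucene.Net.Search;

    static class TermsFilterSketch
    {
        public static TermsFilter Build()
        {
            var filter = new TermsFilter();                       // OR over all cell tokens
            foreach (var token in new[] { "9q", "9q8", "9q8y" })  // hypothetical covering cells
                filter.AddTerm(new Term("geo", token));
            return filter;
        }
    }
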
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Prefix/Tree/GeohashPrefixTree.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Prefix/Tree/GeohashPrefixTree.cs b/src/contrib/Spatial/Prefix/Tree/GeohashPrefixTree.cs
index 27676e4..b05d7d2 100644
--- a/src/contrib/Spatial/Prefix/Tree/GeohashPrefixTree.cs
+++ b/src/contrib/Spatial/Prefix/Tree/GeohashPrefixTree.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Prefix/Tree/Node.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Prefix/Tree/Node.cs b/src/contrib/Spatial/Prefix/Tree/Node.cs
index 033bbb8..ce403b0 100644
--- a/src/contrib/Spatial/Prefix/Tree/Node.cs
+++ b/src/contrib/Spatial/Prefix/Tree/Node.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -24,199 +24,199 @@ using Spatial4n.Core.Shapes;
 
 namespace Lucene.Net.Spatial.Prefix.Tree
 {
-	public abstract class Node : IComparable<Node>
-	{
-		public static byte LEAF_BYTE = (byte)'+';//NOTE: must sort before letters & numbers
-
-		// /*
-		//Holds a byte[] and/or String representation of the cell. Both are lazy constructed from the other.
-		//Neither contains the trailing leaf byte.
-		// */
-		//private byte[] bytes;
-		//private int b_off;
-		//private int b_len;
-
-		private String token;//this is the only part of equality
-
-		protected SpatialRelation shapeRel;//set in getSubCells(filter), and via setLeaf().
-		protected readonly SpatialPrefixTree spatialPrefixTree;
-
-		protected Node(SpatialPrefixTree spatialPrefixTree, String token)
-		{
-			this.spatialPrefixTree = spatialPrefixTree;
-			this.token = token;
-			if (token.Length > 0 && token[token.Length - 1] == (char)LEAF_BYTE)
-			{
-				this.token = token.Substring(0, token.Length - 1);
-				SetLeaf();
-			}
-
-			if (GetLevel() == 0)
-				GetShape();//ensure any lazy instantiation completes to make this threadsafe
-		}
-
-		public virtual void Reset(string newToken)
-		{
-			Debug.Assert(GetLevel() != 0);
-			this.token = newToken;
-			shapeRel = SpatialRelation.NULL_VALUE;
-			b_fixLeaf();
-		}
-
-		private void b_fixLeaf()
-		{
-			if (GetLevel() == spatialPrefixTree.GetMaxLevels())
-			{
-				SetLeaf();
-			}
-		}
-
-		public SpatialRelation GetShapeRel()
-		{
-			return shapeRel;
-		}
-
-		public bool IsLeaf()
-		{
-			return shapeRel == SpatialRelation.WITHIN;
-		}
-
-		public void SetLeaf()
-		{
-			Debug.Assert(GetLevel() != 0);
-			shapeRel = SpatialRelation.WITHIN;
-		}
-
-		/*
-		 * Note: doesn't contain a trailing leaf byte.
-		 */
-		public String GetTokenString()
-		{
-			if (token == null)
-				throw new InvalidOperationException("Somehow we got a null token");
-			return token;
-		}
-
-		///// <summary>
-		///// Note: doesn't contain a trailing leaf byte.
-		///// </summary>
-		///// <returns></returns>
-		//public byte[] GetTokenBytes()
-		//{
-		//    if (bytes != null)
-		//    {
-		//        if (b_off != 0 || b_len != bytes.Length)
-		//        {
-		//            throw new IllegalStateException("Not supported if byte[] needs to be recreated.");
-		//        }
-		//    }
-		//    else
-		//    {
-		//        bytes = token.GetBytes(SpatialPrefixTree.UTF8);
-		//        b_off = 0;
-		//        b_len = bytes.Length;
-		//    }
-		//    return bytes;
-		//}
-
-		public int GetLevel()
-		{
-			return token.Length;
-			//return token != null ? token.Length : b_len;
-		}
-
-		//TODO add getParent() and update some algorithms to use this?
-		//public Cell getParent();
-
-		/*
-		 * Like {@link #getSubCells()} but with the results filtered by a shape. If that shape is a {@link com.spatial4j.core.shape.Point} then it
-		 * must call {@link #getSubCell(com.spatial4j.core.shape.Point)};
-		 * Precondition: Never called when getLevel() == maxLevel.
-		 *
-		 * @param shapeFilter an optional filter for the returned cells.
-		 * @return A set of cells (no dups), sorted. Not Modifiable.
-		 */
-		public IList<Node> GetSubCells(Shape shapeFilter)
-		{
-			//Note: Higher-performing subclasses might override to consider the shape filter to generate fewer cells.
-			var point = shapeFilter as Point;
-			if (point != null)
-			{
+    public abstract class Node : IComparable<Node>
+    {
+        public static readonly byte LEAF_BYTE = (byte)'+';//NOTE: must sort before letters & numbers
+
+        // /*
+        //Holds a byte[] and/or String representation of the cell. Both are lazy constructed from the other.
+        //Neither contains the trailing leaf byte.
+        // */
+        //private byte[] bytes;
+        //private int b_off;
+        //private int b_len;
+
+        private String token;//this is the only part of equality
+
+        protected SpatialRelation shapeRel;//set in getSubCells(filter), and via setLeaf().
+        protected readonly SpatialPrefixTree spatialPrefixTree;
+
+        protected Node(SpatialPrefixTree spatialPrefixTree, String token)
+        {
+            this.spatialPrefixTree = spatialPrefixTree;
+            this.token = token;
+            if (token.Length > 0 && token[token.Length - 1] == (char)LEAF_BYTE)
+            {
+                this.token = token.Substring(0, token.Length - 1);
+                SetLeaf();
+            }
+
+            if (GetLevel() == 0)
+                GetShape();//ensure any lazy instantiation completes to make this threadsafe
+        }
+
+        public virtual void Reset(string newToken)
+        {
+            Debug.Assert(GetLevel() != 0);
+            this.token = newToken;
+            shapeRel = SpatialRelation.NULL_VALUE;
+            b_fixLeaf();
+        }
+
+        private void b_fixLeaf()
+        {
+            if (GetLevel() == spatialPrefixTree.GetMaxLevels())
+            {
+                SetLeaf();
+            }
+        }
+
+        public SpatialRelation GetShapeRel()
+        {
+            return shapeRel;
+        }
+
+        public bool IsLeaf()
+        {
+            return shapeRel == SpatialRelation.WITHIN;
+        }
+
+        public void SetLeaf()
+        {
+            Debug.Assert(GetLevel() != 0);
+            shapeRel = SpatialRelation.WITHIN;
+        }
+
+        /*
+         * Note: doesn't contain a trailing leaf byte.
+         */
+        public String GetTokenString()
+        {
+            if (token == null)
+                throw new InvalidOperationException("Somehow we got a null token");
+            return token;
+        }
+
+        ///// <summary>
+        ///// Note: doesn't contain a trailing leaf byte.
+        ///// </summary>
+        ///// <returns></returns>
+        //public byte[] GetTokenBytes()
+        //{
+        //    if (bytes != null)
+        //    {
+        //        if (b_off != 0 || b_len != bytes.Length)
+        //        {
+        //            throw new IllegalStateException("Not supported if byte[] needs to be recreated.");
+        //        }
+        //    }
+        //    else
+        //    {
+        //        bytes = token.GetBytes(SpatialPrefixTree.UTF8);
+        //        b_off = 0;
+        //        b_len = bytes.Length;
+        //    }
+        //    return bytes;
+        //}
+
+        public int GetLevel()
+        {
+            return token.Length;
+            //return token != null ? token.Length : b_len;
+        }
+
+        //TODO add getParent() and update some algorithms to use this?
+        //public Cell getParent();
+
+        /*
+         * Like {@link #getSubCells()} but with the results filtered by a shape. If that shape is a {@link com.spatial4j.core.shape.Point} then it
+         * must call {@link #getSubCell(com.spatial4j.core.shape.Point)};
+         * Precondition: Never called when getLevel() == maxLevel.
+         *
+         * @param shapeFilter an optional filter for the returned cells.
+         * @return A set of cells (no dups), sorted. Not Modifiable.
+         */
+        public IList<Node> GetSubCells(Shape shapeFilter)
+        {
+            //Note: Higher-performing subclasses might override to consider the shape filter to generate fewer cells.
+            var point = shapeFilter as Point;
+            if (point != null)
+            {
 #if !NET35
-				return new ReadOnlyCollectionBuilder<Node>(new[] {GetSubCell(point)}).ToReadOnlyCollection();
+                return new ReadOnlyCollectionBuilder<Node>(new[] {GetSubCell(point)}).ToReadOnlyCollection();
 #else
                 return new List<Node>(new[]{GetSubCell(point)}).AsReadOnly();
 #endif
 
-			}
+            }
 
-			var cells = GetSubCells();
-			if (shapeFilter == null)
-			{
-				return cells;
-			}
-			var copy = new List<Node>(cells.Count);//copy since cells contractually isn't modifiable
-			foreach (var cell in cells)
-			{
+            var cells = GetSubCells();
+            if (shapeFilter == null)
+            {
+                return cells;
+            }
+            var copy = new List<Node>(cells.Count);//copy since cells contractually isn't modifiable
+            foreach (var cell in cells)
+            {
                 SpatialRelation rel = cell.GetShape().Relate(shapeFilter);
-				if (rel == SpatialRelation.DISJOINT)
-					continue;
-				cell.shapeRel = rel;
-				copy.Add(cell);
-			}
-			cells = copy;
-			return cells;
-		}
-
-		/*
-		 * Performant implementations are expected to implement this efficiently by considering the current
-		 * cell's boundary.
-		 * Precondition: Never called when getLevel() == maxLevel.
-		 * Precondition: this.getShape().relate(p) != DISJOINT.
-		 */
-		public abstract Node GetSubCell(Point p);
-
-		//TODO Cell getSubCell(byte b)
-
-		/*
-		 * Gets the cells at the next grid cell level that cover this cell.
-		 * Precondition: Never called when getLevel() == maxLevel.
-		 *
-		 * @return A set of cells (no dups), sorted. Not Modifiable.
-		 */
-		public abstract IList<Node> GetSubCells();
-
-		/*
-		 * {@link #getSubCells()}.size() -- usually a constant. Should be >=2
-		 */
-		public abstract int GetSubCellsSize();
-
-		public abstract Shape GetShape();
-
-		public virtual Point GetCenter()
-		{
-			return GetShape().GetCenter();
-		}
-
-
-		public int CompareTo(Node o)
-		{
-			return System.String.CompareOrdinal(GetTokenString(), o.GetTokenString());
-		}
-
-		public override bool Equals(object obj)
-		{
-			return !(obj == null || !(obj is Node)) && GetTokenString().Equals(((Node) obj).GetTokenString());
-		}
-
-		public override int GetHashCode()
-		{
-			return GetTokenString().GetHashCode();
-		}
-
-		public override string ToString()
-		{
-			return GetTokenString() + (IsLeaf() ? new string(new[] {(char) LEAF_BYTE}) : string.Empty);
-		}
-	}
+                if (rel == SpatialRelation.DISJOINT)
+                    continue;
+                cell.shapeRel = rel;
+                copy.Add(cell);
+            }
+            cells = copy;
+            return cells;
+        }
+
+        /*
+         * Performant implementations are expected to implement this efficiently by considering the current
+         * cell's boundary.
+         * Precondition: Never called when getLevel() == maxLevel.
+         * Precondition: this.getShape().relate(p) != DISJOINT.
+         */
+        public abstract Node GetSubCell(Point p);
+
+        //TODO Cell getSubCell(byte b)
+
+        /*
+         * Gets the cells at the next grid cell level that cover this cell.
+         * Precondition: Never called when getLevel() == maxLevel.
+         *
+         * @return A set of cells (no dups), sorted. Not Modifiable.
+         */
+        public abstract IList<Node> GetSubCells();
+
+        /*
+         * {@link #getSubCells()}.size() -- usually a constant. Should be >=2
+         */
+        public abstract int GetSubCellsSize();
+
+        public abstract Shape GetShape();
+
+        public virtual Point GetCenter()
+        {
+            return GetShape().GetCenter();
+        }
+
+
+        public int CompareTo(Node o)
+        {
+            return System.String.CompareOrdinal(GetTokenString(), o.GetTokenString());
+        }
+
+        public override bool Equals(object obj)
+        {
+            return obj is Node && GetTokenString().Equals(((Node) obj).GetTokenString());
+        }
+
+        public override int GetHashCode()
+        {
+            return GetTokenString().GetHashCode();
+        }
+
+        public override string ToString()
+        {
+            return GetTokenString() + (IsLeaf() ? new string(new[] {(char) LEAF_BYTE}) : string.Empty);
+        }
+    }
 }

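The constructor above normalizes tokens: a trailing '+' (LEAF_BYTE) marks a leaf and is stripped, and GetLevel() is simply the stripped token's length. A standalone sketch of that parsing, with an illustrative geohash token:

    using System;

    static class NodeTokenSketch
    {
        // Mirrors Node's constructor: strip the '+' leaf marker, keep the bare token.
        public static void Parse(string raw, out string token, out bool isLeaf)
        {
            isLeaf = raw.Length > 0 && raw[raw.Length - 1] == '+';
            token = isLeaf ? raw.Substring(0, raw.Length - 1) : raw;
        }
    }

    // Parse("9q8y+", out token, out isLeaf) gives token "9q8y" (level 4), isLeaf true.
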

[28/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Analyzer.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Analyzer.cs b/src/core/Analysis/Analyzer.cs
index cea0ee3..353ea24 100644
--- a/src/core/Analysis/Analyzer.cs
+++ b/src/core/Analysis/Analyzer.cs
@@ -22,77 +22,77 @@ using Lucene.Net.Util;
 
 namespace Lucene.Net.Analysis
 {
-	/// <summary>An Analyzer builds TokenStreams, which analyze text.  It thus represents a
-	/// policy for extracting index terms from text.
-	/// <p/>
-	/// Typical implementations first build a Tokenizer, which breaks the stream of
-	/// characters from the Reader into raw Tokens.  One or more TokenFilters may
-	/// then be applied to the output of the Tokenizer.
-	/// </summary>
-	public abstract class Analyzer : IDisposable
-	{
-		/// <summary>Creates a TokenStream which tokenizes all the text in the provided
-		/// Reader.  Must be able to handle null field name for
-		/// backward compatibility.
-		/// </summary>
-		public abstract TokenStream TokenStream(String fieldName, System.IO.TextReader reader);
-		
-		/// <summary>Creates a TokenStream that is allowed to be re-used
-		/// from the previous time that the same thread called
-		/// this method.  Callers that do not need to use more
-		/// than one TokenStream at the same time from this
-		/// analyzer should use this method for better
-		/// performance.
-		/// </summary>
-		public virtual TokenStream ReusableTokenStream(String fieldName, System.IO.TextReader reader)
-		{
-			return TokenStream(fieldName, reader);
-		}
-		
-		private CloseableThreadLocal<Object> tokenStreams = new CloseableThreadLocal<Object>();
-	    private bool isDisposed;
+    /// <summary>An Analyzer builds TokenStreams, which analyze text.  It thus represents a
+    /// policy for extracting index terms from text.
+    /// <p/>
+    /// Typical implementations first build a Tokenizer, which breaks the stream of
+    /// characters from the Reader into raw Tokens.  One or more TokenFilters may
+    /// then be applied to the output of the Tokenizer.
+    /// </summary>
+    public abstract class Analyzer : IDisposable
+    {
+        /// <summary>Creates a TokenStream which tokenizes all the text in the provided
+        /// Reader.  Must be able to handle null field name for
+        /// backward compatibility.
+        /// </summary>
+        public abstract TokenStream TokenStream(String fieldName, System.IO.TextReader reader);
+        
+        /// <summary>Creates a TokenStream that is allowed to be re-used
+        /// from the previous time that the same thread called
+        /// this method.  Callers that do not need to use more
+        /// than one TokenStream at the same time from this
+        /// analyzer should use this method for better
+        /// performance.
+        /// </summary>
+        public virtual TokenStream ReusableTokenStream(String fieldName, System.IO.TextReader reader)
+        {
+            return TokenStream(fieldName, reader);
+        }
+        
+        private CloseableThreadLocal<Object> tokenStreams = new CloseableThreadLocal<Object>();
+        private bool isDisposed;
 
-	    /// <summary>Used by Analyzers that implement reusableTokenStream
-	    /// to retrieve previously saved TokenStreams for re-use
-	    /// by the same thread. 
-	    /// </summary>
-	    protected internal virtual object PreviousTokenStream
-	    {
-	        get
-	        {
-	            if (tokenStreams == null)
-	            {
-	                throw new AlreadyClosedException("this Analyzer is closed");
-	            }
-	            return tokenStreams.Get();
-	        }
-	        set
-	        {
-	            if (tokenStreams == null)
-	            {
-	                throw new AlreadyClosedException("this Analyzer is closed");
-	            }
-	            tokenStreams.Set(value);
-	        }
-	    }
+        /// <summary>Used by Analyzers that implement reusableTokenStream
+        /// to retrieve previously saved TokenStreams for re-use
+        /// by the same thread. 
+        /// </summary>
+        protected internal virtual object PreviousTokenStream
+        {
+            get
+            {
+                if (tokenStreams == null)
+                {
+                    throw new AlreadyClosedException("this Analyzer is closed");
+                }
+                return tokenStreams.Get();
+            }
+            set
+            {
+                if (tokenStreams == null)
+                {
+                    throw new AlreadyClosedException("this Analyzer is closed");
+                }
+                tokenStreams.Set(value);
+            }
+        }
 
-	    [Obsolete()]
-		protected internal bool overridesTokenStreamMethod = false;
-		
-		/// <deprecated> This is only present to preserve
-		/// back-compat of classes that subclass a core analyzer
-		/// and override tokenStream but not reusableTokenStream 
-		/// </deprecated>
-		/// <summary>
+        [Obsolete()]
+        protected internal bool overridesTokenStreamMethod = false;
+        
+        /// <deprecated> This is only present to preserve
+        /// back-compat of classes that subclass a core analyzer
+        /// and override tokenStream but not reusableTokenStream 
+        /// </deprecated>
+        /// <summary>
         /// Java uses Class&lt;? extends Analyzer&gt; to constrain <typeparamref name="TClass"/> to
         /// only Types that inherit from Analyzer.  C# does not have a generic type class,
         /// i.e. Type&lt;T&gt;.  The method signature stays the same, and an exception may
         /// still be thrown if the method doesn't exist.
-		/// </summary>
+        /// </summary>
         [Obsolete("This is only present to preserve back-compat of classes that subclass a core analyzer and override tokenStream but not reusableTokenStream ")]
-		protected internal virtual void SetOverridesTokenStreamMethod<TClass>()
+        protected internal virtual void SetOverridesTokenStreamMethod<TClass>()
             where TClass : Analyzer
-		{
+        {
             try
             {
                 System.Reflection.MethodInfo m = this.GetType().GetMethod("TokenStream", new[] { typeof(string), typeof(System.IO.TextReader) });
@@ -103,50 +103,50 @@ namespace Lucene.Net.Analysis
                 // can't happen, as baseClass is subclass of Analyzer
                 overridesTokenStreamMethod = false;
             }
-		}
-		
-		
-		/// <summary> Invoked before indexing a Fieldable instance if
-		/// terms have already been added to that field.  This allows custom
-		/// analyzers to place an automatic position increment gap between
-		/// Fieldable instances using the same field name.  The default value
-		/// position increment gap is 0.  With a 0 position increment gap and
-		/// the typical default token position increment of 1, all terms in a field,
-		/// including across Fieldable instances, are in successive positions, allowing
-		/// exact PhraseQuery matches, for instance, across Fieldable instance boundaries.
-		/// 
-		/// </summary>
-		/// <param name="fieldName">Fieldable name being indexed.
-		/// </param>
-		/// <returns> position increment gap, added to the next token emitted from <see cref="TokenStream(String,System.IO.TextReader)" />
-		/// </returns>
-		public virtual int GetPositionIncrementGap(String fieldName)
-		{
-			return 0;
-		}
-		
-		/// <summary> Just like <see cref="GetPositionIncrementGap" />, except for
-		/// Token offsets instead.  By default this returns 1 for
-		/// tokenized fields and, as if the fields were joined
-		/// with an extra space character, and 0 for un-tokenized
-		/// fields.  This method is only called if the field
-		/// produced at least one token for indexing.
-		/// 
-		/// </summary>
-		/// <param name="field">the field just indexed
-		/// </param>
-		/// <returns> offset gap, added to the next token emitted from <see cref="TokenStream(String,System.IO.TextReader)" />
-		/// </returns>
-		public virtual int GetOffsetGap(IFieldable field)
-		{
-			return field.IsTokenized ? 1 : 0;
-		}
+        }
+        
+        
+        /// <summary> Invoked before indexing a Fieldable instance if
+        /// terms have already been added to that field.  This allows custom
+        /// analyzers to place an automatic position increment gap between
+        /// Fieldable instances using the same field name.  The default value
+        /// position increment gap is 0.  With a 0 position increment gap and
+        /// the typical default token position increment of 1, all terms in a field,
+        /// including across Fieldable instances, are in successive positions, allowing
+        /// exact PhraseQuery matches, for instance, across Fieldable instance boundaries.
+        /// 
+        /// </summary>
+        /// <param name="fieldName">Fieldable name being indexed.
+        /// </param>
+        /// <returns> position increment gap, added to the next token emitted from <see cref="TokenStream(String,System.IO.TextReader)" />
+        /// </returns>
+        public virtual int GetPositionIncrementGap(String fieldName)
+        {
+            return 0;
+        }
+        
+        /// <summary> Just like <see cref="GetPositionIncrementGap" />, except for
+        /// Token offsets instead.  By default this returns 1 for
+        /// tokenized fields, as if the fields were joined
+        /// with an extra space character, and 0 for un-tokenized
+        /// fields.  This method is only called if the field
+        /// produced at least one token for indexing.
+        /// 
+        /// </summary>
+        /// <param name="field">the field just indexed
+        /// </param>
+        /// <returns> offset gap, added to the next token emitted from <see cref="TokenStream(String,System.IO.TextReader)" />
+        /// </returns>
+        public virtual int GetOffsetGap(IFieldable field)
+        {
+            return field.IsTokenized ? 1 : 0;
+        }
 
-		/// <summary>Frees persistent resources used by this Analyzer </summary>
-		public void  Close()
-		{
-		    Dispose();
-		}
+        /// <summary>Frees persistent resources used by this Analyzer </summary>
+        public void  Close()
+        {
+            Dispose();
+        }
 
         public virtual void Dispose()
         {
@@ -167,5 +167,5 @@ namespace Lucene.Net.Analysis
             }
             isDisposed = true;
         }
-	}
+    }
 }
\ No newline at end of file

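GetPositionIncrementGap above is the usual hook for keeping phrase queries from matching across multiple values of one field. A minimal sketch of an Analyzer override; WhitespaceTokenizer is the stock Lucene.Net tokenizer, and the gap of 100 is an arbitrary illustrative value:

    using Lucene.Net.Analysis;

    class GappedAnalyzer : Analyzer
    {
        public override TokenStream TokenStream(string fieldName, System.IO.TextReader reader)
        {
            return new WhitespaceTokenizer(reader);
        }

        public override int GetPositionIncrementGap(string fieldName)
        {
            return 100; // default is 0: successive positions across field instances
        }
    }
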
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/BaseCharFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/BaseCharFilter.cs b/src/core/Analysis/BaseCharFilter.cs
index b84fce0..7c91e1c 100644
--- a/src/core/Analysis/BaseCharFilter.cs
+++ b/src/core/Analysis/BaseCharFilter.cs
@@ -68,7 +68,7 @@ namespace Lucene.Net.Analysis
 
             if (currentOff < offsets[mid])
                 return mid == 0 ? currentOff : currentOff + diffs[mid - 1];
-        	return currentOff + diffs[mid];
+            return currentOff + diffs[mid];
         }
 
         protected int LastCumulativeDiff

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/CachingTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/CachingTokenFilter.cs b/src/core/Analysis/CachingTokenFilter.cs
index c5f7694..3661362 100644
--- a/src/core/Analysis/CachingTokenFilter.cs
+++ b/src/core/Analysis/CachingTokenFilter.cs
@@ -17,70 +17,70 @@
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> This class can be used if the token attributes of a TokenStream
-	/// are intended to be consumed more than once. It caches
-	/// all token attribute states locally in a List.
-	/// 
-	/// <p/>CachingTokenFilter implements the optional method
-	/// <see cref="TokenStream.Reset()" />, which repositions the
-	/// stream to the first Token. 
-	/// </summary>
-	public sealed class CachingTokenFilter : TokenFilter
-	{
+    
+    /// <summary> This class can be used if the token attributes of a TokenStream
+    /// are intended to be consumed more than once. It caches
+    /// all token attribute states locally in a List.
+    /// 
+    /// <p/>CachingTokenFilter implements the optional method
+    /// <see cref="TokenStream.Reset()" />, which repositions the
+    /// stream to the first Token. 
+    /// </summary>
+    public sealed class CachingTokenFilter : TokenFilter
+    {
         private System.Collections.Generic.LinkedList<State> cache = null;
-		private System.Collections.Generic.IEnumerator<State> iterator = null;
-		private State finalState;
-		
-		public CachingTokenFilter(TokenStream input):base(input)
-		{
-		}
+        private System.Collections.Generic.IEnumerator<State> iterator = null;
+        private State finalState;
+        
+        public CachingTokenFilter(TokenStream input):base(input)
+        {
+        }
 
-		public override bool IncrementToken()
-		{
-			if (cache == null)
-			{
-				// fill cache lazily
-				cache = new System.Collections.Generic.LinkedList<State>();
-				FillCache();
-				iterator = cache.GetEnumerator();
-			}
-			
-			if (!iterator.MoveNext())
-			{
-				// the cache is exhausted, return false
-				return false;
-			}
-			// Since the TokenFilter can be reset, the tokens need to be preserved as immutable.
-			RestoreState(iterator.Current);
-			return true;
-		}
-		
-		public override void  End()
-		{
-			if (finalState != null)
-			{
-				RestoreState(finalState);
-			}
-		}
-		
-		public override void  Reset()
-		{
-			if (cache != null)
-			{
-				iterator = cache.GetEnumerator();
-			}
-		}
-		
-		private void  FillCache()
-		{
-			while (input.IncrementToken())
-			{
-				cache.AddLast(CaptureState());
-			}
-			// capture final state
-			input.End();
-			finalState = CaptureState();
-		}
-	}
+        public override bool IncrementToken()
+        {
+            if (cache == null)
+            {
+                // fill cache lazily
+                cache = new System.Collections.Generic.LinkedList<State>();
+                FillCache();
+                iterator = cache.GetEnumerator();
+            }
+            
+            if (!iterator.MoveNext())
+            {
+                // the cache is exhausted, return false
+                return false;
+            }
+            // Since the TokenFilter can be reset, the tokens need to be preserved as immutable.
+            RestoreState(iterator.Current);
+            return true;
+        }
+        
+        public override void  End()
+        {
+            if (finalState != null)
+            {
+                RestoreState(finalState);
+            }
+        }
+        
+        public override void  Reset()
+        {
+            if (cache != null)
+            {
+                iterator = cache.GetEnumerator();
+            }
+        }
+        
+        private void  FillCache()
+        {
+            while (input.IncrementToken())
+            {
+                cache.AddLast(CaptureState());
+            }
+            // capture final state
+            input.End();
+            finalState = CaptureState();
+        }
+    }
 }
\ No newline at end of file

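A usage sketch for the filter above: the first pass lazily fills the cache, Reset() repositions to the first cached state, and the second pass replays it. The field name and sample text are illustrative:

    using Lucene.Net.Analysis;

    static class CacheSketch
    {
        public static void ConsumeTwice(Analyzer analyzer)
        {
            TokenStream ts = new CachingTokenFilter(
                analyzer.TokenStream("body", new System.IO.StringReader("hello world")));
            while (ts.IncrementToken()) { } // first pass fills the cache
            ts.Reset();                     // back to the first cached state
            while (ts.IncrementToken()) { } // second pass replays from the cache
        }
    }
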
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/CharArraySet.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/CharArraySet.cs b/src/core/Analysis/CharArraySet.cs
index e7df0ba..5564f74 100644
--- a/src/core/Analysis/CharArraySet.cs
+++ b/src/core/Analysis/CharArraySet.cs
@@ -300,8 +300,8 @@ namespace Lucene.Net.Analysis
 
         public bool Contains(object item)
         {
-        	var text = item as char[];
-        	return text != null ? Contains(text, 0, text.Length) : Contains(item.ToString());
+            var text = item as char[];
+            return text != null ? Contains(text, 0, text.Length) : Contains(item.ToString());
         }
 
         public bool Add(object item)
@@ -454,7 +454,7 @@ namespace Lucene.Net.Analysis
         /// </summary>
         public class CharArraySetEnumerator : IEnumerator<string>
         {
-        	readonly CharArraySet _Creator;
+            readonly CharArraySet _Creator;
             int pos = -1;
             char[] cur;
 

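A sketch exercising the Contains(object) dispatch above: a char[] is matched in place, anything else falls back to ToString(). The (startSize, ignoreCase) constructor and the Add(string) overload are assumed from this era's API:

    using Lucene.Net.Analysis;

    static class StopWordSketch
    {
        public static bool IsStopWord(char[] word)
        {
            var stopWords = new CharArraySet(16, true); // ignoreCase = true
            stopWords.Add("the");
            return stopWords.Contains(word); // char[] path avoids the ToString() fallback
        }
    }
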
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/CharFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/CharFilter.cs b/src/core/Analysis/CharFilter.cs
index 039f841..d761e6f 100644
--- a/src/core/Analysis/CharFilter.cs
+++ b/src/core/Analysis/CharFilter.cs
@@ -17,42 +17,42 @@
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> Subclasses of CharFilter can be chained to filter CharStream.
-	/// They can be used as <see cref="System.IO.TextReader" /> with additional offset
-	/// correction. <see cref="Tokenizer" />s will automatically use <see cref="CorrectOffset" />
-	/// if a CharFilter/CharStream subclass is used.
-	/// 
-	/// </summary>
-	/// <version>  $Id$
-	/// 
-	/// </version>
-	public abstract class CharFilter : CharStream
-	{
+    
+    /// <summary> Subclasses of CharFilter can be chained to filter CharStream.
+    /// They can be used as <see cref="System.IO.TextReader" /> with additional offset
+    /// correction. <see cref="Tokenizer" />s will automatically use <see cref="CorrectOffset" />
+    /// if a CharFilter/CharStream subclass is used.
+    /// 
+    /// </summary>
+    /// <version>  $Id$
+    /// 
+    /// </version>
+    public abstract class CharFilter : CharStream
+    {
         private long currentPosition = -1;
-	    private bool isDisposed;
-		protected internal CharStream input;
-		
-		protected internal CharFilter(CharStream in_Renamed) : base(in_Renamed)
-		{
-			input = in_Renamed;
-		}
-		
-		/// <summary>Subclass may want to override to correct the current offset.</summary>
-		/// <param name="currentOff">current offset</param>
-		/// <returns>corrected offset</returns>
-		protected internal virtual int Correct(int currentOff)
+        private bool isDisposed;
+        protected internal CharStream input;
+        
+        protected internal CharFilter(CharStream in_Renamed) : base(in_Renamed)
         {
-			return currentOff;
-		}
-		
-		/// <summary> Chains the corrected offset through the input
-		/// CharFilter.
-		/// </summary>
-		public override int CorrectOffset(int currentOff)
-		{
-			return input.CorrectOffset(Correct(currentOff));
-		}
+            input = in_Renamed;
+        }
+        
+        /// <summary>Subclass may want to override to correct the current offset.</summary>
+        /// <param name="currentOff">current offset</param>
+        /// <returns>corrected offset</returns>
+        protected internal virtual int Correct(int currentOff)
+        {
+            return currentOff;
+        }
+        
+        /// <summary> Chains the corrected offset through the input
+        /// CharFilter.
+        /// </summary>
+        public override int CorrectOffset(int currentOff)
+        {
+            return input.CorrectOffset(Correct(currentOff));
+        }
 
         protected override void Dispose(bool disposing)
         {
@@ -70,26 +70,26 @@ namespace Lucene.Net.Analysis
             isDisposed = true;
             base.Dispose(disposing);
         }
-		
-		public override int Read(System.Char[] cbuf, int off, int len)
+        
+        public override int Read(System.Char[] cbuf, int off, int len)
         {
-			return input.Read(cbuf, off, len);
-		}
-		
-		public bool MarkSupported()
+            return input.Read(cbuf, off, len);
+        }
+        
+        public bool MarkSupported()
         {
             return input.BaseStream.CanSeek;
-		}
-		
-		public void Mark(int readAheadLimit)
+        }
+        
+        public void Mark(int readAheadLimit)
         {
             currentPosition = input.BaseStream.Position;
-			input.BaseStream.Position = readAheadLimit;
-		}
-		
-		public void Reset()
+            input.BaseStream.Position = readAheadLimit;
+        }
+        
+        public void Reset()
         {
-			input.BaseStream.Position = currentPosition;
-		}
-	}
+            input.BaseStream.Position = currentPosition;
+        }
+    }
 }
\ No newline at end of file
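
A minimal sketch of a concrete CharFilter, to illustrate the Correct /
CorrectOffset contract above. This is hypothetical (not part of Lucene.Net):
a real filter would also override Read to actually drop characters; only
the offset bookkeeping is shown here.

    using Lucene.Net.Analysis;

    // Hypothetical: pretends `removed` chars were stripped from the front
    // of the input, so every output offset sits `removed` chars earlier
    // than the corresponding offset in the raw input.
    internal class SkipPrefixCharFilter : CharFilter
    {
        private readonly int removed;

        public SkipPrefixCharFilter(CharStream in_Renamed, int removed)
            : base(in_Renamed)
        {
            this.removed = removed;
        }

        // Map an output offset back to an input offset; CorrectOffset then
        // chains the result through the wrapped CharStream.
        protected internal override int Correct(int currentOff)
        {
            return currentOff + removed;
        }
    }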

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/CharReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/CharReader.cs b/src/core/Analysis/CharReader.cs
index 2120bd4..7dc9f50 100644
--- a/src/core/Analysis/CharReader.cs
+++ b/src/core/Analysis/CharReader.cs
@@ -17,41 +17,41 @@
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> CharReader is a Reader wrapper. It reads chars from
-	/// Reader and outputs <see cref="CharStream" />, defining an
-	/// identity function <see cref="CorrectOffset" /> method that
-	/// simply returns the provided offset.
-	/// </summary>
-	public sealed class CharReader:CharStream
-	{
+    
+    /// <summary> CharReader is a Reader wrapper. It reads chars from
+    /// Reader and outputs <see cref="CharStream" />, defining an
+    /// identity function <see cref="CorrectOffset" /> method that
+    /// simply returns the provided offset.
+    /// </summary>
+    public sealed class CharReader:CharStream
+    {
         private long currentPosition = -1;
 
-	    private bool isDisposed;
+        private bool isDisposed;
 
-		internal System.IO.StreamReader input;
-		
-		public static CharStream Get(System.IO.TextReader input)
-		{
-			var charStream = input as CharStream;
-			if (charStream != null)
-				return charStream;
-			
-			// {{Aroush-2.9}} isn't there a better (faster) way to do this?
-			var theString = new System.IO.MemoryStream(System.Text.Encoding.UTF8.GetBytes(input.ReadToEnd()));
-			return new CharReader(new System.IO.StreamReader(theString));
-			//return input is CharStream?(CharStream) input:new CharReader(input);
-		}
-		
-		private CharReader(System.IO.StreamReader in_Renamed) : base(in_Renamed)
-		{
-			input = in_Renamed;
-		}
-		
-		public override int CorrectOffset(int currentOff)
-		{
-			return currentOff;
-		}
+        internal System.IO.StreamReader input;
+        
+        public static CharStream Get(System.IO.TextReader input)
+        {
+            var charStream = input as CharStream;
+            if (charStream != null)
+                return charStream;
+            
+            // {{Aroush-2.9}} isn't there a better (faster) way to do this?
+            var theString = new System.IO.MemoryStream(System.Text.Encoding.UTF8.GetBytes(input.ReadToEnd()));
+            return new CharReader(new System.IO.StreamReader(theString));
+            //return input is CharStream?(CharStream) input:new CharReader(input);
+        }
+        
+        private CharReader(System.IO.StreamReader in_Renamed) : base(in_Renamed)
+        {
+            input = in_Renamed;
+        }
+        
+        public override int CorrectOffset(int currentOff)
+        {
+            return currentOff;
+        }
 
         protected override void Dispose(bool disposing)
         {
@@ -69,26 +69,26 @@ namespace Lucene.Net.Analysis
             isDisposed = true;
             base.Dispose(disposing);
         }
-		
-		public  override int Read(System.Char[] cbuf, int off, int len)
-		{
-			return input.Read(cbuf, off, len);
-		}
-		
-		public bool MarkSupported()
-		{
-			return input.BaseStream.CanSeek;
-		}
-		
-		public void  Mark(int readAheadLimit)
-		{
-			currentPosition = input.BaseStream.Position;
-			input.BaseStream.Position = readAheadLimit;
+        
+        public  override int Read(System.Char[] cbuf, int off, int len)
+        {
+            return input.Read(cbuf, off, len);
+        }
+        
+        public bool MarkSupported()
+        {
+            return input.BaseStream.CanSeek;
         }
-		
-		public void  Reset()
-		{
-			input.BaseStream.Position = currentPosition;
+        
+        public void  Mark(int readAheadLimit)
+        {
+            currentPosition = input.BaseStream.Position;
+            input.BaseStream.Position = readAheadLimit;
+        }
+        
+        public void  Reset()
+        {
+            input.BaseStream.Position = currentPosition;
         }
-	}
+    }
 }
\ No newline at end of file
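
Usage sketch for CharReader.Get, the factory Tokenizers rely on: an input
that is already a CharStream is returned unchanged, while a plain
TextReader is wrapped (via the UTF-8 round trip flagged by the
{{Aroush-2.9}} comment above).

    using System.IO;
    using Lucene.Net.Analysis;

    TextReader raw = new StringReader("hello world");
    CharStream stream = CharReader.Get(raw);

    // CharReader applies the identity correction: offsets pass through.
    int corrected = stream.CorrectOffset(6); // 6

    // Already a CharStream, so Get returns the same instance, unwrapped.
    CharStream same = CharReader.Get(stream);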

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/CharStream.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/CharStream.cs b/src/core/Analysis/CharStream.cs
index 0b36fe2..22aaaae 100644
--- a/src/core/Analysis/CharStream.cs
+++ b/src/core/Analysis/CharStream.cs
@@ -17,29 +17,29 @@
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> CharStream adds <see cref="CorrectOffset" />
-	/// functionality over <see cref="System.IO.TextReader" />.  All Tokenizers accept a
-	/// CharStream instead of <see cref="System.IO.TextReader" /> as input, which enables
-	/// arbitrary character based filtering before tokenization. 
-	/// The <see cref="CorrectOffset" /> method fixes offsets to account for
-	/// removal or insertion of characters, so that the offsets
-	/// reported in the tokens match the character offsets of the
-	/// original Reader.
+    
+    /// <summary> CharStream adds <see cref="CorrectOffset" />
+    /// functionality over <see cref="System.IO.TextReader" />.  All Tokenizers accept a
+    /// CharStream instead of <see cref="System.IO.TextReader" /> as input, which enables
+    /// arbitrary character based filtering before tokenization. 
+    /// The <see cref="CorrectOffset" /> method fixes offsets to account for
+    /// removal or insertion of characters, so that the offsets
+    /// reported in the tokens match the character offsets of the
+    /// original Reader.
     /// </summary>
-	public abstract class CharStream : System.IO.StreamReader
-	{
-	    protected CharStream(System.IO.StreamReader reader) : base(reader.BaseStream)
+    public abstract class CharStream : System.IO.StreamReader
+    {
+        protected CharStream(System.IO.StreamReader reader) : base(reader.BaseStream)
         {
         }
-		
-		/// <summary> Called by CharFilter(s) and Tokenizer to correct token offset.
-		/// 
-		/// </summary>
-		/// <param name="currentOff">offset as seen in the output
-		/// </param>
-		/// <returns> corrected offset based on the input
-		/// </returns>
-		public abstract int CorrectOffset(int currentOff);
-	}
+        
+        /// <summary> Called by CharFilter(s) and Tokenizer to correct token offset.
+        /// 
+        /// </summary>
+        /// <param name="currentOff">offset as seen in the output
+        /// </param>
+        /// <returns> corrected offset based on the input
+        /// </returns>
+        public abstract int CorrectOffset(int currentOff);
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/CharTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/CharTokenizer.cs b/src/core/Analysis/CharTokenizer.cs
index 22423ec..3c34664 100644
--- a/src/core/Analysis/CharTokenizer.cs
+++ b/src/core/Analysis/CharTokenizer.cs
@@ -20,116 +20,116 @@ using AttributeSource = Lucene.Net.Util.AttributeSource;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary>An abstract base class for simple, character-oriented tokenizers.</summary>
-	public abstract class CharTokenizer:Tokenizer
-	{
-	    protected CharTokenizer(System.IO.TextReader input):base(input)
-		{
-			offsetAtt = AddAttribute<IOffsetAttribute>();
+    
+    /// <summary>An abstract base class for simple, character-oriented tokenizers.</summary>
+    public abstract class CharTokenizer:Tokenizer
+    {
+        protected CharTokenizer(System.IO.TextReader input):base(input)
+        {
+            offsetAtt = AddAttribute<IOffsetAttribute>();
             termAtt = AddAttribute<ITermAttribute>();
-		}
+        }
 
-	    protected CharTokenizer(AttributeSource source, System.IO.TextReader input):base(source, input)
-		{
+        protected CharTokenizer(AttributeSource source, System.IO.TextReader input):base(source, input)
+        {
             offsetAtt = AddAttribute<IOffsetAttribute>();
             termAtt = AddAttribute<ITermAttribute>();
-		}
+        }
 
-	    protected CharTokenizer(AttributeFactory factory, System.IO.TextReader input):base(factory, input)
-		{
+        protected CharTokenizer(AttributeFactory factory, System.IO.TextReader input):base(factory, input)
+        {
             offsetAtt = AddAttribute<IOffsetAttribute>();
             termAtt = AddAttribute<ITermAttribute>();
-		}
-		
-		private int offset = 0, bufferIndex = 0, dataLen = 0;
-		private const int MAX_WORD_LEN = 255;
-		private const int IO_BUFFER_SIZE = 4096;
-		private readonly char[] ioBuffer = new char[IO_BUFFER_SIZE];
-		
-		private readonly ITermAttribute termAtt;
-		private readonly IOffsetAttribute offsetAtt;
-		
-		/// <summary>Returns true iff a character should be included in a token.  This
-		/// tokenizer generates as tokens adjacent sequences of characters which
-		/// satisfy this predicate.  Characters for which this is false are used to
-		/// define token boundaries and are not included in tokens. 
-		/// </summary>
-		protected internal abstract bool IsTokenChar(char c);
-		
-		/// <summary>Called on each token character to normalize it before it is added to the
-		/// token.  The default implementation does nothing.  Subclasses may use this
-		/// to, e.g., lowercase tokens. 
-		/// </summary>
-		protected internal virtual char Normalize(char c)
-		{
-			return c;
-		}
-		
-		public override bool IncrementToken()
-		{
-			ClearAttributes();
-			int length = 0;
-			int start = bufferIndex;
-			char[] buffer = termAtt.TermBuffer();
-			while (true)
-			{
-				
-				if (bufferIndex >= dataLen)
-				{
-					offset += dataLen;
-					dataLen = input.Read(ioBuffer, 0, ioBuffer.Length);
-					if (dataLen <= 0)
-					{
-						dataLen = 0; // so next offset += dataLen won't decrement offset
-						if (length > 0)
-							break;
-						return false;
-					}
-					bufferIndex = 0;
-				}
-				
-				char c = ioBuffer[bufferIndex++];
-				
-				if (IsTokenChar(c))
-				{
-					// if it's a token char
-					
-					if (length == 0)
-					// start of token
-						start = offset + bufferIndex - 1;
-					else if (length == buffer.Length)
-						buffer = termAtt.ResizeTermBuffer(1 + length);
-					
-					buffer[length++] = Normalize(c); // buffer it, normalized
-					
-					if (length == MAX_WORD_LEN)
-					// buffer overflow!
-						break;
-				}
-				else if (length > 0)
-				// at non-Letter w/ chars
-					break; // return 'em
-			}
-			
-			termAtt.SetTermLength(length);
-			offsetAtt.SetOffset(CorrectOffset(start), CorrectOffset(start + length));
-			return true;
-		}
-		
-		public override void  End()
-		{
-			// set final offset
-			int finalOffset = CorrectOffset(offset);
-			offsetAtt.SetOffset(finalOffset, finalOffset);
-		}
-		
-		public override void  Reset(System.IO.TextReader input)
-		{
-			base.Reset(input);
-			bufferIndex = 0;
-			offset = 0;
-			dataLen = 0;
-		}
-	}
+        }
+        
+        private int offset = 0, bufferIndex = 0, dataLen = 0;
+        private const int MAX_WORD_LEN = 255;
+        private const int IO_BUFFER_SIZE = 4096;
+        private readonly char[] ioBuffer = new char[IO_BUFFER_SIZE];
+        
+        private readonly ITermAttribute termAtt;
+        private readonly IOffsetAttribute offsetAtt;
+        
+        /// <summary>Returns true iff a character should be included in a token.  This
+        /// tokenizer generates as tokens adjacent sequences of characters which
+        /// satisfy this predicate.  Characters for which this is false are used to
+        /// define token boundaries and are not included in tokens. 
+        /// </summary>
+        protected internal abstract bool IsTokenChar(char c);
+        
+        /// <summary>Called on each token character to normalize it before it is added to the
+        /// token.  The default implementation does nothing.  Subclasses may use this
+        /// to, e.g., lowercase tokens. 
+        /// </summary>
+        protected internal virtual char Normalize(char c)
+        {
+            return c;
+        }
+        
+        public override bool IncrementToken()
+        {
+            ClearAttributes();
+            int length = 0;
+            int start = bufferIndex;
+            char[] buffer = termAtt.TermBuffer();
+            while (true)
+            {
+                
+                if (bufferIndex >= dataLen)
+                {
+                    offset += dataLen;
+                    dataLen = input.Read(ioBuffer, 0, ioBuffer.Length);
+                    if (dataLen <= 0)
+                    {
+                        dataLen = 0; // so next offset += dataLen won't decrement offset
+                        if (length > 0)
+                            break;
+                        return false;
+                    }
+                    bufferIndex = 0;
+                }
+                
+                char c = ioBuffer[bufferIndex++];
+                
+                if (IsTokenChar(c))
+                {
+                    // if it's a token char
+                    
+                    if (length == 0)
+                    // start of token
+                        start = offset + bufferIndex - 1;
+                    else if (length == buffer.Length)
+                        buffer = termAtt.ResizeTermBuffer(1 + length);
+                    
+                    buffer[length++] = Normalize(c); // buffer it, normalized
+                    
+                    if (length == MAX_WORD_LEN)
+                    // buffer overflow!
+                        break;
+                }
+                else if (length > 0)
+                // at non-Letter w/ chars
+                    break; // return 'em
+            }
+            
+            termAtt.SetTermLength(length);
+            offsetAtt.SetOffset(CorrectOffset(start), CorrectOffset(start + length));
+            return true;
+        }
+        
+        public override void  End()
+        {
+            // set final offset
+            int finalOffset = CorrectOffset(offset);
+            offsetAtt.SetOffset(finalOffset, finalOffset);
+        }
+        
+        public override void  Reset(System.IO.TextReader input)
+        {
+            base.Reset(input);
+            bufferIndex = 0;
+            offset = 0;
+            dataLen = 0;
+        }
+    }
 }
\ No newline at end of file
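
IsTokenChar and Normalize are the only extension points; the buffering,
offset bookkeeping, and 255-char MAX_WORD_LEN truncation all live in
IncrementToken above. A hypothetical subclass (not part of this commit):

    using Lucene.Net.Analysis;

    // Keeps runs of letters and digits, lowercasing as it buffers.
    internal sealed class LowerAlnumTokenizer : CharTokenizer
    {
        public LowerAlnumTokenizer(System.IO.TextReader input) : base(input)
        {
        }

        // Anything that is not a letter or digit ends the current token.
        protected internal override bool IsTokenChar(char c)
        {
            return System.Char.IsLetterOrDigit(c);
        }

        // Applied to each accepted char before it enters the term buffer.
        protected internal override char Normalize(char c)
        {
            return System.Char.ToLower(c);
        }
    }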

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/ISOLatin1AccentFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/ISOLatin1AccentFilter.cs b/src/core/Analysis/ISOLatin1AccentFilter.cs
index 5fd839e..a6fde44 100644
--- a/src/core/Analysis/ISOLatin1AccentFilter.cs
+++ b/src/core/Analysis/ISOLatin1AccentFilter.cs
@@ -1,4 +1,4 @@
-/* 
+/* 
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -20,325 +20,325 @@ using Lucene.Net.Analysis.Tokenattributes;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> A filter that replaces accented characters in the ISO Latin 1 character set 
-	/// (ISO-8859-1) by their unaccented equivalent. The case will not be altered.
-	/// <p/>
-	/// For instance, '&#192;' will be replaced by 'a'.
-	/// <p/>
-	/// 
-	/// </summary>
-	/// <deprecated> If you build a new index, use <see cref="ASCIIFoldingFilter"/>
-	/// which covers a superset of Latin 1.
-	/// This class is included for use with existing indexes and will be removed
-	/// in a future release (possibly Lucene 4.0)
-	/// </deprecated>
+    
+    /// <summary> A filter that replaces accented characters in the ISO Latin 1 character set 
+    /// (ISO-8859-1) by their unaccented equivalent. The case will not be altered.
+    /// <p/>
+    /// For instance, '&#192;' will be replaced by 'a'.
+    /// <p/>
+    /// 
+    /// </summary>
+    /// <deprecated> If you build a new index, use <see cref="ASCIIFoldingFilter"/>
+    /// which covers a superset of Latin 1.
+    /// This class is included for use with existing indexes and will be removed
+    /// in a future release (possibly Lucene 4.0)
+    /// </deprecated>
     [Obsolete("If you build a new index, use ASCIIFoldingFilter which covers a superset of Latin 1.  This class is included for use with existing indexes and will be removed in a future release (possible Lucene 4.0).")]
-	public class ISOLatin1AccentFilter : TokenFilter
-	{
-		public ISOLatin1AccentFilter(TokenStream input):base(input)
-		{
+    public class ISOLatin1AccentFilter : TokenFilter
+    {
+        public ISOLatin1AccentFilter(TokenStream input):base(input)
+        {
             termAtt = AddAttribute<ITermAttribute>();
-		}
-		
-		private char[] output = new char[256];
-		private int outputPos;
-		private readonly ITermAttribute termAtt;
-		
-		public override bool IncrementToken()
-		{
-			if (input.IncrementToken())
-			{
-				char[] buffer = termAtt.TermBuffer();
-				int length = termAtt.TermLength();
-				// If no characters actually require rewriting then we
-				// just return token as-is:
-				for (int i = 0; i < length; i++)
-				{
-					char c = buffer[i];
-					if (c >= '\u00c0' && c <= '\uFB06')
-					{
-						RemoveAccents(buffer, length);
-						termAtt.SetTermBuffer(output, 0, outputPos);
-						break;
-					}
-				}
-				return true;
-			}
-			return false;
-		}
+        }
+        
+        private char[] output = new char[256];
+        private int outputPos;
+        private readonly ITermAttribute termAtt;
+        
+        public override bool IncrementToken()
+        {
+            if (input.IncrementToken())
+            {
+                char[] buffer = termAtt.TermBuffer();
+                int length = termAtt.TermLength();
+                // If no characters actually require rewriting then we
+                // just return token as-is:
+                for (int i = 0; i < length; i++)
+                {
+                    char c = buffer[i];
+                    if (c >= '\u00c0' && c <= '\uFB06')
+                    {
+                        RemoveAccents(buffer, length);
+                        termAtt.SetTermBuffer(output, 0, outputPos);
+                        break;
+                    }
+                }
+                return true;
+            }
+            return false;
+        }
 
-		/// <summary> To replace accented characters in a String by unaccented equivalents.</summary>
-		public void  RemoveAccents(char[] input, int length)
-		{
-			
-			// Worst-case length required:
-			int maxSizeNeeded = 2 * length;
-			
-			int size = output.Length;
-			while (size < maxSizeNeeded)
-				size *= 2;
-			
-			if (size != output.Length)
-				output = new char[size];
-			
-			outputPos = 0;
-			
-			int pos = 0;
-			
-			for (int i = 0; i < length; i++, pos++)
-			{
-				char c = input[pos];
-				
-				// Quick test: if it's not in range then just keep
-				// current character
-				if (c < '\u00c0' || c > '\uFB06')
-					output[outputPos++] = c;
-				else
-				{
-					switch (c)
-					{
-						
-						case '\u00C0': 
-						// À
-						case '\u00C1': 
-						// Á
-						case '\u00C2': 
-						// Â
-						case '\u00C3': 
-						// Ã
-						case '\u00C4': 
-						// Ä
-						case '\u00C5':  // Å
-							output[outputPos++] = 'A';
-							break;
-						
-						case '\u00C6':  // Æ
-							output[outputPos++] = 'A';
-							output[outputPos++] = 'E';
-							break;
-						
-						case '\u00C7':  // Ç
-							output[outputPos++] = 'C';
-							break;
-						
-						case '\u00C8': 
-						// È
-						case '\u00C9': 
-						// É
-						case '\u00CA': 
-						// Ê
-						case '\u00CB':  // Ë
-							output[outputPos++] = 'E';
-							break;
-						
-						case '\u00CC': 
-						// Ì
-						case '\u00CD': 
-						// Í
-						case '\u00CE': 
-						// Î
-						case '\u00CF':  // Ï
-							output[outputPos++] = 'I';
-							break;
-						
-						case '\u0132':  // IJ
-							output[outputPos++] = 'I';
-							output[outputPos++] = 'J';
-							break;
-						
-						case '\u00D0':  // Ð
-							output[outputPos++] = 'D';
-							break;
-						
-						case '\u00D1':  // Ñ
-							output[outputPos++] = 'N';
-							break;
-						
-						case '\u00D2': 
-						// Ò
-						case '\u00D3': 
-						// Ó
-						case '\u00D4': 
-						// Ô
-						case '\u00D5': 
-						// Õ
-						case '\u00D6': 
-						// Ö
-						case '\u00D8':  // Ø
-							output[outputPos++] = 'O';
-							break;
-						
-						case '\u0152':  // Œ
-							output[outputPos++] = 'O';
-							output[outputPos++] = 'E';
-							break;
-						
-						case '\u00DE':  // Þ
-							output[outputPos++] = 'T';
-							output[outputPos++] = 'H';
-							break;
-						
-						case '\u00D9': 
-						// Ù
-						case '\u00DA': 
-						// Ú
-						case '\u00DB': 
-						// Û
-						case '\u00DC':  // Ü
-							output[outputPos++] = 'U';
-							break;
-						
-						case '\u00DD': 
-						// Ý
-						case '\u0178':  // Ÿ
-							output[outputPos++] = 'Y';
-							break;
-						
-						case '\u00E0': 
-						// à
-						case '\u00E1': 
-						// á
-						case '\u00E2': 
-						// â
-						case '\u00E3': 
-						// ã
-						case '\u00E4': 
-						// ä
-						case '\u00E5':  // å
-							output[outputPos++] = 'a';
-							break;
-						
-						case '\u00E6':  // æ
-							output[outputPos++] = 'a';
-							output[outputPos++] = 'e';
-							break;
-						
-						case '\u00E7':  // ç
-							output[outputPos++] = 'c';
-							break;
-						
-						case '\u00E8': 
-						// è
-						case '\u00E9': 
-						// é
-						case '\u00EA': 
-						// ê
-						case '\u00EB':  // ë
-							output[outputPos++] = 'e';
-							break;
-						
-						case '\u00EC': 
-						// ì
-						case '\u00ED': 
-						// í
-						case '\u00EE': 
-						// î
-						case '\u00EF':  // ï
-							output[outputPos++] = 'i';
-							break;
-						
-						case '\u0133':  // ij
-							output[outputPos++] = 'i';
-							output[outputPos++] = 'j';
-							break;
-						
-						case '\u00F0':  // ð
-							output[outputPos++] = 'd';
-							break;
-						
-						case '\u00F1':  // ñ
-							output[outputPos++] = 'n';
-							break;
-						
-						case '\u00F2': 
-						// ò
-						case '\u00F3': 
-						// ó
-						case '\u00F4': 
-						// ô
-						case '\u00F5': 
-						// õ
-						case '\u00F6': 
-						// ö
-						case '\u00F8':  // ø
-							output[outputPos++] = 'o';
-							break;
-						
-						case '\u0153':  // œ
-							output[outputPos++] = 'o';
-							output[outputPos++] = 'e';
-							break;
-						
-						case '\u00DF':  // ß
-							output[outputPos++] = 's';
-							output[outputPos++] = 's';
-							break;
-						
-						case '\u00FE':  // þ
-							output[outputPos++] = 't';
-							output[outputPos++] = 'h';
-							break;
-						
-						case '\u00F9': 
-						// ù
-						case '\u00FA': 
-						// ú
-						case '\u00FB': 
-						// û
-						case '\u00FC':  // ü
-							output[outputPos++] = 'u';
-							break;
-						
-						case '\u00FD': 
-						// ý
-						case '\u00FF':  // ÿ
-							output[outputPos++] = 'y';
-							break;
-						
-						case '\uFB00':  // ff
-							output[outputPos++] = 'f';
-							output[outputPos++] = 'f';
-							break;
-						
-						case '\uFB01':  // fi
-							output[outputPos++] = 'f';
-							output[outputPos++] = 'i';
-							break;
-						
-						case '\uFB02':  // fl
-							output[outputPos++] = 'f';
-							output[outputPos++] = 'l';
-							break;
-							// following 2 are commented as they can break the maxSizeNeeded (and doing *3 could be expensive)
-							//        case '\uFB03': // ffi
-							//            output[outputPos++] = 'f';
-							//            output[outputPos++] = 'f';
-							//            output[outputPos++] = 'i';
-							//            break;
-							//        case '\uFB04': // ffl
-							//            output[outputPos++] = 'f';
-							//            output[outputPos++] = 'f';
-							//            output[outputPos++] = 'l';
-							//            break;
-						
-						case '\uFB05':  // ſt
-							output[outputPos++] = 'f';
-							output[outputPos++] = 't';
-							break;
-						
-						case '\uFB06':  // st
-							output[outputPos++] = 's';
-							output[outputPos++] = 't';
-							break;
-						
-						default: 
-							output[outputPos++] = c;
-							break;
-						
-					}
-				}
-			}
-		}
-	}
+        /// <summary> To replace accented characters in a String by unaccented equivalents.</summary>
+        public void  RemoveAccents(char[] input, int length)
+        {
+            
+            // Worst-case length required:
+            int maxSizeNeeded = 2 * length;
+            
+            int size = output.Length;
+            while (size < maxSizeNeeded)
+                size *= 2;
+            
+            if (size != output.Length)
+                output = new char[size];
+            
+            outputPos = 0;
+            
+            int pos = 0;
+            
+            for (int i = 0; i < length; i++, pos++)
+            {
+                char c = input[pos];
+                
+                // Quick test: if it's not in range then just keep
+                // current character
+                if (c < '\u00c0' || c > '\uFB06')
+                    output[outputPos++] = c;
+                else
+                {
+                    switch (c)
+                    {
+                        
+                        case '\u00C0': 
+                        // À
+                        case '\u00C1': 
+                        // Á
+                        case '\u00C2': 
+                        // Â
+                        case '\u00C3': 
+                        // Ã
+                        case '\u00C4': 
+                        // Ä
+                        case '\u00C5':  // Å
+                            output[outputPos++] = 'A';
+                            break;
+                        
+                        case '\u00C6':  // Æ
+                            output[outputPos++] = 'A';
+                            output[outputPos++] = 'E';
+                            break;
+                        
+                        case '\u00C7':  // Ç
+                            output[outputPos++] = 'C';
+                            break;
+                        
+                        case '\u00C8': 
+                        // È
+                        case '\u00C9': 
+                        // É
+                        case '\u00CA': 
+                        // Ê
+                        case '\u00CB':  // Ë
+                            output[outputPos++] = 'E';
+                            break;
+                        
+                        case '\u00CC': 
+                        // Ì
+                        case '\u00CD': 
+                        // Í
+                        case '\u00CE': 
+                        // Î
+                        case '\u00CF':  // Ï
+                            output[outputPos++] = 'I';
+                            break;
+                        
+                        case '\u0132':  // IJ
+                            output[outputPos++] = 'I';
+                            output[outputPos++] = 'J';
+                            break;
+                        
+                        case '\u00D0':  // Ð
+                            output[outputPos++] = 'D';
+                            break;
+                        
+                        case '\u00D1':  // Ñ
+                            output[outputPos++] = 'N';
+                            break;
+                        
+                        case '\u00D2': 
+                        // Ò
+                        case '\u00D3': 
+                        // Ó
+                        case '\u00D4': 
+                        // Ô
+                        case '\u00D5': 
+                        // Õ
+                        case '\u00D6': 
+                        // Ö
+                        case '\u00D8':  // Ø
+                            output[outputPos++] = 'O';
+                            break;
+                        
+                        case '\u0152':  // Œ
+                            output[outputPos++] = 'O';
+                            output[outputPos++] = 'E';
+                            break;
+                        
+                        case '\u00DE':  // Þ
+                            output[outputPos++] = 'T';
+                            output[outputPos++] = 'H';
+                            break;
+                        
+                        case '\u00D9': 
+                        // Ù
+                        case '\u00DA': 
+                        // Ú
+                        case '\u00DB': 
+                        // Û
+                        case '\u00DC':  // Ü
+                            output[outputPos++] = 'U';
+                            break;
+                        
+                        case '\u00DD': 
+                        // Ý
+                        case '\u0178':  // Ÿ
+                            output[outputPos++] = 'Y';
+                            break;
+                        
+                        case '\u00E0': 
+                        // à
+                        case '\u00E1': 
+                        // á
+                        case '\u00E2': 
+                        // â
+                        case '\u00E3': 
+                        // ã
+                        case '\u00E4': 
+                        // ä
+                        case '\u00E5':  // å
+                            output[outputPos++] = 'a';
+                            break;
+                        
+                        case '\u00E6':  // æ
+                            output[outputPos++] = 'a';
+                            output[outputPos++] = 'e';
+                            break;
+                        
+                        case '\u00E7':  // ç
+                            output[outputPos++] = 'c';
+                            break;
+                        
+                        case '\u00E8': 
+                        // è
+                        case '\u00E9': 
+                        // é
+                        case '\u00EA': 
+                        // ê
+                        case '\u00EB':  // ë
+                            output[outputPos++] = 'e';
+                            break;
+                        
+                        case '\u00EC': 
+                        // ì
+                        case '\u00ED': 
+                        // í
+                        case '\u00EE': 
+                        // î
+                        case '\u00EF':  // ï
+                            output[outputPos++] = 'i';
+                            break;
+                        
+                        case '\u0133':  // ij
+                            output[outputPos++] = 'i';
+                            output[outputPos++] = 'j';
+                            break;
+                        
+                        case '\u00F0':  // ð
+                            output[outputPos++] = 'd';
+                            break;
+                        
+                        case '\u00F1':  // ñ
+                            output[outputPos++] = 'n';
+                            break;
+                        
+                        case '\u00F2': 
+                        // ò
+                        case '\u00F3': 
+                        // ó
+                        case '\u00F4': 
+                        // ô
+                        case '\u00F5': 
+                        // õ
+                        case '\u00F6': 
+                        // ö
+                        case '\u00F8':  // ø
+                            output[outputPos++] = 'o';
+                            break;
+                        
+                        case '\u0153':  // œ
+                            output[outputPos++] = 'o';
+                            output[outputPos++] = 'e';
+                            break;
+                        
+                        case '\u00DF':  // ß
+                            output[outputPos++] = 's';
+                            output[outputPos++] = 's';
+                            break;
+                        
+                        case '\u00FE':  // þ
+                            output[outputPos++] = 't';
+                            output[outputPos++] = 'h';
+                            break;
+                        
+                        case '\u00F9': 
+                        // ù
+                        case '\u00FA': 
+                        // ú
+                        case '\u00FB': 
+                        // û
+                        case '\u00FC':  // ü
+                            output[outputPos++] = 'u';
+                            break;
+                        
+                        case '\u00FD': 
+                        // ý
+                        case '\u00FF':  // ÿ
+                            output[outputPos++] = 'y';
+                            break;
+                        
+                        case '\uFB00':  // ff
+                            output[outputPos++] = 'f';
+                            output[outputPos++] = 'f';
+                            break;
+                        
+                        case '\uFB01':  // fi
+                            output[outputPos++] = 'f';
+                            output[outputPos++] = 'i';
+                            break;
+                        
+                        case '\uFB02':  // fl
+                            output[outputPos++] = 'f';
+                            output[outputPos++] = 'l';
+                            break;
+                            // following 2 are commented as they can break the maxSizeNeeded (and doing *3 could be expensive)
+                            //        case '\uFB03': // ffi
+                            //            output[outputPos++] = 'f';
+                            //            output[outputPos++] = 'f';
+                            //            output[outputPos++] = 'i';
+                            //            break;
+                            //        case '\uFB04': // ffl
+                            //            output[outputPos++] = 'f';
+                            //            output[outputPos++] = 'f';
+                            //            output[outputPos++] = 'l';
+                            //            break;
+                        
+                        case '\uFB05':  // ſt
+                            output[outputPos++] = 'f';
+                            output[outputPos++] = 't';
+                            break;
+                        
+                        case '\uFB06':  // st
+                            output[outputPos++] = 's';
+                            output[outputPos++] = 't';
+                            break;
+                        
+                        default: 
+                            output[outputPos++] = c;
+                            break;
+                        
+                    }
+                }
+            }
+        }
+    }
 }
\ No newline at end of file
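
As the Obsolete note says, new indexes should prefer ASCIIFoldingFilter;
for existing indexes the filter chains like any other TokenFilter. A usage
sketch built only from members visible in this commit (the consumption
loop is the usual AttributeSource pattern):

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;

    TokenStream ts = new ISOLatin1AccentFilter(
        new LetterTokenizer(new StringReader("Crème Brûlée")));
    ITermAttribute term = ts.AddAttribute<ITermAttribute>();
    while (ts.IncrementToken())
    {
        // Prints "Creme" then "Brulee": accents stripped, case untouched.
        System.Console.WriteLine(
            new string(term.TermBuffer(), 0, term.TermLength()));
    }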

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/KeywordAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/KeywordAnalyzer.cs b/src/core/Analysis/KeywordAnalyzer.cs
index 116babb..9083816 100644
--- a/src/core/Analysis/KeywordAnalyzer.cs
+++ b/src/core/Analysis/KeywordAnalyzer.cs
@@ -17,38 +17,38 @@
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> "Tokenizes" the entire stream as a single token. This is useful
-	/// for data like zip codes, ids, and some product names.
-	/// </summary>
-	public class KeywordAnalyzer:Analyzer
-	{
-		public KeywordAnalyzer()
-		{
+    
+    /// <summary> "Tokenizes" the entire stream as a single token. This is useful
+    /// for data like zip codes, ids, and some product names.
+    /// </summary>
+    public class KeywordAnalyzer:Analyzer
+    {
+        public KeywordAnalyzer()
+        {
             SetOverridesTokenStreamMethod<KeywordAnalyzer>();
-		}
-		public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
-		{
-			return new KeywordTokenizer(reader);
-		}
-		public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
-		{
-			if (overridesTokenStreamMethod)
-			{
-				// LUCENE-1678: force fallback to tokenStream() if we
-				// have been subclassed and that subclass overrides
-				// tokenStream but not reusableTokenStream
-				return TokenStream(fieldName, reader);
-			}
-			var tokenizer = (Tokenizer) PreviousTokenStream;
-			if (tokenizer == null)
-			{
-				tokenizer = new KeywordTokenizer(reader);
-				PreviousTokenStream = tokenizer;
-			}
-			else
-				tokenizer.Reset(reader);
-			return tokenizer;
-		}
-	}
+        }
+        public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
+        {
+            return new KeywordTokenizer(reader);
+        }
+        public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
+        {
+            if (overridesTokenStreamMethod)
+            {
+                // LUCENE-1678: force fallback to tokenStream() if we
+                // have been subclassed and that subclass overrides
+                // tokenStream but not reusableTokenStream
+                return TokenStream(fieldName, reader);
+            }
+            var tokenizer = (Tokenizer) PreviousTokenStream;
+            if (tokenizer == null)
+            {
+                tokenizer = new KeywordTokenizer(reader);
+                PreviousTokenStream = tokenizer;
+            }
+            else
+                tokenizer.Reset(reader);
+            return tokenizer;
+        }
+    }
 }
\ No newline at end of file
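
Usage sketch: one un-tokenized term per field value, which is what you
want for ids, zip codes, and product SKUs. ReusableTokenStream above hands
back a cached KeywordTokenizer unless a subclass forces the LUCENE-1678
fallback.

    using System.IO;
    using Lucene.Net.Analysis;

    var analyzer = new KeywordAnalyzer();
    TokenStream ts = analyzer.TokenStream("sku", new StringReader("AB-1234/X"));
    int tokens = 0;
    while (ts.IncrementToken())
        tokens++;
    // tokens == 1; the single term is the whole input, "AB-1234/X".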

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/KeywordTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/KeywordTokenizer.cs b/src/core/Analysis/KeywordTokenizer.cs
index f97ff95..38f6f8a 100644
--- a/src/core/Analysis/KeywordTokenizer.cs
+++ b/src/core/Analysis/KeywordTokenizer.cs
@@ -20,80 +20,80 @@ using AttributeSource = Lucene.Net.Util.AttributeSource;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> Emits the entire input as a single token.</summary>
-	public sealed class KeywordTokenizer:Tokenizer
-	{
-		
-		private const int DEFAULT_BUFFER_SIZE = 256;
-		
-		private bool done;
-		private int finalOffset;
-		private ITermAttribute termAtt;
-		private IOffsetAttribute offsetAtt;
-		
-		public KeywordTokenizer(System.IO.TextReader input):this(input, DEFAULT_BUFFER_SIZE)
-		{
-		}
-		
-		public KeywordTokenizer(System.IO.TextReader input, int bufferSize):base(input)
-		{
-			Init(bufferSize);
-		}
-		
-		public KeywordTokenizer(AttributeSource source, System.IO.TextReader input, int bufferSize):base(source, input)
-		{
-			Init(bufferSize);
-		}
-		
-		public KeywordTokenizer(AttributeFactory factory, System.IO.TextReader input, int bufferSize):base(factory, input)
-		{
-			Init(bufferSize);
-		}
-		
-		private void  Init(int bufferSize)
-		{
-			this.done = false;
+    
+    /// <summary> Emits the entire input as a single token.</summary>
+    public sealed class KeywordTokenizer:Tokenizer
+    {
+        
+        private const int DEFAULT_BUFFER_SIZE = 256;
+        
+        private bool done;
+        private int finalOffset;
+        private ITermAttribute termAtt;
+        private IOffsetAttribute offsetAtt;
+        
+        public KeywordTokenizer(System.IO.TextReader input):this(input, DEFAULT_BUFFER_SIZE)
+        {
+        }
+        
+        public KeywordTokenizer(System.IO.TextReader input, int bufferSize):base(input)
+        {
+            Init(bufferSize);
+        }
+        
+        public KeywordTokenizer(AttributeSource source, System.IO.TextReader input, int bufferSize):base(source, input)
+        {
+            Init(bufferSize);
+        }
+        
+        public KeywordTokenizer(AttributeFactory factory, System.IO.TextReader input, int bufferSize):base(factory, input)
+        {
+            Init(bufferSize);
+        }
+        
+        private void  Init(int bufferSize)
+        {
+            this.done = false;
             termAtt = AddAttribute<ITermAttribute>();
             offsetAtt = AddAttribute<IOffsetAttribute>();
-			termAtt.ResizeTermBuffer(bufferSize);
-		}
-		
-		public override bool IncrementToken()
-		{
-			if (!done)
-			{
-				ClearAttributes();
-				done = true;
-				int upto = 0;
-				char[] buffer = termAtt.TermBuffer();
-				while (true)
-				{
-					int length = input.Read(buffer, upto, buffer.Length - upto);
-					if (length == 0)
-						break;
-					upto += length;
-					if (upto == buffer.Length)
-						buffer = termAtt.ResizeTermBuffer(1 + buffer.Length);
-				}
-				termAtt.SetTermLength(upto);
-				finalOffset = CorrectOffset(upto);
-				offsetAtt.SetOffset(CorrectOffset(0), finalOffset);
-				return true;
-			}
-			return false;
-		}
-		
-		public override void  End()
-		{
-			// set final offset 
-			offsetAtt.SetOffset(finalOffset, finalOffset);
-		}
-		
-		public override void  Reset(System.IO.TextReader input)
-		{
-			base.Reset(input);
-			this.done = false;
-		}
-	}
+            termAtt.ResizeTermBuffer(bufferSize);
+        }
+        
+        public override bool IncrementToken()
+        {
+            if (!done)
+            {
+                ClearAttributes();
+                done = true;
+                int upto = 0;
+                char[] buffer = termAtt.TermBuffer();
+                while (true)
+                {
+                    int length = input.Read(buffer, upto, buffer.Length - upto);
+                    if (length == 0)
+                        break;
+                    upto += length;
+                    if (upto == buffer.Length)
+                        buffer = termAtt.ResizeTermBuffer(1 + buffer.Length);
+                }
+                termAtt.SetTermLength(upto);
+                finalOffset = CorrectOffset(upto);
+                offsetAtt.SetOffset(CorrectOffset(0), finalOffset);
+                return true;
+            }
+            return false;
+        }
+        
+        public override void  End()
+        {
+            // set final offset 
+            offsetAtt.SetOffset(finalOffset, finalOffset);
+        }
+        
+        public override void  Reset(System.IO.TextReader input)
+        {
+            base.Reset(input);
+            this.done = false;
+        }
+    }
 }
\ No newline at end of file
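
A reuse-oriented sketch: IncrementToken reads to EOF into one term
(resizing the term buffer as needed), then returns false until Reset
re-arms the `done` flag for the next input.

    using System.IO;
    using Lucene.Net.Analysis;

    var tokenizer = new KeywordTokenizer(new StringReader("first value"));
    while (tokenizer.IncrementToken())
    {
        // One pass: the whole string "first value" as a single token.
    }

    // Reuse the same instance on new input.
    tokenizer.Reset(new StringReader("second value"));
    while (tokenizer.IncrementToken())
    {
        // Again exactly one token: "second value".
    }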

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/LengthFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/LengthFilter.cs b/src/core/Analysis/LengthFilter.cs
index c4f60ad..1a9899f 100644
--- a/src/core/Analysis/LengthFilter.cs
+++ b/src/core/Analysis/LengthFilter.cs
@@ -19,42 +19,42 @@ using Lucene.Net.Analysis.Tokenattributes;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary>Removes words that are too long or too short from the stream.</summary>
-	public sealed class LengthFilter:TokenFilter
-	{
-		
-		internal int min;
-		internal int max;
-		
-		private readonly ITermAttribute termAtt;
-		
-		/// <summary> Build a filter that removes words that are too long or too
-		/// short from the text.
-		/// </summary>
-		public LengthFilter(TokenStream in_Renamed, int min, int max)
+    
+    /// <summary>Removes words that are too long or too short from the stream.</summary>
+    public sealed class LengthFilter:TokenFilter
+    {
+        
+        internal int min;
+        internal int max;
+        
+        private readonly ITermAttribute termAtt;
+        
+        /// <summary> Build a filter that removes words that are too long or too
+        /// short from the text.
+        /// </summary>
+        public LengthFilter(TokenStream in_Renamed, int min, int max)
             : base(in_Renamed)
-		{
-			this.min = min;
-			this.max = max;
+        {
+            this.min = min;
+            this.max = max;
             termAtt = AddAttribute<ITermAttribute>();
-		}
-		
-		/// <summary> Returns the next input Token whose term() is the right length</summary>
-		public override bool IncrementToken()
-		{
-			// return the first non-stop word found
-			while (input.IncrementToken())
-			{
-				var len = termAtt.TermLength();
-				if (len >= min && len <= max)
-				{
-					return true;
-				}
-				// note: else we ignore it but should we index each part of it?
-			}
-			// reached EOS -- return false
-			return false;
-		}
-	}
+        }
+        
+        /// <summary> Returns the next input Token whose term() is the right length</summary>
+        public override bool IncrementToken()
+        {
+            // return the first non-stop word found
+            while (input.IncrementToken())
+            {
+                var len = termAtt.TermLength();
+                if (len >= min && len <= max)
+                {
+                    return true;
+                }
+                // note: else we ignore it but should we index each part of it?
+            }
+            // reached EOS -- return false
+            return false;
+        }
+    }
 }
\ No newline at end of file
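
Usage sketch: with min = 3 and max = 8, only terms whose length falls in
[3, 8] survive the len >= min && len <= max test above.

    using System.IO;
    using System.Collections.Generic;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;

    TokenStream ts = new LengthFilter(
        new LetterTokenizer(new StringReader("a brief demonstration of it")),
        3, 8);
    ITermAttribute term = ts.AddAttribute<ITermAttribute>();
    var kept = new List<string>();
    while (ts.IncrementToken())
        kept.Add(new string(term.TermBuffer(), 0, term.TermLength()));
    // kept == ["brief"]: "a", "of", "it" are too short; "demonstration" is too long.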

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/LetterTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/LetterTokenizer.cs b/src/core/Analysis/LetterTokenizer.cs
index 77629a8..ecd0cae 100644
--- a/src/core/Analysis/LetterTokenizer.cs
+++ b/src/core/Analysis/LetterTokenizer.cs
@@ -19,39 +19,39 @@ using AttributeSource = Lucene.Net.Util.AttributeSource;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary>A LetterTokenizer is a tokenizer that divides text at non-letters.  That's
-	/// to say, it defines tokens as maximal strings of adjacent letters, as defined
-	/// by java.lang.Character.isLetter() predicate.
-	/// Note: this does a decent job for most European languages, but does a terrible
-	/// job for some Asian languages, where words are not separated by spaces. 
-	/// </summary>
-	
-	public class LetterTokenizer:CharTokenizer
-	{
-		/// <summary>Construct a new LetterTokenizer. </summary>
-		public LetterTokenizer(System.IO.TextReader @in):base(@in)
-		{
-		}
-		
-		/// <summary>Construct a new LetterTokenizer using a given <see cref="AttributeSource" />. </summary>
-		public LetterTokenizer(AttributeSource source, System.IO.TextReader @in)
-			: base(source, @in)
-		{
-		}
-		
-		/// <summary>Construct a new LetterTokenizer using a given <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" />. </summary>
-		public LetterTokenizer(AttributeFactory factory, System.IO.TextReader @in)
-			: base(factory, @in)
-		{
-		}
-		
-		/// <summary>Collects only characters which satisfy
-		/// <see cref="char.IsLetter(char)" />.
-		/// </summary>
-		protected internal override bool IsTokenChar(char c)
-		{
-			return System.Char.IsLetter(c);
-		}
-	}
+    
+    /// <summary>A LetterTokenizer is a tokenizer that divides text at non-letters.  That's
+    /// to say, it defines tokens as maximal strings of adjacent letters, as defined
+    /// by java.lang.Character.isLetter() predicate.
+    /// Note: this does a decent job for most European languages, but does a terrible
+    /// job for some Asian languages, where words are not separated by spaces. 
+    /// </summary>
+    
+    public class LetterTokenizer:CharTokenizer
+    {
+        /// <summary>Construct a new LetterTokenizer. </summary>
+        public LetterTokenizer(System.IO.TextReader @in):base(@in)
+        {
+        }
+        
+        /// <summary>Construct a new LetterTokenizer using a given <see cref="AttributeSource" />. </summary>
+        public LetterTokenizer(AttributeSource source, System.IO.TextReader @in)
+            : base(source, @in)
+        {
+        }
+        
+        /// <summary>Construct a new LetterTokenizer using a given <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" />. </summary>
+        public LetterTokenizer(AttributeFactory factory, System.IO.TextReader @in)
+            : base(factory, @in)
+        {
+        }
+        
+        /// <summary>Collects only characters which satisfy
+        /// <see cref="char.IsLetter(char)" />.
+        /// </summary>
+        protected internal override bool IsTokenChar(char c)
+        {
+            return System.Char.IsLetter(c);
+        }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/LowerCaseFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/LowerCaseFilter.cs b/src/core/Analysis/LowerCaseFilter.cs
index cad0197..b6dcca6 100644
--- a/src/core/Analysis/LowerCaseFilter.cs
+++ b/src/core/Analysis/LowerCaseFilter.cs
@@ -19,31 +19,31 @@ using Lucene.Net.Analysis.Tokenattributes;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary>Normalizes token text to lower case.</summary>
-	public sealed class LowerCaseFilter:TokenFilter
-	{
-		public LowerCaseFilter(TokenStream @in)
-			: base(@in)
-		{
+    
+    /// <summary>Normalizes token text to lower case.</summary>
+    public sealed class LowerCaseFilter:TokenFilter
+    {
+        public LowerCaseFilter(TokenStream @in)
+            : base(@in)
+        {
             termAtt = AddAttribute<ITermAttribute>();
-		}
-		
-		private readonly ITermAttribute termAtt;
-		
-		public override bool IncrementToken()
-		{
-			if (input.IncrementToken())
-			{
-				
-				char[] buffer = termAtt.TermBuffer();
-				int length = termAtt.TermLength();
-				for (int i = 0; i < length; i++)
-					buffer[i] = System.Char.ToLower(buffer[i]);
-				
-				return true;
-			}
-			return false;
-		}
-	}
+        }
+        
+        private readonly ITermAttribute termAtt;
+        
+        public override bool IncrementToken()
+        {
+            if (input.IncrementToken())
+            {
+                
+                char[] buffer = termAtt.TermBuffer();
+                int length = termAtt.TermLength();
+                for (int i = 0; i < length; i++)
+                    buffer[i] = System.Char.ToLower(buffer[i]);
+                
+                return true;
+            }
+            return false;
+        }
+    }
 }
\ No newline at end of file
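
The filter lowercases in place on the shared term buffer, so no new string
or array is allocated per token. Chained behind a LetterTokenizer it forms
the classic lowercasing pipeline:

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;

    TokenStream ts = new LowerCaseFilter(
        new LetterTokenizer(new StringReader("McCarthy LISP")));
    ITermAttribute term = ts.AddAttribute<ITermAttribute>();
    while (ts.IncrementToken())
    {
        // Prints "mccarthy" then "lisp".
        System.Console.WriteLine(
            new string(term.TermBuffer(), 0, term.TermLength()));
    }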

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/LowerCaseTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/LowerCaseTokenizer.cs b/src/core/Analysis/LowerCaseTokenizer.cs
index 4cea217..530b37c 100644
--- a/src/core/Analysis/LowerCaseTokenizer.cs
+++ b/src/core/Analysis/LowerCaseTokenizer.cs
@@ -19,42 +19,42 @@ using AttributeSource = Lucene.Net.Util.AttributeSource;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> LowerCaseTokenizer performs the function of LetterTokenizer
-	/// and LowerCaseFilter together.  It divides text at non-letters and converts
-	/// them to lower case.  While it is functionally equivalent to the combination
-	/// of LetterTokenizer and LowerCaseFilter, there is a performance advantage
-	/// to doing the two tasks at once, hence this (redundant) implementation.
-	/// <p/>
-	/// Note: this does a decent job for most European languages, but does a terrible
-	/// job for some Asian languages, where words are not separated by spaces.
-	/// </summary>
-	public sealed class LowerCaseTokenizer:LetterTokenizer
-	{
-		/// <summary>Construct a new LowerCaseTokenizer. </summary>
-		public LowerCaseTokenizer(System.IO.TextReader @in)
-			: base(@in)
-		{
-		}
-		
-		/// <summary>Construct a new LowerCaseTokenizer using a given <see cref="AttributeSource" />. </summary>
-		public LowerCaseTokenizer(AttributeSource source, System.IO.TextReader @in)
-			: base(source, @in)
-		{
-		}
-		
-		/// <summary>Construct a new LowerCaseTokenizer using a given <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" />. </summary>
-		public LowerCaseTokenizer(AttributeFactory factory, System.IO.TextReader @in)
-			: base(factory, @in)
-		{
-		}
-		
-		/// <summary>Converts char to lower case
-		/// <see cref="char.ToLower(char)" />.
-		/// </summary>
-		protected internal override char Normalize(char c)
-		{
-			return System.Char.ToLower(c);
-		}
-	}
+    
+    /// <summary> LowerCaseTokenizer performs the function of LetterTokenizer
+    /// and LowerCaseFilter together.  It divides text at non-letters and converts
+    /// them to lower case.  While it is functionally equivalent to the combination
+    /// of LetterTokenizer and LowerCaseFilter, there is a performance advantage
+    /// to doing the two tasks at once, hence this (redundant) implementation.
+    /// <p/>
+    /// Note: this does a decent job for most European languages, but does a terrible
+    /// job for some Asian languages, where words are not separated by spaces.
+    /// </summary>
+    public sealed class LowerCaseTokenizer:LetterTokenizer
+    {
+        /// <summary>Construct a new LowerCaseTokenizer. </summary>
+        public LowerCaseTokenizer(System.IO.TextReader @in)
+            : base(@in)
+        {
+        }
+        
+        /// <summary>Construct a new LowerCaseTokenizer using a given <see cref="AttributeSource" />. </summary>
+        public LowerCaseTokenizer(AttributeSource source, System.IO.TextReader @in)
+            : base(source, @in)
+        {
+        }
+        
+        /// <summary>Construct a new LowerCaseTokenizer using a given <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" />. </summary>
+        public LowerCaseTokenizer(AttributeFactory factory, System.IO.TextReader @in)
+            : base(factory, @in)
+        {
+        }
+        
+        /// <summary>Converts char to lower case
+        /// <see cref="char.ToLower(char)" />.
+        /// </summary>
+        protected internal override char Normalize(char c)
+        {
+            return System.Char.ToLower(c);
+        }
+    }
 }
\ No newline at end of file
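
As the summary above notes, this is the single-pass equivalent of chaining
LetterTokenizer and LowerCaseFilter; both streams below yield the same
tokens ("foo", "bar"), the first touching each character only once.

    using System.IO;
    using Lucene.Net.Analysis;

    // One pass: divide at non-letters and lowercase via Normalize.
    TokenStream onePass = new LowerCaseTokenizer(new StringReader("Foo Bar"));

    // Two passes: tokenize first, then lowercase each term buffer.
    TokenStream twoPass = new LowerCaseFilter(
        new LetterTokenizer(new StringReader("Foo Bar")));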

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/MappingCharFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/MappingCharFilter.cs b/src/core/Analysis/MappingCharFilter.cs
index 9705719..9dd1c6d 100644
--- a/src/core/Analysis/MappingCharFilter.cs
+++ b/src/core/Analysis/MappingCharFilter.cs
@@ -19,148 +19,148 @@ using System.Collections.Generic;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> Simplistic <see cref="CharFilter" /> that applies the mappings
-	/// contained in a <see cref="NormalizeCharMap" /> to the character
-	/// stream, and correcting the resulting changes to the
-	/// offsets.
-	/// </summary>
-	public class MappingCharFilter : BaseCharFilter
-	{
-		private readonly NormalizeCharMap normMap;
-		private LinkedList<char> buffer;
-		private System.String replacement;
-		private int charPointer;
-		private int nextCharCounter;
-		
-		/// Default constructor that takes a <see cref="CharStream" />.
-		public MappingCharFilter(NormalizeCharMap normMap, CharStream @in)
-			: base(@in)
-		{
-			this.normMap = normMap;
-		}
-		
-		/// Easy-use constructor that takes a <see cref="System.IO.TextReader" />.
-		public MappingCharFilter(NormalizeCharMap normMap, System.IO.TextReader @in)
-			: base(CharReader.Get(@in))
-		{
-			this.normMap = normMap;
-		}
-		
-		public  override int Read()
-		{
-			while (true)
-			{
-				if (replacement != null && charPointer < replacement.Length)
-				{
-					return replacement[charPointer++];
-				}
-				
-				int firstChar = NextChar();
-				if (firstChar == - 1)
-					return - 1;
-			    NormalizeCharMap nm = normMap.submap != null
-			                              ? normMap.submap[(char) firstChar]
-			                              : null;
-				if (nm == null)
-					return firstChar;
-				NormalizeCharMap result = Match(nm);
-				if (result == null)
-					return firstChar;
-				replacement = result.normStr;
-				charPointer = 0;
-				if (result.diff != 0)
-				{
-					int prevCumulativeDiff = LastCumulativeDiff;
-					if (result.diff < 0)
-					{
-						for (int i = 0; i < - result.diff; i++)
-							AddOffCorrectMap(nextCharCounter + i - prevCumulativeDiff, prevCumulativeDiff - 1 - i);
-					}
-					else
-					{
-						AddOffCorrectMap(nextCharCounter - result.diff - prevCumulativeDiff, prevCumulativeDiff + result.diff);
-					}
-				}
-			}
-		}
-		
-		private int NextChar()
-		{
-			nextCharCounter++;
-			if (buffer != null && buffer.Count != 0)
-			{
-				char tempObject = buffer.First.Value;
-				buffer.RemoveFirst();
-				return (tempObject);
-			}
-			return input.Read();
-		}
-		
-		private void  PushChar(int c)
-		{
-			nextCharCounter--;
-			if (buffer == null)
-			{
-				buffer = new LinkedList<char>();
-			}
-			buffer.AddFirst((char)c);
-		}
-		
-		private void  PushLastChar(int c)
-		{
-			if (buffer == null)
-			{
+    
+    /// <summary> Simplistic <see cref="CharFilter" /> that applies the mappings
+    /// contained in a <see cref="NormalizeCharMap" /> to the character
+    /// stream, correcting the offsets to account for the
+    /// resulting changes.
+    /// </summary>
+    public class MappingCharFilter : BaseCharFilter
+    {
+        private readonly NormalizeCharMap normMap;
+        private LinkedList<char> buffer;
+        private System.String replacement;
+        private int charPointer;
+        private int nextCharCounter;
+        
+        /// <summary>Construct a new MappingCharFilter over a <see cref="CharStream" />. </summary>
+        public MappingCharFilter(NormalizeCharMap normMap, CharStream @in)
+            : base(@in)
+        {
+            this.normMap = normMap;
+        }
+        
+        /// <summary>Convenience constructor that wraps a <see cref="System.IO.TextReader" /> via <see cref="CharReader.Get" />. </summary>
+        public MappingCharFilter(NormalizeCharMap normMap, System.IO.TextReader @in)
+            : base(CharReader.Get(@in))
+        {
+            this.normMap = normMap;
+        }
+        
+        public override int Read()
+        {
+            while (true)
+            {
+                if (replacement != null && charPointer < replacement.Length)
+                {
+                    return replacement[charPointer++];
+                }
+                
+                int firstChar = NextChar();
+                if (firstChar == -1)
+                    return -1;
+                NormalizeCharMap nm = normMap.submap != null
+                                          ? normMap.submap[(char) firstChar]
+                                          : null;
+                if (nm == null)
+                    return firstChar;
+                NormalizeCharMap result = Match(nm);
+                if (result == null)
+                    return firstChar;
+                replacement = result.normStr;
+                charPointer = 0;
+                if (result.diff != 0)
+                {
+                    int prevCumulativeDiff = LastCumulativeDiff;
+                    if (result.diff < 0)
+                    {
+                        for (int i = 0; i < - result.diff; i++)
+                            AddOffCorrectMap(nextCharCounter + i - prevCumulativeDiff, prevCumulativeDiff - 1 - i);
+                    }
+                    else
+                    {
+                        AddOffCorrectMap(nextCharCounter - result.diff - prevCumulativeDiff, prevCumulativeDiff + result.diff);
+                    }
+                }
+            }
+        }
+        
+        private int NextChar()
+        {
+            nextCharCounter++;
+            if (buffer != null && buffer.Count != 0)
+            {
+                char tempObject = buffer.First.Value;
+                buffer.RemoveFirst();
+                return tempObject;
+            }
+            return input.Read();
+        }
+        
+        private void PushChar(int c)
+        {
+            nextCharCounter--;
+            if (buffer == null)
+            {
                 buffer = new LinkedList<char>();
-			}
-			buffer.AddLast((char)c);
-		}
-		
-		private NormalizeCharMap Match(NormalizeCharMap map)
-		{
-			NormalizeCharMap result = null;
-			if (map.submap != null)
-			{
-				int chr = NextChar();
-				if (chr != - 1)
-				{
-					NormalizeCharMap subMap = map.submap[(char)chr];
-					if (subMap != null)
-					{
-						result = Match(subMap);
-					}
-					if (result == null)
-					{
-						PushChar(chr);
-					}
-				}
-			}
-			if (result == null && map.normStr != null)
-			{
-				result = map;
-			}
-			return result;
-		}
-		
-		public  override int Read(System.Char[] cbuf, int off, int len)
-		{
-			var tmp = new char[len];
-			int l = input.Read(tmp, 0, len);
-			if (l != 0)
-			{
-				for (int i = 0; i < l; i++)
-					PushLastChar(tmp[i]);
-			}
-			l = 0;
-			for (int i = off; i < off + len; i++)
-			{
-				int c = Read();
-				if (c == - 1)
-					break;
-				cbuf[i] = (char) c;
-				l++;
-			}
-			return l == 0?- 1:l;
-		}
-	}
+            }
+            buffer.AddFirst((char)c);
+        }
+        
+        private void PushLastChar(int c)
+        {
+            if (buffer == null)
+            {
+                buffer = new LinkedList<char>();
+            }
+            buffer.AddLast((char)c);
+        }
+        
+        private NormalizeCharMap Match(NormalizeCharMap map)
+        {
+            NormalizeCharMap result = null;
+            if (map.submap != null)
+            {
+                int chr = NextChar();
+                if (chr != -1)
+                {
+                    NormalizeCharMap subMap = map.submap[(char)chr];
+                    if (subMap != null)
+                    {
+                        result = Match(subMap);
+                    }
+                    if (result == null)
+                    {
+                        PushChar(chr);
+                    }
+                }
+            }
+            if (result == null && map.normStr != null)
+            {
+                result = map;
+            }
+            return result;
+        }
+        
+        public override int Read(System.Char[] cbuf, int off, int len)
+        {
+            var tmp = new char[len];
+            int l = input.Read(tmp, 0, len);
+            if (l != 0)
+            {
+                for (int i = 0; i < l; i++)
+                    PushLastChar(tmp[i]);
+            }
+            l = 0;
+            for (int i = off; i < off + len; i++)
+            {
+                int c = Read();
+                if (c == -1)
+                    break;
+                cbuf[i] = (char) c;
+                l++;
+            }
+            return l == 0 ? -1 : l;
+        }
+    }
 }
\ No newline at end of file
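
To see the two pieces above work together, here is a short sketch (the sample strings are hypothetical; the Add and Read calls match the signatures in this diff):

    using System;
    using Lucene.Net.Analysis;

    class MappingCharFilterDemo
    {
        static void Main()
        {
            var map = new NormalizeCharMap();
            map.Add("ph", "f");    // 2 chars -> 1 char, so diff = +1
            map.Add("&", "and");   // 1 char -> 3 chars, so diff = -2

            // Read() walks the map per input char and emits replacements,
            // while BaseCharFilter's offset-correction entries absorb each diff.
            var filter = new MappingCharFilter(map, new System.IO.StringReader("phone & pharma"));
            int c;
            while ((c = filter.Read()) != -1)
                Console.Write((char)c);    // prints: fone and farma
        }
    }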

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/NormalizeCharMap.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/NormalizeCharMap.cs b/src/core/Analysis/NormalizeCharMap.cs
index 7fd520c..5d6d558 100644
--- a/src/core/Analysis/NormalizeCharMap.cs
+++ b/src/core/Analysis/NormalizeCharMap.cs
@@ -19,50 +19,50 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> Holds a map of String input to String output, to be used
-	/// with <see cref="MappingCharFilter" />.
-	/// </summary>
-	public class NormalizeCharMap
-	{
-		internal System.Collections.Generic.IDictionary<char, NormalizeCharMap> submap;
-		internal System.String normStr;
-		internal int diff;
-		
-		/// <summary>Records a replacement to be applied to the inputs
-		/// stream.  Whenever <c>singleMatch</c> occurs in
-		/// the input, it will be replaced with
-		/// <c>replacement</c>.
-		/// 
-		/// </summary>
-		/// <param name="singleMatch">input String to be replaced
-		/// </param>
-		/// <param name="replacement">output String
-		/// </param>
-		public virtual void  Add(System.String singleMatch, System.String replacement)
-		{
-			NormalizeCharMap currMap = this;
-			for (var i = 0; i < singleMatch.Length; i++)
-			{
-				char c = singleMatch[i];
-				if (currMap.submap == null)
-				{
-					currMap.submap = new HashMap<char, NormalizeCharMap>(1);
-				}
-				var map = currMap.submap[c];
-				if (map == null)
-				{
-					map = new NormalizeCharMap();
-					currMap.submap[c] = map;
-				}
-				currMap = map;
-			}
-			if (currMap.normStr != null)
-			{
-				throw new System.SystemException("MappingCharFilter: there is already a mapping for " + singleMatch);
-			}
-			currMap.normStr = replacement;
-			currMap.diff = singleMatch.Length - replacement.Length;
-		}
-	}
+    
+    /// <summary> Holds a map of String input to String output, to be used
+    /// with <see cref="MappingCharFilter" />.
+    /// </summary>
+    public class NormalizeCharMap
+    {
+        internal System.Collections.Generic.IDictionary<char, NormalizeCharMap> submap;
+        internal System.String normStr;
+        internal int diff;
+        
+        /// <summary>Records a replacement to be applied to the input
+        /// stream.  Whenever <c>singleMatch</c> occurs in
+        /// the input, it will be replaced with
+        /// <c>replacement</c>.
+        /// 
+        /// </summary>
+        /// <param name="singleMatch">input String to be replaced
+        /// </param>
+        /// <param name="replacement">output String
+        /// </param>
+        public virtual void Add(System.String singleMatch, System.String replacement)
+        {
+            NormalizeCharMap currMap = this;
+            for (var i = 0; i < singleMatch.Length; i++)
+            {
+                char c = singleMatch[i];
+                if (currMap.submap == null)
+                {
+                    currMap.submap = new HashMap<char, NormalizeCharMap>(1);
+                }
+                var map = currMap.submap[c];
+                if (map == null)
+                {
+                    map = new NormalizeCharMap();
+                    currMap.submap[c] = map;
+                }
+                currMap = map;
+            }
+            if (currMap.normStr != null)
+            {
+                throw new System.SystemException("MappingCharFilter: there is already a mapping for " + singleMatch);
+            }
+            currMap.normStr = replacement;
+            currMap.diff = singleMatch.Length - replacement.Length;
+        }
+    }
 }
\ No newline at end of file
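
Note that Add() above builds a per-character trie: each char of singleMatch descends into (or lazily creates) a submap node, and the replacement string plus its length diff are stored on the final node. A small sketch of the resulting behavior (the mappings are hypothetical):

    using Lucene.Net.Analysis;

    class NormalizeCharMapDemo
    {
        static void Main()
        {
            var map = new NormalizeCharMap();
            map.Add("ss", "ß");    // stored two trie nodes deep; diff = 2 - 1 = +1
            map.Add("s", "z");     // fine: lives on the first-level 's' node
            map.Add("ss", "x");    // throws System.SystemException:
                                   // "MappingCharFilter: there is already a mapping for ss"
        }
    }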


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/IndexFileDeleter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IndexFileDeleter.cs b/src/core/Index/IndexFileDeleter.cs
index 3ac815d..c6afe73 100644
--- a/src/core/Index/IndexFileDeleter.cs
+++ b/src/core/Index/IndexFileDeleter.cs
@@ -59,111 +59,111 @@ namespace Lucene.Net.Index
     /// instantiating this class.  It opens segments_N file(s)
     /// directly with no retry logic.</para>
     /// </summary>
-	
-	public sealed class IndexFileDeleter : IDisposable
-	{
-		
-		//// Files that we tried to delete but failed (likely
-		/// because they are open and we are running on Windows),
-		/// so we will retry them again later: ////
-		private IList<string> deletable;
-		
-		//// Reference count for all files in the index.  
-		/// Counts how many existing commits reference a file.
-		/// Maps String to RefCount (class below) instances: ////
-		private IDictionary<string, RefCount> refCounts = new HashMap<string, RefCount>();
-		
-		//// Holds all commits (segments_N) currently in the index.
-		/// This will have just 1 commit if you are using the
-		/// default delete policy (KeepOnlyLastCommitDeletionPolicy).
-		/// Other policies may leave commit points live for longer
-		/// in which case this list would be longer than 1: ////
+    
+    public sealed class IndexFileDeleter : IDisposable
+    {
+        
+        //// Files that we tried to delete but failed (likely
+        /// because they are open and we are running on Windows),
+        /// so we will retry them again later: ////
+        private IList<string> deletable;
+        
+        //// Reference count for all files in the index.  
+        /// Counts how many existing commits reference a file.
+        /// Maps String to RefCount (class below) instances: ////
+        private IDictionary<string, RefCount> refCounts = new HashMap<string, RefCount>();
+        
+        //// Holds all commits (segments_N) currently in the index.
+        /// This will have just 1 commit if you are using the
+        /// default delete policy (KeepOnlyLastCommitDeletionPolicy).
+        /// Other policies may leave commit points live for longer
+        /// in which case this list would be longer than 1: ////
         private List<CommitPoint> commits = new List<CommitPoint>();
-		
-		//// Holds files we had incref'd from the previous
-		/// non-commit checkpoint: ////
+        
+        //// Holds files we had incref'd from the previous
+        /// non-commit checkpoint: ////
         private List<ICollection<string>> lastFiles = new List<ICollection<string>>();
-		
-		//// Commits that the IndexDeletionPolicy have decided to delete: ////
+        
+        //// Commits that the IndexDeletionPolicy have decided to delete: ////
         private List<CommitPoint> commitsToDelete = new List<CommitPoint>();
-		
-		private System.IO.StreamWriter infoStream;
-		private Directory directory;
-		private IndexDeletionPolicy policy;
-		private DocumentsWriter docWriter;
-		
-		internal bool startingCommitDeleted;
+        
+        private System.IO.StreamWriter infoStream;
+        private Directory directory;
+        private IndexDeletionPolicy policy;
+        private DocumentsWriter docWriter;
+        
+        internal bool startingCommitDeleted;
         private SegmentInfos lastSegmentInfos;
 
         private HashSet<string> synced;
-		
-		/// <summary>Change to true to see details of reference counts when
-		/// infoStream != null 
-		/// </summary>
-		public static bool VERBOSE_REF_COUNTS = false;
-		
-		internal void  SetInfoStream(System.IO.StreamWriter infoStream)
-		{
-			this.infoStream = infoStream;
-			if (infoStream != null)
-			{
-				Message("setInfoStream deletionPolicy=" + policy);
-			}
-		}
-		
-		private void  Message(System.String message)
-		{
+        
+        /// <summary>Change to true to see details of reference counts when
+        /// infoStream != null 
+        /// </summary>
+        public static bool VERBOSE_REF_COUNTS = false;
+        
+        internal void SetInfoStream(System.IO.StreamWriter infoStream)
+        {
+            this.infoStream = infoStream;
+            if (infoStream != null)
+            {
+                Message("setInfoStream deletionPolicy=" + policy);
+            }
+        }
+        
+        private void Message(System.String message)
+        {
             infoStream.WriteLine("IFD [" + new DateTime().ToString() + "; " + ThreadClass.Current().Name + "]: " + message);
-		}
-		
-		/// <summary> Initialize the deleter: find all previous commits in
-		/// the Directory, incref the files they reference, call
-		/// the policy to let it delete commits.  This will remove
-		/// any files not referenced by any of the commits.
-		/// </summary>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  IOException if there is a low-level IO error </throws>
+        }
+        
+        /// <summary> Initialize the deleter: find all previous commits in
+        /// the Directory, incref the files they reference, call
+        /// the policy to let it delete commits.  This will remove
+        /// any files not referenced by any of the commits.
+        /// </summary>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  IOException if there is a low-level IO error </throws>
         public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, System.IO.StreamWriter infoStream, DocumentsWriter docWriter, HashSet<string> synced)
-		{
-			
-			this.docWriter = docWriter;
-			this.infoStream = infoStream;
+        {
+            
+            this.docWriter = docWriter;
+            this.infoStream = infoStream;
             this.synced = synced;
-			
-			if (infoStream != null)
-			{
-				Message("init: current segments file is \"" + segmentInfos.GetCurrentSegmentFileName() + "\"; deletionPolicy=" + policy);
-			}
-			
-			this.policy = policy;
-			this.directory = directory;
-			
-			// First pass: walk the files and initialize our ref
-			// counts:
-			long currentGen = segmentInfos.Generation;
-			IndexFileNameFilter filter = IndexFileNameFilter.Filter;
-			
-			System.String[] files = directory.ListAll();
-			
-			CommitPoint currentCommitPoint = null;
-			
-			for (int i = 0; i < files.Length; i++)
-			{
-				
-				System.String fileName = files[i];
-				
-				if (filter.Accept(null, fileName) && !fileName.Equals(IndexFileNames.SEGMENTS_GEN))
-				{
-					
-					// Add this file to refCounts with initial count 0:
-					GetRefCount(fileName);
-					
-					if (fileName.StartsWith(IndexFileNames.SEGMENTS))
-					{
-						
-						// This is a commit (segments or segments_N), and
-						// it's valid (<= the max gen).  Load it, then
-						// incref all files it refers to:
+            
+            if (infoStream != null)
+            {
+                Message("init: current segments file is \"" + segmentInfos.GetCurrentSegmentFileName() + "\"; deletionPolicy=" + policy);
+            }
+            
+            this.policy = policy;
+            this.directory = directory;
+            
+            // First pass: walk the files and initialize our ref
+            // counts:
+            long currentGen = segmentInfos.Generation;
+            IndexFileNameFilter filter = IndexFileNameFilter.Filter;
+            
+            System.String[] files = directory.ListAll();
+            
+            CommitPoint currentCommitPoint = null;
+            
+            for (int i = 0; i < files.Length; i++)
+            {
+                
+                System.String fileName = files[i];
+                
+                if (filter.Accept(null, fileName) && !fileName.Equals(IndexFileNames.SEGMENTS_GEN))
+                {
+                    
+                    // Add this file to refCounts with initial count 0:
+                    GetRefCount(fileName);
+                    
+                    if (fileName.StartsWith(IndexFileNames.SEGMENTS))
+                    {
+                        
+                        // This is a commit (segments or segments_N), and
+                        // it's valid (<= the max gen).  Load it, then
+                        // incref all files it refers to:
                         if (infoStream != null)
                         {
                             Message("init: load commit \"" + fileName + "\"");
@@ -216,68 +216,68 @@ namespace Lucene.Net.Index
                             {
                                 lastSegmentInfos = sis;
                             }
-						}
-					}
-				}
-			}
-			
-			if (currentCommitPoint == null)
-			{
-				// We did not in fact see the segments_N file
-				// corresponding to the segmentInfos that was passed
-				// in.  Yet, it must exist, because our caller holds
-				// the write lock.  This can happen when the directory
-				// listing was stale (eg when index accessed via NFS
-				// client with stale directory listing cache).  So we
-				// try now to explicitly open this commit point:
-				SegmentInfos sis = new SegmentInfos();
-				try
-				{
-					sis.Read(directory, segmentInfos.GetCurrentSegmentFileName());
-				}
-				catch (System.IO.IOException)
-				{
-					throw new CorruptIndexException("failed to locate current segments_N file");
-				}
-				if (infoStream != null)
-					Message("forced open of current segments file " + segmentInfos.GetCurrentSegmentFileName());
-				currentCommitPoint = new CommitPoint(this, commitsToDelete, directory, sis);
-				commits.Add(currentCommitPoint);
-				IncRef(sis, true);
-			}
-			
-			// We keep commits list in sorted order (oldest to newest):
-			commits.Sort();
-			
-			// Now delete anything with ref count at 0.  These are
-			// presumably abandoned files eg due to crash of
-			// IndexWriter.
-			foreach(KeyValuePair<string, RefCount> entry in refCounts)
-			{
+                        }
+                    }
+                }
+            }
+            
+            if (currentCommitPoint == null)
+            {
+                // We did not in fact see the segments_N file
+                // corresponding to the segmentInfos that was passed
+                // in.  Yet, it must exist, because our caller holds
+                // the write lock.  This can happen when the directory
+                // listing was stale (eg when index accessed via NFS
+                // client with stale directory listing cache).  So we
+                // try now to explicitly open this commit point:
+                SegmentInfos sis = new SegmentInfos();
+                try
+                {
+                    sis.Read(directory, segmentInfos.GetCurrentSegmentFileName());
+                }
+                catch (System.IO.IOException)
+                {
+                    throw new CorruptIndexException("failed to locate current segments_N file");
+                }
+                if (infoStream != null)
+                    Message("forced open of current segments file " + segmentInfos.GetCurrentSegmentFileName());
+                currentCommitPoint = new CommitPoint(this, commitsToDelete, directory, sis);
+                commits.Add(currentCommitPoint);
+                IncRef(sis, true);
+            }
+            
+            // We keep commits list in sorted order (oldest to newest):
+            commits.Sort();
+            
+            // Now delete anything with ref count at 0.  These are
+            // presumably abandoned files eg due to crash of
+            // IndexWriter.
+            foreach(KeyValuePair<string, RefCount> entry in refCounts)
+            {
                 string fileName = entry.Key;
-				RefCount rc = refCounts[fileName];
-				if (0 == rc.count)
-				{
-					if (infoStream != null)
-					{
-						Message("init: removing unreferenced file \"" + fileName + "\"");
-					}
-					DeleteFile(fileName);
-				}
-			}
-			
-			// Finally, give policy a chance to remove things on
-			// startup:
-			policy.OnInit(commits);
-			
-			// Always protect the incoming segmentInfos since
-			// sometime it may not be the most recent commit
-			Checkpoint(segmentInfos, false);
-			
-			startingCommitDeleted = currentCommitPoint.IsDeleted;
-			
-			DeleteCommits();
-		}
+                RefCount rc = refCounts[fileName];
+                if (0 == rc.count)
+                {
+                    if (infoStream != null)
+                    {
+                        Message("init: removing unreferenced file \"" + fileName + "\"");
+                    }
+                    DeleteFile(fileName);
+                }
+            }
+            
+            // Finally, give policy a chance to remove things on
+            // startup:
+            policy.OnInit(commits);
+            
+            // Always protect the incoming segmentInfos since
+            // sometimes it may not be the most recent commit
+            Checkpoint(segmentInfos, false);
+            
+            startingCommitDeleted = currentCommitPoint.IsDeleted;
+            
+            DeleteCommits();
+        }
 
         public SegmentInfos LastSegmentInfos
         {
@@ -285,264 +285,264 @@ namespace Lucene.Net.Index
         }
 
         /// <summary> Remove the CommitPoints in the commitsToDelete List by
-		/// DecRef'ing all files from each SegmentInfos.
-		/// </summary>
-		private void  DeleteCommits()
-		{
-			
-			int size = commitsToDelete.Count;
-			
-			if (size > 0)
-			{
-				
-				// First decref all files that had been referred to by
-				// the now-deleted commits:
-				for (int i = 0; i < size; i++)
-				{
-					CommitPoint commit = commitsToDelete[i];
-					if (infoStream != null)
-					{
-						Message("deleteCommits: now decRef commit \"" + commit.SegmentsFileName + "\"");
-					}
-					foreach(string file in commit.files)
-					{
-						DecRef(file);
-					}
-				}
-				commitsToDelete.Clear();
-				
-				// Now compact commits to remove deleted ones (preserving the sort):
-				size = commits.Count;
-				int readFrom = 0;
-				int writeTo = 0;
-				while (readFrom < size)
-				{
-					CommitPoint commit = commits[readFrom];
-					if (!commit.deleted)
-					{
-						if (writeTo != readFrom)
-						{
-							commits[writeTo] = commits[readFrom];
-						}
-						writeTo++;
-					}
-					readFrom++;
-				}
-				
-				while (size > writeTo)
-				{
-					commits.RemoveAt(size - 1);
-					size--;
-				}
-			}
-		}
-		
-		/// <summary> Writer calls this when it has hit an error and had to
-		/// roll back, to tell us that there may now be
-		/// unreferenced files in the filesystem.  So we re-list
-		/// the filesystem and delete such files.  If segmentName
-		/// is non-null, we will only delete files corresponding to
-		/// that segment.
-		/// </summary>
-		public void  Refresh(System.String segmentName)
-		{
-			System.String[] files = directory.ListAll();
-			IndexFileNameFilter filter = IndexFileNameFilter.Filter;
-			System.String segmentPrefix1;
-			System.String segmentPrefix2;
-			if (segmentName != null)
-			{
-				segmentPrefix1 = segmentName + ".";
-				segmentPrefix2 = segmentName + "_";
-			}
-			else
-			{
-				segmentPrefix1 = null;
-				segmentPrefix2 = null;
-			}
-			
-			for (int i = 0; i < files.Length; i++)
-			{
-				System.String fileName = files[i];
-				if (filter.Accept(null, fileName) && (segmentName == null || fileName.StartsWith(segmentPrefix1) || fileName.StartsWith(segmentPrefix2)) && !refCounts.ContainsKey(fileName) && !fileName.Equals(IndexFileNames.SEGMENTS_GEN))
-				{
-					// Unreferenced file, so remove it
-					if (infoStream != null)
-					{
-						Message("refresh [prefix=" + segmentName + "]: removing newly created unreferenced file \"" + fileName + "\"");
-					}
-					DeleteFile(fileName);
-				}
-			}
-		}
-		
-		public void  Refresh()
-		{
-			Refresh(null);
-		}
-		
-		public void Dispose()
-		{
+        /// DecRef'ing all files from each SegmentInfos.
+        /// </summary>
+        private void DeleteCommits()
+        {
+            
+            int size = commitsToDelete.Count;
+            
+            if (size > 0)
+            {
+                
+                // First decref all files that had been referred to by
+                // the now-deleted commits:
+                for (int i = 0; i < size; i++)
+                {
+                    CommitPoint commit = commitsToDelete[i];
+                    if (infoStream != null)
+                    {
+                        Message("deleteCommits: now decRef commit \"" + commit.SegmentsFileName + "\"");
+                    }
+                    foreach(string file in commit.files)
+                    {
+                        DecRef(file);
+                    }
+                }
+                commitsToDelete.Clear();
+                
+                // Now compact commits to remove deleted ones (preserving the sort):
+                size = commits.Count;
+                int readFrom = 0;
+                int writeTo = 0;
+                while (readFrom < size)
+                {
+                    CommitPoint commit = commits[readFrom];
+                    if (!commit.deleted)
+                    {
+                        if (writeTo != readFrom)
+                        {
+                            commits[writeTo] = commits[readFrom];
+                        }
+                        writeTo++;
+                    }
+                    readFrom++;
+                }
+                
+                while (size > writeTo)
+                {
+                    commits.RemoveAt(size - 1);
+                    size--;
+                }
+            }
+        }
+        
+        /// <summary> Writer calls this when it has hit an error and had to
+        /// roll back, to tell us that there may now be
+        /// unreferenced files in the filesystem.  So we re-list
+        /// the filesystem and delete such files.  If segmentName
+        /// is non-null, we will only delete files corresponding to
+        /// that segment.
+        /// </summary>
+        public void Refresh(System.String segmentName)
+        {
+            System.String[] files = directory.ListAll();
+            IndexFileNameFilter filter = IndexFileNameFilter.Filter;
+            System.String segmentPrefix1;
+            System.String segmentPrefix2;
+            if (segmentName != null)
+            {
+                segmentPrefix1 = segmentName + ".";
+                segmentPrefix2 = segmentName + "_";
+            }
+            else
+            {
+                segmentPrefix1 = null;
+                segmentPrefix2 = null;
+            }
+            
+            for (int i = 0; i < files.Length; i++)
+            {
+                System.String fileName = files[i];
+                if (filter.Accept(null, fileName) && (segmentName == null || fileName.StartsWith(segmentPrefix1) || fileName.StartsWith(segmentPrefix2)) && !refCounts.ContainsKey(fileName) && !fileName.Equals(IndexFileNames.SEGMENTS_GEN))
+                {
+                    // Unreferenced file, so remove it
+                    if (infoStream != null)
+                    {
+                        Message("refresh [prefix=" + segmentName + "]: removing newly created unreferenced file \"" + fileName + "\"");
+                    }
+                    DeleteFile(fileName);
+                }
+            }
+        }
+        
+        public void Refresh()
+        {
+            Refresh(null);
+        }
+        
+        public void Dispose()
+        {
             // Move to protected method if class becomes unsealed
-			// DecRef old files from the last checkpoint, if any:
-			int size = lastFiles.Count;
-			if (size > 0)
-			{
-				for (int i = 0; i < size; i++)
-					DecRef(lastFiles[i]);
-				lastFiles.Clear();
-			}
-			
-			DeletePendingFiles();
-		}
-		
-		private void  DeletePendingFiles()
-		{
-			if (deletable != null)
-			{
-				IList<string> oldDeletable = deletable;
-				deletable = null;
-				int size = oldDeletable.Count;
-				for (int i = 0; i < size; i++)
-				{
-					if (infoStream != null)
-					{
-						Message("delete pending file " + oldDeletable[i]);
-					}
-					DeleteFile(oldDeletable[i]);
-				}
-			}
-		}
-		
-		/// <summary> For definition of "check point" see IndexWriter comments:
-		/// "Clarification: Check Points (and commits)".
-		/// 
-		/// Writer calls this when it has made a "consistent
-		/// change" to the index, meaning new files are written to
-		/// the index and the in-memory SegmentInfos have been
-		/// modified to point to those files.
-		/// 
-		/// This may or may not be a commit (segments_N may or may
-		/// not have been written).
-		/// 
-		/// We simply incref the files referenced by the new
-		/// SegmentInfos and decref the files we had previously
-		/// seen (if any).
-		/// 
-		/// If this is a commit, we also call the policy to give it
-		/// a chance to remove other commits.  If any commits are
-		/// removed, we decref their files as well.
-		/// </summary>
-		public void  Checkpoint(SegmentInfos segmentInfos, bool isCommit)
-		{
-			
-			if (infoStream != null)
-			{
-				Message("now checkpoint \"" + segmentInfos.GetCurrentSegmentFileName() + "\" [" + segmentInfos.Count + " segments " + "; isCommit = " + isCommit + "]");
-			}
-			
-			// Try again now to delete any previously un-deletable
-			// files (because they were in use, on Windows):
-			DeletePendingFiles();
-			
-			// Incref the files:
-			IncRef(segmentInfos, isCommit);
-			
-			if (isCommit)
-			{
-				// Append to our commits list:
-				commits.Add(new CommitPoint(this, commitsToDelete, directory, segmentInfos));
-				
-				// Tell policy so it can remove commits:
-				policy.OnCommit(commits);
-				
-				// Decref files for commits that were deleted by the policy:
-				DeleteCommits();
-			}
-			else
-			{
-				
-				IList<string> docWriterFiles;
-				if (docWriter != null)
-				{
-					docWriterFiles = docWriter.OpenFiles();
-					if (docWriterFiles != null)
-					// We must incRef these files before decRef'ing
-					// last files to make sure we don't accidentally
-					// delete them:
-						IncRef(docWriterFiles);
-				}
-				else
-					docWriterFiles = null;
-				
-				// DecRef old files from the last checkpoint, if any:
-				int size = lastFiles.Count;
-				if (size > 0)
-				{
-					for (int i = 0; i < size; i++)
-						DecRef(lastFiles[i]);
-					lastFiles.Clear();
-				}
-				
-				// Save files so we can decr on next checkpoint/commit:
+            // DecRef old files from the last checkpoint, if any:
+            int size = lastFiles.Count;
+            if (size > 0)
+            {
+                for (int i = 0; i < size; i++)
+                    DecRef(lastFiles[i]);
+                lastFiles.Clear();
+            }
+            
+            DeletePendingFiles();
+        }
+        
+        private void DeletePendingFiles()
+        {
+            if (deletable != null)
+            {
+                IList<string> oldDeletable = deletable;
+                deletable = null;
+                int size = oldDeletable.Count;
+                for (int i = 0; i < size; i++)
+                {
+                    if (infoStream != null)
+                    {
+                        Message("delete pending file " + oldDeletable[i]);
+                    }
+                    DeleteFile(oldDeletable[i]);
+                }
+            }
+        }
+        
+        /// <summary> For definition of "check point" see IndexWriter comments:
+        /// "Clarification: Check Points (and commits)".
+        /// 
+        /// Writer calls this when it has made a "consistent
+        /// change" to the index, meaning new files are written to
+        /// the index and the in-memory SegmentInfos have been
+        /// modified to point to those files.
+        /// 
+        /// This may or may not be a commit (segments_N may or may
+        /// not have been written).
+        /// 
+        /// We simply incref the files referenced by the new
+        /// SegmentInfos and decref the files we had previously
+        /// seen (if any).
+        /// 
+        /// If this is a commit, we also call the policy to give it
+        /// a chance to remove other commits.  If any commits are
+        /// removed, we decref their files as well.
+        /// </summary>
+        public void Checkpoint(SegmentInfos segmentInfos, bool isCommit)
+        {
+            
+            if (infoStream != null)
+            {
+                Message("now checkpoint \"" + segmentInfos.GetCurrentSegmentFileName() + "\" [" + segmentInfos.Count + " segments " + "; isCommit = " + isCommit + "]");
+            }
+            
+            // Try again now to delete any previously un-deletable
+            // files (because they were in use, on Windows):
+            DeletePendingFiles();
+            
+            // Incref the files:
+            IncRef(segmentInfos, isCommit);
+            
+            if (isCommit)
+            {
+                // Append to our commits list:
+                commits.Add(new CommitPoint(this, commitsToDelete, directory, segmentInfos));
+                
+                // Tell policy so it can remove commits:
+                policy.OnCommit(commits);
+                
+                // Decref files for commits that were deleted by the policy:
+                DeleteCommits();
+            }
+            else
+            {
+                
+                IList<string> docWriterFiles;
+                if (docWriter != null)
+                {
+                    docWriterFiles = docWriter.OpenFiles();
+                    if (docWriterFiles != null)
+                    {
+                        // We must incRef these files before decRef'ing
+                        // last files to make sure we don't accidentally
+                        // delete them:
+                        IncRef(docWriterFiles);
+                    }
+                }
+                else
+                    docWriterFiles = null;
+                
+                // DecRef old files from the last checkpoint, if any:
+                int size = lastFiles.Count;
+                if (size > 0)
+                {
+                    for (int i = 0; i < size; i++)
+                        DecRef(lastFiles[i]);
+                    lastFiles.Clear();
+                }
+                
+                // Save files so we can decr on next checkpoint/commit:
                 lastFiles.Add(segmentInfos.Files(directory, false));
-				
+                
                 if (docWriterFiles != null)
                 {
                     lastFiles.Add(docWriterFiles);
                 }
-			}
-		}
-		
-		internal void  IncRef(SegmentInfos segmentInfos, bool isCommit)
-		{
-			// If this is a commit point, also incRef the
-			// segments_N file:
-			foreach(string fileName in segmentInfos.Files(directory, isCommit))
-			{
-				IncRef(fileName);
-			}
-		}
+            }
+        }
+        
+        internal void IncRef(SegmentInfos segmentInfos, bool isCommit)
+        {
+            // If this is a commit point, also incRef the
+            // segments_N file:
+            foreach(string fileName in segmentInfos.Files(directory, isCommit))
+            {
+                IncRef(fileName);
+            }
+        }
 
         internal void IncRef(ICollection<string> files)
-		{
+        {
             foreach(string file in files)
-			{
+            {
                 IncRef(file);
-			}
-		}
-		
-		internal void  IncRef(string fileName)
-		{
-			RefCount rc = GetRefCount(fileName);
-			if (infoStream != null && VERBOSE_REF_COUNTS)
-			{
-				Message("  IncRef \"" + fileName + "\": pre-incr count is " + rc.count);
-			}
-			rc.IncRef();
-		}
-		
-		internal void  DecRef(ICollection<string> files)
-		{
+            }
+        }
+        
+        internal void IncRef(string fileName)
+        {
+            RefCount rc = GetRefCount(fileName);
+            if (infoStream != null && VERBOSE_REF_COUNTS)
+            {
+                Message("  IncRef \"" + fileName + "\": pre-incr count is " + rc.count);
+            }
+            rc.IncRef();
+        }
+        
+        internal void DecRef(ICollection<string> files)
+        {
             foreach(string file in files)
             {
                 DecRef(file);
             }
-		}
-		
-		internal void  DecRef(System.String fileName)
-		{
-			RefCount rc = GetRefCount(fileName);
-			if (infoStream != null && VERBOSE_REF_COUNTS)
-			{
-				Message("  DecRef \"" + fileName + "\": pre-decr count is " + rc.count);
-			}
-			if (0 == rc.DecRef())
-			{
-				// This file is no longer referenced by any past
-				// commit points nor by the in-memory SegmentInfos:
-				DeleteFile(fileName);
-				refCounts.Remove(fileName);
+        }
+        
+        internal void DecRef(System.String fileName)
+        {
+            RefCount rc = GetRefCount(fileName);
+            if (infoStream != null && VERBOSE_REF_COUNTS)
+            {
+                Message("  DecRef \"" + fileName + "\": pre-decr count is " + rc.count);
+            }
+            if (0 == rc.DecRef())
+            {
+                // This file is no longer referenced by any past
+                // commit points nor by the in-memory SegmentInfos:
+                DeleteFile(fileName);
+                refCounts.Remove(fileName);
 
                 if (synced != null) {
                     lock(synced) 
@@ -550,16 +550,16 @@ namespace Lucene.Net.Index
                       synced.Remove(fileName);
                     }
                 }
-			}
-		}
-		
-		internal void  DecRef(SegmentInfos segmentInfos)
-		{
-			foreach(string file in segmentInfos.Files(directory, false))
-			{
-				DecRef(file);
-			}
-		}
+            }
+        }
+        
+        internal void DecRef(SegmentInfos segmentInfos)
+        {
+            foreach(string file in segmentInfos.Files(directory, false))
+            {
+                DecRef(file);
+            }
+        }
 
         public bool Exists(String fileName)
         {
@@ -572,35 +572,35 @@ namespace Lucene.Net.Index
                 return GetRefCount(fileName).count > 0;
             }
         }
-		
-		private RefCount GetRefCount(System.String fileName)
-		{
-			RefCount rc;
-			if (!refCounts.ContainsKey(fileName))
-			{
-				rc = new RefCount(fileName);
-				refCounts[fileName] = rc;
-			}
-			else
-			{
-				rc = refCounts[fileName];
-			}
-			return rc;
-		}
-		
-		internal void  DeleteFiles(System.Collections.Generic.IList<string> files)
-		{
-			foreach(string file in files)
-				DeleteFile(file);
-		}
-		
-		/// <summary>Deletes the specified files, but only if they are new
-		/// (have not yet been incref'd). 
-		/// </summary>
+        
+        private RefCount GetRefCount(System.String fileName)
+        {
+            RefCount rc;
+            if (!refCounts.ContainsKey(fileName))
+            {
+                rc = new RefCount(fileName);
+                refCounts[fileName] = rc;
+            }
+            else
+            {
+                rc = refCounts[fileName];
+            }
+            return rc;
+        }
+        
+        internal void DeleteFiles(System.Collections.Generic.IList<string> files)
+        {
+            foreach(string file in files)
+                DeleteFile(file);
+        }
+        
+        /// <summary>Deletes the specified files, but only if they are new
+        /// (have not yet been incref'd). 
+        /// </summary>
         internal void DeleteNewFiles(System.Collections.Generic.ICollection<string> files)
-		{
-			foreach(string fileName in files)
-			{
+        {
+            foreach(string fileName in files)
+            {
                 if (!refCounts.ContainsKey(fileName))
                 {
                     if (infoStream != null)
@@ -609,87 +609,87 @@ namespace Lucene.Net.Index
                     }
                     DeleteFile(fileName);
                 }
-			}
-		}
-		
-		internal void  DeleteFile(System.String fileName)
-		{
-			try
-			{
-				if (infoStream != null)
-				{
-					Message("delete \"" + fileName + "\"");
-				}
-				directory.DeleteFile(fileName);
-			}
-			catch (System.IO.IOException e)
-			{
-				// if delete fails
-				if (directory.FileExists(fileName))
-				{
-					
-					// Some operating systems (e.g. Windows) don't
-					// permit a file to be deleted while it is opened
-					// for read (e.g. by another process or thread). So
-					// we assume that when a delete fails it is because
-					// the file is open in another process, and queue
-					// the file for subsequent deletion.
-					
-					if (infoStream != null)
-					{
-						Message("IndexFileDeleter: unable to remove file \"" + fileName + "\": " + e.ToString() + "; Will re-try later.");
-					}
-					if (deletable == null)
-					{
+            }
+        }
+        
+        internal void DeleteFile(System.String fileName)
+        {
+            try
+            {
+                if (infoStream != null)
+                {
+                    Message("delete \"" + fileName + "\"");
+                }
+                directory.DeleteFile(fileName);
+            }
+            catch (System.IO.IOException e)
+            {
+                // if delete fails
+                if (directory.FileExists(fileName))
+                {
+                    
+                    // Some operating systems (e.g. Windows) don't
+                    // permit a file to be deleted while it is opened
+                    // for read (e.g. by another process or thread). So
+                    // we assume that when a delete fails it is because
+                    // the file is open in another process, and queue
+                    // the file for subsequent deletion.
+                    
+                    if (infoStream != null)
+                    {
+                        Message("IndexFileDeleter: unable to remove file \"" + fileName + "\": " + e.ToString() + "; Will re-try later.");
+                    }
+                    if (deletable == null)
+                    {
                         deletable = new List<string>();
-					}
-					deletable.Add(fileName); // add to deletable
-				}
-			}
-		}
-		
-		/// <summary> Tracks the reference count for a single index file:</summary>
-		sealed private class RefCount
-		{
-			
-			// fileName used only for better assert error messages
-			internal System.String fileName;
-			internal bool initDone;
-			internal RefCount(System.String fileName)
-			{
-				this.fileName = fileName;
-			}
-			
-			internal int count;
-			
-			public int IncRef()
-			{
-				if (!initDone)
-				{
-					initDone = true;
-				}
-				else
-				{
-					System.Diagnostics.Debug.Assert(count > 0, "RefCount is 0 pre-increment for file " + fileName);
-				}
-				return ++count;
-			}
-			
-			public int DecRef()
-			{
-				System.Diagnostics.Debug.Assert(count > 0, "RefCount is 0 pre-decrement for file " + fileName);
-				return --count;
-			}
-		}
-		
-		/// <summary> Holds details for each commit point.  This class is
-		/// also passed to the deletion policy.  Note: this class
-		/// has a natural ordering that is inconsistent with
-		/// equals.
-		/// </summary>
-		
-		sealed private class CommitPoint:IndexCommit, System.IComparable<CommitPoint>
-		{
+                    }
+                    deletable.Add(fileName); // add to deletable
+                }
+            }
+        }
+        
+        /// <summary> Tracks the reference count for a single index file:</summary>
+        private sealed class RefCount
+        {
+            
+            // fileName used only for better assert error messages
+            internal System.String fileName;
+            internal bool initDone;
+            internal RefCount(System.String fileName)
+            {
+                this.fileName = fileName;
+            }
+            
+            internal int count;
+            
+            public int IncRef()
+            {
+                if (!initDone)
+                {
+                    initDone = true;
+                }
+                else
+                {
+                    System.Diagnostics.Debug.Assert(count > 0, "RefCount is 0 pre-increment for file " + fileName);
+                }
+                return ++count;
+            }
+            
+            public int DecRef()
+            {
+                System.Diagnostics.Debug.Assert(count > 0, "RefCount is 0 pre-decrement for file " + fileName);
+                return --count;
+            }
+        }
+        
+        /// <summary> Holds details for each commit point.  This class is
+        /// also passed to the deletion policy.  Note: this class
+        /// has a natural ordering that is inconsistent with
+        /// equals.
+        /// </summary>
+        
+        private sealed class CommitPoint : IndexCommit, System.IComparable<CommitPoint>
+        {
             private void InitBlock(IndexFileDeleter enclosingInstance)
             {
                 this.enclosingInstance = enclosingInstance;
@@ -703,106 +703,106 @@ namespace Lucene.Net.Index
                 }
 
             }
-			
-			internal long gen;
+            
+            internal long gen;
             internal ICollection<string> files;
-			internal string segmentsFileName;
-			internal bool deleted;
-			internal Directory directory;
+            internal string segmentsFileName;
+            internal bool deleted;
+            internal Directory directory;
             internal ICollection<CommitPoint> commitsToDelete;
-			internal long version;
-			internal long generation;
-			internal bool isOptimized;
+            internal long version;
+            internal long generation;
+            internal bool isOptimized;
             internal IDictionary<string, string> userData;
-			
-			public CommitPoint(IndexFileDeleter enclosingInstance, ICollection<CommitPoint> commitsToDelete, Directory directory, SegmentInfos segmentInfos)
-			{
-				InitBlock(enclosingInstance);
-				this.directory = directory;
-				this.commitsToDelete = commitsToDelete;
-				userData = segmentInfos.UserData;
-				segmentsFileName = segmentInfos.GetCurrentSegmentFileName();
-				version = segmentInfos.Version;
-				generation = segmentInfos.Generation;
+            
+            public CommitPoint(IndexFileDeleter enclosingInstance, ICollection<CommitPoint> commitsToDelete, Directory directory, SegmentInfos segmentInfos)
+            {
+                InitBlock(enclosingInstance);
+                this.directory = directory;
+                this.commitsToDelete = commitsToDelete;
+                userData = segmentInfos.UserData;
+                segmentsFileName = segmentInfos.GetCurrentSegmentFileName();
+                version = segmentInfos.Version;
+                generation = segmentInfos.Generation;
                 files = segmentInfos.Files(directory, true);
-				gen = segmentInfos.Generation;
-				isOptimized = segmentInfos.Count == 1 && !segmentInfos.Info(0).HasDeletions();
-				
-				System.Diagnostics.Debug.Assert(!segmentInfos.HasExternalSegments(directory));
-			}
+                gen = segmentInfos.Generation;
+                isOptimized = segmentInfos.Count == 1 && !segmentInfos.Info(0).HasDeletions();
+                
+                System.Diagnostics.Debug.Assert(!segmentInfos.HasExternalSegments(directory));
+            }
 
             public override string ToString()
             {
                 return "IndexFileDeleter.CommitPoint(" + segmentsFileName + ")";
             }
 
-		    public override bool IsOptimized
-		    {
-		        get { return isOptimized; }
-		    }
+            public override bool IsOptimized
+            {
+                get { return isOptimized; }
+            }
 
-		    public override string SegmentsFileName
-		    {
-		        get { return segmentsFileName; }
-		    }
+            public override string SegmentsFileName
+            {
+                get { return segmentsFileName; }
+            }
 
-		    public override ICollection<string> FileNames
-		    {
-		        get { return files; }
-		    }
+            public override ICollection<string> FileNames
+            {
+                get { return files; }
+            }
 
-		    public override Directory Directory
-		    {
-		        get { return directory; }
-		    }
+            public override Directory Directory
+            {
+                get { return directory; }
+            }
 
-		    public override long Version
-		    {
-		        get { return version; }
-		    }
+            public override long Version
+            {
+                get { return version; }
+            }
 
-		    public override long Generation
-		    {
-		        get { return generation; }
-		    }
+            public override long Generation
+            {
+                get { return generation; }
+            }
 
-		    public override IDictionary<string, string> UserData
-		    {
-		        get { return userData; }
-		    }
+            public override IDictionary<string, string> UserData
+            {
+                get { return userData; }
+            }
 
-		    /// <summary> Called only be the deletion policy, to remove this
-			/// commit point from the index.
-			/// </summary>
-			public override void  Delete()
-			{
-				if (!deleted)
-				{
-					deleted = true;
-					Enclosing_Instance.commitsToDelete.Add(this);
-				}
-			}
+            /// <summary> Called only by the deletion policy, to remove this
+            /// commit point from the index.
+            /// </summary>
+            public override void Delete()
+            {
+                if (!deleted)
+                {
+                    deleted = true;
+                    Enclosing_Instance.commitsToDelete.Add(this);
+                }
+            }
 
-		    public override bool IsDeleted
-		    {
-		        get { return deleted; }
-		    }
+            public override bool IsDeleted
+            {
+                get { return deleted; }
+            }
 
-		    public int CompareTo(CommitPoint commit)
-			{
-				if (gen < commit.gen)
-				{
-					return - 1;
-				}
-				else if (gen > commit.gen)
-				{
-					return 1;
-				}
-				else
-				{
-					return 0;
-				}
-			}
-		}
-	}
+            public int CompareTo(CommitPoint commit)
+            {
+                if (gen < commit.gen)
+                {
+                    return -1;
+                }
+                else if (gen > commit.gen)
+                {
+                    return 1;
+                }
+                else
+                {
+                    return 0;
+                }
+            }
+        }
+    }
 }
\ No newline at end of file
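
The commits/commitsToDelete plumbing above is what a custom IndexDeletionPolicy drives: OnInit and OnCommit receive the sorted CommitPoints, each IndexCommit.Delete() call queues the commit in commitsToDelete, and DeleteCommits() then DecRefs every file those commits referenced. A sketch of a policy equivalent to the default KeepOnlyLastCommitDeletionPolicy (assuming the generic 3.x IndexDeletionPolicy signatures):

    using System.Collections.Generic;
    using Lucene.Net.Index;

    class KeepLastCommitPolicy : IndexDeletionPolicy
    {
        public void OnInit<T>(IList<T> commits) where T : IndexCommit
        {
            OnCommit(commits);
        }

        public void OnCommit<T>(IList<T> commits) where T : IndexCommit
        {
            // Commits arrive sorted oldest to newest; keep only the newest.
            // Each Delete() marks the CommitPoint for the next DeleteCommits().
            for (int i = 0; i < commits.Count - 1; i++)
                commits[i].Delete();
        }
    }

An IndexWriter constructed with such a policy prunes every older segments_N, and any files only those commits reference, on each commit.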

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/IndexFileNameFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IndexFileNameFilter.cs b/src/core/Index/IndexFileNameFilter.cs
index 474381f..f8abe25 100644
--- a/src/core/Index/IndexFileNameFilter.cs
+++ b/src/core/Index/IndexFileNameFilter.cs
@@ -20,88 +20,88 @@ using System.Collections.Generic;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>Filename filter that accept filenames and extensions only created by Lucene. </summary>
-	public class IndexFileNameFilter
-	{
-		
-		private static IndexFileNameFilter singleton = new IndexFileNameFilter();
+    
+    /// <summary>Filename filter that accepts only filenames and extensions created by Lucene. </summary>
+    public class IndexFileNameFilter
+    {
+        
+        private static IndexFileNameFilter singleton = new IndexFileNameFilter();
         private HashSet<String> extensions;
         private HashSet<String> extensionsInCFS;
-		
-		// Prevent instantiation.
-		private IndexFileNameFilter()
-		{
+        
+        // Prevent instantiation.
+        private IndexFileNameFilter()
+        {
             extensions = new HashSet<String>();
-			for (int i = 0; i < IndexFileNames.INDEX_EXTENSIONS.Length; i++)
-			{
-				extensions.Add(IndexFileNames.INDEX_EXTENSIONS[i]);
-			}
+            for (int i = 0; i < IndexFileNames.INDEX_EXTENSIONS.Length; i++)
+            {
+                extensions.Add(IndexFileNames.INDEX_EXTENSIONS[i]);
+            }
             extensionsInCFS = new HashSet<String>();
-			for (int i = 0; i < IndexFileNames.INDEX_EXTENSIONS_IN_COMPOUND_FILE.Length; i++)
-			{
-				extensionsInCFS.Add(IndexFileNames.INDEX_EXTENSIONS_IN_COMPOUND_FILE[i]);
-			}
-		}
-		
-		/* (non-Javadoc)
-		* <see cref="java.io.FilenameFilter.accept(java.io.File, java.lang.String)"/>
-		*/
-		public virtual bool Accept(System.IO.FileInfo dir, System.String name)
-		{
-			int i = name.LastIndexOf((System.Char) '.');
-			if (i != - 1)
-			{
-				System.String extension = name.Substring(1 + i);
-				if (extensions.Contains(extension))
-				{
-					return true;
-				}
-				else if (extension.StartsWith("f") && (new System.Text.RegularExpressions.Regex("f\\d+")).Match(extension).Success)
-				{
-					return true;
-				}
-				else if (extension.StartsWith("s") && (new System.Text.RegularExpressions.Regex("s\\d+")).Match(extension).Success)
-				{
-					return true;
-				}
-			}
-			else
-			{
-				if (name.Equals(IndexFileNames.DELETABLE))
-					return true;
-				else if (name.StartsWith(IndexFileNames.SEGMENTS))
-					return true;
-			}
-			return false;
-		}
-		
-		/// <summary> Returns true if this is a file that would be contained
-		/// in a CFS file.  This function should only be called on
-		/// files that pass the above "accept" (ie, are already
-		/// known to be a Lucene index file).
-		/// </summary>
-		public virtual bool IsCFSFile(System.String name)
-		{
-			int i = name.LastIndexOf((System.Char) '.');
-			if (i != - 1)
-			{
-				System.String extension = name.Substring(1 + i);
-				if (extensionsInCFS.Contains(extension))
-				{
-					return true;
-				}
-				if (extension.StartsWith("f") && (new System.Text.RegularExpressions.Regex("f\\d+")).Match(extension).Success)
-				{
-					return true;
-				}
-			}
-			return false;
-		}
+            for (int i = 0; i < IndexFileNames.INDEX_EXTENSIONS_IN_COMPOUND_FILE.Length; i++)
+            {
+                extensionsInCFS.Add(IndexFileNames.INDEX_EXTENSIONS_IN_COMPOUND_FILE[i]);
+            }
+        }
+        
+        /* (non-Javadoc)
+        * <see cref="java.io.FilenameFilter.accept(java.io.File, java.lang.String)"/>
+        */
+        public virtual bool Accept(System.IO.FileInfo dir, System.String name)
+        {
+            int i = name.LastIndexOf((System.Char) '.');
+            if (i != - 1)
+            {
+                System.String extension = name.Substring(1 + i);
+                if (extensions.Contains(extension))
+                {
+                    return true;
+                }
+                else if (extension.StartsWith("f") && (new System.Text.RegularExpressions.Regex("f\\d+")).Match(extension).Success)
+                {
+                    return true;
+                }
+                else if (extension.StartsWith("s") && (new System.Text.RegularExpressions.Regex("s\\d+")).Match(extension).Success)
+                {
+                    return true;
+                }
+            }
+            else
+            {
+                if (name.Equals(IndexFileNames.DELETABLE))
+                    return true;
+                else if (name.StartsWith(IndexFileNames.SEGMENTS))
+                    return true;
+            }
+            return false;
+        }
+        
+        /// <summary> Returns true if this is a file that would be contained
+        /// in a CFS file.  This function should only be called on
+        /// files that pass the above "accept" (ie, are already
+        /// known to be a Lucene index file).
+        /// </summary>
+        public virtual bool IsCFSFile(System.String name)
+        {
+            int i = name.LastIndexOf((System.Char) '.');
+            if (i != - 1)
+            {
+                System.String extension = name.Substring(1 + i);
+                if (extensionsInCFS.Contains(extension))
+                {
+                    return true;
+                }
+                if (extension.StartsWith("f") && (new System.Text.RegularExpressions.Regex("f\\d+")).Match(extension).Success)
+                {
+                    return true;
+                }
+            }
+            return false;
+        }
 
-	    public static IndexFileNameFilter Filter
-	    {
-	        get { return singleton; }
-	    }
-	}
+        public static IndexFileNameFilter Filter
+        {
+            get { return singleton; }
+        }
+    }
 }
\ No newline at end of file
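
Accept above keys off the extension table, falling back to the f\d+ and
s\d+ regexes for per-field plain and separate norms files. A hedged
usage sketch (indexPath is an assumed local variable; Accept never reads
its dir argument, so null is passed for it):

    // List the files in an index directory that the filter recognizes
    // as Lucene-created index files.
    IndexFileNameFilter filter = IndexFileNameFilter.Filter;
    foreach (string path in System.IO.Directory.GetFiles(indexPath))
    {
        string name = System.IO.Path.GetFileName(path);
        if (filter.Accept(null, name))
            System.Console.WriteLine(name);
    }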

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/IndexFileNames.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IndexFileNames.cs b/src/core/Index/IndexFileNames.cs
index ef50119..130d7af 100644
--- a/src/core/Index/IndexFileNames.cs
+++ b/src/core/Index/IndexFileNames.cs
@@ -20,146 +20,146 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>Useful constants representing filenames and extensions used by lucene</summary>
-	public sealed class IndexFileNames
-	{
-		
-		/// <summary>Name of the index segment file </summary>
-		public /*internal*/ const System.String SEGMENTS = "segments";
-		
-		/// <summary>Name of the generation reference file name </summary>
-		public /*internal*/ const System.String SEGMENTS_GEN = "segments.gen";
-		
-		/// <summary>Name of the index deletable file (only used in
-		/// pre-lockless indices) 
-		/// </summary>
-		public /*internal*/ const System.String DELETABLE = "deletable";
-		
-		/// <summary>Extension of norms file </summary>
-		public /*internal*/ const System.String NORMS_EXTENSION = "nrm";
-		
-		/// <summary>Extension of freq postings file </summary>
-		public /*internal*/ const System.String FREQ_EXTENSION = "frq";
-		
-		/// <summary>Extension of prox postings file </summary>
-		public /*internal*/ const System.String PROX_EXTENSION = "prx";
-		
-		/// <summary>Extension of terms file </summary>
-		public /*internal*/ const System.String TERMS_EXTENSION = "tis";
-		
-		/// <summary>Extension of terms index file </summary>
-		public /*internal*/ const System.String TERMS_INDEX_EXTENSION = "tii";
-		
-		/// <summary>Extension of stored fields index file </summary>
-		public /*internal*/ const System.String FIELDS_INDEX_EXTENSION = "fdx";
-		
-		/// <summary>Extension of stored fields file </summary>
-		public /*internal*/ const System.String FIELDS_EXTENSION = "fdt";
-		
-		/// <summary>Extension of vectors fields file </summary>
-		public /*internal*/ const System.String VECTORS_FIELDS_EXTENSION = "tvf";
-		
-		/// <summary>Extension of vectors documents file </summary>
-		public /*internal*/ const System.String VECTORS_DOCUMENTS_EXTENSION = "tvd";
-		
-		/// <summary>Extension of vectors index file </summary>
-		public /*internal*/ const System.String VECTORS_INDEX_EXTENSION = "tvx";
-		
-		/// <summary>Extension of compound file </summary>
-		public /*internal*/ const System.String COMPOUND_FILE_EXTENSION = "cfs";
-		
-		/// <summary>Extension of compound file for doc store files</summary>
-		public /*internal*/ const System.String COMPOUND_FILE_STORE_EXTENSION = "cfx";
-		
-		/// <summary>Extension of deletes </summary>
-		internal const System.String DELETES_EXTENSION = "del";
-		
-		/// <summary>Extension of field infos </summary>
-		public /*internal*/ const System.String FIELD_INFOS_EXTENSION = "fnm";
-		
-		/// <summary>Extension of plain norms </summary>
-		public /*internal*/ const System.String PLAIN_NORMS_EXTENSION = "f";
-		
-		/// <summary>Extension of separate norms </summary>
-		public /*internal*/ const System.String SEPARATE_NORMS_EXTENSION = "s";
-		
-		/// <summary>Extension of gen file </summary>
-		public /*internal*/ const System.String GEN_EXTENSION = "gen";
-		
-		/// <summary> This array contains all filename extensions used by
-		/// Lucene's index files, with two exceptions, namely the
-		/// extension made up from <c>.f</c> + a number and
-		/// from <c>.s</c> + a number.  Also note that
-		/// Lucene's <c>segments_N</c> files do not have any
-		/// filename extension.
-		/// </summary>
-		public /*internal*/ static readonly System.String[] INDEX_EXTENSIONS = new System.String[]{COMPOUND_FILE_EXTENSION, FIELD_INFOS_EXTENSION, FIELDS_INDEX_EXTENSION, FIELDS_EXTENSION, TERMS_INDEX_EXTENSION, TERMS_EXTENSION, FREQ_EXTENSION, PROX_EXTENSION, DELETES_EXTENSION, VECTORS_INDEX_EXTENSION, VECTORS_DOCUMENTS_EXTENSION, VECTORS_FIELDS_EXTENSION, GEN_EXTENSION, NORMS_EXTENSION, COMPOUND_FILE_STORE_EXTENSION};
-		
-		/// <summary>File extensions that are added to a compound file
-		/// (same as above, minus "del", "gen", "cfs"). 
-		/// </summary>
-		public /*internal*/ static readonly System.String[] INDEX_EXTENSIONS_IN_COMPOUND_FILE = new System.String[]{FIELD_INFOS_EXTENSION, FIELDS_INDEX_EXTENSION, FIELDS_EXTENSION, TERMS_INDEX_EXTENSION, TERMS_EXTENSION, FREQ_EXTENSION, PROX_EXTENSION, VECTORS_INDEX_EXTENSION, VECTORS_DOCUMENTS_EXTENSION, VECTORS_FIELDS_EXTENSION, NORMS_EXTENSION};
-		
-		public /*internal*/ static readonly System.String[] STORE_INDEX_EXTENSIONS = new System.String[]{VECTORS_INDEX_EXTENSION, VECTORS_FIELDS_EXTENSION, VECTORS_DOCUMENTS_EXTENSION, FIELDS_INDEX_EXTENSION, FIELDS_EXTENSION};
-		
-		public /*internal*/ static readonly System.String[] NON_STORE_INDEX_EXTENSIONS = new System.String[]{FIELD_INFOS_EXTENSION, FREQ_EXTENSION, PROX_EXTENSION, TERMS_EXTENSION, TERMS_INDEX_EXTENSION, NORMS_EXTENSION};
-		
-		/// <summary>File extensions of old-style index files </summary>
-		public /*internal*/ static readonly System.String[] COMPOUND_EXTENSIONS = new System.String[]{FIELD_INFOS_EXTENSION, FREQ_EXTENSION, PROX_EXTENSION, FIELDS_INDEX_EXTENSION, FIELDS_EXTENSION, TERMS_INDEX_EXTENSION, TERMS_EXTENSION};
-		
-		/// <summary>File extensions for term vector support </summary>
-		public /*internal*/ static readonly System.String[] VECTOR_EXTENSIONS = new System.String[]{VECTORS_INDEX_EXTENSION, VECTORS_DOCUMENTS_EXTENSION, VECTORS_FIELDS_EXTENSION};
-		
-		/// <summary> Computes the full file name from base, extension and
-		/// generation.  If the generation is -1, the file name is
-		/// null.  If it's 0, the file name is 
-		/// If it's > 0, the file name is 
-		/// 
-		/// </summary>
+    
+    /// <summary>Useful constants representing filenames and extensions used by Lucene</summary>
+    public sealed class IndexFileNames
+    {
+        
+        /// <summary>Name of the index segment file </summary>
+        public /*internal*/ const System.String SEGMENTS = "segments";
+        
+        /// <summary>Name of the generation reference file </summary>
+        public /*internal*/ const System.String SEGMENTS_GEN = "segments.gen";
+        
+        /// <summary>Name of the index deletable file (only used in
+        /// pre-lockless indices) 
+        /// </summary>
+        public /*internal*/ const System.String DELETABLE = "deletable";
+        
+        /// <summary>Extension of norms file </summary>
+        public /*internal*/ const System.String NORMS_EXTENSION = "nrm";
+        
+        /// <summary>Extension of freq postings file </summary>
+        public /*internal*/ const System.String FREQ_EXTENSION = "frq";
+        
+        /// <summary>Extension of prox postings file </summary>
+        public /*internal*/ const System.String PROX_EXTENSION = "prx";
+        
+        /// <summary>Extension of terms file </summary>
+        public /*internal*/ const System.String TERMS_EXTENSION = "tis";
+        
+        /// <summary>Extension of terms index file </summary>
+        public /*internal*/ const System.String TERMS_INDEX_EXTENSION = "tii";
+        
+        /// <summary>Extension of stored fields index file </summary>
+        public /*internal*/ const System.String FIELDS_INDEX_EXTENSION = "fdx";
+        
+        /// <summary>Extension of stored fields file </summary>
+        public /*internal*/ const System.String FIELDS_EXTENSION = "fdt";
+        
+        /// <summary>Extension of vectors fields file </summary>
+        public /*internal*/ const System.String VECTORS_FIELDS_EXTENSION = "tvf";
+        
+        /// <summary>Extension of vectors documents file </summary>
+        public /*internal*/ const System.String VECTORS_DOCUMENTS_EXTENSION = "tvd";
+        
+        /// <summary>Extension of vectors index file </summary>
+        public /*internal*/ const System.String VECTORS_INDEX_EXTENSION = "tvx";
+        
+        /// <summary>Extension of compound file </summary>
+        public /*internal*/ const System.String COMPOUND_FILE_EXTENSION = "cfs";
+        
+        /// <summary>Extension of compound file for doc store files</summary>
+        public /*internal*/ const System.String COMPOUND_FILE_STORE_EXTENSION = "cfx";
+        
+        /// <summary>Extension of deletes </summary>
+        internal const System.String DELETES_EXTENSION = "del";
+        
+        /// <summary>Extension of field infos </summary>
+        public /*internal*/ const System.String FIELD_INFOS_EXTENSION = "fnm";
+        
+        /// <summary>Extension of plain norms </summary>
+        public /*internal*/ const System.String PLAIN_NORMS_EXTENSION = "f";
+        
+        /// <summary>Extension of separate norms </summary>
+        public /*internal*/ const System.String SEPARATE_NORMS_EXTENSION = "s";
+        
+        /// <summary>Extension of gen file </summary>
+        public /*internal*/ const System.String GEN_EXTENSION = "gen";
+        
+        /// <summary> This array contains all filename extensions used by
+        /// Lucene's index files, with two exceptions, namely the
+        /// extension made up from <c>.f</c> + a number and
+        /// from <c>.s</c> + a number.  Also note that
+        /// Lucene's <c>segments_N</c> files do not have any
+        /// filename extension.
+        /// </summary>
+        public /*internal*/ static readonly System.String[] INDEX_EXTENSIONS = new System.String[]{COMPOUND_FILE_EXTENSION, FIELD_INFOS_EXTENSION, FIELDS_INDEX_EXTENSION, FIELDS_EXTENSION, TERMS_INDEX_EXTENSION, TERMS_EXTENSION, FREQ_EXTENSION, PROX_EXTENSION, DELETES_EXTENSION, VECTORS_INDEX_EXTENSION, VECTORS_DOCUMENTS_EXTENSION, VECTORS_FIELDS_EXTENSION, GEN_EXTENSION, NORMS_EXTENSION, COMPOUND_FILE_STORE_EXTENSION};
+        
+        /// <summary>File extensions that are added to a compound file
+        /// (same as above, minus "del", "gen", "cfs"). 
+        /// </summary>
+        public /*internal*/ static readonly System.String[] INDEX_EXTENSIONS_IN_COMPOUND_FILE = new System.String[]{FIELD_INFOS_EXTENSION, FIELDS_INDEX_EXTENSION, FIELDS_EXTENSION, TERMS_INDEX_EXTENSION, TERMS_EXTENSION, FREQ_EXTENSION, PROX_EXTENSION, VECTORS_INDEX_EXTENSION, VECTORS_DOCUMENTS_EXTENSION, VECTORS_FIELDS_EXTENSION, NORMS_EXTENSION};
+        
+        public /*internal*/ static readonly System.String[] STORE_INDEX_EXTENSIONS = new System.String[]{VECTORS_INDEX_EXTENSION, VECTORS_FIELDS_EXTENSION, VECTORS_DOCUMENTS_EXTENSION, FIELDS_INDEX_EXTENSION, FIELDS_EXTENSION};
+        
+        public /*internal*/ static readonly System.String[] NON_STORE_INDEX_EXTENSIONS = new System.String[]{FIELD_INFOS_EXTENSION, FREQ_EXTENSION, PROX_EXTENSION, TERMS_EXTENSION, TERMS_INDEX_EXTENSION, NORMS_EXTENSION};
+        
+        /// <summary>File extensions of old-style index files </summary>
+        public /*internal*/ static readonly System.String[] COMPOUND_EXTENSIONS = new System.String[]{FIELD_INFOS_EXTENSION, FREQ_EXTENSION, PROX_EXTENSION, FIELDS_INDEX_EXTENSION, FIELDS_EXTENSION, TERMS_INDEX_EXTENSION, TERMS_EXTENSION};
+        
+        /// <summary>File extensions for term vector support </summary>
+        public /*internal*/ static readonly System.String[] VECTOR_EXTENSIONS = new System.String[]{VECTORS_INDEX_EXTENSION, VECTORS_DOCUMENTS_EXTENSION, VECTORS_FIELDS_EXTENSION};
+        
+        /// <summary> Computes the full file name from base, extension and
+        /// generation.  If the generation is -1, the file name is
+        /// null.  If it's 0, the file name is base + extension.
+        /// If it's > 0, the file name is base + "_" + gen + extension.
+        /// </summary>
         /// <param name="base_Renamed">-- main part of the file name
-		/// </param>
-		/// <param name="extension">-- extension of the filename (including .)
-		/// </param>
-		/// <param name="gen">-- generation
-		/// </param>
-		public /*internal*/ static System.String FileNameFromGeneration(System.String base_Renamed, System.String extension, long gen)
-		{
-			if (gen == SegmentInfo.NO)
-			{
-				return null;
-			}
-			else if (gen == SegmentInfo.WITHOUT_GEN)
-			{
-				return base_Renamed + extension;
-			}
-			else
-			{
+        /// </param>
+        /// <param name="extension">-- extension of the filename (including .)
+        /// </param>
+        /// <param name="gen">-- generation
+        /// </param>
+        public /*internal*/ static System.String FileNameFromGeneration(System.String base_Renamed, System.String extension, long gen)
+        {
+            if (gen == SegmentInfo.NO)
+            {
+                return null;
+            }
+            else if (gen == SegmentInfo.WITHOUT_GEN)
+            {
+                return base_Renamed + extension;
+            }
+            else
+            {
 #if !PRE_LUCENE_NET_2_0_0_COMPATIBLE
-				return base_Renamed + "_" + Number.ToString(gen) + extension;
+                return base_Renamed + "_" + Number.ToString(gen) + extension;
 #else
-				return base_Renamed + "_" + System.Convert.ToString(gen, 16) + extension;
+                return base_Renamed + "_" + System.Convert.ToString(gen, 16) + extension;
 #endif
-			}
-		}
-		
-		/// <summary> Returns true if the provided filename is one of the doc
-		/// store files (ends with an extension in
-		/// STORE_INDEX_EXTENSIONS).
-		/// </summary>
-		internal static bool IsDocStoreFile(System.String fileName)
-		{
-			if (fileName.EndsWith(COMPOUND_FILE_STORE_EXTENSION))
-				return true;
-			for (int i = 0; i < STORE_INDEX_EXTENSIONS.Length; i++)
-				if (fileName.EndsWith(STORE_INDEX_EXTENSIONS[i]))
-					return true;
-			return false;
-		}
-		
-		internal static System.String SegmentFileName(System.String segmentName, System.String ext)
-		{
-			return segmentName + "." + ext;
-		}
-	}
+            }
+        }
+        
+        /// <summary> Returns true if the provided filename is one of the doc
+        /// store files (ends with an extension in
+        /// STORE_INDEX_EXTENSIONS).
+        /// </summary>
+        internal static bool IsDocStoreFile(System.String fileName)
+        {
+            if (fileName.EndsWith(COMPOUND_FILE_STORE_EXTENSION))
+                return true;
+            for (int i = 0; i < STORE_INDEX_EXTENSIONS.Length; i++)
+                if (fileName.EndsWith(STORE_INDEX_EXTENSIONS[i]))
+                    return true;
+            return false;
+        }
+        
+        internal static System.String SegmentFileName(System.String segmentName, System.String ext)
+        {
+            return segmentName + "." + ext;
+        }
+    }
 }
\ No newline at end of file


[13/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FieldsReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FieldsReader.cs b/src/core/Index/FieldsReader.cs
index 8fa351d..d4973d9 100644
--- a/src/core/Index/FieldsReader.cs
+++ b/src/core/Index/FieldsReader.cs
@@ -28,150 +28,150 @@ using IndexInput = Lucene.Net.Store.IndexInput;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> Class responsible for access to stored document fields.
-	/// <p/>
-	/// It uses &lt;segment&gt;.fdt and &lt;segment&gt;.fdx; files.
-	/// 
-	/// </summary>
-	public sealed class FieldsReader : ICloneable, IDisposable
-	{
-		private readonly FieldInfos fieldInfos;
-		
-		// The main fieldStream, used only for cloning.
-		private readonly IndexInput cloneableFieldsStream;
-		
-		// This is a clone of cloneableFieldsStream used for reading documents.
-		// It should not be cloned outside of a synchronized context.
-		private readonly IndexInput fieldsStream;
-		
-		private readonly IndexInput cloneableIndexStream;
-		private readonly IndexInput indexStream;
-		private readonly int numTotalDocs;
-		private readonly int size;
-		private bool closed;
-		private readonly int format;
-		private readonly int formatSize;
-		
-		// The docID offset where our docs begin in the index
-		// file.  This will be 0 if we have our own private file.
-		private readonly int docStoreOffset;
-		
-		private readonly CloseableThreadLocal<IndexInput> fieldsStreamTL = new CloseableThreadLocal<IndexInput>();
-		private readonly bool isOriginal = false;
-		
-		/// <summary>Returns a cloned FieldsReader that shares open
-		/// IndexInputs with the original one.  It is the caller's
-		/// job not to close the original FieldsReader until all
-		/// clones are called (eg, currently SegmentReader manages
-		/// this logic). 
-		/// </summary>
-		public System.Object Clone()
-		{
-			EnsureOpen();
-			return new FieldsReader(fieldInfos, numTotalDocs, size, format, formatSize, docStoreOffset, cloneableFieldsStream, cloneableIndexStream);
-		}
-		
-		// Used only by clone
-		private FieldsReader(FieldInfos fieldInfos, int numTotalDocs, int size, int format, int formatSize, int docStoreOffset, IndexInput cloneableFieldsStream, IndexInput cloneableIndexStream)
-		{
-			this.fieldInfos = fieldInfos;
-			this.numTotalDocs = numTotalDocs;
-			this.size = size;
-			this.format = format;
-			this.formatSize = formatSize;
-			this.docStoreOffset = docStoreOffset;
-			this.cloneableFieldsStream = cloneableFieldsStream;
-			this.cloneableIndexStream = cloneableIndexStream;
-			fieldsStream = (IndexInput) cloneableFieldsStream.Clone();
-			indexStream = (IndexInput) cloneableIndexStream.Clone();
-		}
-		
-		public /*internal*/ FieldsReader(Directory d, String segment, FieldInfos fn):this(d, segment, fn, BufferedIndexInput.BUFFER_SIZE, - 1, 0)
-		{
-		}
-		
-		internal FieldsReader(Directory d, System.String segment, FieldInfos fn, int readBufferSize):this(d, segment, fn, readBufferSize, - 1, 0)
-		{
-		}
-		
-		internal FieldsReader(Directory d, System.String segment, FieldInfos fn, int readBufferSize, int docStoreOffset, int size)
-		{
-			bool success = false;
-			isOriginal = true;
-			try
-			{
-				fieldInfos = fn;
-				
-				cloneableFieldsStream = d.OpenInput(segment + "." + IndexFileNames.FIELDS_EXTENSION, readBufferSize);
-				cloneableIndexStream = d.OpenInput(segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION, readBufferSize);
-				
-				// First version of fdx did not include a format
-				// header, but, the first int will always be 0 in that
-				// case
-				int firstInt = cloneableIndexStream.ReadInt();
-				format = firstInt == 0 ? 0 : firstInt;
-				
-				if (format > FieldsWriter.FORMAT_CURRENT)
-					throw new CorruptIndexException("Incompatible format version: " + format + " expected " + FieldsWriter.FORMAT_CURRENT + " or lower");
-				
-				formatSize = format > FieldsWriter.FORMAT ? 4 : 0;
-				
-				if (format < FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
-					cloneableFieldsStream.SetModifiedUTF8StringsMode();
-				
-				fieldsStream = (IndexInput) cloneableFieldsStream.Clone();
-				
-				long indexSize = cloneableIndexStream.Length() - formatSize;
-				
-				if (docStoreOffset != - 1)
-				{
-					// We read only a slice out of this shared fields file
-					this.docStoreOffset = docStoreOffset;
-					this.size = size;
-					
-					// Verify the file is long enough to hold all of our
-					// docs
-					System.Diagnostics.Debug.Assert(((int)(indexSize / 8)) >= size + this.docStoreOffset, "indexSize=" + indexSize + " size=" + size + " docStoreOffset=" + docStoreOffset);
-				}
-				else
-				{
-					this.docStoreOffset = 0;
-					this.size = (int) (indexSize >> 3);
-				}
-				
-				indexStream = (IndexInput) cloneableIndexStream.Clone();
-				numTotalDocs = (int) (indexSize >> 3);
-				success = true;
-			}
-			finally
-			{
-				// With lock-less commits, it's entirely possible (and
-				// fine) to hit a FileNotFound exception above. In
-				// this case, we want to explicitly close any subset
-				// of things that were opened so that we don't have to
-				// wait for a GC to do so.
-				if (!success)
-				{
-					Dispose();
-				}
-			}
-		}
-		
-		/// <throws>  AlreadyClosedException if this FieldsReader is closed </throws>
-		internal void  EnsureOpen()
-		{
-			if (closed)
-			{
-				throw new AlreadyClosedException("this FieldsReader is closed");
-			}
-		}
-		
-		/// <summary> Closes the underlying <see cref="Lucene.Net.Store.IndexInput" /> streams, including any ones associated with a
-		/// lazy implementation of a Field.  This means that the Fields values will not be accessible.
-		/// 
-		/// </summary>
-		/// <throws>  IOException </throws>
+    
+    /// <summary> Class responsible for access to stored document fields.
+    /// <p/>
+    /// It uses &lt;segment&gt;.fdt and &lt;segment&gt;.fdx files.
+    /// 
+    /// </summary>
+    public sealed class FieldsReader : ICloneable, IDisposable
+    {
+        private readonly FieldInfos fieldInfos;
+        
+        // The main fieldStream, used only for cloning.
+        private readonly IndexInput cloneableFieldsStream;
+        
+        // This is a clone of cloneableFieldsStream used for reading documents.
+        // It should not be cloned outside of a synchronized context.
+        private readonly IndexInput fieldsStream;
+        
+        private readonly IndexInput cloneableIndexStream;
+        private readonly IndexInput indexStream;
+        private readonly int numTotalDocs;
+        private readonly int size;
+        private bool closed;
+        private readonly int format;
+        private readonly int formatSize;
+        
+        // The docID offset where our docs begin in the index
+        // file.  This will be 0 if we have our own private file.
+        private readonly int docStoreOffset;
+        
+        private readonly CloseableThreadLocal<IndexInput> fieldsStreamTL = new CloseableThreadLocal<IndexInput>();
+        private readonly bool isOriginal = false;
+        
+        /// <summary>Returns a cloned FieldsReader that shares open
+        /// IndexInputs with the original one.  It is the caller's
+        /// job not to close the original FieldsReader until all
+        /// clones are called (eg, currently SegmentReader manages
+        /// this logic). 
+        /// </summary>
+        public System.Object Clone()
+        {
+            EnsureOpen();
+            return new FieldsReader(fieldInfos, numTotalDocs, size, format, formatSize, docStoreOffset, cloneableFieldsStream, cloneableIndexStream);
+        }
+        
+        // Used only by clone
+        private FieldsReader(FieldInfos fieldInfos, int numTotalDocs, int size, int format, int formatSize, int docStoreOffset, IndexInput cloneableFieldsStream, IndexInput cloneableIndexStream)
+        {
+            this.fieldInfos = fieldInfos;
+            this.numTotalDocs = numTotalDocs;
+            this.size = size;
+            this.format = format;
+            this.formatSize = formatSize;
+            this.docStoreOffset = docStoreOffset;
+            this.cloneableFieldsStream = cloneableFieldsStream;
+            this.cloneableIndexStream = cloneableIndexStream;
+            fieldsStream = (IndexInput) cloneableFieldsStream.Clone();
+            indexStream = (IndexInput) cloneableIndexStream.Clone();
+        }
+        
+        public /*internal*/ FieldsReader(Directory d, String segment, FieldInfos fn):this(d, segment, fn, BufferedIndexInput.BUFFER_SIZE, - 1, 0)
+        {
+        }
+        
+        internal FieldsReader(Directory d, System.String segment, FieldInfos fn, int readBufferSize):this(d, segment, fn, readBufferSize, - 1, 0)
+        {
+        }
+        
+        internal FieldsReader(Directory d, System.String segment, FieldInfos fn, int readBufferSize, int docStoreOffset, int size)
+        {
+            bool success = false;
+            isOriginal = true;
+            try
+            {
+                fieldInfos = fn;
+                
+                cloneableFieldsStream = d.OpenInput(segment + "." + IndexFileNames.FIELDS_EXTENSION, readBufferSize);
+                cloneableIndexStream = d.OpenInput(segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION, readBufferSize);
+                
+                // First version of fdx did not include a format
+                // header, but, the first int will always be 0 in that
+                // case
+                int firstInt = cloneableIndexStream.ReadInt();
+                format = firstInt == 0 ? 0 : firstInt;
+                
+                if (format > FieldsWriter.FORMAT_CURRENT)
+                    throw new CorruptIndexException("Incompatible format version: " + format + " expected " + FieldsWriter.FORMAT_CURRENT + " or lower");
+                
+                formatSize = format > FieldsWriter.FORMAT ? 4 : 0;
+                
+                if (format < FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
+                    cloneableFieldsStream.SetModifiedUTF8StringsMode();
+                
+                fieldsStream = (IndexInput) cloneableFieldsStream.Clone();
+                
+                long indexSize = cloneableIndexStream.Length() - formatSize;
+                
+                if (docStoreOffset != - 1)
+                {
+                    // We read only a slice out of this shared fields file
+                    this.docStoreOffset = docStoreOffset;
+                    this.size = size;
+                    
+                    // Verify the file is long enough to hold all of our
+                    // docs
+                    System.Diagnostics.Debug.Assert(((int)(indexSize / 8)) >= size + this.docStoreOffset, "indexSize=" + indexSize + " size=" + size + " docStoreOffset=" + docStoreOffset);
+                }
+                else
+                {
+                    this.docStoreOffset = 0;
+                    this.size = (int) (indexSize >> 3);
+                }
+                
+                indexStream = (IndexInput) cloneableIndexStream.Clone();
+                numTotalDocs = (int) (indexSize >> 3);
+                success = true;
+            }
+            finally
+            {
+                // With lock-less commits, it's entirely possible (and
+                // fine) to hit a FileNotFound exception above. In
+                // this case, we want to explicitly close any subset
+                // of things that were opened so that we don't have to
+                // wait for a GC to do so.
+                if (!success)
+                {
+                    Dispose();
+                }
+            }
+        }
+        
+        /// <throws>  AlreadyClosedException if this FieldsReader is closed </throws>
+        internal void  EnsureOpen()
+        {
+            if (closed)
+            {
+                throw new AlreadyClosedException("this FieldsReader is closed");
+            }
+        }
+        
+        /// <summary> Closes the underlying <see cref="Lucene.Net.Store.IndexInput" /> streams, including any associated with a
+        /// lazy implementation of a Field.  This means that the Field values will not be accessible.
+        /// 
+        /// </summary>
+        /// <throws>  IOException </throws>
         public void Dispose()
         {
             // Move to protected method if class becomes unsealed
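
Clone above shares the already-open IndexInputs rather than reopening
files, which is why the original reader must outlive its clones. A
minimal sketch of the per-thread pattern the summary describes (original
and docId are assumed to be in scope):

    // Each thread works on its own clone; disposing the original while
    // clones are still in use would invalidate the shared streams.
    FieldsReader threadLocalReader = (FieldsReader) original.Clone();
    Document doc = threadLocalReader.Doc(docId, null); // null selector loads all fields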
@@ -200,166 +200,166 @@ namespace Lucene.Net.Index
                 closed = true;
             }
         }
-		
-		public /*internal*/ int Size()
-		{
-			return size;
-		}
-		
-		private void  SeekIndex(int docID)
-		{
-			indexStream.Seek(formatSize + (docID + docStoreOffset) * 8L);
-		}
-		
-		internal bool CanReadRawDocs()
+        
+        public /*internal*/ int Size()
+        {
+            return size;
+        }
+        
+        private void  SeekIndex(int docID)
+        {
+            indexStream.Seek(formatSize + (docID + docStoreOffset) * 8L);
+        }
+        
+        internal bool CanReadRawDocs()
         {
             // Disable reading raw docs in 2.x format, because of the removal of compressed
             // fields in 3.0. We don't want rawDocs() to decode field bits to figure out
             // if a field was compressed, hence we enforce ordinary (non-raw) stored field merges
             // for <3.0 indexes.
-			return format >= FieldsWriter.FORMAT_LUCENE_3_0_NO_COMPRESSED_FIELDS;
-		}
-		
-		public /*internal*/ Document Doc(int n, FieldSelector fieldSelector)
-		{
-			SeekIndex(n);
-			long position = indexStream.ReadLong();
-			fieldsStream.Seek(position);
-			
-			var doc = new Document();
-			int numFields = fieldsStream.ReadVInt();
-			for (int i = 0; i < numFields; i++)
-			{
-				int fieldNumber = fieldsStream.ReadVInt();
-				FieldInfo fi = fieldInfos.FieldInfo(fieldNumber);
-				FieldSelectorResult acceptField = fieldSelector == null?FieldSelectorResult.LOAD:fieldSelector.Accept(fi.name);
-				
-				byte bits = fieldsStream.ReadByte();
-				System.Diagnostics.Debug.Assert(bits <= FieldsWriter.FIELD_IS_COMPRESSED + FieldsWriter.FIELD_IS_TOKENIZED + FieldsWriter.FIELD_IS_BINARY);
-				
-				bool compressed = (bits & FieldsWriter.FIELD_IS_COMPRESSED) != 0;
-			    System.Diagnostics.Debug.Assert(
-			        (!compressed || (format < FieldsWriter.FORMAT_LUCENE_3_0_NO_COMPRESSED_FIELDS)),
-			        "compressed fields are only allowed in indexes of version <= 2.9");
-				bool tokenize = (bits & FieldsWriter.FIELD_IS_TOKENIZED) != 0;
-				bool binary = (bits & FieldsWriter.FIELD_IS_BINARY) != 0;
-				//TODO: Find an alternative approach here if this list continues to grow beyond the
-				//list of 5 or 6 currently here.  See Lucene 762 for discussion
-				if (acceptField.Equals(FieldSelectorResult.LOAD))
-				{
-					AddField(doc, fi, binary, compressed, tokenize);
-				}
-				else if (acceptField.Equals(FieldSelectorResult.LOAD_AND_BREAK))
-				{
-					AddField(doc, fi, binary, compressed, tokenize);
-					break; //Get out of this loop
-				}
-				else if (acceptField.Equals(FieldSelectorResult.LAZY_LOAD))
-				{
-					AddFieldLazy(doc, fi, binary, compressed, tokenize);
-				}
-				else if (acceptField.Equals(FieldSelectorResult.SIZE))
-				{
-					SkipField(binary, compressed, AddFieldSize(doc, fi, binary, compressed));
-				}
-				else if (acceptField.Equals(FieldSelectorResult.SIZE_AND_BREAK))
-				{
-					AddFieldSize(doc, fi, binary, compressed);
-					break;
-				}
-				else
-				{
-					SkipField(binary, compressed);
-				}
-			}
-			
-			return doc;
-		}
-		
-		/// <summary>Returns the length in bytes of each raw document in a
-		/// contiguous range of length numDocs starting with
-		/// startDocID.  Returns the IndexInput (the fieldStream),
-		/// already seeked to the starting point for startDocID.
-		/// </summary>
-		internal IndexInput RawDocs(int[] lengths, int startDocID, int numDocs)
-		{
-			SeekIndex(startDocID);
-			long startOffset = indexStream.ReadLong();
-			long lastOffset = startOffset;
-			int count = 0;
-			while (count < numDocs)
-			{
-				long offset;
-				int docID = docStoreOffset + startDocID + count + 1;
-				System.Diagnostics.Debug.Assert(docID <= numTotalDocs);
-				if (docID < numTotalDocs)
-					offset = indexStream.ReadLong();
-				else
-					offset = fieldsStream.Length();
-				lengths[count++] = (int) (offset - lastOffset);
-				lastOffset = offset;
-			}
-			
-			fieldsStream.Seek(startOffset);
-			
-			return fieldsStream;
-		}
-		
-		/// <summary> Skip the field.  We still have to read some of the information about the field, but can skip past the actual content.
-		/// This will have the most payoff on large fields.
-		/// </summary>
-		private void  SkipField(bool binary, bool compressed)
-		{
-			SkipField(binary, compressed, fieldsStream.ReadVInt());
-		}
-		
-		private void  SkipField(bool binary, bool compressed, int toRead)
-		{
-			if (format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES || binary || compressed)
-			{
-				fieldsStream.Seek(fieldsStream.FilePointer + toRead);
-			}
-			else
-			{
-				// We need to skip chars.  This will slow us down, but still better
-				fieldsStream.SkipChars(toRead);
-			}
-		}
-		
-		private void  AddFieldLazy(Document doc, FieldInfo fi, bool binary, bool compressed, bool tokenize)
-		{
-			if (binary)
-			{
-				int toRead = fieldsStream.ReadVInt();
-				long pointer = fieldsStream.FilePointer;
-				//was: doc.add(new Fieldable(fi.name, b, Fieldable.Store.YES));
-				doc.Add(new LazyField(this, fi.name, Field.Store.YES, toRead, pointer, binary, compressed));
+            return format >= FieldsWriter.FORMAT_LUCENE_3_0_NO_COMPRESSED_FIELDS;
+        }
+        
+        public /*internal*/ Document Doc(int n, FieldSelector fieldSelector)
+        {
+            SeekIndex(n);
+            long position = indexStream.ReadLong();
+            fieldsStream.Seek(position);
+            
+            var doc = new Document();
+            int numFields = fieldsStream.ReadVInt();
+            for (int i = 0; i < numFields; i++)
+            {
+                int fieldNumber = fieldsStream.ReadVInt();
+                FieldInfo fi = fieldInfos.FieldInfo(fieldNumber);
+                FieldSelectorResult acceptField = fieldSelector == null?FieldSelectorResult.LOAD:fieldSelector.Accept(fi.name);
+                
+                byte bits = fieldsStream.ReadByte();
+                System.Diagnostics.Debug.Assert(bits <= FieldsWriter.FIELD_IS_COMPRESSED + FieldsWriter.FIELD_IS_TOKENIZED + FieldsWriter.FIELD_IS_BINARY);
+                
+                bool compressed = (bits & FieldsWriter.FIELD_IS_COMPRESSED) != 0;
+                System.Diagnostics.Debug.Assert(
+                    (!compressed || (format < FieldsWriter.FORMAT_LUCENE_3_0_NO_COMPRESSED_FIELDS)),
+                    "compressed fields are only allowed in indexes of version <= 2.9");
+                bool tokenize = (bits & FieldsWriter.FIELD_IS_TOKENIZED) != 0;
+                bool binary = (bits & FieldsWriter.FIELD_IS_BINARY) != 0;
+                //TODO: Find an alternative approach here if this list continues to grow beyond the
+                //list of 5 or 6 currently here.  See Lucene 762 for discussion
+                if (acceptField.Equals(FieldSelectorResult.LOAD))
+                {
+                    AddField(doc, fi, binary, compressed, tokenize);
+                }
+                else if (acceptField.Equals(FieldSelectorResult.LOAD_AND_BREAK))
+                {
+                    AddField(doc, fi, binary, compressed, tokenize);
+                    break; //Get out of this loop
+                }
+                else if (acceptField.Equals(FieldSelectorResult.LAZY_LOAD))
+                {
+                    AddFieldLazy(doc, fi, binary, compressed, tokenize);
+                }
+                else if (acceptField.Equals(FieldSelectorResult.SIZE))
+                {
+                    SkipField(binary, compressed, AddFieldSize(doc, fi, binary, compressed));
+                }
+                else if (acceptField.Equals(FieldSelectorResult.SIZE_AND_BREAK))
+                {
+                    AddFieldSize(doc, fi, binary, compressed);
+                    break;
+                }
+                else
+                {
+                    SkipField(binary, compressed);
+                }
+            }
+            
+            return doc;
+        }
+        
+        /// <summary>Returns the length in bytes of each raw document in a
+        /// contiguous range of length numDocs starting with
+        /// startDocID.  Returns the IndexInput (the fieldStream),
+        /// already seeked to the starting point for startDocID.
+        /// </summary>
+        internal IndexInput RawDocs(int[] lengths, int startDocID, int numDocs)
+        {
+            SeekIndex(startDocID);
+            long startOffset = indexStream.ReadLong();
+            long lastOffset = startOffset;
+            int count = 0;
+            while (count < numDocs)
+            {
+                long offset;
+                int docID = docStoreOffset + startDocID + count + 1;
+                System.Diagnostics.Debug.Assert(docID <= numTotalDocs);
+                if (docID < numTotalDocs)
+                    offset = indexStream.ReadLong();
+                else
+                    offset = fieldsStream.Length();
+                lengths[count++] = (int) (offset - lastOffset);
+                lastOffset = offset;
+            }
+            
+            fieldsStream.Seek(startOffset);
+            
+            return fieldsStream;
+        }
+        
+        /// <summary> Skip the field.  We still have to read some of the information about the field, but can skip past the actual content.
+        /// This will have the most payoff on large fields.
+        /// </summary>
+        private void  SkipField(bool binary, bool compressed)
+        {
+            SkipField(binary, compressed, fieldsStream.ReadVInt());
+        }
+        
+        private void  SkipField(bool binary, bool compressed, int toRead)
+        {
+            if (format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES || binary || compressed)
+            {
+                fieldsStream.Seek(fieldsStream.FilePointer + toRead);
+            }
+            else
+            {
+                // We need to skip chars.  This will slow us down, but still better
+                fieldsStream.SkipChars(toRead);
+            }
+        }
+        
+        private void  AddFieldLazy(Document doc, FieldInfo fi, bool binary, bool compressed, bool tokenize)
+        {
+            if (binary)
+            {
+                int toRead = fieldsStream.ReadVInt();
+                long pointer = fieldsStream.FilePointer;
+                //was: doc.add(new Fieldable(fi.name, b, Fieldable.Store.YES));
+                doc.Add(new LazyField(this, fi.name, Field.Store.YES, toRead, pointer, binary, compressed));
 
-				//Need to move the pointer ahead by toRead positions
-				fieldsStream.Seek(pointer + toRead);
-			}
-			else
-			{
-				const Field.Store store = Field.Store.YES;
-				Field.Index index = FieldExtensions.ToIndex(fi.isIndexed, tokenize);
-				Field.TermVector termVector = FieldExtensions.ToTermVector(fi.storeTermVector, fi.storeOffsetWithTermVector, fi.storePositionWithTermVector);
-				
-				AbstractField f;
-				if (compressed)
-				{
-					int toRead = fieldsStream.ReadVInt();
-					long pointer = fieldsStream.FilePointer;
-					f = new LazyField(this, fi.name, store, toRead, pointer, binary, compressed);
-					//skip over the part that we aren't loading
-					fieldsStream.Seek(pointer + toRead);
-					f.OmitNorms = fi.omitNorms;
-					f.OmitTermFreqAndPositions = fi.omitTermFreqAndPositions;
-				}
-				else
-				{
-					int length = fieldsStream.ReadVInt();
-					long pointer = fieldsStream.FilePointer;
-					//Skip ahead of where we are by the length of what is stored
+                //Need to move the pointer ahead by toRead positions
+                fieldsStream.Seek(pointer + toRead);
+            }
+            else
+            {
+                const Field.Store store = Field.Store.YES;
+                Field.Index index = FieldExtensions.ToIndex(fi.isIndexed, tokenize);
+                Field.TermVector termVector = FieldExtensions.ToTermVector(fi.storeTermVector, fi.storeOffsetWithTermVector, fi.storePositionWithTermVector);
+                
+                AbstractField f;
+                if (compressed)
+                {
+                    int toRead = fieldsStream.ReadVInt();
+                    long pointer = fieldsStream.FilePointer;
+                    f = new LazyField(this, fi.name, store, toRead, pointer, binary, compressed);
+                    //skip over the part that we aren't loading
+                    fieldsStream.Seek(pointer + toRead);
+                    f.OmitNorms = fi.omitNorms;
+                    f.OmitTermFreqAndPositions = fi.omitTermFreqAndPositions;
+                }
+                else
+                {
+                    int length = fieldsStream.ReadVInt();
+                    long pointer = fieldsStream.FilePointer;
+                    //Skip ahead of where we are by the length of what is stored
                     if (format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
                     {
                         fieldsStream.Seek(pointer + length);
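
The Doc method above dispatches on FieldSelectorResult: LOAD and
LOAD_AND_BREAK read the field eagerly, LAZY_LOAD defers it, SIZE and
SIZE_AND_BREAK record only its size, and anything else is skipped. A
hedged sketch of a selector that loads just an "id" field and stops,
assuming FieldSelector exposes a single Accept(fieldName) method and
FieldSelectorResult includes a NO_LOAD member (neither is shown in this
diff):

    // Eagerly load only the "id" field, then stop reading the document.
    class IdOnlySelector : FieldSelector
    {
        public FieldSelectorResult Accept(string fieldName)
        {
            return fieldName == "id"
                ? FieldSelectorResult.LOAD_AND_BREAK
                : FieldSelectorResult.NO_LOAD;
        }
    }

    // Usage: Document doc = reader.Doc(n, new IdOnlySelector());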
@@ -368,274 +368,274 @@ namespace Lucene.Net.Index
                     {
                         fieldsStream.SkipChars(length);
                     }
-					f = new LazyField(this, fi.name, store, index, termVector, length, pointer, binary, compressed)
-					    	{OmitNorms = fi.omitNorms, OmitTermFreqAndPositions = fi.omitTermFreqAndPositions};
-				}
+                    f = new LazyField(this, fi.name, store, index, termVector, length, pointer, binary, compressed)
+                            {OmitNorms = fi.omitNorms, OmitTermFreqAndPositions = fi.omitTermFreqAndPositions};
+                }
 
-				doc.Add(f);
-			}
-		}
+                doc.Add(f);
+            }
+        }
 
-		private void AddField(Document doc, FieldInfo fi, bool binary, bool compressed, bool tokenize)
-		{
-			//we have a binary stored field, and it may be compressed
-			if (binary)
-			{
-				int toRead = fieldsStream.ReadVInt();
-				var b = new byte[toRead];
-				fieldsStream.ReadBytes(b, 0, b.Length);
-				doc.Add(compressed ? new Field(fi.name, Uncompress(b), Field.Store.YES) : new Field(fi.name, b, Field.Store.YES));
-			}
-			else
-			{
-				const Field.Store store = Field.Store.YES;
-				Field.Index index = FieldExtensions.ToIndex(fi.isIndexed, tokenize);
-				Field.TermVector termVector = FieldExtensions.ToTermVector(fi.storeTermVector, fi.storeOffsetWithTermVector, fi.storePositionWithTermVector);
-				
-				AbstractField f;
-				if (compressed)
-				{
-					int toRead = fieldsStream.ReadVInt();
-					
-					var b = new byte[toRead];
-					fieldsStream.ReadBytes(b, 0, b.Length);
-					f = new Field(fi.name, false, System.Text.Encoding.GetEncoding("UTF-8").GetString(Uncompress(b)), store, index,
-					              termVector) {OmitTermFreqAndPositions = fi.omitTermFreqAndPositions, OmitNorms = fi.omitNorms};
-				}
-				else
-				{
-					f = new Field(fi.name, false, fieldsStream.ReadString(), store, index, termVector)
-					    	{OmitTermFreqAndPositions = fi.omitTermFreqAndPositions, OmitNorms = fi.omitNorms};
-				}
+        private void AddField(Document doc, FieldInfo fi, bool binary, bool compressed, bool tokenize)
+        {
+            //we have a binary stored field, and it may be compressed
+            if (binary)
+            {
+                int toRead = fieldsStream.ReadVInt();
+                var b = new byte[toRead];
+                fieldsStream.ReadBytes(b, 0, b.Length);
+                doc.Add(compressed ? new Field(fi.name, Uncompress(b), Field.Store.YES) : new Field(fi.name, b, Field.Store.YES));
+            }
+            else
+            {
+                const Field.Store store = Field.Store.YES;
+                Field.Index index = FieldExtensions.ToIndex(fi.isIndexed, tokenize);
+                Field.TermVector termVector = FieldExtensions.ToTermVector(fi.storeTermVector, fi.storeOffsetWithTermVector, fi.storePositionWithTermVector);
+                
+                AbstractField f;
+                if (compressed)
+                {
+                    int toRead = fieldsStream.ReadVInt();
+                    
+                    var b = new byte[toRead];
+                    fieldsStream.ReadBytes(b, 0, b.Length);
+                    f = new Field(fi.name, false, System.Text.Encoding.GetEncoding("UTF-8").GetString(Uncompress(b)), store, index,
+                                  termVector) {OmitTermFreqAndPositions = fi.omitTermFreqAndPositions, OmitNorms = fi.omitNorms};
+                }
+                else
+                {
+                    f = new Field(fi.name, false, fieldsStream.ReadString(), store, index, termVector)
+                            {OmitTermFreqAndPositions = fi.omitTermFreqAndPositions, OmitNorms = fi.omitNorms};
+                }
 
-				doc.Add(f);
-			}
-		}
-		
-		// Add the size of field as a byte[] containing the 4 bytes of the integer byte size (high order byte first; char = 2 bytes)
-		// Read just the size -- caller must skip the field content to continue reading fields
-		// Return the size in bytes or chars, depending on field type
-		private int AddFieldSize(Document doc, FieldInfo fi, bool binary, bool compressed)
-		{
-			int size = fieldsStream.ReadVInt(), bytesize = binary || compressed?size:2 * size;
-			var sizebytes = new byte[4];
-			sizebytes[0] = (byte) (Number.URShift(bytesize, 24));
-			sizebytes[1] = (byte) (Number.URShift(bytesize, 16));
-			sizebytes[2] = (byte) (Number.URShift(bytesize, 8));
-			sizebytes[3] = (byte) bytesize;
-			doc.Add(new Field(fi.name, sizebytes, Field.Store.YES));
-			return size;
-		}
-		
-		/// <summary> A Lazy implementation of Fieldable that differs loading of fields until asked for, instead of when the Document is
-		/// loaded.
-		/// </summary>
-		[Serializable]
-		private sealed class LazyField : AbstractField
-		{
-			private void  InitBlock(FieldsReader enclosingInstance)
-			{
-				this.Enclosing_Instance = enclosingInstance;
-			}
+                doc.Add(f);
+            }
+        }
+        
+        // Add the size of the field as a byte[] containing the 4 bytes of the integer byte size (high order byte first; char = 2 bytes)
+        // Read just the size -- caller must skip the field content to continue reading fields
+        // Return the size in bytes or chars, depending on field type
+        private int AddFieldSize(Document doc, FieldInfo fi, bool binary, bool compressed)
+        {
+            int size = fieldsStream.ReadVInt(), bytesize = binary || compressed?size:2 * size;
+            var sizebytes = new byte[4];
+            sizebytes[0] = (byte) (Number.URShift(bytesize, 24));
+            sizebytes[1] = (byte) (Number.URShift(bytesize, 16));
+            sizebytes[2] = (byte) (Number.URShift(bytesize, 8));
+            sizebytes[3] = (byte) bytesize;
+            doc.Add(new Field(fi.name, sizebytes, Field.Store.YES));
+            return size;
+        }
+        
+        /// <summary> A Lazy implementation of Fieldable that defers loading of fields until asked for, instead of when the Document is
+        /// loaded.
+        /// </summary>
+        [Serializable]
+        private sealed class LazyField : AbstractField
+        {
+            private void  InitBlock(FieldsReader enclosingInstance)
+            {
+                this.Enclosing_Instance = enclosingInstance;
+            }
 
-			private FieldsReader Enclosing_Instance { get; set; }
+            private FieldsReader Enclosing_Instance { get; set; }
 
-			private int toRead;
-			private long pointer;
+            private int toRead;
+            private long pointer;
             [Obsolete("Only kept for backward-compatbility with <3.0 indexes. Will be removed in 4.0.")]
-		    private readonly Boolean isCompressed;
-			
-			public LazyField(FieldsReader enclosingInstance, System.String name, Field.Store store, int toRead, long pointer, bool isBinary, bool isCompressed):base(name, store, Field.Index.NO, Field.TermVector.NO)
-			{
-				InitBlock(enclosingInstance);
-				this.toRead = toRead;
-				this.pointer = pointer;
-				this.internalIsBinary = isBinary;
-				if (isBinary)
-					internalBinaryLength = toRead;
-				lazy = true;
-			    this.isCompressed = isCompressed;
-			}
-			
-			public LazyField(FieldsReader enclosingInstance, System.String name, Field.Store store, Field.Index index, Field.TermVector termVector, int toRead, long pointer, bool isBinary, bool isCompressed):base(name, store, index, termVector)
-			{
-				InitBlock(enclosingInstance);
-				this.toRead = toRead;
-				this.pointer = pointer;
-				this.internalIsBinary = isBinary;
-				if (isBinary)
-					internalBinaryLength = toRead;
-				lazy = true;
-			    this.isCompressed = isCompressed;
-			}
-			
-			private IndexInput GetFieldStream()
-			{
-				IndexInput localFieldsStream = Enclosing_Instance.fieldsStreamTL.Get();
-				if (localFieldsStream == null)
-				{
-					localFieldsStream = (IndexInput) Enclosing_Instance.cloneableFieldsStream.Clone();
-					Enclosing_Instance.fieldsStreamTL.Set(localFieldsStream);
-				}
-				return localFieldsStream;
-			}
+            private readonly Boolean isCompressed;
+            
+            public LazyField(FieldsReader enclosingInstance, System.String name, Field.Store store, int toRead, long pointer, bool isBinary, bool isCompressed):base(name, store, Field.Index.NO, Field.TermVector.NO)
+            {
+                InitBlock(enclosingInstance);
+                this.toRead = toRead;
+                this.pointer = pointer;
+                this.internalIsBinary = isBinary;
+                if (isBinary)
+                    internalBinaryLength = toRead;
+                lazy = true;
+                this.isCompressed = isCompressed;
+            }
+            
+            public LazyField(FieldsReader enclosingInstance, System.String name, Field.Store store, Field.Index index, Field.TermVector termVector, int toRead, long pointer, bool isBinary, bool isCompressed):base(name, store, index, termVector)
+            {
+                InitBlock(enclosingInstance);
+                this.toRead = toRead;
+                this.pointer = pointer;
+                this.internalIsBinary = isBinary;
+                if (isBinary)
+                    internalBinaryLength = toRead;
+                lazy = true;
+                this.isCompressed = isCompressed;
+            }
+            
+            private IndexInput GetFieldStream()
+            {
+                IndexInput localFieldsStream = Enclosing_Instance.fieldsStreamTL.Get();
+                if (localFieldsStream == null)
+                {
+                    localFieldsStream = (IndexInput) Enclosing_Instance.cloneableFieldsStream.Clone();
+                    Enclosing_Instance.fieldsStreamTL.Set(localFieldsStream);
+                }
+                return localFieldsStream;
+            }
 
-		    /// <summary>The value of the field as a Reader, or null.  If null, the String value,
-		    /// binary value, or TokenStream value is used.  Exactly one of StringValue(), 
-		    /// ReaderValue(), GetBinaryValue(), and TokenStreamValue() must be set. 
-		    /// </summary>
-		    public override TextReader ReaderValue
-		    {
-		        get
-		        {
-		            Enclosing_Instance.EnsureOpen();
-		            return null;
-		        }
-		    }
+            /// <summary>The value of the field as a Reader, or null.  If null, the String value,
+            /// binary value, or TokenStream value is used.  Exactly one of StringValue(), 
+            /// ReaderValue(), GetBinaryValue(), and TokenStreamValue() must be set. 
+            /// </summary>
+            public override TextReader ReaderValue
+            {
+                get
+                {
+                    Enclosing_Instance.EnsureOpen();
+                    return null;
+                }
+            }
 
-		    /// <summary>The value of the field as a TokenStream, or null.  If null, the Reader value,
-		    /// String value, or binary value is used. Exactly one of StringValue(), 
-		    /// ReaderValue(), GetBinaryValue(), and TokenStreamValue() must be set. 
-		    /// </summary>
-		    public override TokenStream TokenStreamValue
-		    {
-		        get
-		        {
-		            Enclosing_Instance.EnsureOpen();
-		            return null;
-		        }
-		    }
+            /// <summary>The value of the field as a TokenStream, or null.  If null, the Reader value,
+            /// String value, or binary value is used. Exactly one of StringValue(), 
+            /// ReaderValue(), GetBinaryValue(), and TokenStreamValue() must be set. 
+            /// </summary>
+            public override TokenStream TokenStreamValue
+            {
+                get
+                {
+                    Enclosing_Instance.EnsureOpen();
+                    return null;
+                }
+            }
 
-		    /// <summary>The value of the field as a String, or null.  If null, the Reader value,
-		    /// binary value, or TokenStream value is used.  Exactly one of StringValue(), 
-		    /// ReaderValue(), GetBinaryValue(), and TokenStreamValue() must be set. 
-		    /// </summary>
-		    public override string StringValue
-		    {
-		        get
-		        {
-		            Enclosing_Instance.EnsureOpen();
-		            if (internalIsBinary)
-		                return null;
+            /// <summary>The value of the field as a String, or null.  If null, the Reader value,
+            /// binary value, or TokenStream value is used.  Exactly one of StringValue(), 
+            /// ReaderValue(), GetBinaryValue(), and TokenStreamValue() must be set. 
+            /// </summary>
+            public override string StringValue
+            {
+                get
+                {
+                    Enclosing_Instance.EnsureOpen();
+                    if (internalIsBinary)
+                        return null;
 
-		        	if (fieldsData == null)
-		        	{
-		        		IndexInput localFieldsStream = GetFieldStream();
-		        		try
-		        		{
-		        			localFieldsStream.Seek(pointer);
-		        			if (isCompressed)
-		        			{
-		        				var b = new byte[toRead];
-		        				localFieldsStream.ReadBytes(b, 0, b.Length);
-		        				fieldsData =
-		        					System.Text.Encoding.GetEncoding("UTF-8").GetString(Enclosing_Instance.Uncompress(b));
-		        			}
-		        			else
-		        			{
-		        				if (Enclosing_Instance.format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
-		        				{
-		        					var bytes = new byte[toRead];
-		        					localFieldsStream.ReadBytes(bytes, 0, toRead);
-		        					fieldsData = System.Text.Encoding.GetEncoding("UTF-8").GetString(bytes);
-		        				}
-		        				else
-		        				{
-		        					//read in chars b/c we already know the length we need to read
-		        					var chars = new char[toRead];
-		        					localFieldsStream.ReadChars(chars, 0, toRead);
-		        					fieldsData = new System.String(chars);
-		        				}
-		        			}
-		        		}
-		        		catch (System.IO.IOException e)
-		        		{
-		        			throw new FieldReaderException(e);
-		        		}
-		        	}
-		        	return (System.String) fieldsData;
-		        }
-		    }
+                    if (fieldsData == null)
+                    {
+                        IndexInput localFieldsStream = GetFieldStream();
+                        try
+                        {
+                            localFieldsStream.Seek(pointer);
+                            if (isCompressed)
+                            {
+                                var b = new byte[toRead];
+                                localFieldsStream.ReadBytes(b, 0, b.Length);
+                                fieldsData =
+                                    System.Text.Encoding.GetEncoding("UTF-8").GetString(Enclosing_Instance.Uncompress(b));
+                            }
+                            else
+                            {
+                                if (Enclosing_Instance.format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
+                                {
+                                    var bytes = new byte[toRead];
+                                    localFieldsStream.ReadBytes(bytes, 0, toRead);
+                                    fieldsData = System.Text.Encoding.GetEncoding("UTF-8").GetString(bytes);
+                                }
+                                else
+                                {
+                                    //read in chars b/c we already know the length we need to read
+                                    var chars = new char[toRead];
+                                    localFieldsStream.ReadChars(chars, 0, toRead);
+                                    fieldsData = new System.String(chars);
+                                }
+                            }
+                        }
+                        catch (System.IO.IOException e)
+                        {
+                            throw new FieldReaderException(e);
+                        }
+                    }
+                    return (System.String) fieldsData;
+                }
+            }
 
-		    public long Pointer
-		    {
-		        get
-		        {
-		            Enclosing_Instance.EnsureOpen();
-		            return pointer;
-		        }
-		        set
-		        {
-		            Enclosing_Instance.EnsureOpen();
-		            this.pointer = value;
-		        }
-		    }
+            public long Pointer
+            {
+                get
+                {
+                    Enclosing_Instance.EnsureOpen();
+                    return pointer;
+                }
+                set
+                {
+                    Enclosing_Instance.EnsureOpen();
+                    this.pointer = value;
+                }
+            }
 
-		    public int ToRead
-		    {
-		        get
-		        {
-		            Enclosing_Instance.EnsureOpen();
-		            return toRead;
-		        }
-		        set
-		        {
-		            Enclosing_Instance.EnsureOpen();
-		            this.toRead = value;
-		        }
-		    }
+            public int ToRead
+            {
+                get
+                {
+                    Enclosing_Instance.EnsureOpen();
+                    return toRead;
+                }
+                set
+                {
+                    Enclosing_Instance.EnsureOpen();
+                    this.toRead = value;
+                }
+            }
 
-		    public override byte[] GetBinaryValue(byte[] result)
-			{
-				Enclosing_Instance.EnsureOpen();
-				
-				if (internalIsBinary)
-				{
-					if (fieldsData == null)
-					{
-						// Allocate new buffer if result is null or too small
-						byte[] b;
-						if (result == null || result.Length < toRead)
-							b = new byte[toRead];
-						else
-							b = result;
-						
-						IndexInput localFieldsStream = GetFieldStream();
-						
-						// Throw this IOException since IndexReader.document does so anyway, so probably not that big of a change for people
-						// since they are already handling this exception when getting the document
-						try
-						{
-							localFieldsStream.Seek(pointer);
-							localFieldsStream.ReadBytes(b, 0, toRead);
-							fieldsData = isCompressed ? Enclosing_Instance.Uncompress(b) : b;
-						}
-						catch (IOException e)
-						{
-							throw new FieldReaderException(e);
-						}
-						
-						internalbinaryOffset = 0;
-						internalBinaryLength = toRead;
-					}
-					
-					return (byte[]) fieldsData;
-				}
-		    	return null;
-			}
-		}
-		
-		private byte[] Uncompress(byte[] b)
-		{
-			try
-			{
-				return CompressionTools.Decompress(b);
-			}
-			catch (Exception e)
-			{
-				// this will happen if the field is not compressed
-				throw new CorruptIndexException("field data are in wrong format: " + e, e);
-			}
-		}
-	}
+            public override byte[] GetBinaryValue(byte[] result)
+            {
+                Enclosing_Instance.EnsureOpen();
+                
+                if (internalIsBinary)
+                {
+                    if (fieldsData == null)
+                    {
+                        // Allocate new buffer if result is null or too small
+                        byte[] b;
+                        if (result == null || result.Length < toRead)
+                            b = new byte[toRead];
+                        else
+                            b = result;
+                        
+                        IndexInput localFieldsStream = GetFieldStream();
+                        
+                        // Throw this IOException since IndexReader.document does so anyway, so probably not that big of a change for people
+                        // since they are already handling this exception when getting the document
+                        try
+                        {
+                            localFieldsStream.Seek(pointer);
+                            localFieldsStream.ReadBytes(b, 0, toRead);
+                            fieldsData = isCompressed ? Enclosing_Instance.Uncompress(b) : b;
+                        }
+                        catch (IOException e)
+                        {
+                            throw new FieldReaderException(e);
+                        }
+                        
+                        internalbinaryOffset = 0;
+                        internalBinaryLength = toRead;
+                    }
+                    
+                    return (byte[]) fieldsData;
+                }
+                return null;
+            }
+        }
+        
+        private byte[] Uncompress(byte[] b)
+        {
+            try
+            {
+                return CompressionTools.Decompress(b);
+            }
+            catch (Exception e)
+            {
+                // this will happen if the field is not compressed
+                throw new CorruptIndexException("field data are in the wrong format: " + e, e);
+            }
+        }
+    }
 }
\ No newline at end of file
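
Aside from the whitespace conversion, the LazyField code above illustrates the lazy stored-field pattern: the constructor records only a stream pointer and a byte count, and each accessor seeks a per-thread clone of the fields stream, decodes on first use, and caches the result in fieldsData. Below is a minimal standalone sketch of that pattern; LazyStoredValue and its members are hypothetical names, not part of the Lucene.Net API, and it decodes plain UTF-8 the way the FORMAT_VERSION_UTF8_LENGTH_IN_BYTES branch does.

    using System.IO;
    using System.Text;

    // Hypothetical sketch of FieldsReader.LazyField's lazy-load pattern:
    // remember where the value lives, decode it on first access, cache it.
    internal sealed class LazyStoredValue
    {
        private readonly Stream stream; // seekable stream holding stored field data
        private readonly long pointer;  // offset of this value within the stream
        private readonly int toRead;    // number of bytes the value occupies
        private string cached;          // decoded value, populated on first access

        public LazyStoredValue(Stream stream, long pointer, int toRead)
        {
            this.stream = stream;
            this.pointer = pointer;
            this.toRead = toRead;
        }

        public string StringValue
        {
            get
            {
                if (cached == null)
                {
                    var bytes = new byte[toRead];
                    stream.Seek(pointer, SeekOrigin.Begin);
                    int read = 0;
                    while (read < toRead) // Stream.Read may return short counts
                    {
                        int n = stream.Read(bytes, read, toRead - read);
                        if (n == 0)
                            throw new EndOfStreamException("stored field truncated");
                        read += n;
                    }
                    cached = Encoding.UTF8.GetString(bytes);
                }
                return cached;
            }
        }
    }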

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FieldsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FieldsWriter.cs b/src/core/Index/FieldsWriter.cs
index 9244195..d34a662 100644
--- a/src/core/Index/FieldsWriter.cs
+++ b/src/core/Index/FieldsWriter.cs
@@ -26,265 +26,265 @@ using RAMOutputStream = Lucene.Net.Store.RAMOutputStream;
 
 namespace Lucene.Net.Index
 {
-	
-	sealed class FieldsWriter : IDisposable
-	{
-		internal const byte FIELD_IS_TOKENIZED = (0x1);
-		internal const byte FIELD_IS_BINARY = (0x2);
+    
+    sealed class FieldsWriter : IDisposable
+    {
+        internal const byte FIELD_IS_TOKENIZED = (0x1);
+        internal const byte FIELD_IS_BINARY = (0x2);
         [Obsolete("Kept for backwards-compatibility with <3.0 indexes; will be removed in 4.0")]
-		internal const byte FIELD_IS_COMPRESSED = (0x4);
-		
-		// Original format
-		internal const int FORMAT = 0;
-		
-		// Changed strings to UTF8
-		internal const int FORMAT_VERSION_UTF8_LENGTH_IN_BYTES = 1;
+        internal const byte FIELD_IS_COMPRESSED = (0x4);
+        
+        // Original format
+        internal const int FORMAT = 0;
+        
+        // Changed strings to UTF8
+        internal const int FORMAT_VERSION_UTF8_LENGTH_IN_BYTES = 1;
                  
         // Lucene 3.0: Removal of compressed fields
         internal static int FORMAT_LUCENE_3_0_NO_COMPRESSED_FIELDS = 2;
-		
-		// NOTE: if you introduce a new format, make it 1 higher
-		// than the current one, and always change this if you
-		// switch to a new format!
+        
+        // NOTE: if you introduce a new format, make it 1 higher
+        // than the current one, and always change this if you
+        // switch to a new format!
         internal static readonly int FORMAT_CURRENT = FORMAT_LUCENE_3_0_NO_COMPRESSED_FIELDS;
-		
-		private readonly FieldInfos fieldInfos;
-		
-		private IndexOutput fieldsStream;
-		
-		private IndexOutput indexStream;
-		
-		private readonly bool doClose;
-		
-		internal FieldsWriter(Directory d, System.String segment, FieldInfos fn)
-		{
-			fieldInfos = fn;
-			
-			bool success = false;
-			String fieldsName = segment + "." + IndexFileNames.FIELDS_EXTENSION;
-			try
-			{
-				fieldsStream = d.CreateOutput(fieldsName);
-				fieldsStream.WriteInt(FORMAT_CURRENT);
-				success = true;
-			}
-			finally
-			{
-				if (!success)
-				{
-					try
-					{
-						Dispose();
-					}
-					catch (System.Exception)
-					{
-						// Suppress so we keep throwing the original exception
-					}
-					try
-					{
-						d.DeleteFile(fieldsName);
-					}
-					catch (System.Exception)
-					{
-						// Suppress so we keep throwing the original exception
-					}
-				}
-			}
-			
-			success = false;
-			String indexName = segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION;
-			try
-			{
-				indexStream = d.CreateOutput(indexName);
-				indexStream.WriteInt(FORMAT_CURRENT);
-				success = true;
-			}
-			finally
-			{
-				if (!success)
-				{
-					try
-					{
-						Dispose();
-					}
-					catch (System.IO.IOException)
-					{
-					}
-					try
-					{
-						d.DeleteFile(fieldsName);
-					}
-					catch (System.Exception)
-					{
-						// Suppress so we keep throwing the original exception
-					}
-					try
-					{
-						d.DeleteFile(indexName);
-					}
-					catch (System.Exception)
-					{
-						// Suppress so we keep throwing the original exception
-					}
-				}
-			}
-			
-			doClose = true;
-		}
-		
-		internal FieldsWriter(IndexOutput fdx, IndexOutput fdt, FieldInfos fn)
-		{
-			fieldInfos = fn;
-			fieldsStream = fdt;
-			indexStream = fdx;
-			doClose = false;
-		}
-		
-		internal void  SetFieldsStream(IndexOutput stream)
-		{
-			this.fieldsStream = stream;
-		}
-		
-		// Writes the contents of buffer into the fields stream
-		// and adds a new entry for this document into the index
-		// stream.  This assumes the buffer was already written
-		// in the correct fields format.
-		internal void  FlushDocument(int numStoredFields, RAMOutputStream buffer)
-		{
-			indexStream.WriteLong(fieldsStream.FilePointer);
-			fieldsStream.WriteVInt(numStoredFields);
-			buffer.WriteTo(fieldsStream);
-		}
-		
-		internal void  SkipDocument()
-		{
-			indexStream.WriteLong(fieldsStream.FilePointer);
-			fieldsStream.WriteVInt(0);
-		}
-		
-		internal void  Flush()
-		{
-			indexStream.Flush();
-			fieldsStream.Flush();
-		}
-		
-		public void Dispose()
-		{
+        
+        private readonly FieldInfos fieldInfos;
+        
+        private IndexOutput fieldsStream;
+        
+        private IndexOutput indexStream;
+        
+        private readonly bool doClose;
+        
+        internal FieldsWriter(Directory d, System.String segment, FieldInfos fn)
+        {
+            fieldInfos = fn;
+            
+            bool success = false;
+            String fieldsName = segment + "." + IndexFileNames.FIELDS_EXTENSION;
+            try
+            {
+                fieldsStream = d.CreateOutput(fieldsName);
+                fieldsStream.WriteInt(FORMAT_CURRENT);
+                success = true;
+            }
+            finally
+            {
+                if (!success)
+                {
+                    try
+                    {
+                        Dispose();
+                    }
+                    catch (System.Exception)
+                    {
+                        // Suppress so we keep throwing the original exception
+                    }
+                    try
+                    {
+                        d.DeleteFile(fieldsName);
+                    }
+                    catch (System.Exception)
+                    {
+                        // Suppress so we keep throwing the original exception
+                    }
+                }
+            }
+            
+            success = false;
+            String indexName = segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION;
+            try
+            {
+                indexStream = d.CreateOutput(indexName);
+                indexStream.WriteInt(FORMAT_CURRENT);
+                success = true;
+            }
+            finally
+            {
+                if (!success)
+                {
+                    try
+                    {
+                        Dispose();
+                    }
+                    catch (System.IO.IOException)
+                    {
+                    }
+                    try
+                    {
+                        d.DeleteFile(fieldsName);
+                    }
+                    catch (System.Exception)
+                    {
+                        // Suppress so we keep throwing the original exception
+                    }
+                    try
+                    {
+                        d.DeleteFile(indexName);
+                    }
+                    catch (System.Exception)
+                    {
+                        // Suppress so we keep throwing the original exception
+                    }
+                }
+            }
+            
+            doClose = true;
+        }
+        
+        internal FieldsWriter(IndexOutput fdx, IndexOutput fdt, FieldInfos fn)
+        {
+            fieldInfos = fn;
+            fieldsStream = fdt;
+            indexStream = fdx;
+            doClose = false;
+        }
+        
+        internal void  SetFieldsStream(IndexOutput stream)
+        {
+            this.fieldsStream = stream;
+        }
+        
+        // Writes the contents of buffer into the fields stream
+        // and adds a new entry for this document into the index
+        // stream.  This assumes the buffer was already written
+        // in the correct fields format.
+        internal void  FlushDocument(int numStoredFields, RAMOutputStream buffer)
+        {
+            indexStream.WriteLong(fieldsStream.FilePointer);
+            fieldsStream.WriteVInt(numStoredFields);
+            buffer.WriteTo(fieldsStream);
+        }
+        
+        internal void  SkipDocument()
+        {
+            indexStream.WriteLong(fieldsStream.FilePointer);
+            fieldsStream.WriteVInt(0);
+        }
+        
+        internal void  Flush()
+        {
+            indexStream.Flush();
+            fieldsStream.Flush();
+        }
+        
+        public void Dispose()
+        {
             // Move to protected method if class becomes unsealed
-			if (doClose)
-			{
-				try
-				{
-					if (fieldsStream != null)
-					{
-						try
-						{
-							fieldsStream.Close();
-						}
-						finally
-						{
-							fieldsStream = null;
-						}
-					}
-				}
-				catch (System.IO.IOException)
-				{
-					try
-					{
-						if (indexStream != null)
-						{
-							try
-							{
-								indexStream.Close();
-							}
-							finally
-							{
-								indexStream = null;
-							}
-						}
-					}
-					catch (System.IO.IOException)
-					{
-						// Ignore so we throw only first IOException hit
-					}
-					throw;
-				}
-				finally
-				{
-					if (indexStream != null)
-					{
-						try
-						{
-							indexStream.Close();
-						}
-						finally
-						{
-							indexStream = null;
-						}
-					}
-				}
-			}
-		}
-		
-		internal void  WriteField(FieldInfo fi, IFieldable field)
-		{
-			fieldsStream.WriteVInt(fi.number);
-			byte bits = 0;
-			if (field.IsTokenized)
-				bits |= FieldsWriter.FIELD_IS_TOKENIZED;
-			if (field.IsBinary)
-				bits |= FieldsWriter.FIELD_IS_BINARY;
-			
-			fieldsStream.WriteByte(bits);
-			
-			// compression is disabled for the current field
-			if (field.IsBinary)
-			{
-				byte[] data = field.GetBinaryValue();
-				int len = field.BinaryLength;
-				int offset = field.BinaryOffset;
-					
-				fieldsStream.WriteVInt(len);
-				fieldsStream.WriteBytes(data, offset, len);
-			}
-			else
-			{
-				fieldsStream.WriteString(field.StringValue);
-			}
-		}
-		
-		/// <summary>Bulk write a contiguous series of documents.  The
-		/// lengths array is the length (in bytes) of each raw
-		/// document.  The stream IndexInput is the
-		/// fieldsStream from which we should bulk-copy all
-		/// bytes. 
-		/// </summary>
-		internal void  AddRawDocuments(IndexInput stream, int[] lengths, int numDocs)
-		{
-			long position = fieldsStream.FilePointer;
-			long start = position;
-			for (int i = 0; i < numDocs; i++)
-			{
-				indexStream.WriteLong(position);
-				position += lengths[i];
-			}
-			fieldsStream.CopyBytes(stream, position - start);
-			System.Diagnostics.Debug.Assert(fieldsStream.FilePointer == position);
-		}
-		
-		internal void  AddDocument(Document doc)
-		{
-			indexStream.WriteLong(fieldsStream.FilePointer);
+            if (doClose)
+            {
+                try
+                {
+                    if (fieldsStream != null)
+                    {
+                        try
+                        {
+                            fieldsStream.Close();
+                        }
+                        finally
+                        {
+                            fieldsStream = null;
+                        }
+                    }
+                }
+                catch (System.IO.IOException)
+                {
+                    try
+                    {
+                        if (indexStream != null)
+                        {
+                            try
+                            {
+                                indexStream.Close();
+                            }
+                            finally
+                            {
+                                indexStream = null;
+                            }
+                        }
+                    }
+                    catch (System.IO.IOException)
+                    {
+                        // Ignore so we throw only first IOException hit
+                    }
+                    throw;
+                }
+                finally
+                {
+                    if (indexStream != null)
+                    {
+                        try
+                        {
+                            indexStream.Close();
+                        }
+                        finally
+                        {
+                            indexStream = null;
+                        }
+                    }
+                }
+            }
+        }
+        
+        internal void  WriteField(FieldInfo fi, IFieldable field)
+        {
+            fieldsStream.WriteVInt(fi.number);
+            byte bits = 0;
+            if (field.IsTokenized)
+                bits |= FieldsWriter.FIELD_IS_TOKENIZED;
+            if (field.IsBinary)
+                bits |= FieldsWriter.FIELD_IS_BINARY;
+            
+            fieldsStream.WriteByte(bits);
+            
+            // compression is disabled for the current field
+            if (field.IsBinary)
+            {
+                byte[] data = field.GetBinaryValue();
+                int len = field.BinaryLength;
+                int offset = field.BinaryOffset;
+                    
+                fieldsStream.WriteVInt(len);
+                fieldsStream.WriteBytes(data, offset, len);
+            }
+            else
+            {
+                fieldsStream.WriteString(field.StringValue);
+            }
+        }
+        
+        /// <summary>Bulk write a contiguous series of documents.  The
+        /// lengths array is the length (in bytes) of each raw
+        /// document.  The stream IndexInput is the
+        /// fieldsStream from which we should bulk-copy all
+        /// bytes. 
+        /// </summary>
+        internal void  AddRawDocuments(IndexInput stream, int[] lengths, int numDocs)
+        {
+            long position = fieldsStream.FilePointer;
+            long start = position;
+            for (int i = 0; i < numDocs; i++)
+            {
+                indexStream.WriteLong(position);
+                position += lengths[i];
+            }
+            fieldsStream.CopyBytes(stream, position - start);
+            System.Diagnostics.Debug.Assert(fieldsStream.FilePointer == position);
+        }
+        
+        internal void  AddDocument(Document doc)
+        {
+            indexStream.WriteLong(fieldsStream.FilePointer);
 
-			System.Collections.Generic.IList<IFieldable> fields = doc.GetFields();
-			int storedCount = fields.Count(field => field.IsStored);
-			fieldsStream.WriteVInt(storedCount);
-			
-			foreach(IFieldable field in fields)
-			{
-				if (field.IsStored)
-					WriteField(fieldInfos.FieldInfo(field.Name), field);
-			}
-		}
-	}
+            System.Collections.Generic.IList<IFieldable> fields = doc.GetFields();
+            int storedCount = fields.Count(field => field.IsStored);
+            fieldsStream.WriteVInt(storedCount);
+            
+            foreach(IFieldable field in fields)
+            {
+                if (field.IsStored)
+                    WriteField(fieldInfos.FieldInfo(field.Name), field);
+            }
+        }
+    }
 }
\ No newline at end of file
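
The WriteField/AddDocument pair above pins down the stored-fields layout in this format: for each document the index stream receives one long (the document's offset in the fields stream), and the fields stream receives a VInt count of stored fields followed by one record per field, where each record is VInt(fieldNumber), one flags byte, and the value (length-prefixed bytes for binary fields, a string otherwise). Here is a hedged sketch of that record layout over a plain BinaryWriter, with Lucene's VInt encoding spelled out explicitly; it mirrors the format for illustration rather than reusing IndexOutput.

    using System.IO;

    // Illustrative sketch of the per-field record FieldsWriter.WriteField emits.
    internal static class FieldRecordSketch
    {
        private const byte FIELD_IS_BINARY = 0x2;

        // Lucene-style VInt: 7 bits per byte, high bit set while bytes remain.
        private static void WriteVInt(BinaryWriter w, int value)
        {
            uint v = (uint) value;
            while (v >= 0x80)
            {
                w.Write((byte) ((v & 0x7F) | 0x80));
                v >>= 7;
            }
            w.Write((byte) v);
        }

        // Binary stored field: field number, flags byte, length-prefixed bytes.
        // A tokenized field would also OR the 0x1 bit into the flags byte.
        public static void WriteBinaryField(BinaryWriter w, int fieldNumber, byte[] data)
        {
            WriteVInt(w, fieldNumber);
            w.Write(FIELD_IS_BINARY);
            WriteVInt(w, data.Length);
            w.Write(data);
        }
    }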

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FilterIndexReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FilterIndexReader.cs b/src/core/Index/FilterIndexReader.cs
index dc61613..ced4220 100644
--- a/src/core/Index/FilterIndexReader.cs
+++ b/src/core/Index/FilterIndexReader.cs
@@ -23,37 +23,37 @@ using Directory = Lucene.Net.Store.Directory;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>A <c>FilterIndexReader</c> contains another IndexReader, which it
-	/// uses as its basic source of data, possibly transforming the data along the
-	/// way or providing additional functionality. The class
-	/// <c>FilterIndexReader</c> itself simply implements all abstract methods
-	/// of <c>IndexReader</c> with versions that pass all requests to the
-	/// contained index reader. Subclasses of <c>FilterIndexReader</c> may
-	/// further override some of these methods and may also provide additional
-	/// methods and fields.
-	/// </summary>
-	public class FilterIndexReader:IndexReader
-	{
+    
+    /// <summary>A <c>FilterIndexReader</c> contains another IndexReader, which it
+    /// uses as its basic source of data, possibly transforming the data along the
+    /// way or providing additional functionality. The class
+    /// <c>FilterIndexReader</c> itself simply implements all abstract methods
+    /// of <c>IndexReader</c> with versions that pass all requests to the
+    /// contained index reader. Subclasses of <c>FilterIndexReader</c> may
+    /// further override some of these methods and may also provide additional
+    /// methods and fields.
+    /// </summary>
+    public class FilterIndexReader:IndexReader
+    {
 
         /// <summary>Base class for filtering <see cref="Lucene.Net.Index.TermDocs" /> implementations. </summary>
-		public class FilterTermDocs : TermDocs
-		{
-			protected internal TermDocs in_Renamed;
-			
-			public FilterTermDocs(TermDocs in_Renamed)
-			{
-				this.in_Renamed = in_Renamed;
-			}
-			
-			public virtual void  Seek(Term term)
-			{
-				in_Renamed.Seek(term);
-			}
-			public virtual void  Seek(TermEnum termEnum)
-			{
-				in_Renamed.Seek(termEnum);
-			}
+        public class FilterTermDocs : TermDocs
+        {
+            protected internal TermDocs in_Renamed;
+            
+            public FilterTermDocs(TermDocs in_Renamed)
+            {
+                this.in_Renamed = in_Renamed;
+            }
+            
+            public virtual void  Seek(Term term)
+            {
+                in_Renamed.Seek(term);
+            }
+            public virtual void  Seek(TermEnum termEnum)
+            {
+                in_Renamed.Seek(termEnum);
+            }
 
             public virtual int Doc
             {
@@ -66,22 +66,22 @@ namespace Lucene.Net.Index
             }
 
             public virtual bool Next()
-			{
-				return in_Renamed.Next();
-			}
-			public virtual int Read(int[] docs, int[] freqs)
-			{
-				return in_Renamed.Read(docs, freqs);
-			}
-			public virtual bool SkipTo(int i)
-			{
-				return in_Renamed.SkipTo(i);
-			}
+            {
+                return in_Renamed.Next();
+            }
+            public virtual int Read(int[] docs, int[] freqs)
+            {
+                return in_Renamed.Read(docs, freqs);
+            }
+            public virtual bool SkipTo(int i)
+            {
+                return in_Renamed.SkipTo(i);
+            }
 
-			public void Close()
-			{
-				Dispose();
-			}
+            public void Close()
+            {
+                Dispose();
+            }
 
             public void Dispose()
             {
@@ -95,64 +95,64 @@ namespace Lucene.Net.Index
                     in_Renamed.Close();
                 }
             }
-		}
-		
-		/// <summary>Base class for filtering <see cref="TermPositions" /> implementations. </summary>
-		public class FilterTermPositions:FilterTermDocs, TermPositions
-		{
-			
-			public FilterTermPositions(TermPositions in_Renamed):base(in_Renamed)
-			{
-			}
-			
-			public virtual int NextPosition()
-			{
-				return ((TermPositions) this.in_Renamed).NextPosition();
-			}
+        }
+        
+        /// <summary>Base class for filtering <see cref="TermPositions" /> implementations. </summary>
+        public class FilterTermPositions:FilterTermDocs, TermPositions
+        {
+            
+            public FilterTermPositions(TermPositions in_Renamed):base(in_Renamed)
+            {
+            }
+            
+            public virtual int NextPosition()
+            {
+                return ((TermPositions) this.in_Renamed).NextPosition();
+            }
 
-		    public virtual int PayloadLength
-		    {
-		        get { return ((TermPositions) this.in_Renamed).PayloadLength; }
-		    }
+            public virtual int PayloadLength
+            {
+                get { return ((TermPositions) this.in_Renamed).PayloadLength; }
+            }
 
-		    public virtual byte[] GetPayload(byte[] data, int offset)
-			{
-				return ((TermPositions) this.in_Renamed).GetPayload(data, offset);
-			}
-			
-			
-			// TODO: Remove warning after API has been finalized
+            public virtual byte[] GetPayload(byte[] data, int offset)
+            {
+                return ((TermPositions) this.in_Renamed).GetPayload(data, offset);
+            }
+            
+            
+            // TODO: Remove warning after API has been finalized
 
-		    public virtual bool IsPayloadAvailable
-		    {
-		        get { return ((TermPositions) this.in_Renamed).IsPayloadAvailable; }
-		    }
-		}
-		
-		/// <summary>Base class for filtering <see cref="TermEnum" /> implementations. </summary>
-		public class FilterTermEnum:TermEnum
-		{
-			protected internal TermEnum in_Renamed;
-			
-			public FilterTermEnum(TermEnum in_Renamed)
-			{
-				this.in_Renamed = in_Renamed;
-			}
-			
-			public override bool Next()
-			{
-				return in_Renamed.Next();
-			}
+            public virtual bool IsPayloadAvailable
+            {
+                get { return ((TermPositions) this.in_Renamed).IsPayloadAvailable; }
+            }
+        }
+        
+        /// <summary>Base class for filtering <see cref="TermEnum" /> implementations. </summary>
+        public class FilterTermEnum:TermEnum
+        {
+            protected internal TermEnum in_Renamed;
+            
+            public FilterTermEnum(TermEnum in_Renamed)
+            {
+                this.in_Renamed = in_Renamed;
+            }
+            
+            public override bool Next()
+            {
+                return in_Renamed.Next();
+            }
 
-		    public override Term Term
-		    {
-		        get { return in_Renamed.Term; }
-		    }
+            public override Term Term
+            {
+                get { return in_Renamed.Term; }
+            }
 
-		    public override int DocFreq()
-			{
-				return in_Renamed.DocFreq();
-			}
+            public override int DocFreq()
+            {
+                return in_Renamed.DocFreq();
+            }
 
             protected override void Dispose(bool disposing)
             {
@@ -161,228 +161,228 @@ namespace Lucene.Net.Index
                     in_Renamed.Close();
                 }
             }
-		}
-		
-		protected internal IndexReader in_Renamed;
-		
-		/// <summary> <p/>Construct a FilterIndexReader based on the specified base reader.
-		/// Directory locking for delete, undeleteAll, and setNorm operations is
-		/// left to the base reader.<p/>
-		/// <p/>Note that base reader is closed if this FilterIndexReader is closed.<p/>
-		/// </summary>
-		///  <param name="in_Renamed">specified base reader.
-		/// </param>
-		public FilterIndexReader(IndexReader in_Renamed):base()
-		{
-			this.in_Renamed = in_Renamed;
-		}
-		
-		public override Directory Directory()
-		{
-			return in_Renamed.Directory();
-		}
-		
-		public override ITermFreqVector[] GetTermFreqVectors(int docNumber)
-		{
-			EnsureOpen();
-			return in_Renamed.GetTermFreqVectors(docNumber);
-		}
-		
-		public override ITermFreqVector GetTermFreqVector(int docNumber, System.String field)
-		{
-			EnsureOpen();
-			return in_Renamed.GetTermFreqVector(docNumber, field);
-		}
-		
-		
-		public override void  GetTermFreqVector(int docNumber, System.String field, TermVectorMapper mapper)
-		{
-			EnsureOpen();
-			in_Renamed.GetTermFreqVector(docNumber, field, mapper);
-		}
-		
-		public override void  GetTermFreqVector(int docNumber, TermVectorMapper mapper)
-		{
-			EnsureOpen();
-			in_Renamed.GetTermFreqVector(docNumber, mapper);
-		}
+        }
+        
+        protected internal IndexReader in_Renamed;
+        
+        /// <summary> <p/>Construct a FilterIndexReader based on the specified base reader.
+        /// Directory locking for delete, undeleteAll, and setNorm operations is
+        /// left to the base reader.<p/>
+        /// <p/>Note that the base reader is closed if this FilterIndexReader is closed.<p/>
+        /// </summary>
+        ///  <param name="in_Renamed">specified base reader.
+        /// </param>
+        public FilterIndexReader(IndexReader in_Renamed):base()
+        {
+            this.in_Renamed = in_Renamed;
+        }
+        
+        public override Directory Directory()
+        {
+            return in_Renamed.Directory();
+        }
+        
+        public override ITermFreqVector[] GetTermFreqVectors(int docNumber)
+        {
+            EnsureOpen();
+            return in_Renamed.GetTermFreqVectors(docNumber);
+        }
+        
+        public override ITermFreqVector GetTermFreqVector(int docNumber, System.String field)
+        {
+            EnsureOpen();
+            return in_Renamed.GetTermFreqVector(docNumber, field);
+        }
+        
+        
+        public override void  GetTermFreqVector(int docNumber, System.String field, TermVectorMapper mapper)
+        {
+            EnsureOpen();
+            in_Renamed.GetTermFreqVector(docNumber, field, mapper);
+        }
+        
+        public override void  GetTermFreqVector(int docNumber, TermVectorMapper mapper)
+        {
+            EnsureOpen();
+            in_Renamed.GetTermFreqVector(docNumber, mapper);
+        }
 
-	    public override int NumDocs()
-	    {
-	        // Don't call ensureOpen() here (it could affect performance)
-	        return in_Renamed.NumDocs();
-	    }
+        public override int NumDocs()
+        {
+            // Don't call ensureOpen() here (it could affect performance)
+            return in_Renamed.NumDocs();
+        }
 
-	    public override int MaxDoc
-	    {
-	        get
-	        {
-	            // Don't call ensureOpen() here (it could affect performance)
-	            return in_Renamed.MaxDoc;
-	        }
-	    }
+        public override int MaxDoc
+        {
+            get
+            {
+                // Don't call ensureOpen() here (it could affect performance)
+                return in_Renamed.MaxDoc;
+            }
+        }
 
-	    public override Document Document(int n, FieldSelector fieldSelector)
-		{
-			EnsureOpen();
-			return in_Renamed.Document(n, fieldSelector);
-		}
-		
-		public override bool IsDeleted(int n)
-		{
-			// Don't call ensureOpen() here (it could affect performance)
-			return in_Renamed.IsDeleted(n);
-		}
+        public override Document Document(int n, FieldSelector fieldSelector)
+        {
+            EnsureOpen();
+            return in_Renamed.Document(n, fieldSelector);
+        }
+        
+        public override bool IsDeleted(int n)
+        {
+            // Don't call ensureOpen() here (it could affect performance)
+            return in_Renamed.IsDeleted(n);
+        }
 
-	    public override bool HasDeletions
-	    {
-	        get
-	        {
-	            // Don't call ensureOpen() here (it could affect performance)
-	            return in_Renamed.HasDeletions;
-	        }
-	    }
+        public override bool HasDeletions
+        {
+            get
+            {
+                // Don't call ensureOpen() here (it could affect performance)
+                return in_Renamed.HasDeletions;
+            }
+        }
 
-	    protected internal override void  DoUndeleteAll()
-		{
-			in_Renamed.UndeleteAll();
-		}
-		
-		public override bool HasNorms(System.String field)
-		{
-			EnsureOpen();
-			return in_Renamed.HasNorms(field);
-		}
-		
-		public override byte[] Norms(System.String f)
-		{
-			EnsureOpen();
-			return in_Renamed.Norms(f);
-		}
-		
-		public override void  Norms(System.String f, byte[] bytes, int offset)
-		{
-			EnsureOpen();
-			in_Renamed.Norms(f, bytes, offset);
-		}
-		
-		protected internal override void  DoSetNorm(int d, System.String f, byte b)
-		{
-			in_Renamed.SetNorm(d, f, b);
-		}
-		
-		public override TermEnum Terms()
-		{
-			EnsureOpen();
-			return in_Renamed.Terms();
-		}
-		
-		public override TermEnum Terms(Term t)
-		{
-			EnsureOpen();
-			return in_Renamed.Terms(t);
-		}
-		
-		public override int DocFreq(Term t)
-		{
-			EnsureOpen();
-			return in_Renamed.DocFreq(t);
-		}
-		
-		public override TermDocs TermDocs()
-		{
-			EnsureOpen();
-			return in_Renamed.TermDocs();
-		}
-		
-		public override TermDocs TermDocs(Term term)
-		{
-			EnsureOpen();
-			return in_Renamed.TermDocs(term);
-		}
-		
-		public override TermPositions TermPositions()
-		{
-			EnsureOpen();
-			return in_Renamed.TermPositions();
-		}
-		
-		protected internal override void  DoDelete(int n)
-		{
-			in_Renamed.DeleteDocument(n);
-		}
+        protected internal override void  DoUndeleteAll()
+        {
+            in_Renamed.UndeleteAll();
+        }
+        
+        public override bool HasNorms(System.String field)
+        {
+            EnsureOpen();
+            return in_Renamed.HasNorms(field);
+        }
+        
+        public override byte[] Norms(System.String f)
+        {
+            EnsureOpen();
+            return in_Renamed.Norms(f);
+        }
+        
+        public override void  Norms(System.String f, byte[] bytes, int offset)
+        {
+            EnsureOpen();
+            in_Renamed.Norms(f, bytes, offset);
+        }
+        
+        protected internal override void  DoSetNorm(int d, System.String f, byte b)
+        {
+            in_Renamed.SetNorm(d, f, b);
+        }
+        
+        public override TermEnum Terms()
+        {
+            EnsureOpen();
+            return in_Renamed.Terms();
+        }
+        
+        public override TermEnum Terms(Term t)
+        {
+            EnsureOpen();
+            return in_Renamed.Terms(t);
+        }
+        
+        public override int DocFreq(Term t)
+        {
+            EnsureOpen();
+            return in_Renamed.DocFreq(t);
+        }
+        
+        public override TermDocs TermDocs()
+        {
+            EnsureOpen();
+            return in_Renamed.TermDocs();
+        }
+        
+        public override TermDocs TermDocs(Term term)
+        {
+            EnsureOpen();
+            return in_Renamed.TermDocs(term);
+        }
+        
+        public override TermPositions TermPositions()
+        {
+            EnsureOpen();
+            return in_Renamed.TermPositions();
+        }
+        
+        protected internal override void  DoDelete(int n)
+        {
+            in_Renamed.DeleteDocument(n);
+        }
 
         protected internal override void DoCommit(System.Collections.Generic.IDictionary<string, string> commitUserData)
-		{
-			in_Renamed.Commit(commitUserData);
-		}
-		
-		protected internal override void  DoClose()
-		{
-			in_Renamed.Close();
+        {
+            in_Renamed.Commit(commitUserData);
+        }
+        
+        protected internal override void  DoClose()
+        {
+            in_Renamed.Close();
             // NOTE: only needed in case someone had asked for
             // FieldCache for top-level reader (which is generally
             // not a good idea):
             Lucene.Net.Search.FieldCache_Fields.DEFAULT.Purge(this);
-		}
+        }
 
 
         public override System.Collections.Generic.ICollection<string> GetFieldNames(IndexReader.FieldOption fieldNames)
-		{
-			EnsureOpen();
-			return in_Renamed.GetFieldNames(fieldNames);
-		}
+        {
+            EnsureOpen();
+            return in_Renamed.GetFieldNames(fieldNames);
+        }
 
-	    public override long Version
-	    {
-	        get
-	        {
-	            EnsureOpen();
-	            return in_Renamed.Version;
-	        }
-	    }
+        public override long Version
+        {
+            get
+            {
+                EnsureOpen();
+                return in_Renamed.Version;
+            }
+        }
 
-	    public override bool IsCurrent()
-	    {
-	        EnsureOpen();
-	        return in_Renamed.IsCurrent();
-	    }
+        public override bool IsCurrent()
+        {
+            EnsureOpen();
+            return in_Renamed.IsCurrent();
+        }
 
-	    public override bool IsOptimized()
-	    {
-	        EnsureOpen();
-	        return in_Renamed.IsOptimized();
-	    }
+        public override bool IsOptimized()
+        {
+            EnsureOpen();
+            return in_Renamed.IsOptimized();
+        }
 
-	    public override IndexReader[] GetSequentialSubReaders()
-	    {
-	        return in_Renamed.GetSequentialSubReaders();
-	    }
+        public override IndexReader[] GetSequentialSubReaders()
+        {
+            return in_Renamed.GetSequentialSubReaders();
+        }
 
-	    override public System.Object Clone()
-		{
+        override public System.Object Clone()
+        {
             System.Diagnostics.Debug.Fail("Port issue:", "Lets see if we need this FilterIndexReader.Clone()"); // {{Aroush-2.9}}
-			return null;
-		}
+            return null;
+        }
 
-	    /// <summary>
-	    /// If the subclass of FilteredIndexReader modifies the
-	    /// contents of the FieldCache, you must override this
-	    /// method to provide a different key */
-	    ///</summary>
-	    public override object FieldCacheKey
-	    {
-	        get { return in_Renamed.FieldCacheKey; }
-	    }
+        /// <summary>
+        /// If the subclass of FilterIndexReader modifies the
+        /// contents of the FieldCache, you must override this
+        /// method to provide a different key.
+        /// </summary>
+        public override object FieldCacheKey
+        {
+            get { return in_Renamed.FieldCacheKey; }
+        }
 
-	    /// <summary>
-	    /// If the subclass of FilteredIndexReader modifies the
-	    /// deleted docs, you must override this method to provide
-	    /// a different key */
-	    /// </summary>
-	    public override object DeletesCacheKey
-	    {
-	        get { return in_Renamed.DeletesCacheKey; }
-	    }
-	}
+        /// <summary>
+        /// If the subclass of FilterIndexReader modifies the
+        /// deleted docs, you must override this method to provide
+        /// a different key.
+        /// </summary>
+        public override object DeletesCacheKey
+        {
+            get { return in_Renamed.DeletesCacheKey; }
+        }
+    }
 }
\ No newline at end of file
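
FilterIndexReader above is a textbook decorator: every member forwards to the wrapped in_Renamed reader, so a subclass only overrides the behavior it wants to change. As a hedged example, here is a hypothetical subclass (OneFieldNameReader is an invented name, not part of Lucene.Net) that narrows GetFieldNames to a single field while inheriting everything else; it relies on in_Renamed being protected internal, as declared above, and assumes EnsureOpen is likewise accessible to subclasses, as its use above suggests.

    using System.Collections.Generic;
    using Lucene.Net.Index;

    // Hypothetical FilterIndexReader subclass: overrides one method,
    // everything else still delegates through the base class.
    public class OneFieldNameReader : FilterIndexReader
    {
        private readonly string field;

        public OneFieldNameReader(IndexReader inner, string field) : base(inner)
        {
            this.field = field;
        }

        public override ICollection<string> GetFieldNames(IndexReader.FieldOption fieldNames)
        {
            EnsureOpen();
            var result = new List<string>();
            foreach (string name in in_Renamed.GetFieldNames(fieldNames))
            {
                if (name == field)
                    result.Add(name);
            }
            return result;
        }
    }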

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FormatPostingsDocsConsumer.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FormatPostingsDocsConsumer.cs b/src/core/Index/FormatPostingsDocsConsumer.cs
index 29c0558..74efb0d 100644
--- a/src/core/Index/FormatPostingsDocsConsumer.cs
+++ b/src/core/Index/FormatPostingsDocsConsumer.cs
@@ -19,18 +19,18 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> NOTE: this API is experimental and will likely change</summary>
-	
-	abstract class FormatPostingsDocsConsumer
-	{
-		
-		/// <summary>Adds a new doc in this term.  If this returns null
-		/// then we just skip consuming positions/payloads. 
-		/// </summary>
-		internal abstract FormatPostingsPositionsConsumer AddDoc(int docID, int termDocFreq);
-		
-		/// <summary>Called when we are done adding docs to this term </summary>
-		internal abstract void  Finish();
-	}
+    
+    /// <summary> NOTE: this API is experimental and will likely change</summary>
+    
+    abstract class FormatPostingsDocsConsumer
+    {
+        
+        /// <summary>Adds a new doc in this term.  If this returns null
+        /// then we just skip consuming positions/payloads. 
+        /// </summary>
+        internal abstract FormatPostingsPositionsConsumer AddDoc(int docID, int termDocFreq);
+        
+        /// <summary>Called when we are done adding docs to this term </summary>
+        internal abstract void  Finish();
+    }
 }
\ No newline at end of file
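
The contract above is deliberately small: AddDoc is invoked once per document containing the current term, returning null tells the caller to skip feeding positions and payloads, and Finish closes out the term. A minimal hypothetical implementation that only tallies document frequency might look like the sketch below; since both types are internal, it assumes the code compiles inside the Lucene.Net assembly.

    namespace Lucene.Net.Index
    {
        // Hypothetical consumer that counts the docs seen for the current term
        // and declines positions/payloads entirely.
        class CountingDocsConsumer : FormatPostingsDocsConsumer
        {
            internal int DocCount;

            internal override FormatPostingsPositionsConsumer AddDoc(int docID, int termDocFreq)
            {
                DocCount++;
                return null; // null => caller skips positions/payloads for this doc
            }

            internal override void Finish()
            {
                // per-term cleanup would go here; a plain counter needs none
            }
        }
    }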


[03/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/SegmentMerger.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/SegmentMerger.cs b/src/core/Index/SegmentMerger.cs
index 0ab159d..f309097 100644
--- a/src/core/Index/SegmentMerger.cs
+++ b/src/core/Index/SegmentMerger.cs
@@ -28,243 +28,243 @@ using IndexOutput = Lucene.Net.Store.IndexOutput;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> The SegmentMerger class combines two or more Segments, represented by an IndexReader (<see cref="Add" />,
-	/// into a single Segment.  After adding the appropriate readers, call the merge method to combine the 
-	/// segments.
-	/// <p/> 
-	/// If the compoundFile flag is set, then the segments will be merged into a compound file.
-	/// 
-	/// 
-	/// </summary>
-	/// <seealso cref="Merge()">
-	/// </seealso>
-	/// <seealso cref="Add">
-	/// </seealso>
-	public sealed class SegmentMerger
-	{
-		private class AnonymousClassCheckAbort:CheckAbort
-		{
-			private void  InitBlock(SegmentMerger enclosingInstance)
-			{
-				this.enclosingInstance = enclosingInstance;
-			}
-			private SegmentMerger enclosingInstance;
-			public SegmentMerger Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			internal AnonymousClassCheckAbort(SegmentMerger enclosingInstance, Lucene.Net.Index.MergePolicy.OneMerge Param1, Lucene.Net.Store.Directory Param2):base(Param1, Param2)
-			{
-				InitBlock(enclosingInstance);
-			}
-			public override void  Work(double units)
-			{
-				// do nothing
-			}
-		}
-		private class AnonymousClassCheckAbort1:CheckAbort
-		{
-			private void  InitBlock(SegmentMerger enclosingInstance)
-			{
-				this.enclosingInstance = enclosingInstance;
-			}
-			private SegmentMerger enclosingInstance;
-			public SegmentMerger Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			internal AnonymousClassCheckAbort1(SegmentMerger enclosingInstance, Lucene.Net.Index.MergePolicy.OneMerge Param1, Lucene.Net.Store.Directory Param2):base(Param1, Param2)
-			{
-				InitBlock(enclosingInstance);
-			}
-			public override void  Work(double units)
-			{
-				// do nothing
-			}
-		}
+    
+    /// <summary> The SegmentMerger class combines two or more Segments, represented by an IndexReader (<see cref="Add" />),
+    /// into a single Segment.  After adding the appropriate readers, call the merge method to combine the 
+    /// segments.
+    /// <p/> 
+    /// If the compoundFile flag is set, then the segments will be merged into a compound file.
+    /// 
+    /// 
+    /// </summary>
+    /// <seealso cref="Merge()">
+    /// </seealso>
+    /// <seealso cref="Add">
+    /// </seealso>
+    public sealed class SegmentMerger
+    {
+        private class AnonymousClassCheckAbort:CheckAbort
+        {
+            private void  InitBlock(SegmentMerger enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
+            private SegmentMerger enclosingInstance;
+            public SegmentMerger Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+                
+            }
+            internal AnonymousClassCheckAbort(SegmentMerger enclosingInstance, Lucene.Net.Index.MergePolicy.OneMerge Param1, Lucene.Net.Store.Directory Param2):base(Param1, Param2)
+            {
+                InitBlock(enclosingInstance);
+            }
+            public override void  Work(double units)
+            {
+                // do nothing
+            }
+        }
+        private class AnonymousClassCheckAbort1:CheckAbort
+        {
+            private void  InitBlock(SegmentMerger enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
+            private SegmentMerger enclosingInstance;
+            public SegmentMerger Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+                
+            }
+            internal AnonymousClassCheckAbort1(SegmentMerger enclosingInstance, Lucene.Net.Index.MergePolicy.OneMerge Param1, Lucene.Net.Store.Directory Param2):base(Param1, Param2)
+            {
+                InitBlock(enclosingInstance);
+            }
+            public override void  Work(double units)
+            {
+                // do nothing
+            }
+        }
 
-		private void  InitBlock()
-		{
-			termIndexInterval = IndexWriter.DEFAULT_TERM_INDEX_INTERVAL;
-		}
-		
-		/// <summary>norms header placeholder </summary>
-		internal static readonly byte[] NORMS_HEADER = new byte[]{(byte) 'N', (byte) 'R', (byte) 'M', unchecked((byte) - 1)};
-		
-		private Directory directory;
-		private System.String segment;
-		private int termIndexInterval;
-		
-		private IList<IndexReader> readers = new List<IndexReader>();
-		private FieldInfos fieldInfos;
-		
-		private int mergedDocs;
-		
-		private CheckAbort checkAbort;
-		
-		// Whether we should merge doc stores (stored fields and
-		// vectors files).  When all segments we are merging
-		// already share the same doc store files, we don't need
-		// to merge the doc stores.
-		private bool mergeDocStores;
-		
-		/// <summary>Maximum number of contiguous documents to bulk-copy
-		/// when merging stored fields 
-		/// </summary>
-		private const int MAX_RAW_MERGE_DOCS = 4192;
-		
-		/// <summary>This ctor used only by test code.
-		/// 
-		/// </summary>
-		/// <param name="dir">The Directory to merge the other segments into
-		/// </param>
-		/// <param name="name">The name of the new segment
-		/// </param>
-		public /*internal*/ SegmentMerger(Directory dir, System.String name)
-		{
-			InitBlock();
-			directory = dir;
-			segment = name;
-			checkAbort = new AnonymousClassCheckAbort(this, null, null);
-		}
-		
-		internal SegmentMerger(IndexWriter writer, System.String name, MergePolicy.OneMerge merge)
-		{
-			InitBlock();
-			directory = writer.Directory;
-			segment = name;
-			if (merge != null)
-			{
-				checkAbort = new CheckAbort(merge, directory);
-			}
-			else
-			{
-				checkAbort = new AnonymousClassCheckAbort1(this, null, null);
-			}
-			termIndexInterval = writer.TermIndexInterval;
-		}
-		
-		internal bool HasProx()
-		{
-			return fieldInfos.HasProx();
-		}
-		
-		/// <summary> Add an IndexReader to the collection of readers that are to be merged</summary>
-		/// <param name="reader">
-		/// </param>
-		public /*internal*/ void  Add(IndexReader reader)
-		{
-			readers.Add(reader);
-		}
-		
-		/// <summary> </summary>
-		/// <param name="i">The index of the reader to return
-		/// </param>
-		/// <returns> The ith reader to be merged
-		/// </returns>
-		internal IndexReader SegmentReader(int i)
-		{
-			return readers[i];
-		}
-		
-		/// <summary> Merges the readers specified by the <see cref="Add" /> method into the directory passed to the constructor</summary>
-		/// <returns> The number of documents that were merged
-		/// </returns>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  IOException if there is a low-level IO error </throws>
-		public /*internal*/ int Merge()
-		{
-			return Merge(true);
-		}
-		
-		/// <summary> Merges the readers specified by the <see cref="Add" /> method
-		/// into the directory passed to the constructor.
-		/// </summary>
-		/// <param name="mergeDocStores">if false, we will not merge the
-		/// stored fields nor vectors files
-		/// </param>
-		/// <returns> The number of documents that were merged
-		/// </returns>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  IOException if there is a low-level IO error </throws>
-		internal int Merge(bool mergeDocStores)
-		{
-			
-			this.mergeDocStores = mergeDocStores;
-			
-			// NOTE: it's important to add calls to
-			// checkAbort.work(...) if you make any changes to this
-			// method that will spend alot of time.  The frequency
-			// of this check impacts how long
-			// IndexWriter.close(false) takes to actually stop the
-			// threads.
-			
-			mergedDocs = MergeFields();
-			MergeTerms();
-			MergeNorms();
-			
-			if (mergeDocStores && fieldInfos.HasVectors())
-				MergeVectors();
-			
-			return mergedDocs;
-		}
-		
-		/// <summary> close all IndexReaders that have been added.
-		/// Should not be called before merge().
-		/// </summary>
-		/// <throws>  IOException </throws>
-		internal void  CloseReaders()
-		{
-			foreach(IndexReader reader in readers)
-			{
-				reader.Dispose();
-			}
-		}
+        private void  InitBlock()
+        {
+            termIndexInterval = IndexWriter.DEFAULT_TERM_INDEX_INTERVAL;
+        }
+        
+        /// <summary>norms header placeholder </summary>
+        internal static readonly byte[] NORMS_HEADER = new byte[]{(byte) 'N', (byte) 'R', (byte) 'M', unchecked((byte) - 1)};
+        
+        private Directory directory;
+        private System.String segment;
+        private int termIndexInterval;
+        
+        private IList<IndexReader> readers = new List<IndexReader>();
+        private FieldInfos fieldInfos;
+        
+        private int mergedDocs;
+        
+        private CheckAbort checkAbort;
+        
+        // Whether we should merge doc stores (stored fields and
+        // vectors files).  When all segments we are merging
+        // already share the same doc store files, we don't need
+        // to merge the doc stores.
+        private bool mergeDocStores;
+        
+        /// <summary>Maximum number of contiguous documents to bulk-copy
+        /// when merging stored fields 
+        /// </summary>
+        private const int MAX_RAW_MERGE_DOCS = 4192;
+        
+        /// <summary>This ctor is used only by test code.
+        /// 
+        /// </summary>
+        /// <param name="dir">The Directory to merge the other segments into
+        /// </param>
+        /// <param name="name">The name of the new segment
+        /// </param>
+        public /*internal*/ SegmentMerger(Directory dir, System.String name)
+        {
+            InitBlock();
+            directory = dir;
+            segment = name;
+            checkAbort = new AnonymousClassCheckAbort(this, null, null);
+        }
+        
+        internal SegmentMerger(IndexWriter writer, System.String name, MergePolicy.OneMerge merge)
+        {
+            InitBlock();
+            directory = writer.Directory;
+            segment = name;
+            if (merge != null)
+            {
+                checkAbort = new CheckAbort(merge, directory);
+            }
+            else
+            {
+                checkAbort = new AnonymousClassCheckAbort1(this, null, null);
+            }
+            termIndexInterval = writer.TermIndexInterval;
+        }
+        
+        internal bool HasProx()
+        {
+            return fieldInfos.HasProx();
+        }
+        
+        /// <summary> Add an IndexReader to the collection of readers that are to be merged</summary>
+        /// <param name="reader">
+        /// </param>
+        public /*internal*/ void  Add(IndexReader reader)
+        {
+            readers.Add(reader);
+        }
+        
+        /// <summary> </summary>
+        /// <param name="i">The index of the reader to return
+        /// </param>
+        /// <returns> The ith reader to be merged
+        /// </returns>
+        internal IndexReader SegmentReader(int i)
+        {
+            return readers[i];
+        }
+        
+        /// <summary> Merges the readers specified by the <see cref="Add" /> method into the directory passed to the constructor</summary>
+        /// <returns> The number of documents that were merged
+        /// </returns>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  IOException if there is a low-level IO error </throws>
+        public /*internal*/ int Merge()
+        {
+            return Merge(true);
+        }
+        
+        /// <summary> Merges the readers specified by the <see cref="Add" /> method
+        /// into the directory passed to the constructor.
+        /// </summary>
+        /// <param name="mergeDocStores">if false, we will not merge the
+        /// stored fields nor vectors files
+        /// </param>
+        /// <returns> The number of documents that were merged
+        /// </returns>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  IOException if there is a low-level IO error </throws>
+        internal int Merge(bool mergeDocStores)
+        {
+            
+            this.mergeDocStores = mergeDocStores;
+            
+            // NOTE: it's important to add calls to
+            // checkAbort.work(...) if you make any changes to this
+            // method that will spend a lot of time.  The frequency
+            // of this check impacts how long
+            // IndexWriter.close(false) takes to actually stop the
+            // threads.
+            
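+            // The merge runs in phases: stored fields first (which also yields
+            // the merged doc count), then the terms/postings, then norms, and
+            // finally term vectors when the doc stores are being merged.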
+            mergedDocs = MergeFields();
+            MergeTerms();
+            MergeNorms();
+            
+            if (mergeDocStores && fieldInfos.HasVectors())
+                MergeVectors();
+            
+            return mergedDocs;
+        }
+        
+        /// <summary> Close all IndexReaders that have been added.
+        /// Should not be called before merge().
+        /// </summary>
+        /// <throws>  IOException </throws>
+        internal void  CloseReaders()
+        {
+            foreach(IndexReader reader in readers)
+            {
+                reader.Dispose();
+            }
+        }
 
         internal ICollection<string> GetMergedFiles()
-		{
+        {
             ISet<string> fileSet = Lucene.Net.Support.Compatibility.SetFactory.CreateHashSet<string>();
-			
-			// Basic files
-			for (int i = 0; i < IndexFileNames.COMPOUND_EXTENSIONS.Length; i++)
-			{
-				System.String ext = IndexFileNames.COMPOUND_EXTENSIONS[i];
-				
-				if (ext.Equals(IndexFileNames.PROX_EXTENSION) && !HasProx())
-					continue;
-				
-				if (mergeDocStores || (!ext.Equals(IndexFileNames.FIELDS_EXTENSION) && !ext.Equals(IndexFileNames.FIELDS_INDEX_EXTENSION)))
+            
+            // Basic files
+            for (int i = 0; i < IndexFileNames.COMPOUND_EXTENSIONS.Length; i++)
+            {
+                System.String ext = IndexFileNames.COMPOUND_EXTENSIONS[i];
+                
+                if (ext.Equals(IndexFileNames.PROX_EXTENSION) && !HasProx())
+                    continue;
+                
+                if (mergeDocStores || (!ext.Equals(IndexFileNames.FIELDS_EXTENSION) && !ext.Equals(IndexFileNames.FIELDS_INDEX_EXTENSION)))
                     fileSet.Add(segment + "." + ext);
-			}
-			
-			// Fieldable norm files
-			for (int i = 0; i < fieldInfos.Size(); i++)
-			{
-				FieldInfo fi = fieldInfos.FieldInfo(i);
-				if (fi.isIndexed && !fi.omitNorms)
-				{
+            }
+            
+            // Fieldable norm files
+            for (int i = 0; i < fieldInfos.Size(); i++)
+            {
+                FieldInfo fi = fieldInfos.FieldInfo(i);
+                if (fi.isIndexed && !fi.omitNorms)
+                {
                     fileSet.Add(segment + "." + IndexFileNames.NORMS_EXTENSION);
-					break;
-				}
-			}
-			
-			// Vector files
-			if (fieldInfos.HasVectors() && mergeDocStores)
-			{
-				for (int i = 0; i < IndexFileNames.VECTOR_EXTENSIONS.Length; i++)
-				{
+                    break;
+                }
+            }
+            
+            // Vector files
+            if (fieldInfos.HasVectors() && mergeDocStores)
+            {
+                for (int i = 0; i < IndexFileNames.VECTOR_EXTENSIONS.Length; i++)
+                {
                     fileSet.Add(segment + "." + IndexFileNames.VECTOR_EXTENSIONS[i]);
-				}
-			}
+                }
+            }
 
             return fileSet;
         }
@@ -274,17 +274,17 @@ namespace Lucene.Net.Index
             ICollection<string> files = GetMergedFiles();
             CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, fileName, checkAbort);
 
-			// Now merge all added files
-			foreach(var file in files)
-			{
-				cfsWriter.AddFile(file);
-			}
-			
-			// Perform the merge
-			cfsWriter.Close();
+            // Now merge all added files
+            foreach(var file in files)
+            {
+                cfsWriter.AddFile(file);
+            }
+            
+            // Perform the merge
+            cfsWriter.Close();
 
             return files;
-		}
+        }
 
         private void AddIndexed(IndexReader reader, FieldInfos fInfos, ICollection<string> names, bool storeTermVectors, bool storePositionWithTermVector, bool storeOffsetWithTermVector, bool storePayloads, bool omitTFAndPositions)
         {
@@ -295,640 +295,640 @@ namespace Lucene.Net.Index
             }
         }
 
-	    private SegmentReader[] matchingSegmentReaders;
-		private int[] rawDocLengths;
-		private int[] rawDocLengths2;
-		
-		private void  SetMatchingSegmentReaders()
-		{
-			// If the i'th reader is a SegmentReader and has
-			// identical fieldName -> number mapping, then this
-			// array will be non-null at position i:
-			int numReaders = readers.Count;
-			matchingSegmentReaders = new SegmentReader[numReaders];
-			
-			// If this reader is a SegmentReader, and all of its
-			// field name -> number mappings match the "merged"
-			// FieldInfos, then we can do a bulk copy of the
-			// stored fields:
-			for (int i = 0; i < numReaders; i++)
-			{
-				IndexReader reader = readers[i];
-				if (reader is SegmentReader)
-				{
-					SegmentReader segmentReader = (SegmentReader) reader;
-					bool same = true;
-					FieldInfos segmentFieldInfos = segmentReader.FieldInfos();
-					int numFieldInfos = segmentFieldInfos.Size();
-					for (int j = 0; same && j < numFieldInfos; j++)
-					{
-						same = fieldInfos.FieldName(j).Equals(segmentFieldInfos.FieldName(j));
-					}
-					if (same)
-					{
-						matchingSegmentReaders[i] = segmentReader;
-					}
-				}
-			}
-			
-			// Used for bulk-reading raw bytes for stored fields
-			rawDocLengths = new int[MAX_RAW_MERGE_DOCS];
-			rawDocLengths2 = new int[MAX_RAW_MERGE_DOCS];
-		}
-		
-		/// <summary> </summary>
-		/// <returns> The number of documents in all of the readers
-		/// </returns>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  IOException if there is a low-level IO error </throws>
-		private int MergeFields()
-		{
-			
-			if (!mergeDocStores)
-			{
-				// When we are not merging by doc stores, their field
-				// name -> number mapping are the same.  So, we start
-				// with the fieldInfos of the last segment in this
-				// case, to keep that numbering.
-				SegmentReader sr = (SegmentReader) readers[readers.Count - 1];
-				fieldInfos = (FieldInfos) sr.core.fieldInfos.Clone();
-			}
-			else
-			{
-				fieldInfos = new FieldInfos(); // merge field names
-			}
-			
-			foreach(IndexReader reader in readers)
-			{
-				if (reader is SegmentReader)
-				{
-					SegmentReader segmentReader = (SegmentReader) reader;
-					FieldInfos readerFieldInfos = segmentReader.FieldInfos();
-					int numReaderFieldInfos = readerFieldInfos.Size();
-					for (int j = 0; j < numReaderFieldInfos; j++)
-					{
-						FieldInfo fi = readerFieldInfos.FieldInfo(j);
-						fieldInfos.Add(fi.name, fi.isIndexed, fi.storeTermVector, fi.storePositionWithTermVector, fi.storeOffsetWithTermVector, !reader.HasNorms(fi.name), fi.storePayloads, fi.omitTermFreqAndPositions);
-					}
-				}
-				else
-				{
-					AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR_WITH_POSITION_OFFSET), true, true, true, false, false);
-					AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR_WITH_POSITION), true, true, false, false, false);
-					AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR_WITH_OFFSET), true, false, true, false, false);
-					AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR), true, false, false, false, false);
-					AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.OMIT_TERM_FREQ_AND_POSITIONS), false, false, false, false, true);
-					AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.STORES_PAYLOADS), false, false, false, true, false);
-					AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.INDEXED), false, false, false, false, false);
-					fieldInfos.Add(reader.GetFieldNames(FieldOption.UNINDEXED), false);
-				}
-			}
-			fieldInfos.Write(directory, segment + ".fnm");
-			
-			int docCount = 0;
-			
-			SetMatchingSegmentReaders();
-			
-			if (mergeDocStores)
-			{
-				// merge field values
-				FieldsWriter fieldsWriter = new FieldsWriter(directory, segment, fieldInfos);
-				
-				try
-				{
-					int idx = 0;
-					foreach(IndexReader reader in readers)
-					{
-						SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++];
-						FieldsReader matchingFieldsReader = null;
-						if (matchingSegmentReader != null)
-						{
-							FieldsReader fieldsReader = matchingSegmentReader.GetFieldsReader();
-							if (fieldsReader != null && fieldsReader.CanReadRawDocs())
-							{
-								matchingFieldsReader = fieldsReader;
-							}
-						}
-						if (reader.HasDeletions)
-						{
-							docCount += CopyFieldsWithDeletions(fieldsWriter, reader, matchingFieldsReader);
-						}
-						else
-						{
-							docCount += CopyFieldsNoDeletions(fieldsWriter, reader, matchingFieldsReader);
-						}
-					}
-				}
-				finally
-				{
-					fieldsWriter.Dispose();
-				}
-				
-				System.String fileName = segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION;
-				long fdxFileLength = directory.FileLength(fileName);
-				
-				if (4 + ((long) docCount) * 8 != fdxFileLength)
-				// This is most likely a bug in Sun JRE 1.6.0_04/_05;
-				// we detect that the bug has struck, here, and
-				// throw an exception to prevent the corruption from
-				// entering the index.  See LUCENE-1282 for
-				// details.
-					throw new System.SystemException("mergeFields produced an invalid result: docCount is " + docCount + " but fdx file size is " + fdxFileLength + " file=" + fileName + " file exists?=" + directory.FileExists(fileName) + "; now aborting this merge to prevent index corruption");
-			}
-			// If we are skipping the doc stores, that means there
-			// are no deletions in any of these segments, so we
-			// just sum numDocs() of each segment to get total docCount
-			else
-			{
-				foreach(IndexReader reader in readers)
-				{
-					docCount += reader.NumDocs();
-				}
-			}
-			
-			return docCount;
-		}
-		
-		private int CopyFieldsWithDeletions(FieldsWriter fieldsWriter, IndexReader reader, FieldsReader matchingFieldsReader)
-		{
-			int docCount = 0;
-			int maxDoc = reader.MaxDoc;
-			if (matchingFieldsReader != null)
-			{
-				// We can bulk-copy because the fieldInfos are "congruent"
-				for (int j = 0; j < maxDoc; )
-				{
-					if (reader.IsDeleted(j))
-					{
-						// skip deleted docs
-						++j;
-						continue;
-					}
-					// We can optimize this case (doing a bulk byte copy) since the field 
-					// numbers are identical
-					int start = j, numDocs = 0;
-					do 
-					{
-						j++;
-						numDocs++;
-						if (j >= maxDoc)
-							break;
-						if (reader.IsDeleted(j))
-						{
-							j++;
-							break;
-						}
-					}
-					while (numDocs < MAX_RAW_MERGE_DOCS);
-					
-					IndexInput stream = matchingFieldsReader.RawDocs(rawDocLengths, start, numDocs);
-					fieldsWriter.AddRawDocuments(stream, rawDocLengths, numDocs);
-					docCount += numDocs;
-					checkAbort.Work(300 * numDocs);
-				}
-			}
-			else
-			{
-				for (int j = 0; j < maxDoc; j++)
-				{
-					if (reader.IsDeleted(j))
-					{
-						// skip deleted docs
-						continue;
-					}
-					// NOTE: it's very important to first assign to doc then pass it to
-					// termVectorsWriter.addAllDocVectors; see LUCENE-1282
-					Document doc = reader.Document(j);
-					fieldsWriter.AddDocument(doc);
-					docCount++;
-					checkAbort.Work(300);
-				}
-			}
-			return docCount;
-		}
-		
-		private int CopyFieldsNoDeletions(FieldsWriter fieldsWriter, IndexReader reader, FieldsReader matchingFieldsReader)
-		{
-			int maxDoc = reader.MaxDoc;
-			int docCount = 0;
-			if (matchingFieldsReader != null)
-			{
-				// We can bulk-copy because the fieldInfos are "congruent"
-				while (docCount < maxDoc)
-				{
-					int len = System.Math.Min(MAX_RAW_MERGE_DOCS, maxDoc - docCount);
-					IndexInput stream = matchingFieldsReader.RawDocs(rawDocLengths, docCount, len);
-					fieldsWriter.AddRawDocuments(stream, rawDocLengths, len);
-					docCount += len;
-					checkAbort.Work(300 * len);
-				}
-			}
-			else
-			{
-				for (; docCount < maxDoc; docCount++)
-				{
-					// NOTE: it's very important to first assign to doc then pass it to
-					// termVectorsWriter.addAllDocVectors; see LUCENE-1282
-					Document doc = reader.Document(docCount);
-					fieldsWriter.AddDocument(doc);
-					checkAbort.Work(300);
-				}
-			}
-			return docCount;
-		}
-		
-		/// <summary> Merge the TermVectors from each of the segments into the new one.</summary>
-		/// <throws>  IOException </throws>
-		private void  MergeVectors()
-		{
-			TermVectorsWriter termVectorsWriter = new TermVectorsWriter(directory, segment, fieldInfos);
-			
-			try
-			{
-				int idx = 0;
-				foreach(IndexReader reader in readers)
-				{
-					SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++];
-					TermVectorsReader matchingVectorsReader = null;
-					if (matchingSegmentReader != null)
-					{
-						TermVectorsReader vectorsReader = matchingSegmentReader.GetTermVectorsReaderOrig();
-						
-						// If the TV* files are an older format then they cannot read raw docs:
-						if (vectorsReader != null && vectorsReader.CanReadRawDocs())
-						{
-							matchingVectorsReader = vectorsReader;
-						}
-					}
-					if (reader.HasDeletions)
-					{
-						CopyVectorsWithDeletions(termVectorsWriter, matchingVectorsReader, reader);
-					}
-					else
-					{
-						CopyVectorsNoDeletions(termVectorsWriter, matchingVectorsReader, reader);
-					}
-				}
-			}
-			finally
-			{
-				termVectorsWriter.Dispose();
-			}
-			
-			System.String fileName = segment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION;
-			long tvxSize = directory.FileLength(fileName);
-			
-			if (4 + ((long) mergedDocs) * 16 != tvxSize)
-			// This is most likely a bug in Sun JRE 1.6.0_04/_05;
-			// we detect that the bug has struck, here, and
-			// throw an exception to prevent the corruption from
-			// entering the index.  See LUCENE-1282 for
-			// details.
-				throw new System.SystemException("mergeVectors produced an invalid result: mergedDocs is " + mergedDocs + " but tvx size is " + tvxSize + " file=" + fileName + " file exists?=" + directory.FileExists(fileName) + "; now aborting this merge to prevent index corruption");
-		}
-		
-		private void  CopyVectorsWithDeletions(TermVectorsWriter termVectorsWriter, TermVectorsReader matchingVectorsReader, IndexReader reader)
-		{
-			int maxDoc = reader.MaxDoc;
-			if (matchingVectorsReader != null)
-			{
-				// We can bulk-copy because the fieldInfos are "congruent"
-				for (int docNum = 0; docNum < maxDoc; )
-				{
-					if (reader.IsDeleted(docNum))
-					{
-						// skip deleted docs
-						++docNum;
-						continue;
-					}
-					// We can optimize this case (doing a bulk byte copy) since the field 
-					// numbers are identical
-					int start = docNum, numDocs = 0;
-					do 
-					{
-						docNum++;
-						numDocs++;
-						if (docNum >= maxDoc)
-							break;
-						if (reader.IsDeleted(docNum))
-						{
-							docNum++;
-							break;
-						}
-					}
-					while (numDocs < MAX_RAW_MERGE_DOCS);
-					
-					matchingVectorsReader.RawDocs(rawDocLengths, rawDocLengths2, start, numDocs);
-					termVectorsWriter.AddRawDocuments(matchingVectorsReader, rawDocLengths, rawDocLengths2, numDocs);
-					checkAbort.Work(300 * numDocs);
-				}
-			}
-			else
-			{
-				for (int docNum = 0; docNum < maxDoc; docNum++)
-				{
-					if (reader.IsDeleted(docNum))
-					{
-						// skip deleted docs
-						continue;
-					}
-					
-					// NOTE: it's very important to first assign to vectors then pass it to
-					// termVectorsWriter.addAllDocVectors; see LUCENE-1282
-					ITermFreqVector[] vectors = reader.GetTermFreqVectors(docNum);
-					termVectorsWriter.AddAllDocVectors(vectors);
-					checkAbort.Work(300);
-				}
-			}
-		}
-		
-		private void  CopyVectorsNoDeletions(TermVectorsWriter termVectorsWriter, TermVectorsReader matchingVectorsReader, IndexReader reader)
-		{
-			int maxDoc = reader.MaxDoc;
-			if (matchingVectorsReader != null)
-			{
-				// We can bulk-copy because the fieldInfos are "congruent"
-				int docCount = 0;
-				while (docCount < maxDoc)
-				{
-					int len = System.Math.Min(MAX_RAW_MERGE_DOCS, maxDoc - docCount);
-					matchingVectorsReader.RawDocs(rawDocLengths, rawDocLengths2, docCount, len);
-					termVectorsWriter.AddRawDocuments(matchingVectorsReader, rawDocLengths, rawDocLengths2, len);
-					docCount += len;
-					checkAbort.Work(300 * len);
-				}
-			}
-			else
-			{
-				for (int docNum = 0; docNum < maxDoc; docNum++)
-				{
-					// NOTE: it's very important to first assign to vectors then pass it to
-					// termVectorsWriter.addAllDocVectors; see LUCENE-1282
-					ITermFreqVector[] vectors = reader.GetTermFreqVectors(docNum);
-					termVectorsWriter.AddAllDocVectors(vectors);
-					checkAbort.Work(300);
-				}
-			}
-		}
-		
-		private SegmentMergeQueue queue = null;
-		
-		private void  MergeTerms()
-		{
-			
-			SegmentWriteState state = new SegmentWriteState(null, directory, segment, null, mergedDocs, 0, termIndexInterval);
-			
-			FormatPostingsFieldsConsumer consumer = new FormatPostingsFieldsWriter(state, fieldInfos);
-			
-			try
-			{
-				queue = new SegmentMergeQueue(readers.Count);
-				
-				MergeTermInfos(consumer);
-			}
-			finally
-			{
-				consumer.Finish();
-				if (queue != null)
-					queue.Dispose();
-			}
-		}
-		
-		internal bool omitTermFreqAndPositions;
-		
-		private void  MergeTermInfos(FormatPostingsFieldsConsumer consumer)
-		{
-			int base_Renamed = 0;
-			int readerCount = readers.Count;
-			for (int i = 0; i < readerCount; i++)
-			{
-				IndexReader reader = readers[i];
-				TermEnum termEnum = reader.Terms();
-				SegmentMergeInfo smi = new SegmentMergeInfo(base_Renamed, termEnum, reader);
-				int[] docMap = smi.GetDocMap();
-				if (docMap != null)
-				{
-					if (docMaps == null)
-					{
-						docMaps = new int[readerCount][];
-						delCounts = new int[readerCount];
-					}
-					docMaps[i] = docMap;
-					delCounts[i] = smi.reader.MaxDoc - smi.reader.NumDocs();
-				}
-				
-				base_Renamed += reader.NumDocs();
-				
-				System.Diagnostics.Debug.Assert(reader.NumDocs() == reader.MaxDoc - smi.delCount);
-				
-				if (smi.Next())
-					queue.Add(smi);
-				// initialize queue
-				else
-					smi.Dispose();
-			}
-			
-			SegmentMergeInfo[] match = new SegmentMergeInfo[readers.Count];
-			
-			System.String currentField = null;
-			FormatPostingsTermsConsumer termsConsumer = null;
-			
-			while (queue.Size() > 0)
-			{
-				int matchSize = 0; // pop matching terms
-				match[matchSize++] = queue.Pop();
-				Term term = match[0].term;
-				SegmentMergeInfo top = queue.Top();
-				
-				while (top != null && term.CompareTo(top.term) == 0)
-				{
-					match[matchSize++] = queue.Pop();
-					top = queue.Top();
-				}
-				
-				if ((System.Object) currentField != (System.Object) term.Field)
-				{
+        private SegmentReader[] matchingSegmentReaders;
+        private int[] rawDocLengths;
+        private int[] rawDocLengths2;
+        
+        private void  SetMatchingSegmentReaders()
+        {
+            // If the i'th reader is a SegmentReader and has
+            // identical fieldName -> number mapping, then this
+            // array will be non-null at position i:
+            int numReaders = readers.Count;
+            matchingSegmentReaders = new SegmentReader[numReaders];
+            
+            // If this reader is a SegmentReader, and all of its
+            // field name -> number mappings match the "merged"
+            // FieldInfos, then we can do a bulk copy of the
+            // stored fields:
+            for (int i = 0; i < numReaders; i++)
+            {
+                IndexReader reader = readers[i];
+                if (reader is SegmentReader)
+                {
+                    SegmentReader segmentReader = (SegmentReader) reader;
+                    bool same = true;
+                    FieldInfos segmentFieldInfos = segmentReader.FieldInfos();
+                    int numFieldInfos = segmentFieldInfos.Size();
+                    for (int j = 0; same && j < numFieldInfos; j++)
+                    {
+                        same = fieldInfos.FieldName(j).Equals(segmentFieldInfos.FieldName(j));
+                    }
+                    if (same)
+                    {
+                        matchingSegmentReaders[i] = segmentReader;
+                    }
+                }
+            }
+            
+            // Used for bulk-reading raw bytes for stored fields
+            rawDocLengths = new int[MAX_RAW_MERGE_DOCS];
+            rawDocLengths2 = new int[MAX_RAW_MERGE_DOCS];
+        }
+        
+        /// <summary>Merges the field infos and, when mergeDocStores is true, the stored fields of all added readers.</summary>
+        /// <returns> The number of documents in all of the readers
+        /// </returns>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  IOException if there is a low-level IO error </throws>
+        private int MergeFields()
+        {
+            
+            if (!mergeDocStores)
+            {
+                // When we are not merging the doc stores, the readers' field
+                // name -> number mappings are the same.  So, we start
+                // with the fieldInfos of the last segment in this
+                // case, to keep that numbering.
+                SegmentReader sr = (SegmentReader) readers[readers.Count - 1];
+                fieldInfos = (FieldInfos) sr.core.fieldInfos.Clone();
+            }
+            else
+            {
+                fieldInfos = new FieldInfos(); // merge field names
+            }
+            
+            foreach(IndexReader reader in readers)
+            {
+                if (reader is SegmentReader)
+                {
+                    SegmentReader segmentReader = (SegmentReader) reader;
+                    FieldInfos readerFieldInfos = segmentReader.FieldInfos();
+                    int numReaderFieldInfos = readerFieldInfos.Size();
+                    for (int j = 0; j < numReaderFieldInfos; j++)
+                    {
+                        FieldInfo fi = readerFieldInfos.FieldInfo(j);
+                        fieldInfos.Add(fi.name, fi.isIndexed, fi.storeTermVector, fi.storePositionWithTermVector, fi.storeOffsetWithTermVector, !reader.HasNorms(fi.name), fi.storePayloads, fi.omitTermFreqAndPositions);
+                    }
+                }
+                else
+                {
+                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR_WITH_POSITION_OFFSET), true, true, true, false, false);
+                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR_WITH_POSITION), true, true, false, false, false);
+                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR_WITH_OFFSET), true, false, true, false, false);
+                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR), true, false, false, false, false);
+                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.OMIT_TERM_FREQ_AND_POSITIONS), false, false, false, false, true);
+                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.STORES_PAYLOADS), false, false, false, true, false);
+                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.INDEXED), false, false, false, false, false);
+                    fieldInfos.Add(reader.GetFieldNames(FieldOption.UNINDEXED), false);
+                }
+            }
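+            // Write the merged field name -> number mapping out as the
+            // segment's .fnm file.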
+            fieldInfos.Write(directory, segment + ".fnm");
+            
+            int docCount = 0;
+            
+            SetMatchingSegmentReaders();
+            
+            if (mergeDocStores)
+            {
+                // merge field values
+                FieldsWriter fieldsWriter = new FieldsWriter(directory, segment, fieldInfos);
+                
+                try
+                {
+                    int idx = 0;
+                    foreach(IndexReader reader in readers)
+                    {
+                        SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++];
+                        FieldsReader matchingFieldsReader = null;
+                        if (matchingSegmentReader != null)
+                        {
+                            FieldsReader fieldsReader = matchingSegmentReader.GetFieldsReader();
+                            if (fieldsReader != null && fieldsReader.CanReadRawDocs())
+                            {
+                                matchingFieldsReader = fieldsReader;
+                            }
+                        }
+                        if (reader.HasDeletions)
+                        {
+                            docCount += CopyFieldsWithDeletions(fieldsWriter, reader, matchingFieldsReader);
+                        }
+                        else
+                        {
+                            docCount += CopyFieldsNoDeletions(fieldsWriter, reader, matchingFieldsReader);
+                        }
+                    }
+                }
+                finally
+                {
+                    fieldsWriter.Dispose();
+                }
+                
+                System.String fileName = segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION;
+                long fdxFileLength = directory.FileLength(fileName);
+                
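+                // Sanity check: a .fdx file is a 4-byte header plus one
+                // 8-byte pointer per document, so any other length means the
+                // stored-fields copy went wrong.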
+                if (4 + ((long) docCount) * 8 != fdxFileLength)
+                // This is most likely a bug in Sun JRE 1.6.0_04/_05;
+                // we detect that the bug has struck, here, and
+                // throw an exception to prevent the corruption from
+                // entering the index.  See LUCENE-1282 for
+                // details.
+                    throw new System.SystemException("mergeFields produced an invalid result: docCount is " + docCount + " but fdx file size is " + fdxFileLength + " file=" + fileName + " file exists?=" + directory.FileExists(fileName) + "; now aborting this merge to prevent index corruption");
+            }
+            // If we are skipping the doc stores, that means there
+            // are no deletions in any of these segments, so we
+            // just sum numDocs() of each segment to get total docCount
+            else
+            {
+                foreach(IndexReader reader in readers)
+                {
+                    docCount += reader.NumDocs();
+                }
+            }
+            
+            return docCount;
+        }
+        
+        private int CopyFieldsWithDeletions(FieldsWriter fieldsWriter, IndexReader reader, FieldsReader matchingFieldsReader)
+        {
+            int docCount = 0;
+            int maxDoc = reader.MaxDoc;
+            if (matchingFieldsReader != null)
+            {
+                // We can bulk-copy because the fieldInfos are "congruent"
+                for (int j = 0; j < maxDoc; )
+                {
+                    if (reader.IsDeleted(j))
+                    {
+                        // skip deleted docs
+                        ++j;
+                        continue;
+                    }
+                    // We can optimize this case (doing a bulk byte copy) since the field 
+                    // numbers are identical
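+                    // Gather a run of contiguous live (non-deleted) docs, up
+                    // to MAX_RAW_MERGE_DOCS, so they can be copied in one raw
+                    // byte transfer.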
+                    int start = j, numDocs = 0;
+                    do 
+                    {
+                        j++;
+                        numDocs++;
+                        if (j >= maxDoc)
+                            break;
+                        if (reader.IsDeleted(j))
+                        {
+                            j++;
+                            break;
+                        }
+                    }
+                    while (numDocs < MAX_RAW_MERGE_DOCS);
+                    
+                    IndexInput stream = matchingFieldsReader.RawDocs(rawDocLengths, start, numDocs);
+                    fieldsWriter.AddRawDocuments(stream, rawDocLengths, numDocs);
+                    docCount += numDocs;
+                    checkAbort.Work(300 * numDocs);
+                }
+            }
+            else
+            {
+                for (int j = 0; j < maxDoc; j++)
+                {
+                    if (reader.IsDeleted(j))
+                    {
+                        // skip deleted docs
+                        continue;
+                    }
+                    // NOTE: it's very important to first assign to doc then pass it to
+                    // termVectorsWriter.addAllDocVectors; see LUCENE-1282
+                    Document doc = reader.Document(j);
+                    fieldsWriter.AddDocument(doc);
+                    docCount++;
+                    checkAbort.Work(300);
+                }
+            }
+            return docCount;
+        }
+        
+        private int CopyFieldsNoDeletions(FieldsWriter fieldsWriter, IndexReader reader, FieldsReader matchingFieldsReader)
+        {
+            int maxDoc = reader.MaxDoc;
+            int docCount = 0;
+            if (matchingFieldsReader != null)
+            {
+                // We can bulk-copy because the fieldInfos are "congruent"
+                while (docCount < maxDoc)
+                {
+                    int len = System.Math.Min(MAX_RAW_MERGE_DOCS, maxDoc - docCount);
+                    IndexInput stream = matchingFieldsReader.RawDocs(rawDocLengths, docCount, len);
+                    fieldsWriter.AddRawDocuments(stream, rawDocLengths, len);
+                    docCount += len;
+                    checkAbort.Work(300 * len);
+                }
+            }
+            else
+            {
+                for (; docCount < maxDoc; docCount++)
+                {
+                    // NOTE: it's very important to first assign to doc then pass it to
+                    // termVectorsWriter.addAllDocVectors; see LUCENE-1282
+                    Document doc = reader.Document(docCount);
+                    fieldsWriter.AddDocument(doc);
+                    checkAbort.Work(300);
+                }
+            }
+            return docCount;
+        }
+        
+        /// <summary> Merge the TermVectors from each of the segments into the new one.</summary>
+        /// <throws>  IOException </throws>
+        private void  MergeVectors()
+        {
+            TermVectorsWriter termVectorsWriter = new TermVectorsWriter(directory, segment, fieldInfos);
+            
+            try
+            {
+                int idx = 0;
+                foreach(IndexReader reader in readers)
+                {
+                    SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++];
+                    TermVectorsReader matchingVectorsReader = null;
+                    if (matchingSegmentReader != null)
+                    {
+                        TermVectorsReader vectorsReader = matchingSegmentReader.GetTermVectorsReaderOrig();
+                        
+                        // If the TV* files are an older format then they cannot read raw docs:
+                        if (vectorsReader != null && vectorsReader.CanReadRawDocs())
+                        {
+                            matchingVectorsReader = vectorsReader;
+                        }
+                    }
+                    if (reader.HasDeletions)
+                    {
+                        CopyVectorsWithDeletions(termVectorsWriter, matchingVectorsReader, reader);
+                    }
+                    else
+                    {
+                        CopyVectorsNoDeletions(termVectorsWriter, matchingVectorsReader, reader);
+                    }
+                }
+            }
+            finally
+            {
+                termVectorsWriter.Dispose();
+            }
+            
+            System.String fileName = segment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION;
+            long tvxSize = directory.FileLength(fileName);
+            
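+            // Sanity check mirroring MergeFields: a .tvx file is a 4-byte
+            // header plus 16 bytes per document.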
+            if (4 + ((long) mergedDocs) * 16 != tvxSize)
+            // This is most likely a bug in Sun JRE 1.6.0_04/_05;
+            // we detect that the bug has struck, here, and
+            // throw an exception to prevent the corruption from
+            // entering the index.  See LUCENE-1282 for
+            // details.
+                throw new System.SystemException("mergeVectors produced an invalid result: mergedDocs is " + mergedDocs + " but tvx size is " + tvxSize + " file=" + fileName + " file exists?=" + directory.FileExists(fileName) + "; now aborting this merge to prevent index corruption");
+        }
+        
+        private void  CopyVectorsWithDeletions(TermVectorsWriter termVectorsWriter, TermVectorsReader matchingVectorsReader, IndexReader reader)
+        {
+            int maxDoc = reader.MaxDoc;
+            if (matchingVectorsReader != null)
+            {
+                // We can bulk-copy because the fieldInfos are "congruent"
+                for (int docNum = 0; docNum < maxDoc; )
+                {
+                    if (reader.IsDeleted(docNum))
+                    {
+                        // skip deleted docs
+                        ++docNum;
+                        continue;
+                    }
+                    // We can optimize this case (doing a bulk byte copy) since the field 
+                    // numbers are identical
+                    int start = docNum, numDocs = 0;
+                    do 
+                    {
+                        docNum++;
+                        numDocs++;
+                        if (docNum >= maxDoc)
+                            break;
+                        if (reader.IsDeleted(docNum))
+                        {
+                            docNum++;
+                            break;
+                        }
+                    }
+                    while (numDocs < MAX_RAW_MERGE_DOCS);
+                    
+                    matchingVectorsReader.RawDocs(rawDocLengths, rawDocLengths2, start, numDocs);
+                    termVectorsWriter.AddRawDocuments(matchingVectorsReader, rawDocLengths, rawDocLengths2, numDocs);
+                    checkAbort.Work(300 * numDocs);
+                }
+            }
+            else
+            {
+                for (int docNum = 0; docNum < maxDoc; docNum++)
+                {
+                    if (reader.IsDeleted(docNum))
+                    {
+                        // skip deleted docs
+                        continue;
+                    }
+                    
+                    // NOTE: it's very important to first assign to vectors then pass it to
+                    // termVectorsWriter.addAllDocVectors; see LUCENE-1282
+                    ITermFreqVector[] vectors = reader.GetTermFreqVectors(docNum);
+                    termVectorsWriter.AddAllDocVectors(vectors);
+                    checkAbort.Work(300);
+                }
+            }
+        }
+        
+        private void  CopyVectorsNoDeletions(TermVectorsWriter termVectorsWriter, TermVectorsReader matchingVectorsReader, IndexReader reader)
+        {
+            int maxDoc = reader.MaxDoc;
+            if (matchingVectorsReader != null)
+            {
+                // We can bulk-copy because the fieldInfos are "congruent"
+                int docCount = 0;
+                while (docCount < maxDoc)
+                {
+                    int len = System.Math.Min(MAX_RAW_MERGE_DOCS, maxDoc - docCount);
+                    matchingVectorsReader.RawDocs(rawDocLengths, rawDocLengths2, docCount, len);
+                    termVectorsWriter.AddRawDocuments(matchingVectorsReader, rawDocLengths, rawDocLengths2, len);
+                    docCount += len;
+                    checkAbort.Work(300 * len);
+                }
+            }
+            else
+            {
+                for (int docNum = 0; docNum < maxDoc; docNum++)
+                {
+                    // NOTE: it's very important to first assign to vectors then pass it to
+                    // termVectorsWriter.addAllDocVectors; see LUCENE-1282
+                    ITermFreqVector[] vectors = reader.GetTermFreqVectors(docNum);
+                    termVectorsWriter.AddAllDocVectors(vectors);
+                    checkAbort.Work(300);
+                }
+            }
+        }
+        
+        private SegmentMergeQueue queue = null;
+        
+        private void  MergeTerms()
+        {
+            
+            SegmentWriteState state = new SegmentWriteState(null, directory, segment, null, mergedDocs, 0, termIndexInterval);
+            
+            FormatPostingsFieldsConsumer consumer = new FormatPostingsFieldsWriter(state, fieldInfos);
+            
+            try
+            {
+                queue = new SegmentMergeQueue(readers.Count);
+                
+                MergeTermInfos(consumer);
+            }
+            finally
+            {
+                consumer.Finish();
+                if (queue != null)
+                    queue.Dispose();
+            }
+        }
+        
+        internal bool omitTermFreqAndPositions;
+        
+        private void  MergeTermInfos(FormatPostingsFieldsConsumer consumer)
+        {
+            int base_Renamed = 0;
+            int readerCount = readers.Count;
+            for (int i = 0; i < readerCount; i++)
+            {
+                IndexReader reader = readers[i];
+                TermEnum termEnum = reader.Terms();
+                SegmentMergeInfo smi = new SegmentMergeInfo(base_Renamed, termEnum, reader);
+                int[] docMap = smi.GetDocMap();
+                if (docMap != null)
+                {
+                    if (docMaps == null)
+                    {
+                        docMaps = new int[readerCount][];
+                        delCounts = new int[readerCount];
+                    }
+                    docMaps[i] = docMap;
+                    delCounts[i] = smi.reader.MaxDoc - smi.reader.NumDocs();
+                }
+                
+                base_Renamed += reader.NumDocs();
+                
+                System.Diagnostics.Debug.Assert(reader.NumDocs() == reader.MaxDoc - smi.delCount);
+                
+                if (smi.Next())
+                    queue.Add(smi);
+                // initialize queue
+                else
+                    smi.Dispose();
+            }
+            
+            SegmentMergeInfo[] match = new SegmentMergeInfo[readers.Count];
+            
+            System.String currentField = null;
+            FormatPostingsTermsConsumer termsConsumer = null;
+            
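+            // Multi-way merge over a priority queue ordered by term: each
+            // pass pops every segment currently positioned on the smallest
+            // term and merges their postings via AppendPostings.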
+            while (queue.Size() > 0)
+            {
+                int matchSize = 0; // pop matching terms
+                match[matchSize++] = queue.Pop();
+                Term term = match[0].term;
+                SegmentMergeInfo top = queue.Top();
+                
+                while (top != null && term.CompareTo(top.term) == 0)
+                {
+                    match[matchSize++] = queue.Pop();
+                    top = queue.Top();
+                }
+                
+                if ((System.Object) currentField != (System.Object) term.Field)
+                {
                     currentField = term.Field;
-					if (termsConsumer != null)
-						termsConsumer.Finish();
-					FieldInfo fieldInfo = fieldInfos.FieldInfo(currentField);
-					termsConsumer = consumer.AddField(fieldInfo);
-					omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
-				}
-				
-				int df = AppendPostings(termsConsumer, match, matchSize); // add new TermInfo
-				
-				checkAbort.Work(df / 3.0);
-				
-				while (matchSize > 0)
-				{
-					SegmentMergeInfo smi = match[--matchSize];
-					if (smi.Next())
-						queue.Add(smi);
-					// restore queue
-					else
-						smi.Dispose(); // done with a segment
-				}
-			}
-		}
-		
-		private byte[] payloadBuffer;
-		private int[][] docMaps;
-		internal int[][] GetDocMaps()
-		{
-			return docMaps;
-		}
-		private int[] delCounts;
-		internal int[] GetDelCounts()
-		{
-			return delCounts;
-		}
-		
-		/// <summary>Process postings from multiple segments all positioned on the
-		/// same term. Writes out merged entries into freqOutput and
-		/// the proxOutput streams.
-		/// 
-		/// </summary>
-		/// <param name="smis">array of segments
-		/// </param>
-		/// <param name="n">number of cells in the array actually occupied
-		/// </param>
-		/// <returns> number of documents across all segments where this term was found
-		/// </returns>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  IOException if there is a low-level IO error </throws>
-		private int AppendPostings(FormatPostingsTermsConsumer termsConsumer, SegmentMergeInfo[] smis, int n)
-		{
-			
-			FormatPostingsDocsConsumer docConsumer = termsConsumer.AddTerm(smis[0].term.Text);
-			int df = 0;
-			for (int i = 0; i < n; i++)
-			{
-				SegmentMergeInfo smi = smis[i];
-				TermPositions postings = smi.GetPositions();
-				System.Diagnostics.Debug.Assert(postings != null);
-				int base_Renamed = smi.base_Renamed;
-				int[] docMap = smi.GetDocMap();
-				postings.Seek(smi.termEnum);
-				
-				while (postings.Next())
-				{
-					df++;
-					int doc = postings.Doc;
-					if (docMap != null)
-						doc = docMap[doc]; // map around deletions
-					doc += base_Renamed; // convert to merged space
-					
-					int freq = postings.Freq;
-					FormatPostingsPositionsConsumer posConsumer = docConsumer.AddDoc(doc, freq);
-					
-					if (!omitTermFreqAndPositions)
-					{
-						for (int j = 0; j < freq; j++)
-						{
-							int position = postings.NextPosition();
-							int payloadLength = postings.PayloadLength;
-							if (payloadLength > 0)
-							{
-								if (payloadBuffer == null || payloadBuffer.Length < payloadLength)
-									payloadBuffer = new byte[payloadLength];
-								postings.GetPayload(payloadBuffer, 0);
-							}
-							posConsumer.AddPosition(position, payloadBuffer, 0, payloadLength);
-						}
-						posConsumer.Finish();
-					}
-				}
-			}
-			docConsumer.Finish();
-			
-			return df;
-		}
-		
-		private void  MergeNorms()
-		{
-			byte[] normBuffer = null;
-			IndexOutput output = null;
-			try
-			{
-				int numFieldInfos = fieldInfos.Size();
-				for (int i = 0; i < numFieldInfos; i++)
-				{
-					FieldInfo fi = fieldInfos.FieldInfo(i);
-					if (fi.isIndexed && !fi.omitNorms)
-					{
-						if (output == null)
-						{
-							output = directory.CreateOutput(segment + "." + IndexFileNames.NORMS_EXTENSION);
-							output.WriteBytes(NORMS_HEADER, NORMS_HEADER.Length);
-						}
-						foreach(IndexReader reader in readers)
-						{
-							int maxDoc = reader.MaxDoc;
-							if (normBuffer == null || normBuffer.Length < maxDoc)
-							{
-								// the buffer is too small for the current segment
-								normBuffer = new byte[maxDoc];
-							}
-							reader.Norms(fi.name, normBuffer, 0);
-							if (!reader.HasDeletions)
-							{
-								//optimized case for segments without deleted docs
-								output.WriteBytes(normBuffer, maxDoc);
-							}
-							else
-							{
-								// this segment has deleted docs, so we have to
-								// check for every doc if it is deleted or not
-								for (int k = 0; k < maxDoc; k++)
-								{
-									if (!reader.IsDeleted(k))
-									{
-										output.WriteByte(normBuffer[k]);
-									}
-								}
-							}
-							checkAbort.Work(maxDoc);
-						}
-					}
-				}
-			}
-			finally
-			{
-				if (output != null)
-				{
-					output.Close();
-				}
-			}
-		}
-		
-		internal class CheckAbort
-		{
-			private double workCount;
-			private MergePolicy.OneMerge merge;
-			private Directory dir;
-			public CheckAbort(MergePolicy.OneMerge merge, Directory dir)
-			{
-				this.merge = merge;
-				this.dir = dir;
-			}
-			
-			/// <summary> Records the fact that roughly units amount of work
-			/// have been done since this method was last called.
-			/// When adding time-consuming code into SegmentMerger,
-			/// you should test different values for units to ensure
-			/// that the time in between calls to merge.checkAborted
-			/// is up to ~ 1 second.
-			/// </summary>
-			public virtual void  Work(double units)
-			{
-				workCount += units;
-				if (workCount >= 10000.0)
-				{
-					merge.CheckAborted(dir);
-					workCount = 0;
-				}
-			}
-		}
-	}
+                    if (termsConsumer != null)
+                        termsConsumer.Finish();
+                    FieldInfo fieldInfo = fieldInfos.FieldInfo(currentField);
+                    termsConsumer = consumer.AddField(fieldInfo);
+                    omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
+                }
+                
+                int df = AppendPostings(termsConsumer, match, matchSize); // add new TermInfo
+                
+                checkAbort.Work(df / 3.0);
+                
+                while (matchSize > 0)
+                {
+                    SegmentMergeInfo smi = match[--matchSize];
+                    if (smi.Next())
+                        queue.Add(smi);
+                    // restore queue
+                    else
+                        smi.Dispose(); // done with a segment
+                }
+            }
+        }
+        
+        private byte[] payloadBuffer;
+        private int[][] docMaps;
+        internal int[][] GetDocMaps()
+        {
+            return docMaps;
+        }
+        private int[] delCounts;
+        internal int[] GetDelCounts()
+        {
+            return delCounts;
+        }
+        
+        /// <summary>Process postings from multiple segments all positioned on the
+        /// same term. Writes out merged entries into the freqOutput and
+        /// proxOutput streams.
+        /// 
+        /// </summary>
+        /// <param name="smis">array of segments
+        /// </param>
+        /// <param name="n">number of cells in the array actually occupied
+        /// </param>
+        /// <returns> number of documents across all segments where this term was found
+        /// </returns>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  IOException if there is a low-level IO error </throws>
+        private int AppendPostings(FormatPostingsTermsConsumer termsConsumer, SegmentMergeInfo[] smis, int n)
+        {
+            
+            FormatPostingsDocsConsumer docConsumer = termsConsumer.AddTerm(smis[0].term.Text);
+            int df = 0;
+            for (int i = 0; i < n; i++)
+            {
+                SegmentMergeInfo smi = smis[i];
+                TermPositions postings = smi.GetPositions();
+                System.Diagnostics.Debug.Assert(postings != null);
+                int base_Renamed = smi.base_Renamed;
+                int[] docMap = smi.GetDocMap();
+                postings.Seek(smi.termEnum);
+                
+                while (postings.Next())
+                {
+                    df++;
+                    int doc = postings.Doc;
+                    if (docMap != null)
+                        doc = docMap[doc]; // map around deletions
+                    doc += base_Renamed; // convert to merged space
+                    
+                    int freq = postings.Freq;
+                    FormatPostingsPositionsConsumer posConsumer = docConsumer.AddDoc(doc, freq);
+                    
+                    if (!omitTermFreqAndPositions)
+                    {
+                        for (int j = 0; j < freq; j++)
+                        {
+                            int position = postings.NextPosition();
+                            int payloadLength = postings.PayloadLength;
+                            if (payloadLength > 0)
+                            {
+                                if (payloadBuffer == null || payloadBuffer.Length < payloadLength)
+                                    payloadBuffer = new byte[payloadLength];
+                                postings.GetPayload(payloadBuffer, 0);
+                            }
+                            posConsumer.AddPosition(position, payloadBuffer, 0, payloadLength);
+                        }
+                        posConsumer.Finish();
+                    }
+                }
+            }
+            docConsumer.Finish();
+            
+            return df;
+        }
+        
+        private void  MergeNorms()
+        {
+            byte[] normBuffer = null;
+            IndexOutput output = null;
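+            // All fields' norms go into a single .nrm file, created lazily
+            // when the first indexed field with norms is encountered.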
+            try
+            {
+                int numFieldInfos = fieldInfos.Size();
+                for (int i = 0; i < numFieldInfos; i++)
+                {
+                    FieldInfo fi = fieldInfos.FieldInfo(i);
+                    if (fi.isIndexed && !fi.omitNorms)
+                    {
+                        if (output == null)
+                        {
+                            output = directory.CreateOutput(segment + "." + IndexFileNames.NORMS_EXTENSION);
+                            output.WriteBytes(NORMS_HEADER, NORMS_HEADER.Length);
+                        }
+                        foreach(IndexReader reader in readers)
+                        {
+                            int maxDoc = reader.MaxDoc;
+                            if (normBuffer == null || normBuffer.Length < maxDoc)
+                            {
+                                // the buffer is too small for the current segment
+                                normBuffer = new byte[maxDoc];
+                            }
+                            reader.Norms(fi.name, normBuffer, 0);
+                            if (!reader.HasDeletions)
+                            {
+                                //optimized case for segments without deleted docs
+                                output.WriteBytes(normBuffer, maxDoc);
+                            }
+                            else
+                            {
+                                // this segment has deleted docs, so we have to
+                                // check for every doc if it is deleted or not
+                                for (int k = 0; k < maxDoc; k++)
+                                {
+                                    if (!reader.IsDeleted(k))
+                                    {
+                                        output.WriteByte(normBuffer[k]);
+                                    }
+                                }
+                            }
+                            checkAbort.Work(maxDoc);
+                        }
+                    }
+                }
+            }
+            finally
+            {
+                if (output != null)
+                {
+                    output.Close();
+                }
+            }
+        }
+        
+        internal class CheckAbort
+        {
+            private double workCount;
+            private MergePolicy.OneMerge merge;
+            private Directory dir;
+            public CheckAbort(MergePolicy.OneMerge merge, Directory dir)
+            {
+                this.merge = merge;
+                this.dir = dir;
+            }
+            
+            /// <summary> Records the fact that roughly <c>units</c> units of work
+            /// have been done since this method was last called.
+            /// When adding time-consuming code into SegmentMerger,
+            /// you should test different values for units to ensure
+            /// that the time in between calls to merge.checkAborted
+            /// is up to ~ 1 second.
+            /// </summary>
+            public virtual void  Work(double units)
+            {
+                workCount += units;
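+                // Only poll merge.CheckAborted every ~10000 accumulated work
+                // units to keep the abort check cheap.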
+                if (workCount >= 10000.0)
+                {
+                    merge.CheckAborted(dir);
+                    workCount = 0;
+                }
+            }
+        }
+    }
 }
\ No newline at end of file


[05/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/SegmentInfo.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/SegmentInfo.cs b/src/core/Index/SegmentInfo.cs
index 697dda6..4be12ad 100644
--- a/src/core/Index/SegmentInfo.cs
+++ b/src/core/Index/SegmentInfo.cs
@@ -25,345 +25,345 @@ using BitVector = Lucene.Net.Util.BitVector;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> Information about a segment such as it's name, directory, and files related
-	/// to the segment.
-	/// 
-	/// * <p/><b>NOTE:</b> This API is new and still experimental
-	/// (subject to change suddenly in the next release)<p/>
-	/// </summary>
-	public sealed class SegmentInfo : System.ICloneable
-	{
-		
-		internal const int NO = - 1;            // e.g. no norms; no deletes;
-		internal const int YES = 1;             // e.g. have norms; have deletes;
-		internal const int CHECK_DIR = 0;       // e.g. must check dir to see if there are norms/deletions
-		internal const int WITHOUT_GEN = 0;     // a file name that has no GEN in it. 
-		
-		public System.String name;              // unique name in dir
-		public int docCount;                    // number of docs in seg
-		public Directory dir;                   // where segment resides
-		
-		private bool preLockless;               // true if this is a segments file written before
-		                                        // lock-less commits (2.1)
-		
-		private long delGen;                    // current generation of del file; NO if there
-		                                        // are no deletes; CHECK_DIR if it's a pre-2.1 segment
-		                                        // (and we must check filesystem); YES or higher if
-		                                        // there are deletes at generation N
-		
-		private long[] normGen;                 // current generation of each field's norm file.
-		                                        // If this array is null, for lockLess this means no 
-		                                        // separate norms.  For preLockLess this means we must 
-		                                        // check filesystem. If this array is not null, its 
-		                                        // values mean: NO says this field has no separate  
-		                                        // norms; CHECK_DIR says it is a preLockLess segment and    
-		                                        // filesystem must be checked; >= YES says this field  
-		                                        // has separate norms with the specified generation
-		
-		private sbyte isCompoundFile;           // NO if it is not; YES if it is; CHECK_DIR if it's
-		                                        // pre-2.1 (ie, must check file system to see
-		                                        // if <name>.cfs and <name>.nrm exist)         
-		
-		private bool hasSingleNormFile;         // true if this segment maintains norms in a single file; 
-		                                        // false otherwise
-		                                        // this is currently false for segments populated by DocumentWriter
-		                                        // and true for newly created merged segments (both
-		                                        // compound and non compound).
-		
-		private IList<string> files;            // cached list of files that this segment uses
-		                                        // in the Directory
-		
-		internal long sizeInBytes = - 1;        // total byte size of all of our files (computed on demand)
-		
-		private int docStoreOffset;             // if this segment shares stored fields & vectors, this
-		                                        // offset is where in that file this segment's docs begin
-		private System.String docStoreSegment;  // name used to derive fields/vectors file we share with
-		                                        // other segments
-		private bool docStoreIsCompoundFile;    // whether doc store files are stored in compound file (*.cfx)
-		
-		private int delCount;                   // How many deleted docs in this segment, or -1 if not yet known
-		                                        // (if it's an older index)
-		
-		private bool hasProx;                   // True if this segment has any fields with omitTermFreqAndPositions==false
+    
+    /// <summary> Information about a segment such as its name, directory, and files related
+    /// to the segment.
+    /// 
+    /// <p/><b>NOTE:</b> This API is new and still experimental
+    /// (subject to change suddenly in the next release)<p/>
+    /// </summary>
+    public sealed class SegmentInfo : System.ICloneable
+    {
+        
+        internal const int NO = - 1;            // e.g. no norms; no deletes;
+        internal const int YES = 1;             // e.g. have norms; have deletes;
+        internal const int CHECK_DIR = 0;       // e.g. must check dir to see if there are norms/deletions
+        internal const int WITHOUT_GEN = 0;     // a file name that has no GEN in it. 
+        
+        public System.String name;              // unique name in dir
+        public int docCount;                    // number of docs in seg
+        public Directory dir;                   // where segment resides
+        
+        private bool preLockless;               // true if this is a segments file written before
+                                                // lock-less commits (2.1)
+        
+        private long delGen;                    // current generation of del file; NO if there
+                                                // are no deletes; CHECK_DIR if it's a pre-2.1 segment
+                                                // (and we must check filesystem); YES or higher if
+                                                // there are deletes at generation N
+        
+        private long[] normGen;                 // current generation of each field's norm file.
+                                                // If this array is null, for lockLess this means no 
+                                                // separate norms.  For preLockLess this means we must 
+                                                // check filesystem. If this array is not null, its 
+                                                // values mean: NO says this field has no separate  
+                                                // norms; CHECK_DIR says it is a preLockLess segment and    
+                                                // filesystem must be checked; >= YES says this field  
+                                                // has separate norms with the specified generation
+        
+        private sbyte isCompoundFile;           // NO if it is not; YES if it is; CHECK_DIR if it's
+                                                // pre-2.1 (ie, must check file system to see
+                                                // if <name>.cfs and <name>.nrm exist)         
+        
+        private bool hasSingleNormFile;         // true if this segment maintains norms in a single file; 
+                                                // false otherwise
+                                                // this is currently false for segments populated by DocumentWriter
+                                                // and true for newly created merged segments (both
+                                                // compound and non compound).
+        
+        private IList<string> files;            // cached list of files that this segment uses
+                                                // in the Directory
+        
+        internal long sizeInBytes = - 1;        // total byte size of all of our files (computed on demand)
+        
+        private int docStoreOffset;             // if this segment shares stored fields & vectors, this
+                                                // offset is where in that file this segment's docs begin
+        private System.String docStoreSegment;  // name used to derive fields/vectors file we share with
+                                                // other segments
+        private bool docStoreIsCompoundFile;    // whether doc store files are stored in compound file (*.cfx)
+        
+        private int delCount;                   // How many deleted docs in this segment, or -1 if not yet known
+                                                // (if it's an older index)
+        
+        private bool hasProx;                   // True if this segment has any fields with omitTermFreqAndPositions==false
 
         private IDictionary<string, string> diagnostics;
-		
-		public override System.String ToString()
-		{
-			return "si: " + dir.ToString() + " " + name + " docCount: " + docCount + " delCount: " + delCount + " delFileName: " + GetDelFileName();
-		}
-		
-		public SegmentInfo(System.String name, int docCount, Directory dir)
-		{
-			this.name = name;
-			this.docCount = docCount;
-			this.dir = dir;
-			delGen = NO;
-			isCompoundFile = (sbyte) (CHECK_DIR);
-			preLockless = true;
-			hasSingleNormFile = false;
-			docStoreOffset = - 1;
-			docStoreSegment = name;
-			docStoreIsCompoundFile = false;
-			delCount = 0;
-			hasProx = true;
-		}
-		
-		public SegmentInfo(System.String name, int docCount, Directory dir, bool isCompoundFile, bool hasSingleNormFile):this(name, docCount, dir, isCompoundFile, hasSingleNormFile, - 1, null, false, true)
-		{
-		}
-		
-		public SegmentInfo(System.String name, int docCount, Directory dir, bool isCompoundFile, bool hasSingleNormFile, int docStoreOffset, System.String docStoreSegment, bool docStoreIsCompoundFile, bool hasProx):this(name, docCount, dir)
-		{
-			this.isCompoundFile = (sbyte) (isCompoundFile?YES:NO);
-			this.hasSingleNormFile = hasSingleNormFile;
-			preLockless = false;
-			this.docStoreOffset = docStoreOffset;
-			this.docStoreSegment = docStoreSegment;
-			this.docStoreIsCompoundFile = docStoreIsCompoundFile;
-			this.hasProx = hasProx;
-			delCount = 0;
-			System.Diagnostics.Debug.Assert(docStoreOffset == - 1 || docStoreSegment != null, "dso=" + docStoreOffset + " dss=" + docStoreSegment + " docCount=" + docCount);
-		}
-		
-		/// <summary> Copy everything from src SegmentInfo into our instance.</summary>
-		internal void Reset(SegmentInfo src)
-		{
-			ClearFiles();
-			name = src.name;
-			docCount = src.docCount;
-			dir = src.dir;
-			preLockless = src.preLockless;
-			delGen = src.delGen;
-			docStoreOffset = src.docStoreOffset;
-			docStoreIsCompoundFile = src.docStoreIsCompoundFile;
-			if (src.normGen == null)
-			{
-				normGen = null;
-			}
-			else
-			{
-				normGen = new long[src.normGen.Length];
-				Array.Copy(src.normGen, 0, normGen, 0, src.normGen.Length);
-			}
-			isCompoundFile = src.isCompoundFile;
-			hasSingleNormFile = src.hasSingleNormFile;
-			delCount = src.delCount;
-		}
+        
+        public override System.String ToString()
+        {
+            return "si: " + dir.ToString() + " " + name + " docCount: " + docCount + " delCount: " + delCount + " delFileName: " + GetDelFileName();
+        }
+        
+        public SegmentInfo(System.String name, int docCount, Directory dir)
+        {
+            this.name = name;
+            this.docCount = docCount;
+            this.dir = dir;
+            delGen = NO;
+            isCompoundFile = (sbyte) (CHECK_DIR);
+            preLockless = true;
+            hasSingleNormFile = false;
+            docStoreOffset = - 1;
+            docStoreSegment = name;
+            docStoreIsCompoundFile = false;
+            delCount = 0;
+            hasProx = true;
+        }
+        
+        public SegmentInfo(System.String name, int docCount, Directory dir, bool isCompoundFile, bool hasSingleNormFile):this(name, docCount, dir, isCompoundFile, hasSingleNormFile, - 1, null, false, true)
+        {
+        }
+        
+        public SegmentInfo(System.String name, int docCount, Directory dir, bool isCompoundFile, bool hasSingleNormFile, int docStoreOffset, System.String docStoreSegment, bool docStoreIsCompoundFile, bool hasProx):this(name, docCount, dir)
+        {
+            this.isCompoundFile = (sbyte) (isCompoundFile?YES:NO);
+            this.hasSingleNormFile = hasSingleNormFile;
+            preLockless = false;
+            this.docStoreOffset = docStoreOffset;
+            this.docStoreSegment = docStoreSegment;
+            this.docStoreIsCompoundFile = docStoreIsCompoundFile;
+            this.hasProx = hasProx;
+            delCount = 0;
+            System.Diagnostics.Debug.Assert(docStoreOffset == - 1 || docStoreSegment != null, "dso=" + docStoreOffset + " dss=" + docStoreSegment + " docCount=" + docCount);
+        }
+        
+        /// <summary> Copy everything from src SegmentInfo into our instance.</summary>
+        internal void Reset(SegmentInfo src)
+        {
+            ClearFiles();
+            name = src.name;
+            docCount = src.docCount;
+            dir = src.dir;
+            preLockless = src.preLockless;
+            delGen = src.delGen;
+            docStoreOffset = src.docStoreOffset;
+            docStoreIsCompoundFile = src.docStoreIsCompoundFile;
+            if (src.normGen == null)
+            {
+                normGen = null;
+            }
+            else
+            {
+                normGen = new long[src.normGen.Length];
+                Array.Copy(src.normGen, 0, normGen, 0, src.normGen.Length);
+            }
+            isCompoundFile = src.isCompoundFile;
+            hasSingleNormFile = src.hasSingleNormFile;
+            delCount = src.delCount;
+        }
 
-	    public IDictionary<string, string> Diagnostics
-	    {
-	        get { return diagnostics; }
-	        internal set { this.diagnostics = value; }
-	    }
+        public IDictionary<string, string> Diagnostics
+        {
+            get { return diagnostics; }
+            internal set { this.diagnostics = value; }
+        }
 
-	    /// <summary> Construct a new SegmentInfo instance by reading a
-		/// previously saved SegmentInfo from input.
-		/// 
-		/// </summary>
-		/// <param name="dir">directory to load from
-		/// </param>
-		/// <param name="format">format of the segments info file
-		/// </param>
-		/// <param name="input">input handle to read segment info from
-		/// </param>
-		internal SegmentInfo(Directory dir, int format, IndexInput input)
-		{
-			this.dir = dir;
-			name = input.ReadString();
-			docCount = input.ReadInt();
-			if (format <= SegmentInfos.FORMAT_LOCKLESS)
-			{
-				delGen = input.ReadLong();
-				if (format <= SegmentInfos.FORMAT_SHARED_DOC_STORE)
-				{
-					docStoreOffset = input.ReadInt();
-					if (docStoreOffset != - 1)
-					{
-						docStoreSegment = input.ReadString();
-						docStoreIsCompoundFile = (1 == input.ReadByte());
-					}
-					else
-					{
-						docStoreSegment = name;
-						docStoreIsCompoundFile = false;
-					}
-				}
-				else
-				{
-					docStoreOffset = - 1;
-					docStoreSegment = name;
-					docStoreIsCompoundFile = false;
-				}
-				if (format <= SegmentInfos.FORMAT_SINGLE_NORM_FILE)
-				{
-					hasSingleNormFile = (1 == input.ReadByte());
-				}
-				else
-				{
-					hasSingleNormFile = false;
-				}
-				int numNormGen = input.ReadInt();
-				if (numNormGen == NO)
-				{
-					normGen = null;
-				}
-				else
-				{
-					normGen = new long[numNormGen];
-					for (int j = 0; j < numNormGen; j++)
-					{
-						normGen[j] = input.ReadLong();
-					}
-				}
-				isCompoundFile = (sbyte) input.ReadByte();
-				preLockless = (isCompoundFile == CHECK_DIR);
-				if (format <= SegmentInfos.FORMAT_DEL_COUNT)
-				{
-					delCount = input.ReadInt();
-					System.Diagnostics.Debug.Assert(delCount <= docCount);
-				}
-				else
-					delCount = - 1;
-				if (format <= SegmentInfos.FORMAT_HAS_PROX)
-					hasProx = input.ReadByte() == 1;
-				else
-					hasProx = true;
-				
-				if (format <= SegmentInfos.FORMAT_DIAGNOSTICS)
-				{
-					diagnostics = input.ReadStringStringMap();
-				}
-				else
-				{
-					diagnostics = new Dictionary<string,string>();
-				}
-			}
-			else
-			{
-				delGen = CHECK_DIR;
-				normGen = null;
-				isCompoundFile = (sbyte) (CHECK_DIR);
-				preLockless = true;
-				hasSingleNormFile = false;
-				docStoreOffset = - 1;
-				docStoreIsCompoundFile = false;
-				docStoreSegment = null;
-				delCount = - 1;
-				hasProx = true;
-				diagnostics = new Dictionary<string,string>();
-			}
-		}
-		
-		internal void  SetNumFields(int numFields)
-		{
-			if (normGen == null)
-			{
-				// normGen is null if we loaded a pre-2.1 segment
-				// file, or, if this segments file hasn't had any
-				// norms set against it yet:
-				normGen = new long[numFields];
-				
-				if (preLockless)
-				{
-					// Do nothing: thus leaving normGen[k]==CHECK_DIR (==0), so that later we know  
-					// we have to check filesystem for norm files, because this is prelockless.
-				}
-				else
-				{
-					// This is a FORMAT_LOCKLESS segment, which means
-					// there are no separate norms:
-					for (int i = 0; i < numFields; i++)
-					{
-						normGen[i] = NO;
-					}
-				}
-			}
-		}
-		
-		/// <summary>Returns total size in bytes of all of files used by
-		/// this segment. 
-		/// </summary>
+        /// <summary> Construct a new SegmentInfo instance by reading a
+        /// previously saved SegmentInfo from input.
+        /// 
+        /// </summary>
+        /// <param name="dir">directory to load from
+        /// </param>
+        /// <param name="format">format of the segments info file
+        /// </param>
+        /// <param name="input">input handle to read segment info from
+        /// </param>
+        internal SegmentInfo(Directory dir, int format, IndexInput input)
+        {
+            this.dir = dir;
+            name = input.ReadString();
+            docCount = input.ReadInt();
+            if (format <= SegmentInfos.FORMAT_LOCKLESS)
+            {
+                delGen = input.ReadLong();
+                if (format <= SegmentInfos.FORMAT_SHARED_DOC_STORE)
+                {
+                    docStoreOffset = input.ReadInt();
+                    if (docStoreOffset != - 1)
+                    {
+                        docStoreSegment = input.ReadString();
+                        docStoreIsCompoundFile = (1 == input.ReadByte());
+                    }
+                    else
+                    {
+                        docStoreSegment = name;
+                        docStoreIsCompoundFile = false;
+                    }
+                }
+                else
+                {
+                    docStoreOffset = - 1;
+                    docStoreSegment = name;
+                    docStoreIsCompoundFile = false;
+                }
+                if (format <= SegmentInfos.FORMAT_SINGLE_NORM_FILE)
+                {
+                    hasSingleNormFile = (1 == input.ReadByte());
+                }
+                else
+                {
+                    hasSingleNormFile = false;
+                }
+                int numNormGen = input.ReadInt();
+                if (numNormGen == NO)
+                {
+                    normGen = null;
+                }
+                else
+                {
+                    normGen = new long[numNormGen];
+                    for (int j = 0; j < numNormGen; j++)
+                    {
+                        normGen[j] = input.ReadLong();
+                    }
+                }
+                isCompoundFile = (sbyte) input.ReadByte();
+                preLockless = (isCompoundFile == CHECK_DIR);
+                if (format <= SegmentInfos.FORMAT_DEL_COUNT)
+                {
+                    delCount = input.ReadInt();
+                    System.Diagnostics.Debug.Assert(delCount <= docCount);
+                }
+                else
+                    delCount = - 1;
+                if (format <= SegmentInfos.FORMAT_HAS_PROX)
+                    hasProx = input.ReadByte() == 1;
+                else
+                    hasProx = true;
+                
+                if (format <= SegmentInfos.FORMAT_DIAGNOSTICS)
+                {
+                    diagnostics = input.ReadStringStringMap();
+                }
+                else
+                {
+                    diagnostics = new Dictionary<string,string>();
+                }
+            }
+            else
+            {
+                delGen = CHECK_DIR;
+                normGen = null;
+                isCompoundFile = (sbyte) (CHECK_DIR);
+                preLockless = true;
+                hasSingleNormFile = false;
+                docStoreOffset = - 1;
+                docStoreIsCompoundFile = false;
+                docStoreSegment = null;
+                delCount = - 1;
+                hasProx = true;
+                diagnostics = new Dictionary<string,string>();
+            }
+        }
+        
+        internal void  SetNumFields(int numFields)
+        {
+            if (normGen == null)
+            {
+                // normGen is null if we loaded a pre-2.1 segment
+                // file, or, if this segments file hasn't had any
+                // norms set against it yet:
+                normGen = new long[numFields];
+                
+                if (preLockless)
+                {
+                    // Do nothing: thus leaving normGen[k]==CHECK_DIR (==0), so that later we know  
+                    // we have to check filesystem for norm files, because this is prelockless.
+                }
+                else
+                {
+                    // This is a FORMAT_LOCKLESS segment, which means
+                    // there are no separate norms:
+                    for (int i = 0; i < numFields; i++)
+                    {
+                        normGen[i] = NO;
+                    }
+                }
+            }
+        }
+        
+        /// <summary>Returns the total size in bytes of all of the files used by
+        /// this segment. 
+        /// </summary>
         public long SizeInBytes()
-		{
-			if (sizeInBytes == - 1)
-			{
-				IList<string> files = Files();
-				int size = files.Count;
-				sizeInBytes = 0;
-				for (int i = 0; i < size; i++)
-				{
-					System.String fileName = files[i];
-					// We don't count bytes used by a shared doc store
-					// against this segment:
-					if (docStoreOffset == - 1 || !IndexFileNames.IsDocStoreFile(fileName))
-						sizeInBytes += dir.FileLength(fileName);
-				}
-			}
-			return sizeInBytes;
-		}
+        {
+            if (sizeInBytes == - 1)
+            {
+                IList<string> files = Files();
+                int size = files.Count;
+                sizeInBytes = 0;
+                for (int i = 0; i < size; i++)
+                {
+                    System.String fileName = files[i];
+                    // We don't count bytes used by a shared doc store
+                    // against this segment:
+                    if (docStoreOffset == - 1 || !IndexFileNames.IsDocStoreFile(fileName))
+                        sizeInBytes += dir.FileLength(fileName);
+                }
+            }
+            return sizeInBytes;
+        }
 
         public bool HasDeletions()
-		{
-			// Cases:
-			//
-			//   delGen == NO: this means this segment was written
-			//     by the LOCKLESS code and for certain does not have
-			//     deletions yet
-			//
-			//   delGen == CHECK_DIR: this means this segment was written by
-			//     pre-LOCKLESS code which means we must check
-			//     directory to see if .del file exists
-			//
-			//   delGen >= YES: this means this segment was written by
-			//     the LOCKLESS code and for certain has
-			//     deletions
-			//
-			if (delGen == NO)
-			{
-				return false;
-			}
-			else if (delGen >= YES)
-			{
-				return true;
-			}
-			else
-			{
-				return dir.FileExists(GetDelFileName());
-			}
-		}
-		
-		internal void  AdvanceDelGen()
-		{
-			// delGen 0 is reserved for pre-LOCKLESS format
-			if (delGen == NO)
-			{
-				delGen = YES;
-			}
-			else
-			{
-				delGen++;
-			}
-			ClearFiles();
-		}
-		
-		internal void  ClearDelGen()
-		{
-			delGen = NO;
-			ClearFiles();
-		}
-		
-		public System.Object Clone()
-		{
-			SegmentInfo si = new SegmentInfo(name, docCount, dir);
-			si.isCompoundFile = isCompoundFile;
-			si.delGen = delGen;
-			si.delCount = delCount;
-			si.hasProx = hasProx;
-			si.preLockless = preLockless;
-			si.hasSingleNormFile = hasSingleNormFile;
-		    si.diagnostics = new HashMap<string, string>(this.diagnostics);
+        {
+            // Cases:
+            //
+            //   delGen == NO: this means this segment was written
+            //     by the LOCKLESS code and for certain does not have
+            //     deletions yet
+            //
+            //   delGen == CHECK_DIR: this means this segment was written by
+            //     pre-LOCKLESS code which means we must check
+            //     directory to see if .del file exists
+            //
+            //   delGen >= YES: this means this segment was written by
+            //     the LOCKLESS code and for certain has
+            //     deletions
+            //
+            if (delGen == NO)
+            {
+                return false;
+            }
+            else if (delGen >= YES)
+            {
+                return true;
+            }
+            else
+            {
+                return dir.FileExists(GetDelFileName());
+            }
+        }
+        
+        internal void  AdvanceDelGen()
+        {
+            // delGen 0 is reserved for pre-LOCKLESS format
+            if (delGen == NO)
+            {
+                delGen = YES;
+            }
+            else
+            {
+                delGen++;
+            }
+            ClearFiles();
+        }
+        
+        internal void  ClearDelGen()
+        {
+            delGen = NO;
+            ClearFiles();
+        }
+        
+        public System.Object Clone()
+        {
+            SegmentInfo si = new SegmentInfo(name, docCount, dir);
+            si.isCompoundFile = isCompoundFile;
+            si.delGen = delGen;
+            si.delCount = delCount;
+            si.hasProx = hasProx;
+            si.preLockless = preLockless;
+            si.hasSingleNormFile = hasSingleNormFile;
             if (this.diagnostics != null)
             {
                 si.diagnostics = new System.Collections.Generic.Dictionary<string, string>();
@@ -372,14 +372,14 @@ namespace Lucene.Net.Index
                     si.diagnostics.Add(o,diagnostics[o]);
                 }
             }
-			if (normGen != null)
-			{
-				si.normGen = new long[normGen.Length];
-				normGen.CopyTo(si.normGen, 0);
-			}
-			si.docStoreOffset = docStoreOffset;
-			si.docStoreSegment = docStoreSegment;
-			si.docStoreIsCompoundFile = docStoreIsCompoundFile;
+            if (normGen != null)
+            {
+                si.normGen = new long[normGen.Length];
+                normGen.CopyTo(si.normGen, 0);
+            }
+            si.docStoreOffset = docStoreOffset;
+            si.docStoreSegment = docStoreSegment;
+            si.docStoreIsCompoundFile = docStoreIsCompoundFile;
             if (this.files != null)
             {
                 si.files = new System.Collections.Generic.List<string>();
@@ -389,474 +389,474 @@ namespace Lucene.Net.Index
                 }
             }
             
-			return si;
-		}
+            return si;
+        }
 
         [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1024:UsePropertiesWhereAppropriate")]
         public System.String GetDelFileName()
-		{
-			if (delGen == NO)
-			{
-				// In this case we know there is no deletion filename
-				// against this segment
-				return null;
-			}
-			else
-			{
-				// If delGen is CHECK_DIR, it's the pre-lockless-commit file format
-				return IndexFileNames.FileNameFromGeneration(name, "." + IndexFileNames.DELETES_EXTENSION, delGen);
-			}
-		}
+        {
+            if (delGen == NO)
+            {
+                // In this case we know there is no deletion filename
+                // against this segment
+                return null;
+            }
+            else
+            {
+                // If delGen is CHECK_DIR, it's the pre-lockless-commit file format
+                return IndexFileNames.FileNameFromGeneration(name, "." + IndexFileNames.DELETES_EXTENSION, delGen);
+            }
+        }
 
         /// <summary> Returns true if this field for this segment has saved a separate norms file (_&lt;segment&gt;_N.sX).
-		/// 
-		/// </summary>
-		/// <param name="fieldNumber">the field index to check
-		/// </param>
-		public bool HasSeparateNorms(int fieldNumber)
-		{
-			if ((normGen == null && preLockless) || (normGen != null && normGen[fieldNumber] == CHECK_DIR))
-			{
-				// Must fallback to directory file exists check:
-				System.String fileName = name + ".s" + fieldNumber;
-				return dir.FileExists(fileName);
-			}
-			else if (normGen == null || normGen[fieldNumber] == NO)
-			{
-				return false;
-			}
-			else
-			{
-				return true;
-			}
-		}
-		
-		/// <summary> Returns true if any fields in this segment have separate norms.</summary>
-		public bool HasSeparateNorms()
-		{
-			if (normGen == null)
-			{
-				if (!preLockless)
-				{
-					// This means we were created w/ LOCKLESS code and no
-					// norms are written yet:
-					return false;
-				}
-				else
-				{
-					// This means this segment was saved with pre-LOCKLESS
-					// code.  So we must fallback to the original
-					// directory list check:
-					System.String[] result = dir.ListAll();
-					if (result == null)
-					{
+        /// 
+        /// </summary>
+        /// <param name="fieldNumber">the field index to check
+        /// </param>
+        public bool HasSeparateNorms(int fieldNumber)
+        {
+            if ((normGen == null && preLockless) || (normGen != null && normGen[fieldNumber] == CHECK_DIR))
+            {
+                // Must fallback to directory file exists check:
+                System.String fileName = name + ".s" + fieldNumber;
+                return dir.FileExists(fileName);
+            }
+            else if (normGen == null || normGen[fieldNumber] == NO)
+            {
+                return false;
+            }
+            else
+            {
+                return true;
+            }
+        }
+        
+        /// <summary> Returns true if any fields in this segment have separate norms.</summary>
+        public bool HasSeparateNorms()
+        {
+            if (normGen == null)
+            {
+                if (!preLockless)
+                {
+                    // This means we were created w/ LOCKLESS code and no
+                    // norms are written yet:
+                    return false;
+                }
+                else
+                {
+                    // This means this segment was saved with pre-LOCKLESS
+                    // code.  So we must fallback to the original
+                    // directory list check:
+                    System.String[] result = dir.ListAll();
+                    if (result == null)
+                    {
                         throw new System.IO.IOException("cannot read directory " + dir + ": ListAll() returned null");
-					}
+                    }
 
-				    IndexFileNameFilter filter = IndexFileNameFilter.Filter;
-					System.String pattern;
-					pattern = name + ".s";
-					int patternLength = pattern.Length;
-					for (int i = 0; i < result.Length; i++)
-					{
-					    string fileName = result[i];
-						if (filter.Accept(null, fileName) && fileName.StartsWith(pattern) && char.IsDigit(fileName[patternLength]))
-							return true;
-					}
-					return false;
-				}
-			}
-			else
-			{
-				// This means this segment was saved with LOCKLESS
-				// code so we first check whether any normGen's are >= 1
-				// (meaning they definitely have separate norms):
-				for (int i = 0; i < normGen.Length; i++)
-				{
-					if (normGen[i] >= YES)
-					{
-						return true;
-					}
-				}
-				// Next we look for any == 0.  These cases were
-				// pre-LOCKLESS and must be checked in directory:
-				for (int i = 0; i < normGen.Length; i++)
-				{
-					if (normGen[i] == CHECK_DIR)
-					{
-						if (HasSeparateNorms(i))
-						{
-							return true;
-						}
-					}
-				}
-			}
-			
-			return false;
-		}
-		
-		/// <summary> Increment the generation count for the norms file for
-		/// this field.
-		/// 
-		/// </summary>
-		/// <param name="fieldIndex">field whose norm file will be rewritten
-		/// </param>
-		internal void  AdvanceNormGen(int fieldIndex)
-		{
-			if (normGen[fieldIndex] == NO)
-			{
-				normGen[fieldIndex] = YES;
-			}
-			else
-			{
-				normGen[fieldIndex]++;
-			}
-			ClearFiles();
-		}
-		
-		/// <summary> Get the file name for the norms file for this field.
-		/// 
-		/// </summary>
-		/// <param name="number">field index
-		/// </param>
-		public System.String GetNormFileName(int number)
-		{
-			System.String prefix;
-			
-			long gen;
-			if (normGen == null)
-			{
-				gen = CHECK_DIR;
-			}
-			else
-			{
-				gen = normGen[number];
-			}
-			
-			if (HasSeparateNorms(number))
-			{
-				// case 1: separate norm
-				prefix = ".s";
-				return IndexFileNames.FileNameFromGeneration(name, prefix + number, gen);
-			}
-			
-			if (hasSingleNormFile)
-			{
-				// case 2: lockless (or nrm file exists) - single file for all norms 
-				prefix = "." + IndexFileNames.NORMS_EXTENSION;
-				return IndexFileNames.FileNameFromGeneration(name, prefix, WITHOUT_GEN);
-			}
-			
-			// case 3: norm file for each field
-			prefix = ".f";
-			return IndexFileNames.FileNameFromGeneration(name, prefix + number, WITHOUT_GEN);
-		}
+                    IndexFileNameFilter filter = IndexFileNameFilter.Filter;
+                    System.String pattern;
+                    pattern = name + ".s";
+                    int patternLength = pattern.Length;
+                    for (int i = 0; i < result.Length; i++)
+                    {
+                        string fileName = result[i];
+                        if (filter.Accept(null, fileName) && fileName.StartsWith(pattern) && char.IsDigit(fileName[patternLength]))
+                            return true;
+                    }
+                    return false;
+                }
+            }
+            else
+            {
+                // This means this segment was saved with LOCKLESS
+                // code so we first check whether any normGen's are >= 1
+                // (meaning they definitely have separate norms):
+                for (int i = 0; i < normGen.Length; i++)
+                {
+                    if (normGen[i] >= YES)
+                    {
+                        return true;
+                    }
+                }
+                // Next we look for any == 0.  These cases were
+                // pre-LOCKLESS and must be checked in directory:
+                for (int i = 0; i < normGen.Length; i++)
+                {
+                    if (normGen[i] == CHECK_DIR)
+                    {
+                        if (HasSeparateNorms(i))
+                        {
+                            return true;
+                        }
+                    }
+                }
+            }
+            
+            return false;
+        }
+        
+        /// <summary> Increment the generation count for the norms file for
+        /// this field.
+        /// 
+        /// </summary>
+        /// <param name="fieldIndex">field whose norm file will be rewritten
+        /// </param>
+        internal void  AdvanceNormGen(int fieldIndex)
+        {
+            if (normGen[fieldIndex] == NO)
+            {
+                normGen[fieldIndex] = YES;
+            }
+            else
+            {
+                normGen[fieldIndex]++;
+            }
+            ClearFiles();
+        }
+        
+        /// <summary> Get the file name for the norms file for this field.
+        /// 
+        /// </summary>
+        /// <param name="number">field index
+        /// </param>
+        public System.String GetNormFileName(int number)
+        {
+            System.String prefix;
+            
+            long gen;
+            if (normGen == null)
+            {
+                gen = CHECK_DIR;
+            }
+            else
+            {
+                gen = normGen[number];
+            }
+            
+            if (HasSeparateNorms(number))
+            {
+                // case 1: separate norm
+                prefix = ".s";
+                return IndexFileNames.FileNameFromGeneration(name, prefix + number, gen);
+            }
+            
+            if (hasSingleNormFile)
+            {
+                // case 2: lockless (or nrm file exists) - single file for all norms 
+                prefix = "." + IndexFileNames.NORMS_EXTENSION;
+                return IndexFileNames.FileNameFromGeneration(name, prefix, WITHOUT_GEN);
+            }
+            
+            // case 3: norm file for each field
+            prefix = ".f";
+            return IndexFileNames.FileNameFromGeneration(name, prefix + number, WITHOUT_GEN);
+        }
 
-	    /// <summary> Returns true if this segment is stored as a compound
-	    /// file; else, false.
-	    /// </summary>
-	    internal void SetUseCompoundFile(bool value)
-	    {
-	        if (value)
-	        {
-	            this.isCompoundFile = (sbyte) (YES);
-	        }
-	        else
-	        {
-	            this.isCompoundFile = (sbyte) (NO);
-	        }
-	        ClearFiles();
-	    }
+        /// <summary> Sets whether this segment is stored as a compound
+        /// file.
+        /// </summary>
+        internal void SetUseCompoundFile(bool value)
+        {
+            if (value)
+            {
+                this.isCompoundFile = (sbyte) (YES);
+            }
+            else
+            {
+                this.isCompoundFile = (sbyte) (NO);
+            }
+            ClearFiles();
+        }
 
-	    /// <summary> Returns true if this segment is stored as a compound
-	    /// file; else, false.
-	    /// </summary>
+        /// <summary> Returns true if this segment is stored as a compound
+        /// file; else, false.
+        /// </summary>
         [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1024:UsePropertiesWhereAppropriate")]
         public bool GetUseCompoundFile()
-	    {
-	        if (isCompoundFile == NO)
-	        {
-	            return false;
-	        }
-	        if (isCompoundFile == YES)
-	        {
-	            return true;
-	        }
-	        return dir.FileExists(name + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
-	    }
+        {
+            if (isCompoundFile == NO)
+            {
+                return false;
+            }
+            if (isCompoundFile == YES)
+            {
+                return true;
+            }
+            return dir.FileExists(name + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
+        }
 
-	    [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1024:UsePropertiesWhereAppropriate")]
+        [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1024:UsePropertiesWhereAppropriate")]
         public int GetDelCount()
-		{
-			if (delCount == - 1)
-			{
-				if (HasDeletions())
-				{
-					System.String delFileName = GetDelFileName();
-					delCount = new BitVector(dir, delFileName).Count();
-				}
-				else
-					delCount = 0;
-			}
-			System.Diagnostics.Debug.Assert(delCount <= docCount);
-			return delCount;
-		}
-		
-		internal void  SetDelCount(int delCount)
-		{
-			this.delCount = delCount;
-			System.Diagnostics.Debug.Assert(delCount <= docCount);
-		}
+        {
+            if (delCount == - 1)
+            {
+                if (HasDeletions())
+                {
+                    System.String delFileName = GetDelFileName();
+                    delCount = new BitVector(dir, delFileName).Count();
+                }
+                else
+                    delCount = 0;
+            }
+            System.Diagnostics.Debug.Assert(delCount <= docCount);
+            return delCount;
+        }
+        
+        internal void  SetDelCount(int delCount)
+        {
+            this.delCount = delCount;
+            System.Diagnostics.Debug.Assert(delCount <= docCount);
+        }
 
-	    public int DocStoreOffset
-	    {
-	        get { return docStoreOffset; }
-	        internal set
-	        {
-	            docStoreOffset = value;
-	            ClearFiles();
-	        }
-	    }
+        public int DocStoreOffset
+        {
+            get { return docStoreOffset; }
+            internal set
+            {
+                docStoreOffset = value;
+                ClearFiles();
+            }
+        }
 
-	    public bool DocStoreIsCompoundFile
-	    {
-	        get { return docStoreIsCompoundFile; }
-	        internal set
-	        {
-	            docStoreIsCompoundFile = value;
-	            ClearFiles();
-	        }
-	    }
+        public bool DocStoreIsCompoundFile
+        {
+            get { return docStoreIsCompoundFile; }
+            internal set
+            {
+                docStoreIsCompoundFile = value;
+                ClearFiles();
+            }
+        }
 
-	    public string DocStoreSegment
-	    {
-	        get { return docStoreSegment; }
-	    }
+        public string DocStoreSegment
+        {
+            get { return docStoreSegment; }
+        }
 
         internal void SetDocStore(int offset, System.String segment, bool isCompoundFile)
-		{
-			docStoreOffset = offset;
-			docStoreSegment = segment;
-			docStoreIsCompoundFile = isCompoundFile;
-		}
-		
-		/// <summary> Save this segment's info.</summary>
-		internal void  Write(IndexOutput output)
-		{
-			output.WriteString(name);
-			output.WriteInt(docCount);
-			output.WriteLong(delGen);
-			output.WriteInt(docStoreOffset);
-			if (docStoreOffset != - 1)
-			{
-				output.WriteString(docStoreSegment);
-				output.WriteByte((byte) (docStoreIsCompoundFile?1:0));
-			}
-			
-			output.WriteByte((byte) (hasSingleNormFile?1:0));
-			if (normGen == null)
-			{
-				output.WriteInt(NO);
-			}
-			else
-			{
-				output.WriteInt(normGen.Length);
-				for (int j = 0; j < normGen.Length; j++)
-				{
-					output.WriteLong(normGen[j]);
-				}
-			}
-			output.WriteByte((byte) isCompoundFile);
-			output.WriteInt(delCount);
-			output.WriteByte((byte) (hasProx?1:0));
-			output.WriteStringStringMap(diagnostics);
-		}
+        {
+            docStoreOffset = offset;
+            docStoreSegment = segment;
+            docStoreIsCompoundFile = isCompoundFile;
+        }
+        
+        /// <summary> Save this segment's info.</summary>
+        internal void  Write(IndexOutput output)
+        {
+            output.WriteString(name);
+            output.WriteInt(docCount);
+            output.WriteLong(delGen);
+            output.WriteInt(docStoreOffset);
+            if (docStoreOffset != - 1)
+            {
+                output.WriteString(docStoreSegment);
+                output.WriteByte((byte) (docStoreIsCompoundFile?1:0));
+            }
+            
+            output.WriteByte((byte) (hasSingleNormFile?1:0));
+            if (normGen == null)
+            {
+                output.WriteInt(NO);
+            }
+            else
+            {
+                output.WriteInt(normGen.Length);
+                for (int j = 0; j < normGen.Length; j++)
+                {
+                    output.WriteLong(normGen[j]);
+                }
+            }
+            output.WriteByte((byte) isCompoundFile);
+            output.WriteInt(delCount);
+            output.WriteByte((byte) (hasProx?1:0));
+            output.WriteStringStringMap(diagnostics);
+        }
 
-	    public bool HasProx
-	    {
-	        get { return hasProx; }
-	        internal set
-	        {
-	            this.hasProx = value;
-	            ClearFiles();
-	        }
-	    }
+        public bool HasProx
+        {
+            get { return hasProx; }
+            internal set
+            {
+                this.hasProx = value;
+                ClearFiles();
+            }
+        }
 
-	    private void  AddIfExists(IList<string> files, System.String fileName)
-		{
-			if (dir.FileExists(fileName))
-				files.Add(fileName);
-		}
-		
-		/*
-		* Return all files referenced by this SegmentInfo.  The
-		* returns List is a locally cached List so you should not
-		* modify it.
-		*/
-		
-		public IList<string> Files()
-		{
-			
-			if (files != null)
-			{
-				// Already cached:
-				return files;
-			}
+        private void  AddIfExists(IList<string> files, System.String fileName)
+        {
+            if (dir.FileExists(fileName))
+                files.Add(fileName);
+        }
+        
+        /*
+        * Return all files referenced by this SegmentInfo.  The
+        * returned List is a locally cached List so you should not
+        * modify it.
+        */
+        
+        public IList<string> Files()
+        {
+            
+            if (files != null)
+            {
+                // Already cached:
+                return files;
+            }
 
             var fileList = new System.Collections.Generic.List<string>();
-			
-			bool useCompoundFile = GetUseCompoundFile();
-			
-			if (useCompoundFile)
-			{
+            
+            bool useCompoundFile = GetUseCompoundFile();
+            
+            if (useCompoundFile)
+            {
                 fileList.Add(name + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
-			}
-			else
-			{
-				System.String[] exts = IndexFileNames.NON_STORE_INDEX_EXTENSIONS;
-				for (int i = 0; i < exts.Length; i++)
+            }
+            else
+            {
+                System.String[] exts = IndexFileNames.NON_STORE_INDEX_EXTENSIONS;
+                for (int i = 0; i < exts.Length; i++)
                     AddIfExists(fileList, name + "." + exts[i]);
-			}
-			
-			if (docStoreOffset != - 1)
-			{
-				// We are sharing doc stores (stored fields, term
-				// vectors) with other segments
-				System.Diagnostics.Debug.Assert(docStoreSegment != null);
-				if (docStoreIsCompoundFile)
-				{
+            }
+            
+            if (docStoreOffset != - 1)
+            {
+                // We are sharing doc stores (stored fields, term
+                // vectors) with other segments
+                System.Diagnostics.Debug.Assert(docStoreSegment != null);
+                if (docStoreIsCompoundFile)
+                {
                     fileList.Add(docStoreSegment + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION);
-				}
-				else
-				{
-					System.String[] exts = IndexFileNames.STORE_INDEX_EXTENSIONS;
-					for (int i = 0; i < exts.Length; i++)
+                }
+                else
+                {
+                    System.String[] exts = IndexFileNames.STORE_INDEX_EXTENSIONS;
+                    for (int i = 0; i < exts.Length; i++)
                         AddIfExists(fileList, docStoreSegment + "." + exts[i]);
-				}
-			}
-			else if (!useCompoundFile)
-			{
-				// We are not sharing, and, these files were not
-				// included in the compound file
-				System.String[] exts = IndexFileNames.STORE_INDEX_EXTENSIONS;
-				for (int i = 0; i < exts.Length; i++)
+                }
+            }
+            else if (!useCompoundFile)
+            {
+                // We are not sharing, and, these files were not
+                // included in the compound file
+                System.String[] exts = IndexFileNames.STORE_INDEX_EXTENSIONS;
+                for (int i = 0; i < exts.Length; i++)
                     AddIfExists(fileList, name + "." + exts[i]);
-			}
-			
-			System.String delFileName = IndexFileNames.FileNameFromGeneration(name, "." + IndexFileNames.DELETES_EXTENSION, delGen);
-			if (delFileName != null && (delGen >= YES || dir.FileExists(delFileName)))
-			{
+            }
+            
+            System.String delFileName = IndexFileNames.FileNameFromGeneration(name, "." + IndexFileNames.DELETES_EXTENSION, delGen);
+            if (delFileName != null && (delGen >= YES || dir.FileExists(delFileName)))
+            {
                 fileList.Add(delFileName);
-			}
-			
-			// Careful logic for norms files    
-			if (normGen != null)
-			{
-				for (int i = 0; i < normGen.Length; i++)
-				{
-					long gen = normGen[i];
-					if (gen >= YES)
-					{
-						// Definitely a separate norm file, with generation:
+            }
+            
+            // Careful logic for norms files    
+            if (normGen != null)
+            {
+                for (int i = 0; i < normGen.Length; i++)
+                {
+                    long gen = normGen[i];
+                    if (gen >= YES)
+                    {
+                        // Definitely a separate norm file, with generation:
                         fileList.Add(IndexFileNames.FileNameFromGeneration(name, "." + IndexFileNames.SEPARATE_NORMS_EXTENSION + i, gen));
-					}
-					else if (NO == gen)
-					{
-						// No separate norms but maybe plain norms
-						// in the non compound file case:
-						if (!hasSingleNormFile && !useCompoundFile)
-						{
-							System.String fileName = name + "." + IndexFileNames.PLAIN_NORMS_EXTENSION + i;
-							if (dir.FileExists(fileName))
-							{
+                    }
+                    else if (NO == gen)
+                    {
+                        // No separate norms but maybe plain norms
+                        // in the non compound file case:
+                        if (!hasSingleNormFile && !useCompoundFile)
+                        {
+                            System.String fileName = name + "." + IndexFileNames.PLAIN_NORMS_EXTENSION + i;
+                            if (dir.FileExists(fileName))
+                            {
                                 fileList.Add(fileName);
-							}
-						}
-					}
-					else if (CHECK_DIR == gen)
-					{
-						// Pre-2.1: we have to check file existence
-						System.String fileName = null;
-						if (useCompoundFile)
-						{
-							fileName = name + "." + IndexFileNames.SEPARATE_NORMS_EXTENSION + i;
-						}
-						else if (!hasSingleNormFile)
-						{
-							fileName = name + "." + IndexFileNames.PLAIN_NORMS_EXTENSION + i;
-						}
-						if (fileName != null && dir.FileExists(fileName))
-						{
+                            }
+                        }
+                    }
+                    else if (CHECK_DIR == gen)
+                    {
+                        // Pre-2.1: we have to check file existence
+                        System.String fileName = null;
+                        if (useCompoundFile)
+                        {
+                            fileName = name + "." + IndexFileNames.SEPARATE_NORMS_EXTENSION + i;
+                        }
+                        else if (!hasSingleNormFile)
+                        {
+                            fileName = name + "." + IndexFileNames.PLAIN_NORMS_EXTENSION + i;
+                        }
+                        if (fileName != null && dir.FileExists(fileName))
+                        {
                             fileList.Add(fileName);
-						}
-					}
-				}
-			}
-			else if (preLockless || (!hasSingleNormFile && !useCompoundFile))
-			{
-				// Pre-2.1: we have to scan the dir to find all
-				// matching _X.sN/_X.fN files for our segment:
-				System.String prefix;
-				if (useCompoundFile)
-					prefix = name + "." + IndexFileNames.SEPARATE_NORMS_EXTENSION;
-				else
-					prefix = name + "." + IndexFileNames.PLAIN_NORMS_EXTENSION;
-				int prefixLength = prefix.Length;
-				System.String[] allFiles = dir.ListAll();
-				IndexFileNameFilter filter = IndexFileNameFilter.Filter;
-				for (int i = 0; i < allFiles.Length; i++)
-				{
-					System.String fileName = allFiles[i];
-					if (filter.Accept(null, fileName) && fileName.Length > prefixLength && System.Char.IsDigit(fileName[prefixLength]) && fileName.StartsWith(prefix))
-					{
-						fileList.Add(fileName);
-					}
-				}
-			}
+                        }
+                    }
+                }
+            }
+            else if (preLockless || (!hasSingleNormFile && !useCompoundFile))
+            {
+                // Pre-2.1: we have to scan the dir to find all
+                // matching _X.sN/_X.fN files for our segment:
+                System.String prefix;
+                if (useCompoundFile)
+                    prefix = name + "." + IndexFileNames.SEPARATE_NORMS_EXTENSION;
+                else
+                    prefix = name + "." + IndexFileNames.PLAIN_NORMS_EXTENSION;
+                int prefixLength = prefix.Length;
+                System.String[] allFiles = dir.ListAll();
+                IndexFileNameFilter filter = IndexFileNameFilter.Filter;
+                for (int i = 0; i < allFiles.Length; i++)
+                {
+                    System.String fileName = allFiles[i];
+                    if (filter.Accept(null, fileName) && fileName.Length > prefixLength && System.Char.IsDigit(fileName[prefixLength]) && fileName.StartsWith(prefix))
+                    {
+                        fileList.Add(fileName);
+                    }
+                }
+            }
             //System.Diagnostics.Debug.Assert();
             files = fileList;
-			return files;
-		}
-		
-		/* Called whenever any change is made that affects which
-		* files this segment has. */
-		private void  ClearFiles()
-		{
-			files = null;
-			sizeInBytes = - 1;
-		}
-		
-		/// <summary>Used for debugging </summary>
-		public System.String SegString(Directory dir)
-		{
-			System.String cfs;
-			try
-			{
-				if (GetUseCompoundFile())
-					cfs = "c";
-				else
-					cfs = "C";
-			}
-			catch (System.IO.IOException)
-			{
-				cfs = "?";
-			}
-			
-			System.String docStore;
-			
-			if (docStoreOffset != - 1)
-				docStore = "->" + docStoreSegment;
-			else
-				docStore = "";
-			
-			return name + ":" + cfs + (this.dir == dir?"":"x") + docCount + docStore;
-		}
-		
-		/// <summary>We consider another SegmentInfo instance equal if it
-		/// has the same dir and same name. 
-		/// </summary>
-		public  override bool Equals(System.Object obj)
-		{
+            return files;
+        }
+        
+        /* Called whenever any change is made that affects which
+        * files this segment has. */
+        private void  ClearFiles()
+        {
+            files = null;
+            sizeInBytes = - 1;
+        }
+        
+        /// <summary>Used for debugging </summary>
+        public System.String SegString(Directory dir)
+        {
+            System.String cfs;
+            try
+            {
+                if (GetUseCompoundFile())
+                    cfs = "c";
+                else
+                    cfs = "C";
+            }
+            catch (System.IO.IOException)
+            {
+                cfs = "?";
+            }
+            
+            System.String docStore;
+            
+            if (docStoreOffset != - 1)
+                docStore = "->" + docStoreSegment;
+            else
+                docStore = "";
+            
+            return name + ":" + cfs + (this.dir == dir?"":"x") + docCount + docStore;
+        }
+        
+        /// <summary>We consider another SegmentInfo instance equal if it
+        /// has the same dir and same name. 
+        /// </summary>
+        public  override bool Equals(System.Object obj)
+        {
             if (this == obj) return true;
 
             if (obj is SegmentInfo)
@@ -864,12 +864,12 @@ namespace Lucene.Net.Index
                 SegmentInfo other = (SegmentInfo) obj;
                 return other.dir == dir && other.name.Equals(name);
             }
-		    return false;
-		}
-		
-		public override int GetHashCode()
-		{
-			return dir.GetHashCode() + name.GetHashCode();
-		}
-	}
+            return false;
+        }
+        
+        public override int GetHashCode()
+        {
+            return dir.GetHashCode() + name.GetHashCode();
+        }
+    }
 }
\ No newline at end of file
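For reference: the delGen/normGen bookkeeping above hinges on IndexFileNames.FileNameFromGeneration, which maps a generation value onto a concrete file name (NO means no file; WITHOUT_GEN/CHECK_DIR means a name with no generation suffix; YES and above append the generation). The following is a minimal, self-contained C# sketch of that mapping, assuming the base-36 generation suffix used by lockless commits; it is an illustration of the scheme, not the library's actual implementation.

    using System;
    using System.Text;

    internal static class GenerationNamesSketch
    {
        private const long NO = -1;          // no file exists for this slot
        private const long WITHOUT_GEN = 0;  // name carries no generation suffix

        // Mirrors the call shape SegmentInfo uses:
        //   IndexFileNames.FileNameFromGeneration(name, "." + ext, gen)
        internal static string FileNameFromGeneration(string name, string ext, long gen)
        {
            if (gen == NO)
                return null;                              // e.g. delGen == NO: no .del file
            if (gen == WITHOUT_GEN)
                return name + ext;                        // pre-lockless style: "_3.del"
            return name + "_" + ToBase36(gen) + ext;      // lockless style: "_3_2.del"
        }

        private static string ToBase36(long value)
        {
            const string digits = "0123456789abcdefghijklmnopqrstuvwxyz";
            if (value == 0) return "0";
            var sb = new StringBuilder();
            while (value > 0)
            {
                sb.Insert(0, digits[(int)(value % 36)]);
                value /= 36;
            }
            return sb.ToString();
        }

        private static void Main()
        {
            Console.WriteLine(FileNameFromGeneration("_3", ".del", NO) ?? "(null)");   // no deletions
            Console.WriteLine(FileNameFromGeneration("_3", ".del", 2));                // _3_2.del
            Console.WriteLine(FileNameFromGeneration("_3", ".nrm", WITHOUT_GEN));      // _3.nrm
        }
    }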


[47/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Regex/RegexQuery.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Regex/RegexQuery.cs b/src/contrib/Regex/RegexQuery.cs
index 1516414..1dc452b 100644
--- a/src/contrib/Regex/RegexQuery.cs
+++ b/src/contrib/Regex/RegexQuery.cs
@@ -1,4 +1,4 @@
-/* 
+/* 
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -23,34 +23,34 @@ using Lucene.Net.Util;
 
 namespace Contrib.Regex
 {
-	/// <summary>
-	/// Regular expression based query.
-	/// </summary>
-	/// <remarks>http://www.java2s.com/Open-Source/Java-Document/Net/lucene-connector/org/apache/lucene/search/regex/RegexQuery.java.htm</remarks>
-	public class RegexQuery : MultiTermQuery, IRegexQueryCapable, IEquatable<RegexQuery>
-	{
-		private IRegexCapabilities _regexImpl = new CSharpRegexCapabilities();
-	    public Term Term { get; private set; }
+    /// <summary>
+    /// Regular expression based query.
+    /// </summary>
+    /// <remarks>http://www.java2s.com/Open-Source/Java-Document/Net/lucene-connector/org/apache/lucene/search/regex/RegexQuery.java.htm</remarks>
+    public class RegexQuery : MultiTermQuery, IRegexQueryCapable, IEquatable<RegexQuery>
+    {
+        private IRegexCapabilities _regexImpl = new CSharpRegexCapabilities();
+        public Term Term { get; private set; }
 
-		public RegexQuery(Term term)
-		{
+        public RegexQuery(Term term)
+        {
             Term = term;
-		}
+        }
 
-		/// <summary>Construct the enumeration to be used, expanding the pattern term. </summary>
-		protected override FilteredTermEnum GetEnum(IndexReader reader)
-		{
-			return new RegexTermEnum(reader, Term, _regexImpl);
-		}
+        /// <summary>Construct the enumeration to be used, expanding the pattern term. </summary>
+        protected override FilteredTermEnum GetEnum(IndexReader reader)
+        {
+            return new RegexTermEnum(reader, Term, _regexImpl);
+        }
 
-	    public IRegexCapabilities RegexImplementation
-	    {
-	        set { _regexImpl = value; }
-	        get { return _regexImpl; }
-	    }
+        public IRegexCapabilities RegexImplementation
+        {
+            set { _regexImpl = value; }
+            get { return _regexImpl; }
+        }
 
 
-	    public override String ToString(String field)
+        public override String ToString(String field)
         {
             StringBuilder buffer = new StringBuilder();
             if (!Term.Field.Equals(field))
@@ -63,33 +63,33 @@ namespace Contrib.Regex
             return buffer.ToString();
         }
 
-	    /// <summary>
-		/// Indicates whether the current object is equal to another object of the same type.
-		/// </summary>
-		/// <returns>
-		/// true if the current object is equal to the <paramref name="other"/> parameter; otherwise, false.
-		/// </returns>
-		/// <param name="other">An object to compare with this object</param>
-		public bool Equals(RegexQuery other)
-		{
-			if (other == null) return false;
-			if (this == other) return true;
+        /// <summary>
+        /// Indicates whether the current object is equal to another object of the same type.
+        /// </summary>
+        /// <returns>
+        /// true if the current object is equal to the <paramref name="other"/> parameter; otherwise, false.
+        /// </returns>
+        /// <param name="other">An object to compare with this object</param>
+        public bool Equals(RegexQuery other)
+        {
+            if (other == null) return false;
+            if (this == other) return true;
 
-			if (!base.Equals(other)) return false;
-			return _regexImpl.Equals(other._regexImpl);
-		}
+            if (!base.Equals(other)) return false;
+            return _regexImpl.Equals(other._regexImpl);
+        }
 
-		public override bool Equals(object obj)
-		{
-			if ((obj == null) || (obj as RegexQuery == null)) return false;
-			if (this == obj) return true;
+        public override bool Equals(object obj)
+        {
+            if ((obj == null) || (obj as RegexQuery == null)) return false;
+            if (this == obj) return true;
 
-			return Equals((RegexQuery) obj);
-		}
+            return Equals((RegexQuery) obj);
+        }
 
-		public override int GetHashCode()
-		{
-			return 29 * base.GetHashCode() + _regexImpl.GetHashCode();
-		}
-	}
+        public override int GetHashCode()
+        {
+            return 29 * base.GetHashCode() + _regexImpl.GetHashCode();
+        }
+    }
 }
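
Note on RegexQuery above: it is a MultiTermQuery whose term text is treated as a regular expression, with matching delegated to the pluggable IRegexCapabilities (CSharpRegexCapabilities by default). A minimal usage sketch follows; it is not part of this commit, the directory, field name and pattern are hypothetical, and it assumes the Lucene.Net 3.x search API.

    // Sketch only: count documents whose "body" field has a term matching
    // a .NET regular expression. "dir" must hold an existing index.
    using Contrib.Regex;
    using Lucene.Net.Index;
    using Lucene.Net.Search;

    public static class RegexQueryExample
    {
        public static int CountMatches(Lucene.Net.Store.Directory dir)
        {
            var query = new RegexQuery(new Term("body", "luc[aeiou]ne.*"));
            query.RegexImplementation = new CSharpRegexCapabilities(); // the default, shown explicitly

            using (var searcher = new IndexSearcher(dir, true)) // true = read-only
            {
                TopDocs hits = searcher.Search(query, 10);
                return hits.TotalHits;
            }
        }
    }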

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Regex/RegexTermEnum.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Regex/RegexTermEnum.cs b/src/contrib/Regex/RegexTermEnum.cs
index 3cf480e..50c0480 100644
--- a/src/contrib/Regex/RegexTermEnum.cs
+++ b/src/contrib/Regex/RegexTermEnum.cs
@@ -1,4 +1,4 @@
-/* 
+/* 
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -20,65 +20,65 @@ using Lucene.Net.Search;
 
 namespace Contrib.Regex
 {
-	/// <summary>
-	/// Subclass of FilteredTermEnum for enumerating all terms that match the
-	/// specified regular expression term using the specified regular expression
-	/// implementation.
-	/// <para>Term enumerations are always ordered by Term.compareTo().  Each term in
-	/// the enumeration is greater than all that precede it.</para>
-	/// </summary>
-	/// <remarks>http://www.java2s.com/Open-Source/Java-Document/Net/lucene-connector/org/apache/lucene/search/regex/RegexTermEnum.java.htm</remarks>
-	public class RegexTermEnum : FilteredTermEnum
-	{
-		private string _sField = "";
-		private string _sPre = "";
-		private bool _bEndEnum;
-		private readonly IRegexCapabilities _regexImpl;
+    /// <summary>
+    /// Subclass of FilteredTermEnum for enumerating all terms that match the
+    /// specified regular expression term using the specified regular expression
+    /// implementation.
+    /// <para>Term enumerations are always ordered by Term.compareTo().  Each term in
+    /// the enumeration is greater than all that precede it.</para>
+    /// </summary>
+    /// <remarks>http://www.java2s.com/Open-Source/Java-Document/Net/lucene-connector/org/apache/lucene/search/regex/RegexTermEnum.java.htm</remarks>
+    public class RegexTermEnum : FilteredTermEnum
+    {
+        private string _sField = "";
+        private string _sPre = "";
+        private bool _bEndEnum;
+        private readonly IRegexCapabilities _regexImpl;
 
-		public RegexTermEnum(IndexReader reader, Term term, IRegexCapabilities regexImpl)
-		{
-			_sField = term.Field;
-			string sText = term.Text;
-			
-			_regexImpl = regexImpl;
+        public RegexTermEnum(IndexReader reader, Term term, IRegexCapabilities regexImpl)
+        {
+            _sField = term.Field;
+            string sText = term.Text;
+            
+            _regexImpl = regexImpl;
 
-			_regexImpl.Compile(sText);
+            _regexImpl.Compile(sText);
 
-			_sPre = _regexImpl.Prefix() ?? "";
+            _sPre = _regexImpl.Prefix() ?? "";
 
-			SetEnum(reader.Terms(new Term(term.Field, _sPre)));
-		}
+            SetEnum(reader.Terms(new Term(term.Field, _sPre)));
+        }
 
-		/// <summary>Equality compare on the term </summary>
-		protected override bool TermCompare(Term term)
-		{
-			if (_sField == term.Field)
-			{
-				string sSearchText = term.Text;
-				if (sSearchText.StartsWith(_sPre)) return _regexImpl.Match(sSearchText);
-			} //eif
+        /// <summary>Equality compare on the term </summary>
+        protected override bool TermCompare(Term term)
+        {
+            if (_sField == term.Field)
+            {
+                string sSearchText = term.Text;
+                if (sSearchText.StartsWith(_sPre)) return _regexImpl.Match(sSearchText);
+            } //eif
 
-			_bEndEnum = true;
-			return false;
-		}
+            _bEndEnum = true;
+            return false;
+        }
 
-		/// <summary>Equality measure on the term </summary>
-		public override float Difference()
-		{
-			// TODO: adjust difference based on distance of searchTerm.text() and term().text()
-			return 1.0F;
-		}
+        /// <summary>Equality measure on the term </summary>
+        public override float Difference()
+        {
+            // TODO: adjust difference based on distance of searchTerm.text() and term().text()
+            return 1.0F;
+        }
 
-		/// <summary>Indicates the end of the enumeration has been reached </summary>
-		public override bool EndEnum()
-		{
-			return _bEndEnum;
-		}
+        /// <summary>Indicates the end of the enumeration has been reached </summary>
+        public override bool EndEnum()
+        {
+            return _bEndEnum;
+        }
 
-		//public override void Close()
-		//{
-		//    base.Close();
-		//    _sField = null;
-		//}
-	}
+        //public override void Close()
+        //{
+        //    base.Close();
+        //    _sField = null;
+        //}
+    }
 }
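
Note on RegexTermEnum above: the enumeration is seeded at the literal prefix reported by _regexImpl.Prefix(), and TermCompare sets _bEndEnum as soon as a term stops sharing that prefix, so only the prefix-bounded slice of the term dictionary is scanned. The toy sketch below illustrates the prefix idea; ExtractLiteralPrefix is a hypothetical stand-in for IRegexCapabilities.Prefix(), not the contrib's actual logic.

    // Sketch only: keep the leading literal run of a pattern, stopping at
    // the first regex metacharacter, e.g. "lucene.*" yields "lucene".
    static string ExtractLiteralPrefix(string pattern)
    {
        var sb = new System.Text.StringBuilder();
        foreach (char c in pattern)
        {
            if ("\\.[]{}()*+?|^$".IndexOf(c) >= 0) break; // first metacharacter ends the literal run
            sb.Append(c);
        }
        return sb.ToString();
    }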

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Regex/SpanRegexQuery.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Regex/SpanRegexQuery.cs b/src/contrib/Regex/SpanRegexQuery.cs
index 45f04b8..8bde844 100644
--- a/src/contrib/Regex/SpanRegexQuery.cs
+++ b/src/contrib/Regex/SpanRegexQuery.cs
@@ -1,4 +1,4 @@
-/* 
+/* 
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -25,131 +25,131 @@ using Lucene.Net.Util;
 
 namespace Contrib.Regex
 {
-	/// <summary>
-	/// A SpanQuery version of <see cref="RegexQuery"/> allowing regular expression queries to be nested
-	/// within other SpanQuery subclasses.
-	/// </summary>
-	/// <remarks>http://www.java2s.com/Open-Source/Java-Document/Net/lucene-connector/org/apache/lucene/search/regex/SpanRegexQuery.java.htm</remarks>
-	public class SpanRegexQuery : SpanQuery, IRegexQueryCapable, IEquatable<SpanRegexQuery>
-	{
-		private IRegexCapabilities _regexImpl = new CSharpRegexCapabilities();
-		private readonly Term _term;
-
-		public SpanRegexQuery(Term term)
-		{
-			_term = term;
-		}
-
-	    public Term Term
-	    {
-	        get { return _term; }
-	    }
-
-	    public override string ToString(string field)
-		{
-			StringBuilder sb = new StringBuilder();
-			sb.Append("SpanRegexQuery(");
-			sb.Append(_term);
-			sb.Append(')');
-			sb.Append(ToStringUtils.Boost(Boost));
-			return sb.ToString();
-		}
-
-		public override Query Rewrite(IndexReader reader)
-		{
-			RegexQuery orig = new RegexQuery(_term);
-			orig.RegexImplementation = _regexImpl;
-
-			// RegexQuery (via MultiTermQuery).Rewrite always returns a BooleanQuery
-			orig.RewriteMethod = MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE;	//@@
-			BooleanQuery bq = (BooleanQuery) orig.Rewrite(reader);
-
-			BooleanClause[] clauses = bq.GetClauses();
-			SpanQuery[] sqs = new SpanQuery[clauses.Length];
-			for (int i = 0; i < clauses.Length; i++)
-			{
-				BooleanClause clause = clauses[i];
-
-				// Clauses from RegexQuery.Rewrite are always TermQuery's
-				TermQuery tq = (TermQuery) clause.Query;
-
-				sqs[i] = new SpanTermQuery(tq.Term);
-				sqs[i].Boost = tq.Boost;
-			} //efor
-
-			SpanOrQuery query = new SpanOrQuery(sqs);
-			query.Boost = orig.Boost;
-
-			return query;
-		}
-
-		/// <summary>Expert: Returns the matches for this query in an index.  Used internally
-		/// to search for spans. 
-		/// </summary>
-		public override Lucene.Net.Search.Spans.Spans GetSpans(IndexReader reader)
-		{
-			throw new InvalidOperationException("Query should have been rewritten");
-		}
-
-		/// <summary>Returns the name of the field matched by this query.</summary>
-		public override string Field
-		{
+    /// <summary>
+    /// A SpanQuery version of <see cref="RegexQuery"/> allowing regular expression queries to be nested
+    /// within other SpanQuery subclasses.
+    /// </summary>
+    /// <remarks>http://www.java2s.com/Open-Source/Java-Document/Net/lucene-connector/org/apache/lucene/search/regex/SpanRegexQuery.java.htm</remarks>
+    public class SpanRegexQuery : SpanQuery, IRegexQueryCapable, IEquatable<SpanRegexQuery>
+    {
+        private IRegexCapabilities _regexImpl = new CSharpRegexCapabilities();
+        private readonly Term _term;
+
+        public SpanRegexQuery(Term term)
+        {
+            _term = term;
+        }
+
+        public Term Term
+        {
+            get { return _term; }
+        }
+
+        public override string ToString(string field)
+        {
+            StringBuilder sb = new StringBuilder();
+            sb.Append("SpanRegexQuery(");
+            sb.Append(_term);
+            sb.Append(')');
+            sb.Append(ToStringUtils.Boost(Boost));
+            return sb.ToString();
+        }
+
+        public override Query Rewrite(IndexReader reader)
+        {
+            RegexQuery orig = new RegexQuery(_term);
+            orig.RegexImplementation = _regexImpl;
+
+            // RegexQuery (via MultiTermQuery).Rewrite always returns a BooleanQuery
+            orig.RewriteMethod = MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE;    //@@
+            BooleanQuery bq = (BooleanQuery) orig.Rewrite(reader);
+
+            BooleanClause[] clauses = bq.GetClauses();
+            SpanQuery[] sqs = new SpanQuery[clauses.Length];
+            for (int i = 0; i < clauses.Length; i++)
+            {
+                BooleanClause clause = clauses[i];
+
+                // Clauses from RegexQuery.Rewrite are always TermQuery's
+                TermQuery tq = (TermQuery) clause.Query;
+
+                sqs[i] = new SpanTermQuery(tq.Term);
+                sqs[i].Boost = tq.Boost;
+            } //efor
+
+            SpanOrQuery query = new SpanOrQuery(sqs);
+            query.Boost = orig.Boost;
+
+            return query;
+        }
+
+        /// <summary>Expert: Returns the matches for this query in an index.  Used internally
+        /// to search for spans. 
+        /// </summary>
+        public override Lucene.Net.Search.Spans.Spans GetSpans(IndexReader reader)
+        {
+            throw new InvalidOperationException("Query should have been rewritten");
+        }
+
+        /// <summary>Returns the name of the field matched by this query.</summary>
+        public override string Field
+        {
             get
             {
                 return _term.Field;
             }
-		}
+        }
 
         public ICollection<Term> GetTerms()
         {
             ICollection<Term> terms = new List<Term>(){_term};
-		    return terms;
+            return terms;
         }
 
-	    public IRegexCapabilities RegexImplementation
-	    {
-	        set { _regexImpl = value; }
-	        get { return _regexImpl; }
-	    }
-
-	    /// <summary>
-		/// Indicates whether the current object is equal to another object of the same type.
-		/// </summary>
-		/// <returns>
-		/// true if the current object is equal to the <paramref name="other"/> parameter; otherwise, false.
-		/// </returns>
-		/// <param name="other">An object to compare with this object.
-		///                 </param>
-		public bool Equals(SpanRegexQuery other)
-		{
-			if (other == null) return false;
-			if (ReferenceEquals(this, other)) return true;
-
-			if (!_regexImpl.Equals(other._regexImpl)) return false;
-			if (!_term.Equals(other._term)) return false;
-
-			return true;
-		}
-
-		/// <summary>
-		/// True if this object equals the specified object.
-		/// </summary>
-		/// <param name="obj">object</param>
-		/// <returns>true on equality</returns>
-		public override bool Equals(object obj)
-		{
-			if (obj as SpanRegexQuery == null) return false;
-
-			return Equals((SpanRegexQuery) obj);
-		}
-
-		/// <summary>
-		/// Get hash code for this object.
-		/// </summary>
-		/// <returns>hash code</returns>
-		public override int GetHashCode()
-		{
-			return 29 * _regexImpl.GetHashCode() + _term.GetHashCode();
-		}
-	}
+        public IRegexCapabilities RegexImplementation
+        {
+            set { _regexImpl = value; }
+            get { return _regexImpl; }
+        }
+
+        /// <summary>
+        /// Indicates whether the current object is equal to another object of the same type.
+        /// </summary>
+        /// <returns>
+        /// true if the current object is equal to the <paramref name="other"/> parameter; otherwise, false.
+        /// </returns>
+        /// <param name="other">An object to compare with this object.
+        ///                 </param>
+        public bool Equals(SpanRegexQuery other)
+        {
+            if (other == null) return false;
+            if (ReferenceEquals(this, other)) return true;
+
+            if (!_regexImpl.Equals(other._regexImpl)) return false;
+            if (!_term.Equals(other._term)) return false;
+
+            return true;
+        }
+
+        /// <summary>
+        /// True if this object equals the specified object.
+        /// </summary>
+        /// <param name="obj">object</param>
+        /// <returns>true on equality</returns>
+        public override bool Equals(object obj)
+        {
+            if (obj as SpanRegexQuery == null) return false;
+
+            return Equals((SpanRegexQuery) obj);
+        }
+
+        /// <summary>
+        /// Get hash code for this object.
+        /// </summary>
+        /// <returns>hash code</returns>
+        public override int GetHashCode()
+        {
+            return 29 * _regexImpl.GetHashCode() + _term.GetHashCode();
+        }
+    }
 }
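
Note on SpanRegexQuery above: Rewrite expands the pattern through a throwaway RegexQuery forced to SCORING_BOOLEAN_QUERY_REWRITE, lifts each resulting TermQuery clause into a SpanTermQuery, and ORs them together with SpanOrQuery; GetSpans throws on purpose because the searcher rewrites the query before spans are requested. A usage sketch, not from this commit (the field name and patterns are hypothetical), nesting a regex inside a SpanNearQuery:

    // Sketch only: terms matching "lucene.*" within five positions of
    // "search", in order.
    using Contrib.Regex;
    using Lucene.Net.Index;
    using Lucene.Net.Search.Spans;

    public static class SpanRegexExample
    {
        public static SpanQuery BuildNestedSpan()
        {
            return new SpanNearQuery(
                new SpanQuery[]
                {
                    new SpanRegexQuery(new Term("body", "lucene.*")),
                    new SpanTermQuery(new Term("body", "search"))
                },
                5,      // slop: at most five positions apart
                true);  // inOrder: the regex match must come first
        }
    }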

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/SimpleFacetedSearch/Extensions.cs
----------------------------------------------------------------------
diff --git a/src/contrib/SimpleFacetedSearch/Extensions.cs b/src/contrib/SimpleFacetedSearch/Extensions.cs
index 1eb56ff..31bfd63 100644
--- a/src/contrib/SimpleFacetedSearch/Extensions.cs
+++ b/src/contrib/SimpleFacetedSearch/Extensions.cs
@@ -1,4 +1,4 @@
-/* 
+/* 
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/SimpleFacetedSearch/FacetName.cs
----------------------------------------------------------------------
diff --git a/src/contrib/SimpleFacetedSearch/FacetName.cs b/src/contrib/SimpleFacetedSearch/FacetName.cs
index 0d4038a..ea3f74a 100644
--- a/src/contrib/SimpleFacetedSearch/FacetName.cs
+++ b/src/contrib/SimpleFacetedSearch/FacetName.cs
@@ -1,4 +1,4 @@
-/* 
+/* 
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/SimpleFacetedSearch/FieldValuesBitSets.cs
----------------------------------------------------------------------
diff --git a/src/contrib/SimpleFacetedSearch/FieldValuesBitSets.cs b/src/contrib/SimpleFacetedSearch/FieldValuesBitSets.cs
index b32ce44..3c719f6 100644
--- a/src/contrib/SimpleFacetedSearch/FieldValuesBitSets.cs
+++ b/src/contrib/SimpleFacetedSearch/FieldValuesBitSets.cs
@@ -1,4 +1,4 @@
-/* 
+/* 
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/SimpleFacetedSearch/Hits.cs
----------------------------------------------------------------------
diff --git a/src/contrib/SimpleFacetedSearch/Hits.cs b/src/contrib/SimpleFacetedSearch/Hits.cs
index bc4490b..f5c3eb3 100644
--- a/src/contrib/SimpleFacetedSearch/Hits.cs
+++ b/src/contrib/SimpleFacetedSearch/Hits.cs
@@ -1,4 +1,4 @@
-/* 
+/* 
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/SimpleFacetedSearch/HitsPerFacet.cs
----------------------------------------------------------------------
diff --git a/src/contrib/SimpleFacetedSearch/HitsPerFacet.cs b/src/contrib/SimpleFacetedSearch/HitsPerFacet.cs
index 5678183..6c7c756 100644
--- a/src/contrib/SimpleFacetedSearch/HitsPerFacet.cs
+++ b/src/contrib/SimpleFacetedSearch/HitsPerFacet.cs
@@ -1,4 +1,4 @@
-/* 
+/* 
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/SimpleFacetedSearch/Properties/AssemblyInfo.cs
----------------------------------------------------------------------
diff --git a/src/contrib/SimpleFacetedSearch/Properties/AssemblyInfo.cs b/src/contrib/SimpleFacetedSearch/Properties/AssemblyInfo.cs
index ca1395b..54f888c 100644
--- a/src/contrib/SimpleFacetedSearch/Properties/AssemblyInfo.cs
+++ b/src/contrib/SimpleFacetedSearch/Properties/AssemblyInfo.cs
@@ -1,4 +1,4 @@
-/*
+/*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/SimpleFacetedSearch/SimpleFacetedSearch.cs
----------------------------------------------------------------------
diff --git a/src/contrib/SimpleFacetedSearch/SimpleFacetedSearch.cs b/src/contrib/SimpleFacetedSearch/SimpleFacetedSearch.cs
index cab3a65..6a558f7 100644
--- a/src/contrib/SimpleFacetedSearch/SimpleFacetedSearch.cs
+++ b/src/contrib/SimpleFacetedSearch/SimpleFacetedSearch.cs
@@ -1,4 +1,4 @@
-/* 
+/* 
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
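
The SimpleFacetedSearch hunks above touch only the license headers. For orientation, a hedged usage sketch of the contrib follows; the member names (Search, HitsPerFacet, Name, HitCount) are recalled from the contrib's documentation rather than from this diff, so treat them as assumptions to verify against the source files listed here.

    // Hedged sketch: group the hits for "query" by the indexed "category"
    // field, keeping at most 10 documents per facet.
    public static class FacetExample
    {
        public static void PrintFacets(IndexReader reader, Query query)
        {
            var sfs = new SimpleFacetedSearch(reader, new[] { "category" });
            SimpleFacetedSearch.Hits hits = sfs.Search(query, 10);
            foreach (SimpleFacetedSearch.HitsPerFacet facet in hits.HitsPerFacet)
            {
                System.Console.WriteLine(facet.Name + ": " + facet.HitCount);
            }
        }
    }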

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballFilter.cs b/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballFilter.cs
index aa273b5..e662c26 100644
--- a/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballFilter.cs
+++ b/src/contrib/Snowball/Lucene.Net/Analysis/Snowball/SnowballFilter.cs
@@ -25,53 +25,53 @@ using SF.Snowball.Ext;
 
 namespace Lucene.Net.Analysis.Snowball
 {
-	
-	/// <summary>A filter that stems words using a Snowball-generated stemmer.
-	/// 
-	/// Available stemmers are listed in <see cref="SF.Snowball.Ext"/>.  The name of a
-	/// stemmer is the part of the class name before "Stemmer", e.g., the stemmer in
-	/// <see cref="EnglishStemmer"/> is named "English".
-	/// </summary>
-	
-	public sealed class SnowballFilter : TokenFilter
-	{
-		private static readonly System.Object[] EMPTY_ARGS = new System.Object[0];
-		
-		private SnowballProgram stemmer;
-	    private ITermAttribute termAtt;
-		//private System.Reflection.MethodInfo stemMethod;
+    
+    /// <summary>A filter that stems words using a Snowball-generated stemmer.
+    /// 
+    /// Available stemmers are listed in <see cref="SF.Snowball.Ext"/>.  The name of a
+    /// stemmer is the part of the class name before "Stemmer", e.g., the stemmer in
+    /// <see cref="EnglishStemmer"/> is named "English".
+    /// </summary>
+    
+    public sealed class SnowballFilter : TokenFilter
+    {
+        private static readonly System.Object[] EMPTY_ARGS = new System.Object[0];
+        
+        private SnowballProgram stemmer;
+        private ITermAttribute termAtt;
+        //private System.Reflection.MethodInfo stemMethod;
 
-	    public SnowballFilter(TokenStream input, SnowballProgram stemmer)
+        public SnowballFilter(TokenStream input, SnowballProgram stemmer)
             : base(input)
-	    {
-	        this.stemmer = stemmer;
+        {
+            this.stemmer = stemmer;
             termAtt = AddAttribute<ITermAttribute>();
-	    }
+        }
 
-		/// <summary>Construct the named stemming filter.
-		/// 
-		/// </summary>
+        /// <summary>Construct the named stemming filter.
+        /// 
+        /// </summary>
         /// <param name="input">the input tokens to stem
-		/// </param>
-		/// <param name="name">the name of a stemmer
-		/// </param>
-		public SnowballFilter(TokenStream input, System.String name) : base(input)
-		{
-			try
-			{
-				System.Type stemClass = System.Type.GetType("SF.Snowball.Ext." + name + "Stemmer");
-				stemmer = (SnowballProgram) System.Activator.CreateInstance(stemClass);
-			}
-			catch (System.Exception e)
-			{
-				throw new System.SystemException(e.ToString());
-			}
-		    termAtt = AddAttribute<ITermAttribute>();
-		}
-		
-		/// <summary>Returns the next input Token, after being stemmed </summary>
+        /// </param>
+        /// <param name="name">the name of a stemmer
+        /// </param>
+        public SnowballFilter(TokenStream input, System.String name) : base(input)
+        {
+            try
+            {
+                System.Type stemClass = System.Type.GetType("SF.Snowball.Ext." + name + "Stemmer");
+                stemmer = (SnowballProgram) System.Activator.CreateInstance(stemClass);
+            }
+            catch (System.Exception e)
+            {
+                throw new System.SystemException(e.ToString());
+            }
+            termAtt = AddAttribute<ITermAttribute>();
+        }
+        
+        /// <summary>Returns the next input Token, after being stemmed </summary>
         public sealed override bool IncrementToken()
-		{
+        {
             if (input.IncrementToken())
             {
                 String originalTerm = termAtt.Term;
@@ -87,6 +87,6 @@ namespace Lucene.Net.Analysis.Snowball
             {
                 return false;
             }
-		}
-	}
+        }
+    }
 }
\ No newline at end of file
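
Note on SnowballFilter above: the string constructor resolves a stemmer class by name through reflection ("English" maps to SF.Snowball.Ext.EnglishStemmer), wrapping any failure in a SystemException. A usage sketch, not part of this commit:

    // Sketch only: stem English tokens coming out of a StandardTokenizer.
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Snowball;
    using Lucene.Net.Analysis.Standard;

    public static class SnowballExample
    {
        public static TokenStream EnglishStemming(System.IO.TextReader text)
        {
            TokenStream ts = new StandardTokenizer(Lucene.Net.Util.Version.LUCENE_30, text);
            return new SnowballFilter(ts, "English"); // via reflection; same effect as passing new EnglishStemmer()
        }
    }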

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Among.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Among.cs b/src/contrib/Snowball/SF/Snowball/Among.cs
index 62b4d75..57753c8 100644
--- a/src/contrib/Snowball/SF/Snowball/Among.cs
+++ b/src/contrib/Snowball/SF/Snowball/Among.cs
@@ -17,40 +17,40 @@
 using System;
 namespace SF.Snowball
 {
-	
-	public class Among
-	{
-		public Among(System.String s, int substring_i, int result, System.String methodname, SnowballProgram methodobject)
-		{
-			this.s_size = s.Length;
-			this.s = s;
-			this.substring_i = substring_i;
-			this.result = result;
-			this.methodobject = methodobject;
-			if (methodname.Length == 0)
-			{
-				this.method = null;
-			}
-			else
-			{
-				try
-				{
-					this.method = methodobject.GetType().GetMethod(methodname, System.Reflection.BindingFlags.Instance | System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Public | System.Reflection.BindingFlags.DeclaredOnly, null, new System.Type[0], null);
-				}
-				catch (System.MethodAccessException)
-				{
-					// FIXME - debug message
-					this.method = null;
-				}
-			}
-		}
-		
-		public int s_size; /* search string */
-		public System.String s; /* search string */
-		public int substring_i; /* index to longest matching substring */
-		public int result; /* result of the lookup */
-		public System.Reflection.MethodInfo method; /* method to use if substring matches */
-		public SnowballProgram methodobject; /* object to invoke method on */
-	}
-	
+    
+    public class Among
+    {
+        public Among(System.String s, int substring_i, int result, System.String methodname, SnowballProgram methodobject)
+        {
+            this.s_size = s.Length;
+            this.s = s;
+            this.substring_i = substring_i;
+            this.result = result;
+            this.methodobject = methodobject;
+            if (methodname.Length == 0)
+            {
+                this.method = null;
+            }
+            else
+            {
+                try
+                {
+                    this.method = methodobject.GetType().GetMethod(methodname, System.Reflection.BindingFlags.Instance | System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Public | System.Reflection.BindingFlags.DeclaredOnly, null, new System.Type[0], null);
+                }
+                catch (System.MethodAccessException)
+                {
+                    // FIXME - debug message
+                    this.method = null;
+                }
+            }
+        }
+        
+        public int s_size; /* search string */
+        public System.String s; /* search string */
+        public int substring_i; /* index to longest matching substring */
+        public int result; /* result of the lookup */
+        public System.Reflection.MethodInfo method; /* method to use if substring matches */
+        public SnowballProgram methodobject; /* object to invoke method on */
+    }
+    
 }
\ No newline at end of file
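
Note on Among above: it is the shared table-entry type of the generated stemmers. s is the suffix searched for, substring_i points at the longest entry that is itself a suffix of this one (which is what lets find_among_b binary-search the packed table), result is the code the caller's switch acts on, and method is an optional reflective guard evaluated on a match. A toy, self-contained illustration of the lookup contract (not the real packed binary search):

    // Sketch only: naive "longest matching suffix wins" lookup returning a
    // result code, with 0 meaning no match, as in the generated stemmers.
    static int Lookup(string word, string[] suffixes, int[] results)
    {
        int best = 0, bestLen = -1;
        for (int i = 0; i < suffixes.Length; i++)
        {
            if (word.EndsWith(suffixes[i]) && suffixes[i].Length > bestLen)
            {
                best = results[i];
                bestLen = suffixes[i].Length;
            }
        }
        return best;
    }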

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/DanishStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/DanishStemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/DanishStemmer.cs
index 5a1ea5d..b109bff 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/DanishStemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/DanishStemmer.cs
@@ -23,434 +23,434 @@ namespace SF.Snowball.Ext
 {
 #pragma warning disable 162,164
 
-	/// <summary> Generated class implementing code defined by a snowball script.</summary>
-	public class DanishStemmer : SnowballProgram
-	{
-		public DanishStemmer()
-		{
-			InitBlock();
-		}
-		private void  InitBlock()
-		{
+    /// <summary> Generated class implementing code defined by a snowball script.</summary>
+    public class DanishStemmer : SnowballProgram
+    {
+        public DanishStemmer()
+        {
+            InitBlock();
+        }
+        private void  InitBlock()
+        {
            a_0 = new Among[] { new Among("hed", -1, 1, "", this), new Among("ethed", 0, 1, "", this), new Among("ered", -1, 1, "", this), new Among("e", -1, 1, "", this), new Among("erede", 3, 1, "", this), new Among("ende", 3, 1, "", this), new Among("erende", 5, 1, "", this), new Among("ene", 3, 1, "", this), new Among("erne", 3, 1, "", this), new Among("ere", 3, 1, "", this), new Among("en", -1, 1, "", this), new Among("heden", 10, 1, "", this), new Among("eren", 10, 1, "", this), new Among("er", -1, 1, "", this), new Among("heder", 13, 1, "", this), new Among("erer", 13, 1, "", this), new Among("s", -1, 2, "", this), new Among("heds", 16, 1, "", this), new Among("es", 16, 1, "", this), new Among("endes", 18, 1, "", this), new Among("erendes", 19, 1, "", this), new Among("enes", 18, 1, "", this), new Among("ernes", 18, 1, "", this), new Among("eres", 18, 1, "", this), new Among("ens", 16, 1, "", this), new Among("hedens", 24, 1, "", this), new Among("erens", 24, 1, "", this), new Among("ers", 16, 1, "", this), new Among("ets", 16, 1, "", this), new Among("erets", 28, 1, "", this), new Among("et", -1, 1, "", this), new Among("eret", 30, 1, "", this) };
-			a_1 = new Among[]{new Among("gd", - 1, - 1, "", this), new Among("dt", - 1, - 1, "", this), new Among("gt", - 1, - 1, "", this), new Among("kt", - 1, - 1, "", this)};
-			a_2 = new Among[]{new Among("ig", - 1, 1, "", this), new Among("lig", 0, 1, "", this), new Among("elig", 1, 1, "", this), new Among("els", - 1, 1, "", this), new Among("l\u00F8st", - 1, 2, "", this)};
-		}
-		
-		private Among[] a_0;
-		
-		private Among[] a_1;
-		private Among[] a_2;
-		private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (48), (char) (0), (char) (128)};
-		private static readonly char[] g_s_ending = new char[]{(char) (239), (char) (254), (char) (42), (char) (3), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (16)};
-		
-		private int I_p1;
-		private System.Text.StringBuilder S_ch = new System.Text.StringBuilder();
-		
-		protected internal virtual void  copy_from(DanishStemmer other)
-		{
-			I_p1 = other.I_p1;
-			S_ch = other.S_ch;
-			base.copy_from(other);
-		}
-		
-		private bool r_mark_regions()
-		{
-			int v_1;
-			// (, line 29
-			I_p1 = limit;
-			// goto, line 33
-			while (true)
-			{
-				v_1 = cursor;
-				do 
-				{
-					if (!(in_grouping(g_v, 97, 248)))
-					{
-						goto lab1_brk;
-					}
-					cursor = v_1;
-					goto golab0_brk;
-				}
-				while (false);
+            a_1 = new Among[]{new Among("gd", - 1, - 1, "", this), new Among("dt", - 1, - 1, "", this), new Among("gt", - 1, - 1, "", this), new Among("kt", - 1, - 1, "", this)};
+            a_2 = new Among[]{new Among("ig", - 1, 1, "", this), new Among("lig", 0, 1, "", this), new Among("elig", 1, 1, "", this), new Among("els", - 1, 1, "", this), new Among("l\u00F8st", - 1, 2, "", this)};
+        }
+        
+        private Among[] a_0;
+        
+        private Among[] a_1;
+        private Among[] a_2;
+        private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (48), (char) (0), (char) (128)};
+        private static readonly char[] g_s_ending = new char[]{(char) (239), (char) (254), (char) (42), (char) (3), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (16)};
+        
+        private int I_p1;
+        private System.Text.StringBuilder S_ch = new System.Text.StringBuilder();
+        
+        protected internal virtual void  copy_from(DanishStemmer other)
+        {
+            I_p1 = other.I_p1;
+            S_ch = other.S_ch;
+            base.copy_from(other);
+        }
+        
+        private bool r_mark_regions()
+        {
+            int v_1;
+            // (, line 29
+            I_p1 = limit;
+            // goto, line 33
+            while (true)
+            {
+                v_1 = cursor;
+                do 
+                {
+                    if (!(in_grouping(g_v, 97, 248)))
+                    {
+                        goto lab1_brk;
+                    }
+                    cursor = v_1;
+                    goto golab0_brk;
+                }
+                while (false);
 
 lab1_brk: ;
 
-				cursor = v_1;
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                cursor = v_1;
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab0_brk: ;
 
-			// gopast, line 33
-			while (true)
-			{
-				do 
-				{
-					if (!(out_grouping(g_v, 97, 248)))
-					{
-						goto lab3_brk;
-					}
-					goto golab2_brk;
-				}
-				while (false);
+            // gopast, line 33
+            while (true)
+            {
+                do 
+                {
+                    if (!(out_grouping(g_v, 97, 248)))
+                    {
+                        goto lab3_brk;
+                    }
+                    goto golab2_brk;
+                }
+                while (false);
 
 lab3_brk: ;
 
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab2_brk: ;
 
-			// setmark p1, line 33
-			I_p1 = cursor;
-			// try, line 34
-			do 
-			{
-				// (, line 34
-				if (!(I_p1 < 3))
-				{
-					goto lab4_brk;
-				}
-				I_p1 = 3;
-			}
-			while (false);
+            // setmark p1, line 33
+            I_p1 = cursor;
+            // try, line 34
+            do 
+            {
+                // (, line 34
+                if (!(I_p1 < 3))
+                {
+                    goto lab4_brk;
+                }
+                I_p1 = 3;
+            }
+            while (false);
 
 lab4_brk: ;
-			
-			return true;
-		}
-		
-		private bool r_main_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			// (, line 39
-			// setlimit, line 40
-			v_1 = limit - cursor;
-			// tomark, line 40
-			if (cursor < I_p1)
-			{
-				return false;
-			}
-			cursor = I_p1;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 40
-			// [, line 40
-			ket = cursor;
-			// substring, line 40
-			among_var = find_among_b(a_0, 32);
-			if (among_var == 0)
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			// ], line 40
-			bra = cursor;
-			limit_backward = v_2;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 47
-					// delete, line 47
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 49
-					if (!(in_grouping_b(g_s_ending, 97, 229)))
-					{
-						return false;
-					}
-					// delete, line 49
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_consonant_pair()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			// (, line 53
-			// test, line 54
-			v_1 = limit - cursor;
-			// (, line 54
-			// setlimit, line 55
-			v_2 = limit - cursor;
-			// tomark, line 55
-			if (cursor < I_p1)
-			{
-				return false;
-			}
-			cursor = I_p1;
-			v_3 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_2;
-			// (, line 55
-			// [, line 55
-			ket = cursor;
-			// substring, line 55
-			if (find_among_b(a_1, 4) == 0)
-			{
-				limit_backward = v_3;
-				return false;
-			}
-			// ], line 55
-			bra = cursor;
-			limit_backward = v_3;
-			cursor = limit - v_1;
-			// next, line 61
-			if (cursor <= limit_backward)
-			{
-				return false;
-			}
-			cursor--;
-			// ], line 61
-			bra = cursor;
-			// delete, line 61
-			slice_del();
-			return true;
-		}
-		
-		private bool r_other_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			// (, line 64
-			// do, line 65
-			v_1 = limit - cursor;
-			do 
-			{
-				// (, line 65
-				// [, line 65
-				ket = cursor;
-				// literal, line 65
-				if (!(eq_s_b(2, "st")))
-				{
-					goto lab0_brk;
-				}
-				// ], line 65
-				bra = cursor;
-				// literal, line 65
-				if (!(eq_s_b(2, "ig")))
-				{
-					goto lab0_brk;
-				}
-				// delete, line 65
-				slice_del();
-			}
-			while (false);
+            
+            return true;
+        }
+        
+        private bool r_main_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            // (, line 39
+            // setlimit, line 40
+            v_1 = limit - cursor;
+            // tomark, line 40
+            if (cursor < I_p1)
+            {
+                return false;
+            }
+            cursor = I_p1;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 40
+            // [, line 40
+            ket = cursor;
+            // substring, line 40
+            among_var = find_among_b(a_0, 32);
+            if (among_var == 0)
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            // ], line 40
+            bra = cursor;
+            limit_backward = v_2;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 47
+                    // delete, line 47
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 49
+                    if (!(in_grouping_b(g_s_ending, 97, 229)))
+                    {
+                        return false;
+                    }
+                    // delete, line 49
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_consonant_pair()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            // (, line 53
+            // test, line 54
+            v_1 = limit - cursor;
+            // (, line 54
+            // setlimit, line 55
+            v_2 = limit - cursor;
+            // tomark, line 55
+            if (cursor < I_p1)
+            {
+                return false;
+            }
+            cursor = I_p1;
+            v_3 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_2;
+            // (, line 55
+            // [, line 55
+            ket = cursor;
+            // substring, line 55
+            if (find_among_b(a_1, 4) == 0)
+            {
+                limit_backward = v_3;
+                return false;
+            }
+            // ], line 55
+            bra = cursor;
+            limit_backward = v_3;
+            cursor = limit - v_1;
+            // next, line 61
+            if (cursor <= limit_backward)
+            {
+                return false;
+            }
+            cursor--;
+            // ], line 61
+            bra = cursor;
+            // delete, line 61
+            slice_del();
+            return true;
+        }
+        
+        private bool r_other_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            // (, line 64
+            // do, line 65
+            v_1 = limit - cursor;
+            do 
+            {
+                // (, line 65
+                // [, line 65
+                ket = cursor;
+                // literal, line 65
+                if (!(eq_s_b(2, "st")))
+                {
+                    goto lab0_brk;
+                }
+                // ], line 65
+                bra = cursor;
+                // literal, line 65
+                if (!(eq_s_b(2, "ig")))
+                {
+                    goto lab0_brk;
+                }
+                // delete, line 65
+                slice_del();
+            }
+            while (false);
 
 lab0_brk: ;
 
-			cursor = limit - v_1;
-			// setlimit, line 66
-			v_2 = limit - cursor;
-			// tomark, line 66
-			if (cursor < I_p1)
-			{
-				return false;
-			}
-			cursor = I_p1;
-			v_3 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_2;
-			// (, line 66
-			// [, line 66
-			ket = cursor;
-			// substring, line 66
-			among_var = find_among_b(a_2, 5);
-			if (among_var == 0)
-			{
-				limit_backward = v_3;
-				return false;
-			}
-			// ], line 66
-			bra = cursor;
-			limit_backward = v_3;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 69
-					// delete, line 69
-					slice_del();
-					// do, line 69
-					v_4 = limit - cursor;
-					do 
-					{
-						// call consonant_pair, line 69
-						if (!r_consonant_pair())
-						{
-							goto lab1_brk;
-						}
-					}
-					while (false);
+            cursor = limit - v_1;
+            // setlimit, line 66
+            v_2 = limit - cursor;
+            // tomark, line 66
+            if (cursor < I_p1)
+            {
+                return false;
+            }
+            cursor = I_p1;
+            v_3 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_2;
+            // (, line 66
+            // [, line 66
+            ket = cursor;
+            // substring, line 66
+            among_var = find_among_b(a_2, 5);
+            if (among_var == 0)
+            {
+                limit_backward = v_3;
+                return false;
+            }
+            // ], line 66
+            bra = cursor;
+            limit_backward = v_3;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 69
+                    // delete, line 69
+                    slice_del();
+                    // do, line 69
+                    v_4 = limit - cursor;
+                    do 
+                    {
+                        // call consonant_pair, line 69
+                        if (!r_consonant_pair())
+                        {
+                            goto lab1_brk;
+                        }
+                    }
+                    while (false);
 
 lab1_brk: ;
 
-					cursor = limit - v_4;
-					break;
-				
-				case 2: 
-					// (, line 71
-					// <-, line 71
-					slice_from("l\u00F8s");
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_undouble()
-		{
-			int v_1;
-			int v_2;
-			// (, line 74
-			// setlimit, line 75
-			v_1 = limit - cursor;
-			// tomark, line 75
-			if (cursor < I_p1)
-			{
-				return false;
-			}
-			cursor = I_p1;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 75
-			// [, line 75
-			ket = cursor;
-			if (!(out_grouping_b(g_v, 97, 248)))
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			// ], line 75
-			bra = cursor;
-			// -> ch, line 75
-			S_ch = slice_to(S_ch);
-			limit_backward = v_2;
-			// name ch, line 76
-			if (!(eq_v_b(S_ch)))
-			{
-				return false;
-			}
-			// delete, line 77
-			slice_del();
-			return true;
-		}
-		
-		public override bool Stem()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			// (, line 81
-			// do, line 83
-			v_1 = cursor;
-			do 
-			{
-				// call mark_regions, line 83
-				if (!r_mark_regions())
-				{
-					goto lab0_brk;
-				}
-			}
-			while (false);
+                    cursor = limit - v_4;
+                    break;
+                
+                case 2: 
+                    // (, line 71
+                    // <-, line 71
+                    slice_from("l\u00F8s");
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_undouble()
+        {
+            int v_1;
+            int v_2;
+            // (, line 74
+            // setlimit, line 75
+            v_1 = limit - cursor;
+            // tomark, line 75
+            if (cursor < I_p1)
+            {
+                return false;
+            }
+            cursor = I_p1;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 75
+            // [, line 75
+            ket = cursor;
+            if (!(out_grouping_b(g_v, 97, 248)))
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            // ], line 75
+            bra = cursor;
+            // -> ch, line 75
+            S_ch = slice_to(S_ch);
+            limit_backward = v_2;
+            // name ch, line 76
+            if (!(eq_v_b(S_ch)))
+            {
+                return false;
+            }
+            // delete, line 77
+            slice_del();
+            return true;
+        }
+        
+        public override bool Stem()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            // (, line 81
+            // do, line 83
+            v_1 = cursor;
+            do 
+            {
+                // call mark_regions, line 83
+                if (!r_mark_regions())
+                {
+                    goto lab0_brk;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
 
-			cursor = v_1;
-			// backwards, line 84
-			limit_backward = cursor; cursor = limit;
-			// (, line 84
-			// do, line 85
-			v_2 = limit - cursor;
-			do 
-			{
-				// call main_suffix, line 85
-				if (!r_main_suffix())
-				{
-					goto lab1_brk;
-				}
-			}
-			while (false);
+            cursor = v_1;
+            // backwards, line 84
+            limit_backward = cursor; cursor = limit;
+            // (, line 84
+            // do, line 85
+            v_2 = limit - cursor;
+            do 
+            {
+                // call main_suffix, line 85
+                if (!r_main_suffix())
+                {
+                    goto lab1_brk;
+                }
+            }
+            while (false);
 
 lab1_brk: ;
-			
-			cursor = limit - v_2;
-			// do, line 86
-			v_3 = limit - cursor;
-			do 
-			{
-				// call consonant_pair, line 86
-				if (!r_consonant_pair())
-				{
-					goto lab2_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_2;
+            // do, line 86
+            v_3 = limit - cursor;
+            do 
+            {
+                // call consonant_pair, line 86
+                if (!r_consonant_pair())
+                {
+                    goto lab2_brk;
+                }
+            }
+            while (false);
 
 lab2_brk: ;
 
-			cursor = limit - v_3;
-			// do, line 87
-			v_4 = limit - cursor;
-			do 
-			{
-				// call other_suffix, line 87
-				if (!r_other_suffix())
-				{
-					goto lab3_brk;
-				}
-			}
-			while (false);
+            cursor = limit - v_3;
+            // do, line 87
+            v_4 = limit - cursor;
+            do 
+            {
+                // call other_suffix, line 87
+                if (!r_other_suffix())
+                {
+                    goto lab3_brk;
+                }
+            }
+            while (false);
 
 lab3_brk: ;
 
-			cursor = limit - v_4;
-			// do, line 88
-			v_5 = limit - cursor;
-			do 
-			{
-				// call undouble, line 88
-				if (!r_undouble())
-				{
-					goto lab4_brk;
-				}
-			}
-			while (false);
+            cursor = limit - v_4;
+            // do, line 88
+            v_5 = limit - cursor;
+            do 
+            {
+                // call undouble, line 88
+                if (!r_undouble())
+                {
+                    goto lab4_brk;
+                }
+            }
+            while (false);
 
 lab4_brk: ;
 
-			cursor = limit - v_5;
-			cursor = limit_backward; return true;
-		}
-	}
+            cursor = limit - v_5;
+            cursor = limit_backward; return true;
+        }
+    }
 }
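
Note on DanishStemmer above: Stem() mirrors the Snowball script it was generated from. mark_regions computes the R1 boundary (I_p1, clamped to at least 3), then the backward-mode steps main_suffix, consonant_pair, other_suffix and undouble run in order, each restoring the cursor afterwards. Driving the generated class directly looks roughly like the sketch below; the SetCurrent/GetCurrent accessor names follow the Snowball runtime and should be verified against SF.Snowball.SnowballProgram.

    // Sketch only: stem one Danish word with the generated class.
    var stemmer = new SF.Snowball.Ext.DanishStemmer();
    stemmer.SetCurrent("heden");        // load the word
    stemmer.Stem();                     // run the generated script
    string stem = stemmer.GetCurrent(); // expected "hed" (the "en" suffix is removed)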


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/LovinsStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/LovinsStemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/LovinsStemmer.cs
index 26d2432..dfb1451 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/LovinsStemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/LovinsStemmer.cs
@@ -24,1920 +24,1920 @@ namespace SF.Snowball.Ext
 #pragma warning disable 162,164
     
     /// <summary> Generated class implementing code defined by a snowball script.</summary>
-	public class LovinsStemmer : SnowballProgram
-	{
-		public LovinsStemmer()
-		{
-			InitBlock();
-		}
-		private void  InitBlock()
-		{
-			a_0 = new Among[]{new Among("d", - 1, - 1, "", this), new Among("f", - 1, - 1, "", this), new Among("ph", - 1, - 1, "", this), new Among("th", - 1, - 1, "", this), new Among("l", - 1, - 1, "", this), new Among("er", - 1, - 1, "", this), new Among("or", - 1, - 1, "", this), new Among("es", - 1, - 1, "", this), new Among("t", - 1, - 1, "", this)};
-			a_1 = new Among[]{new Among("s'", - 1, 1, "r_A", this), new Among("a", - 1, 1, "r_A", this), new Among("ia", 1, 1, "r_A", this), new Among("ata", 1, 1, "r_A", this), new Among("ic", - 1, 1, "r_A", this), new Among("aic", 4, 1, "r_A", this), new Among("allic", 4, 1, "r_BB", this), new Among("aric", 4, 1, "r_A", this), new Among("atic", 4, 1, "r_B", this), new Among("itic", 4, 1, "r_H", this), new Among("antic", 4, 1, "r_C", this), new Among("istic", 4, 1, "r_A", this), new Among("alistic", 11, 1, "r_B", this), new Among("aristic", 11, 1, "r_A", this), new Among("ivistic", 11, 1, "r_A", this), new Among("ed", - 1, 1, "r_E", this), new Among("anced", 15, 1, "r_B", this), new Among("enced", 15, 1, "r_A", this), new Among("ished", 15, 1, "r_A", this), new Among("ied", 15, 1, "r_A", this), new Among("ened", 15, 1, "r_E", this), new Among("ioned", 15, 1, "r_A", this), new Among("ated", 15, 1, "r_I", this), new Among("ented", 15, 1, "r_C", this), new Among("ized", 15, 1, "r_F", this), ne
 w Among("arized", 24, 1, "r_A", this), new Among("oid", - 1, 1, "r_A", this), new Among("aroid", 26, 1, "r_A", this), new Among("hood", - 1, 1, "r_A", this), new Among("ehood", 28, 1, "r_A", this), new Among("ihood", 28, 1, "r_A", this), new Among("elihood", 30, 1, "r_E", this), new Among("ward", - 1, 1, "r_A", this), new Among("e", - 1, 1, "r_A", this), new Among("ae", 33, 1, "r_A", this), new Among("ance", 33, 1, "r_B", this), new Among("icance", 35, 1, "r_A", this), new Among("ence", 33, 1, "r_A", this), new Among("ide", 33, 1, "r_L", this), new Among("icide", 38, 1, "r_A", this), new Among("otide", 38, 1, "r_A", this), new Among("age", 33, 1, "r_B", this), new Among("able", 33, 1, "r_A", this), new Among("atable", 42, 1, "r_A", this), new Among("izable", 42, 1, "r_E", this), new Among("arizable", 44, 1, "r_A", this), new Among("ible", 33, 1, "r_A", this), new Among("encible", 46, 1, "r_A", this), new Among("ene", 33, 1, "r_E", this), new Among("ine", 33, 1, "r_M", this), new Amo
 ng("idine", 49, 1, "r_I", this), new 
-				Among("one", 33, 1, "r_R", this), new Among("ature", 33, 1, "r_E", this), new Among("eature", 52, 1, "r_Z", this), new Among("ese", 33, 1, "r_A", this), new Among("wise", 33, 1, "r_A", this), new Among("ate", 33, 1, "r_A", this), new Among("entiate", 56, 1, "r_A", this), new Among("inate", 56, 1, "r_A", this), new Among("ionate", 56, 1, "r_D", this), new Among("ite", 33, 1, "r_AA", this), new Among("ive", 33, 1, "r_A", this), new Among("ative", 61, 1, "r_A", this), new Among("ize", 33, 1, "r_F", this), new Among("alize", 63, 1, "r_A", this), new Among("icalize", 64, 1, "r_A", this), new Among("ialize", 64, 1, "r_A", this), new Among("entialize", 66, 1, "r_A", this), new Among("ionalize", 64, 1, "r_A", this), new Among("arize", 63, 1, "r_A", this), new Among("ing", - 1, 1, "r_N", this), new Among("ancing", 70, 1, "r_B", this), new Among("encing", 70, 1, "r_A", this), new Among("aging", 70, 1, "r_B", this), new Among("ening", 70, 1, "r_E", this), new Among("ioning", 70, 1, "r_A", 
 this), new Among("ating", 70, 1, "r_I", this), new Among("enting", 70, 1, "r_C", this), new Among("ying", 70, 1, "r_B", this), new Among("izing", 70, 1, "r_F", this), new Among("arizing", 79, 1, "r_A", this), new Among("ish", - 1, 1, "r_C", this), new Among("yish", 81, 1, "r_A", this), new Among("i", - 1, 1, "r_A", this), new Among("al", - 1, 1, "r_BB", this), new Among("ical", 84, 1, "r_A", this), new Among("aical", 85, 1, "r_A", this), new Among("istical", 85, 1, "r_A", this), new Among("oidal", 84, 1, "r_A", this), new Among("eal", 84, 1, "r_Y", this), new Among("ial", 84, 1, "r_A", this), new Among("ancial", 90, 1, "r_A", this), new Among("arial", 90, 1, "r_A", this), new Among("ential", 90, 1, "r_A", this), new Among("ional", 84, 1, "r_A", this), new Among("ational", 94, 1, "r_B", this), new Among("izational", 95, 1, "r_A", this), new Among("ental", 84, 1, "r_A", this), new Among("ful", - 1, 1, "r_A", this), new Among("eful", 98, 1, "r_A", this), new Among("iful", 98, 1, "r_A",
  this), new Among("yl", - 1, 1, 
-				"r_R", this), new Among("ism", - 1, 1, "r_B", this), new Among("icism", 102, 1, "r_A", this), new Among("oidism", 102, 1, "r_A", this), new Among("alism", 102, 1, "r_B", this), new Among("icalism", 105, 1, "r_A", this), new Among("ionalism", 105, 1, "r_A", this), new Among("inism", 102, 1, "r_J", this), new Among("ativism", 102, 1, "r_A", this), new Among("um", - 1, 1, "r_U", this), new Among("ium", 110, 1, "r_A", this), new Among("ian", - 1, 1, "r_A", this), new Among("ician", 112, 1, "r_A", this), new Among("en", - 1, 1, "r_F", this), new Among("ogen", 114, 1, "r_A", this), new Among("on", - 1, 1, "r_S", this), new Among("ion", 116, 1, "r_Q", this), new Among("ation", 117, 1, "r_B", this), new Among("ication", 118, 1, "r_G", this), new Among("entiation", 118, 1, "r_A", this), new Among("ination", 118, 1, "r_A", this), new Among("isation", 118, 1, "r_A", this), new Among("arisation", 122, 1, "r_A", this), new Among("entation", 118, 1, "r_A", this), new Among("ization", 118, 1, 
 "r_F", this), new Among("arization", 125, 1, "r_A", this), new Among("action", 117, 1, "r_G", this), new Among("o", - 1, 1, "r_A", this), new Among("ar", - 1, 1, "r_X", this), new Among("ear", 129, 1, "r_Y", this), new Among("ier", - 1, 1, "r_A", this), new Among("ariser", - 1, 1, "r_A", this), new Among("izer", - 1, 1, "r_F", this), new Among("arizer", 133, 1, "r_A", this), new Among("or", - 1, 1, "r_T", this), new Among("ator", 135, 1, "r_A", this), new Among("s", - 1, 1, "r_W", this), new Among("'s", 137, 1, "r_A", this), new Among("as", 137, 1, "r_B", this), new Among("ics", 137, 1, "r_A", this), new Among("istics", 140, 1, "r_A", this), new Among("es", 137, 1, "r_E", this), new Among("ances", 142, 1, "r_B", this), new Among("ences", 142, 1, "r_A", this), new Among("ides", 142, 1, "r_L", this), new Among("oides", 145, 1, "r_A", this), new Among("ages", 142, 1, "r_B", this), new Among("ies", 142, 1, "r_P", this), new Among("acies", 148, 1, "r_A", this), new Among("ancies", 148, 1
 , "r_A", this), new Among("encies", 
-				148, 1, "r_A", this), new Among("aries", 148, 1, "r_A", this), new Among("ities", 148, 1, "r_A", this), new Among("alities", 153, 1, "r_A", this), new Among("ivities", 153, 1, "r_A", this), new Among("ines", 142, 1, "r_M", this), new Among("nesses", 142, 1, "r_A", this), new Among("ates", 142, 1, "r_A", this), new Among("atives", 142, 1, "r_A", this), new Among("ings", 137, 1, "r_N", this), new Among("is", 137, 1, "r_A", this), new Among("als", 137, 1, "r_BB", this), new Among("ials", 162, 1, "r_A", this), new Among("entials", 163, 1, "r_A", this), new Among("ionals", 162, 1, "r_A", this), new Among("isms", 137, 1, "r_B", this), new Among("ians", 137, 1, "r_A", this), new Among("icians", 167, 1, "r_A", this), new Among("ions", 137, 1, "r_B", this), new Among("ations", 169, 1, "r_B", this), new Among("arisations", 170, 1, "r_A", this), new Among("entations", 170, 1, "r_A", this), new Among("izations", 170, 1, "r_A", this), new Among("arizations", 173, 1, "r_A", this), new Among("
 ars", 137, 1, "r_O", this), new Among("iers", 137, 1, "r_A", this), new Among("izers", 137, 1, "r_F", this), new Among("ators", 137, 1, "r_A", this), new Among("less", 137, 1, "r_A", this), new Among("eless", 179, 1, "r_A", this), new Among("ness", 137, 1, "r_A", this), new Among("eness", 181, 1, "r_E", this), new Among("ableness", 182, 1, "r_A", this), new Among("eableness", 183, 1, "r_E", this), new Among("ibleness", 182, 1, "r_A", this), new Among("ateness", 182, 1, "r_A", this), new Among("iteness", 182, 1, "r_A", this), new Among("iveness", 182, 1, "r_A", this), new Among("ativeness", 188, 1, "r_A", this), new Among("ingness", 181, 1, "r_A", this), new Among("ishness", 181, 1, "r_A", this), new Among("iness", 181, 1, "r_A", this), new Among("ariness", 192, 1, "r_E", this), new Among("alness", 181, 1, "r_A", this), new Among("icalness", 194, 1, "r_A", this), new Among("antialness", 194, 1, "r_A", this), new Among("entialness", 194, 1, "r_A", this), new Among("ionalness", 194, 1,
  "r_A", this), new Among("fulness", 
-				181, 1, "r_A", this), new Among("lessness", 181, 1, "r_A", this), new Among("ousness", 181, 1, "r_A", this), new Among("eousness", 201, 1, "r_A", this), new Among("iousness", 201, 1, "r_A", this), new Among("itousness", 201, 1, "r_A", this), new Among("entness", 181, 1, "r_A", this), new Among("ants", 137, 1, "r_B", this), new Among("ists", 137, 1, "r_A", this), new Among("icists", 207, 1, "r_A", this), new Among("us", 137, 1, "r_V", this), new Among("ous", 209, 1, "r_A", this), new Among("eous", 210, 1, "r_A", this), new Among("aceous", 211, 1, "r_A", this), new Among("antaneous", 211, 1, "r_A", this), new Among("ious", 210, 1, "r_A", this), new Among("acious", 214, 1, "r_B", this), new Among("itous", 210, 1, "r_A", this), new Among("ant", - 1, 1, "r_B", this), new Among("icant", 217, 1, "r_A", this), new Among("ent", - 1, 1, "r_C", this), new Among("ement", 219, 1, "r_A", this), new Among("izement", 220, 1, "r_A", this), new Among("ist", - 1, 1, "r_A", this), new Among("icist", 222, 1, "r_A", this), new Among("alist", 222, 1, "r_A", this), new Among("icalist", 224, 1, "r_A", this), new Among("ialist", 224, 1, "r_A", this), new Among("ionist", 222, 1, "r_A", this), new Among("entist", 222, 1, "r_A", this), new Among("y", - 1, 1, "r_B", this), new Among("acy", 229, 1, "r_A", this), new Among("ancy", 229, 1, "r_B", this), new Among("ency", 229, 1, "r_A", this), new Among("ly", 229, 1, "r_B", this), new Among("ealy", 233, 1, "r_Y", this), new Among("ably", 233, 1, "r_A", this), new Among("ibly", 233, 1, "r_A", this), new Among("edly", 233, 1, "r_E", this), new Among("iedly", 237, 1, "r_A", this), new Among("ely", 233, 1, "r_E", this), new Among("ately", 239, 1, "r_A", this), new Among("ively", 239, 1, "r_A", this), new Among("atively", 241, 1, "r_A", this), new Among("ingly", 233, 1, "r_B", this), new Among("atingly", 243, 1, "r_A", this), new Among("ily", 233, 1, "r_A", this), new Among("lily", 245, 1, "r_A", this), new Among("arily", 245, 1, "r_A", this), new Among("ally", 233, 1, "r_B", 
-				this), new Among("ically", 248, 1, "r_A", this), new Among("aically", 249, 1, "r_A", this), new Among("allically", 249, 1, "r_C", this), new Among("istically", 249, 1, "r_A", this), new Among("alistically", 252, 1, "r_B", this), new Among("oidally", 248, 1, "r_A", this), new Among("ially", 248, 1, "r_A", this), new Among("entially", 255, 1, "r_A", this), new Among("ionally", 248, 1, "r_A", this), new Among("ationally", 257, 1, "r_B", this), new Among("izationally", 258, 1, "r_B", this), new Among("entally", 248, 1, "r_A", this), new Among("fully", 233, 1, "r_A", this), new Among("efully", 261, 1, "r_A", this), new Among("ifully", 261, 1, "r_A", this), new Among("enly", 233, 1, "r_E", this), new Among("arly", 233, 1, "r_K", this), new Among("early", 265, 1, "r_Y", this), new Among("lessly", 233, 1, "r_A", this), new Among("ously", 233, 1, "r_A", this), new Among("eously", 268, 1, "r_A", this), new Among("iously", 268, 1, "r_A", this), new Among("ently", 233, 1, "r_A", this), new Among("ary", 229, 1, "r_F", this), new Among("ery", 229, 1, "r_E", this), new Among("icianry", 229, 1, "r_A", this), new Among("atory", 229, 1, "r_A", this), new Among("ity", 229, 1, "r_A", this), new Among("acity", 276, 1, "r_A", this), new Among("icity", 276, 1, "r_A", this), new Among("eity", 276, 1, "r_A", this), new Among("ality", 276, 1, "r_A", this), new Among("icality", 280, 1, "r_A", this), new Among("iality", 280, 1, "r_A", this), new Among("antiality", 282, 1, "r_A", this), new Among("entiality", 282, 1, "r_A", this), new Among("ionality", 280, 1, "r_A", this), new Among("elity", 276, 1, "r_A", this), new Among("ability", 276, 1, "r_A", this), new Among("izability", 287, 1, "r_A", this), new Among("arizability", 288, 1, "r_A", this), new Among("ibility", 276, 1, "r_A", this), new Among("inity", 276, 1, "r_CC", this), new Among("arity", 276, 1, "r_B", this), new Among("ivity", 276, 1, "r_A", this)};
-			a_2 = new Among[]{new Among("bb", - 1, - 1, "", this), new Among("dd", - 1, - 1, "", this), new Among("gg", - 1, - 1, "", this), new Among("ll", - 1, - 1, "", this), new Among("mm", - 1, - 1, "", this), new Among("nn", - 1, - 1, "", this), new Among("pp", - 1, - 1, "", this), new Among("rr", - 1, - 1, "", this), new Among("ss", - 1, - 1, "", this), new Among("tt", - 1, - 1, "", this)};
-			a_3 = new Among[]{new Among("uad", - 1, 18, "", this), new Among("vad", - 1, 19, "", this), new Among("cid", - 1, 20, "", this), new Among("lid", - 1, 21, "", this), new Among("erid", - 1, 22, "", this), new Among("pand", - 1, 23, "", this), new Among("end", - 1, 24, "", this), new Among("ond", - 1, 25, "", this), new Among("lud", - 1, 26, "", this), new Among("rud", - 1, 27, "", this), new Among("ul", - 1, 9, "", this), new Among("her", - 1, 28, "", this), new Among("metr", - 1, 7, "", this), new Among("istr", - 1, 6, "", this), new Among("urs", - 1, 5, "", this), new Among("uct", - 1, 2, "", this), new Among("et", - 1, 32, "", this), new Among("mit", - 1, 29, "", this), new Among("ent", - 1, 30, "", this), new Among("umpt", - 1, 3, "", this), new Among("rpt", - 1, 4, "", this), new Among("ert", - 1, 31, "", this), new Among("yt", - 1, 33, "", this), new Among("iev", - 1, 1, "", this), new Among("olv", - 1, 8, "", this), new Among("ax", - 1, 14, "", this), new Among("ex", - 1, 15, "", this), new Among("bex", 26, 10, "", this), new Among("dex", 26, 11, "", this), new Among("pex", 26, 12, "", this), new Among("tex", 26, 13, "", this), new Among("ix", - 1, 16, "", this), new Among("lux", - 1, 17, "", this), new Among("yz", - 1, 34, "", this)};
-		}
-		
-		private Among[] a_0;
-		private Among[] a_1;
-		private Among[] a_2;
-		private Among[] a_3;
-		
-		protected internal virtual void  copy_from(LovinsStemmer other)
-		{
-			base.copy_from(other);
-		}
-		
-		private bool r_A()
-		{
-			// (, line 21
-			// hop, line 21
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			return true;
-		}
-		
-		private bool r_B()
-		{
-			// (, line 22
-			// hop, line 22
-			{
-				int c = cursor - 3;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			return true;
-		}
-		
-		private bool r_C()
-		{
-			// (, line 23
-			// hop, line 23
-			{
-				int c = cursor - 4;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			return true;
-		}
-		
-		private bool r_D()
-		{
-			// (, line 24
-			// hop, line 24
-			{
-				int c = cursor - 5;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			return true;
-		}
-		
-		private bool r_E()
-		{
-			int v_1;
-			int v_2;
-			// (, line 25
-			// test, line 25
-			v_1 = limit - cursor;
-			// hop, line 25
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// not, line 25
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 25
-					if (!(eq_s_b(1, "e")))
-					{
-						goto lab0_brk;
-					}
-					return false;
-				}
-				while (false);
+    public class LovinsStemmer : SnowballProgram
+    {
+        public LovinsStemmer()
+        {
+            InitBlock();
+        }
+        private void  InitBlock()
+        {
+            a_0 = new Among[]{new Among("d", - 1, - 1, "", this), new Among("f", - 1, - 1, "", this), new Among("ph", - 1, - 1, "", this), new Among("th", - 1, - 1, "", this), new Among("l", - 1, - 1, "", this), new Among("er", - 1, - 1, "", this), new Among("or", - 1, - 1, "", this), new Among("es", - 1, - 1, "", this), new Among("t", - 1, - 1, "", this)};
+            a_1 = new Among[]{new Among("s'", - 1, 1, "r_A", this), new Among("a", - 1, 1, "r_A", this), new Among("ia", 1, 1, "r_A", this), new Among("ata", 1, 1, "r_A", this), new Among("ic", - 1, 1, "r_A", this), new Among("aic", 4, 1, "r_A", this), new Among("allic", 4, 1, "r_BB", this), new Among("aric", 4, 1, "r_A", this), new Among("atic", 4, 1, "r_B", this), new Among("itic", 4, 1, "r_H", this), new Among("antic", 4, 1, "r_C", this), new Among("istic", 4, 1, "r_A", this), new Among("alistic", 11, 1, "r_B", this), new Among("aristic", 11, 1, "r_A", this), new Among("ivistic", 11, 1, "r_A", this), new Among("ed", - 1, 1, "r_E", this), new Among("anced", 15, 1, "r_B", this), new Among("enced", 15, 1, "r_A", this), new Among("ished", 15, 1, "r_A", this), new Among("ied", 15, 1, "r_A", this), new Among("ened", 15, 1, "r_E", this), new Among("ioned", 15, 1, "r_A", this), new Among("ated", 15, 1, "r_I", this), new Among("ented", 15, 1, "r_C", this), new Among("ized", 15, 1, "r_F", this), new Among("arized", 24, 1, "r_A", this), new Among("oid", - 1, 1, "r_A", this), new Among("aroid", 26, 1, "r_A", this), new Among("hood", - 1, 1, "r_A", this), new Among("ehood", 28, 1, "r_A", this), new Among("ihood", 28, 1, "r_A", this), new Among("elihood", 30, 1, "r_E", this), new Among("ward", - 1, 1, "r_A", this), new Among("e", - 1, 1, "r_A", this), new Among("ae", 33, 1, "r_A", this), new Among("ance", 33, 1, "r_B", this), new Among("icance", 35, 1, "r_A", this), new Among("ence", 33, 1, "r_A", this), new Among("ide", 33, 1, "r_L", this), new Among("icide", 38, 1, "r_A", this), new Among("otide", 38, 1, "r_A", this), new Among("age", 33, 1, "r_B", this), new Among("able", 33, 1, "r_A", this), new Among("atable", 42, 1, "r_A", this), new Among("izable", 42, 1, "r_E", this), new Among("arizable", 44, 1, "r_A", this), new Among("ible", 33, 1, "r_A", this), new Among("encible", 46, 1, "r_A", this), new Among("ene", 33, 1, "r_E", this), new Among("ine", 33, 1, "r_M", this), new Among("idine", 49, 1, "r_I", this), new 
+                Among("one", 33, 1, "r_R", this), new Among("ature", 33, 1, "r_E", this), new Among("eature", 52, 1, "r_Z", this), new Among("ese", 33, 1, "r_A", this), new Among("wise", 33, 1, "r_A", this), new Among("ate", 33, 1, "r_A", this), new Among("entiate", 56, 1, "r_A", this), new Among("inate", 56, 1, "r_A", this), new Among("ionate", 56, 1, "r_D", this), new Among("ite", 33, 1, "r_AA", this), new Among("ive", 33, 1, "r_A", this), new Among("ative", 61, 1, "r_A", this), new Among("ize", 33, 1, "r_F", this), new Among("alize", 63, 1, "r_A", this), new Among("icalize", 64, 1, "r_A", this), new Among("ialize", 64, 1, "r_A", this), new Among("entialize", 66, 1, "r_A", this), new Among("ionalize", 64, 1, "r_A", this), new Among("arize", 63, 1, "r_A", this), new Among("ing", - 1, 1, "r_N", this), new Among("ancing", 70, 1, "r_B", this), new Among("encing", 70, 1, "r_A", this), new Among("aging", 70, 1, "r_B", this), new Among("ening", 70, 1, "r_E", this), new Among("ioning", 70, 1, "r_A", this), new Among("ating", 70, 1, "r_I", this), new Among("enting", 70, 1, "r_C", this), new Among("ying", 70, 1, "r_B", this), new Among("izing", 70, 1, "r_F", this), new Among("arizing", 79, 1, "r_A", this), new Among("ish", - 1, 1, "r_C", this), new Among("yish", 81, 1, "r_A", this), new Among("i", - 1, 1, "r_A", this), new Among("al", - 1, 1, "r_BB", this), new Among("ical", 84, 1, "r_A", this), new Among("aical", 85, 1, "r_A", this), new Among("istical", 85, 1, "r_A", this), new Among("oidal", 84, 1, "r_A", this), new Among("eal", 84, 1, "r_Y", this), new Among("ial", 84, 1, "r_A", this), new Among("ancial", 90, 1, "r_A", this), new Among("arial", 90, 1, "r_A", this), new Among("ential", 90, 1, "r_A", this), new Among("ional", 84, 1, "r_A", this), new Among("ational", 94, 1, "r_B", this), new Among("izational", 95, 1, "r_A", this), new Among("ental", 84, 1, "r_A", this), new Among("ful", - 1, 1, "r_A", this), new Among("eful", 98, 1, "r_A", this), new Among("iful", 98, 1, "r_A", this), new Among("yl", - 1, 1, 
+                "r_R", this), new Among("ism", - 1, 1, "r_B", this), new Among("icism", 102, 1, "r_A", this), new Among("oidism", 102, 1, "r_A", this), new Among("alism", 102, 1, "r_B", this), new Among("icalism", 105, 1, "r_A", this), new Among("ionalism", 105, 1, "r_A", this), new Among("inism", 102, 1, "r_J", this), new Among("ativism", 102, 1, "r_A", this), new Among("um", - 1, 1, "r_U", this), new Among("ium", 110, 1, "r_A", this), new Among("ian", - 1, 1, "r_A", this), new Among("ician", 112, 1, "r_A", this), new Among("en", - 1, 1, "r_F", this), new Among("ogen", 114, 1, "r_A", this), new Among("on", - 1, 1, "r_S", this), new Among("ion", 116, 1, "r_Q", this), new Among("ation", 117, 1, "r_B", this), new Among("ication", 118, 1, "r_G", this), new Among("entiation", 118, 1, "r_A", this), new Among("ination", 118, 1, "r_A", this), new Among("isation", 118, 1, "r_A", this), new Among("arisation", 122, 1, "r_A", this), new Among("entation", 118, 1, "r_A", this), new Among("ization", 118, 1, "r_F", this), new Among("arization", 125, 1, "r_A", this), new Among("action", 117, 1, "r_G", this), new Among("o", - 1, 1, "r_A", this), new Among("ar", - 1, 1, "r_X", this), new Among("ear", 129, 1, "r_Y", this), new Among("ier", - 1, 1, "r_A", this), new Among("ariser", - 1, 1, "r_A", this), new Among("izer", - 1, 1, "r_F", this), new Among("arizer", 133, 1, "r_A", this), new Among("or", - 1, 1, "r_T", this), new Among("ator", 135, 1, "r_A", this), new Among("s", - 1, 1, "r_W", this), new Among("'s", 137, 1, "r_A", this), new Among("as", 137, 1, "r_B", this), new Among("ics", 137, 1, "r_A", this), new Among("istics", 140, 1, "r_A", this), new Among("es", 137, 1, "r_E", this), new Among("ances", 142, 1, "r_B", this), new Among("ences", 142, 1, "r_A", this), new Among("ides", 142, 1, "r_L", this), new Among("oides", 145, 1, "r_A", this), new Among("ages", 142, 1, "r_B", this), new Among("ies", 142, 1, "r_P", this), new Among("acies", 148, 1, "r_A", this), new Among("ancies", 148, 1, "r_A", this), new Among("encies", 
+                148, 1, "r_A", this), new Among("aries", 148, 1, "r_A", this), new Among("ities", 148, 1, "r_A", this), new Among("alities", 153, 1, "r_A", this), new Among("ivities", 153, 1, "r_A", this), new Among("ines", 142, 1, "r_M", this), new Among("nesses", 142, 1, "r_A", this), new Among("ates", 142, 1, "r_A", this), new Among("atives", 142, 1, "r_A", this), new Among("ings", 137, 1, "r_N", this), new Among("is", 137, 1, "r_A", this), new Among("als", 137, 1, "r_BB", this), new Among("ials", 162, 1, "r_A", this), new Among("entials", 163, 1, "r_A", this), new Among("ionals", 162, 1, "r_A", this), new Among("isms", 137, 1, "r_B", this), new Among("ians", 137, 1, "r_A", this), new Among("icians", 167, 1, "r_A", this), new Among("ions", 137, 1, "r_B", this), new Among("ations", 169, 1, "r_B", this), new Among("arisations", 170, 1, "r_A", this), new Among("entations", 170, 1, "r_A", this), new Among("izations", 170, 1, "r_A", this), new Among("arizations", 173, 1, "r_A", this), new Among("ars", 137, 1, "r_O", this), new Among("iers", 137, 1, "r_A", this), new Among("izers", 137, 1, "r_F", this), new Among("ators", 137, 1, "r_A", this), new Among("less", 137, 1, "r_A", this), new Among("eless", 179, 1, "r_A", this), new Among("ness", 137, 1, "r_A", this), new Among("eness", 181, 1, "r_E", this), new Among("ableness", 182, 1, "r_A", this), new Among("eableness", 183, 1, "r_E", this), new Among("ibleness", 182, 1, "r_A", this), new Among("ateness", 182, 1, "r_A", this), new Among("iteness", 182, 1, "r_A", this), new Among("iveness", 182, 1, "r_A", this), new Among("ativeness", 188, 1, "r_A", this), new Among("ingness", 181, 1, "r_A", this), new Among("ishness", 181, 1, "r_A", this), new Among("iness", 181, 1, "r_A", this), new Among("ariness", 192, 1, "r_E", this), new Among("alness", 181, 1, "r_A", this), new Among("icalness", 194, 1, "r_A", this), new Among("antialness", 194, 1, "r_A", this), new Among("entialness", 194, 1, "r_A", this), new Among("ionalness", 194, 1, "r_A", this), new Among("fulness", 
+                181, 1, "r_A", this), new Among("lessness", 181, 1, "r_A", this), new Among("ousness", 181, 1, "r_A", this), new Among("eousness", 201, 1, "r_A", this), new Among("iousness", 201, 1, "r_A", this), new Among("itousness", 201, 1, "r_A", this), new Among("entness", 181, 1, "r_A", this), new Among("ants", 137, 1, "r_B", this), new Among("ists", 137, 1, "r_A", this), new Among("icists", 207, 1, "r_A", this), new Among("us", 137, 1, "r_V", this), new Among("ous", 209, 1, "r_A", this), new Among("eous", 210, 1, "r_A", this), new Among("aceous", 211, 1, "r_A", this), new Among("antaneous", 211, 1, "r_A", this), new Among("ious", 210, 1, "r_A", this), new Among("acious", 214, 1, "r_B", this), new Among("itous", 210, 1, "r_A", this), new Among("ant", - 1, 1, "r_B", this), new Among("icant", 217, 1, "r_A", this), new Among("ent", - 1, 1, "r_C", this), new Among("ement", 219, 1, "r_A", this), new Among("izement", 220, 1, "r_A", this), new Among("ist", - 1, 1, "r_A", this), new Among("icist", 222, 1, "r_A", this), new Among("alist", 222, 1, "r_A", this), new Among("icalist", 224, 1, "r_A", this), new Among("ialist", 224, 1, "r_A", this), new Among("ionist", 222, 1, "r_A", this), new Among("entist", 222, 1, "r_A", this), new Among("y", - 1, 1, "r_B", this), new Among("acy", 229, 1, "r_A", this), new Among("ancy", 229, 1, "r_B", this), new Among("ency", 229, 1, "r_A", this), new Among("ly", 229, 1, "r_B", this), new Among("ealy", 233, 1, "r_Y", this), new Among("ably", 233, 1, "r_A", this), new Among("ibly", 233, 1, "r_A", this), new Among("edly", 233, 1, "r_E", this), new Among("iedly", 237, 1, "r_A", this), new Among("ely", 233, 1, "r_E", this), new Among("ately", 239, 1, "r_A", this), new Among("ively", 239, 1, "r_A", this), new Among("atively", 241, 1, "r_A", this), new Among("ingly", 233, 1, "r_B", this), new Among("atingly", 243, 1, "r_A", this), new Among("ily", 233, 1, "r_A", this), new Among("lily", 245, 1, "r_A", this), new Among("arily", 245, 1, "r_A", this), new Among("ally", 233, 1, "r_B", 
+                this), new Among("ically", 248, 1, "r_A", this), new Among("aically", 249, 1, "r_A", this), new Among("allically", 249, 1, "r_C", this), new Among("istically", 249, 1, "r_A", this), new Among("alistically", 252, 1, "r_B", this), new Among("oidally", 248, 1, "r_A", this), new Among("ially", 248, 1, "r_A", this), new Among("entially", 255, 1, "r_A", this), new Among("ionally", 248, 1, "r_A", this), new Among("ationally", 257, 1, "r_B", this), new Among("izationally", 258, 1, "r_B", this), new Among("entally", 248, 1, "r_A", this), new Among("fully", 233, 1, "r_A", this), new Among("efully", 261, 1, "r_A", this), new Among("ifully", 261, 1, "r_A", this), new Among("enly", 233, 1, "r_E", this), new Among("arly", 233, 1, "r_K", this), new Among("early", 265, 1, "r_Y", this), new Among("lessly", 233, 1, "r_A", this), new Among("ously", 233, 1, "r_A", this), new Among("eously", 268, 1, "r_A", this), new Among("iously", 268, 1, "r_A", this), new Among("ently", 233, 1, "r_A", this), new Among("ary", 229, 1, "r_F", this), new Among("ery", 229, 1, "r_E", this), new Among("icianry", 229, 1, "r_A", this), new Among("atory", 229, 1, "r_A", this), new Among("ity", 229, 1, "r_A", this), new Among("acity", 276, 1, "r_A", this), new Among("icity", 276, 1, "r_A", this), new Among("eity", 276, 1, "r_A", this), new Among("ality", 276, 1, "r_A", this), new Among("icality", 280, 1, "r_A", this), new Among("iality", 280, 1, "r_A", this), new Among("antiality", 282, 1, "r_A", this), new Among("entiality", 282, 1, "r_A", this), new Among("ionality", 280, 1, "r_A", this), new Among("elity", 276, 1, "r_A", this), new Among("ability", 276, 1, "r_A", this), new Among("izability", 287, 1, "r_A", this), new Among("arizability", 288, 1, "r_A", this), new Among("ibility", 276, 1, "r_A", this), new Among("inity", 276, 1, "r_CC", this), new Among("arity", 276, 1, "r_B", this), new Among("ivity", 276, 1, "r_A", this)};
+            a_2 = new Among[]{new Among("bb", - 1, - 1, "", this), new Among("dd", - 1, - 1, "", this), new Among("gg", - 1, - 1, "", this), new Among("ll", - 1, - 1, "", this), new Among("mm", - 1, - 1, "", this), new Among("nn", - 1, - 1, "", this), new Among("pp", - 1, - 1, "", this), new Among("rr", - 1, - 1, "", this), new Among("ss", - 1, - 1, "", this), new Among("tt", - 1, - 1, "", this)};
+            a_3 = new Among[]{new Among("uad", - 1, 18, "", this), new Among("vad", - 1, 19, "", this), new Among("cid", - 1, 20, "", this), new Among("lid", - 1, 21, "", this), new Among("erid", - 1, 22, "", this), new Among("pand", - 1, 23, "", this), new Among("end", - 1, 24, "", this), new Among("ond", - 1, 25, "", this), new Among("lud", - 1, 26, "", this), new Among("rud", - 1, 27, "", this), new Among("ul", - 1, 9, "", this), new Among("her", - 1, 28, "", this), new Among("metr", - 1, 7, "", this), new Among("istr", - 1, 6, "", this), new Among("urs", - 1, 5, "", this), new Among("uct", - 1, 2, "", this), new Among("et", - 1, 32, "", this), new Among("mit", - 1, 29, "", this), new Among("ent", - 1, 30, "", this), new Among("umpt", - 1, 3, "", this), new Among("rpt", - 1, 4, "", this), new Among("ert", - 1, 31, "", this), new Among("yt", - 1, 33, "", this), new Among("iev", - 1, 1, "", this), new Among("olv", - 1, 8, "", this), new Among("ax", - 1, 14, "", this), new Among("ex", - 1, 15, "", this), new Among("bex", 26, 10, "", this), new Among("dex", 26, 11, "", this), new Among("pex", 26, 12, "", this), new Among("tex", 26, 13, "", this), new Among("ix", - 1, 16, "", this), new Among("lux", - 1, 17, "", this), new Among("yz", - 1, 34, "", this)};
+        }
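A note on the tables built above, not part of the patch: each Among row appears to follow the usual Snowball runtime layout, i.e. the suffix itself, the index of the row holding its longest proper sub-suffix within the same table, a result code, and the name of the r_* condition routine invoked (via reflection) when the suffix matches right-to-left. Read that way, one a_1 row decodes as below; the annotations are illustrative only.

    // Sketch (not in the patch): decoding one a_1 entry under the usual
    // Snowball Among convention.
    //   new Among("ization", // suffix, matched right-to-left against the word
    //             118,       // index of "ation", its longest sub-suffix row in a_1
    //             1,         // result code returned when this row wins
    //             "r_F",     // context condition that must hold before removal
    //             this);     // object whose r_F method is called via reflection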
+        
+        private Among[] a_0;
+        private Among[] a_1;
+        private Among[] a_2;
+        private Among[] a_3;
+        
+        protected internal virtual void  copy_from(LovinsStemmer other)
+        {
+            base.copy_from(other);
+        }
+        
+        private bool r_A()
+        {
+            // (, line 21
+            // hop, line 21
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            return true;
+        }
+        
+        private bool r_B()
+        {
+            // (, line 22
+            // hop, line 22
+            {
+                int c = cursor - 3;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            return true;
+        }
+        
+        private bool r_C()
+        {
+            // (, line 23
+            // hop, line 23
+            {
+                int c = cursor - 4;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            return true;
+        }
+        
+        private bool r_D()
+        {
+            // (, line 24
+            // hop, line 24
+            {
+                int c = cursor - 5;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            return true;
+        }
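The four conditions above (r_A through r_D) differ only in the hop distance: each succeeds only when at least 2, 3, 4, or 5 characters respectively lie between limit_backward and the cursor, i.e. when the stem left behind by the candidate suffix is long enough. A standalone sketch of the shared check; the helper name and parameter style are hypothetical, not in the generated source:

    // Hypothetical helper equivalent to the hop blocks in r_A..r_D
    // (n = 2 for r_A, 3 for r_B, 4 for r_C, 5 for r_D):
    private static bool TryHopBack(ref int cursor, int limitBackward, int limit, int n)
    {
        int c = cursor - n;              // step n characters toward the start
        if (limitBackward > c || c > limit)
            return false;                // stem too short: the condition fails
        cursor = c;                      // commit the new position
        return true;
    }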
+        
+        private bool r_E()
+        {
+            int v_1;
+            int v_2;
+            // (, line 25
+            // test, line 25
+            v_1 = limit - cursor;
+            // hop, line 25
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // not, line 25
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 25
+                    if (!(eq_s_b(1, "e")))
+                    {
+                        goto lab0_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab0_brk: ;
-				
-				cursor = limit - v_2;
-			}
-			return true;
-		}
-		
-		private bool r_F()
-		{
-			int v_1;
-			int v_2;
-			// (, line 26
-			// test, line 26
-			v_1 = limit - cursor;
-			// hop, line 26
-			{
-				int c = cursor - 3;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// not, line 26
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 26
-					if (!(eq_s_b(1, "e")))
-					{
-						goto lab1_brk;
-					}
-					return false;
-				}
-				while (false);
+                
+                cursor = limit - v_2;
+            }
+            return true;
+        }
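r_E layers two more generator idioms on top of the hop: "test" saves the position as an offset from limit (v_1 = limit - cursor) and restores it afterwards (cursor = limit - v_1), and "not" wraps a probe in do { ... } while (false) whose goto lab0_brk stands in for Snowball's labeled break, which C# lacks. Assuming limit_backward == 0, as in ordinary stemming, the whole routine reduces to the sketch below (hypothetical helper, not in the generated source; cursor marks the start of the candidate suffix):

    // What r_E computes, stated directly (sketch; assumes limit_backward == 0):
    // at least two characters precede the suffix, and the stem does not end in 'e'.
    private static bool R_E_Equivalent(string word, int cursor)
    {
        if (cursor < 2)
            return false;                 // hop 2 out of range
        return word[cursor - 1] != 'e';   // the "not 'e'" probe must fail
    }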
+        
+        private bool r_F()
+        {
+            int v_1;
+            int v_2;
+            // (, line 26
+            // test, line 26
+            v_1 = limit - cursor;
+            // hop, line 26
+            {
+                int c = cursor - 3;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // not, line 26
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 26
+                    if (!(eq_s_b(1, "e")))
+                    {
+                        goto lab1_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = limit - v_2;
-			}
-			return true;
-		}
-		
-		private bool r_G()
-		{
-			int v_1;
-			// (, line 27
-			// test, line 27
-			v_1 = limit - cursor;
-			// hop, line 27
-			{
-				int c = cursor - 3;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// literal, line 27
-			if (!(eq_s_b(1, "f")))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_H()
-		{
-			int v_1;
-			int v_2;
-			// (, line 28
-			// test, line 28
-			v_1 = limit - cursor;
-			// hop, line 28
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// or, line 28
+                
+                cursor = limit - v_2;
+            }
+            return true;
+        }
+        
+        private bool r_G()
+        {
+            int v_1;
+            // (, line 27
+            // test, line 27
+            v_1 = limit - cursor;
+            // hop, line 27
+            {
+                int c = cursor - 3;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // literal, line 27
+            if (!(eq_s_b(1, "f")))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_H()
+        {
+            int v_1;
+            int v_2;
+            // (, line 28
+            // test, line 28
+            v_1 = limit - cursor;
+            // hop, line 28
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // or, line 28
 lab1: 
-			do 
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 28
-					if (!(eq_s_b(1, "t")))
-					{
-						goto lab1_brk;
-					}
-					goto lab1_brk;
-				}
-				while (false);
+            do 
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 28
+                    if (!(eq_s_b(1, "t")))
+                    {
+                        goto lab1_brk;
+                    }
+                    goto lab1_brk;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = limit - v_2;
-				// literal, line 28
-				if (!(eq_s_b(2, "ll")))
-				{
-					return false;
-				}
-			}
-			while (false);
-			return true;
-		}
-		
-		private bool r_I()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			// (, line 29
-			// test, line 29
-			v_1 = limit - cursor;
-			// hop, line 29
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// not, line 29
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 29
-					if (!(eq_s_b(1, "o")))
-					{
-						goto lab2_brk;
-					}
-					return false;
-				}
-				while (false);
+                
+                cursor = limit - v_2;
+                // literal, line 28
+                if (!(eq_s_b(2, "ll")))
+                {
+                    return false;
+                }
+            }
+            while (false);
+            return true;
+        }
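r_H adds the "or" idiom: the first alternative is probed and, if it fails, the cursor is rewound (cursor = limit - v_2) before the second is tried. Read directly, the condition is: at least two characters precede the suffix, and the stem ends in "t" or in "ll". A sketch under the same limit_backward == 0 assumption; the helper is hypothetical:

    // r_H restated (sketch; assumes limit_backward == 0):
    private static bool R_H_Equivalent(string word, int cursor)
    {
        if (cursor < 2)
            return false;                                        // hop 2
        return word[cursor - 1] == 't'                           // literal "t", or
            || (word[cursor - 1] == 'l' && word[cursor - 2] == 'l'); // literal "ll"
    }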
+        
+        private bool r_I()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            // (, line 29
+            // test, line 29
+            v_1 = limit - cursor;
+            // hop, line 29
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // not, line 29
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 29
+                    if (!(eq_s_b(1, "o")))
+                    {
+                        goto lab2_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab2_brk: ;
-				
-				cursor = limit - v_2;
-			}
-			// not, line 29
-			{
-				v_3 = limit - cursor;
-				do 
-				{
-					// literal, line 29
-					if (!(eq_s_b(1, "e")))
-					{
-						goto lab2_brk;
-					}
-					return false;
-				}
-				while (false);
+                
+                cursor = limit - v_2;
+            }
+            // not, line 29
+            {
+                v_3 = limit - cursor;
+                do 
+                {
+                    // literal, line 29
+                    if (!(eq_s_b(1, "e")))
+                    {
+                        goto lab2_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab2_brk: ;
-				
-				cursor = limit - v_3;
-			}
-			return true;
-		}
-		
-		private bool r_J()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			// (, line 30
-			// test, line 30
-			v_1 = limit - cursor;
-			// hop, line 30
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// not, line 30
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 30
-					if (!(eq_s_b(1, "a")))
-					{
-						goto lab2_brk;
-					}
-					return false;
-				}
-				while (false);
+                
+                cursor = limit - v_3;
+            }
+            return true;
+        }
+        
+        private bool r_J()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            // (, line 30
+            // test, line 30
+            v_1 = limit - cursor;
+            // hop, line 30
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // not, line 30
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 30
+                    if (!(eq_s_b(1, "a")))
+                    {
+                        goto lab2_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab2_brk: ;
-				
-				cursor = limit - v_2;
-			}
-			// not, line 30
-			{
-				v_3 = limit - cursor;
-				do 
-				{
-					// literal, line 30
-					if (!(eq_s_b(1, "e")))
-					{
-						goto lab2_brk;
-					}
-					return false;
-				}
-				while (false);
+                
+                cursor = limit - v_2;
+            }
+            // not, line 30
+            {
+                v_3 = limit - cursor;
+                do 
+                {
+                    // literal, line 30
+                    if (!(eq_s_b(1, "e")))
+                    {
+                        goto lab2_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab2_brk: ;
-				
-				cursor = limit - v_3;
-			}
-			return true;
-		}
-		
-		private bool r_K()
-		{
-			int v_1;
-			int v_2;
-			// (, line 31
-			// test, line 31
-			v_1 = limit - cursor;
-			// hop, line 31
-			{
-				int c = cursor - 3;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// or, line 31
-			do 
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 31
-					if (!(eq_s_b(1, "l")))
-					{
-						goto lab1_brk;
-					}
-					goto lab0_brk;
-				}
-				while (false);
+                
+                cursor = limit - v_3;
+            }
+            return true;
+        }
+        
+        private bool r_K()
+        {
+            int v_1;
+            int v_2;
+            // (, line 31
+            // test, line 31
+            v_1 = limit - cursor;
+            // hop, line 31
+            {
+                int c = cursor - 3;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // or, line 31
+            do 
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 31
+                    if (!(eq_s_b(1, "l")))
+                    {
+                        goto lab1_brk;
+                    }
+                    goto lab0_brk;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = limit - v_2;
-				do 
-				{
-					// literal, line 31
-					if (!(eq_s_b(1, "i")))
-					{
-						goto lab2_brk;
-					}
-					goto lab0_brk;
-				}
-				while (false);
+                
+                cursor = limit - v_2;
+                do 
+                {
+                    // literal, line 31
+                    if (!(eq_s_b(1, "i")))
+                    {
+                        goto lab2_brk;
+                    }
+                    goto lab0_brk;
+                }
+                while (false);
 
 lab2_brk: ;
-				
-				cursor = limit - v_2;
-				// (, line 31
-				// literal, line 31
-				if (!(eq_s_b(1, "e")))
-				{
-					return false;
-				}
-				// next, line 31
-				if (cursor <= limit_backward)
-				{
-					return false;
-				}
-				cursor--;
-				// literal, line 31
-				if (!(eq_s_b(1, "u")))
-				{
-					return false;
-				}
-			}
-			while (false);
+                
+                cursor = limit - v_2;
+                // (, line 31
+                // literal, line 31
+                if (!(eq_s_b(1, "e")))
+                {
+                    return false;
+                }
+                // next, line 31
+                if (cursor <= limit_backward)
+                {
+                    return false;
+                }
+                cursor--;
+                // literal, line 31
+                if (!(eq_s_b(1, "u")))
+                {
+                    return false;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
 
-			return true;
-		}
-		
-		private bool r_L()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			// (, line 32
-			// test, line 32
-			v_1 = limit - cursor;
-			// hop, line 32
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// not, line 32
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 32
-					if (!(eq_s_b(1, "u")))
-					{
-						goto lab0_brk;
-					}
-					return false;
-				}
-				while (false);
+            return true;
+        }
+        
+        private bool r_L()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            // (, line 32
+            // test, line 32
+            v_1 = limit - cursor;
+            // hop, line 32
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // not, line 32
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 32
+                    if (!(eq_s_b(1, "u")))
+                    {
+                        goto lab0_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab0_brk: ;
-				
-				cursor = limit - v_2;
-			}
-			// not, line 32
-			{
-				v_3 = limit - cursor;
-				do 
-				{
-					// literal, line 32
-					if (!(eq_s_b(1, "x")))
-					{
-						goto lab1_brk;
-					}
-					return false;
-				}
-				while (false);
+                
+                cursor = limit - v_2;
+            }
+            // not, line 32
+            {
+                v_3 = limit - cursor;
+                do 
+                {
+                    // literal, line 32
+                    if (!(eq_s_b(1, "x")))
+                    {
+                        goto lab1_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = limit - v_3;
-			}
-			// not, line 32
-			{
-				v_4 = limit - cursor;
-				do 
-				{
-					// (, line 32
-					// literal, line 32
-					if (!(eq_s_b(1, "s")))
-					{
-						goto lab2_brk;
-					}
-					// not, line 32
-					{
-						v_5 = limit - cursor;
-						do 
-						{
-							// literal, line 32
-							if (!(eq_s_b(1, "o")))
-							{
-								goto lab3_brk;
-							}
-							goto lab2_brk;
-						}
-						while (false);
+                
+                cursor = limit - v_3;
+            }
+            // not, line 32
+            {
+                v_4 = limit - cursor;
+                do 
+                {
+                    // (, line 32
+                    // literal, line 32
+                    if (!(eq_s_b(1, "s")))
+                    {
+                        goto lab2_brk;
+                    }
+                    // not, line 32
+                    {
+                        v_5 = limit - cursor;
+                        do 
+                        {
+                            // literal, line 32
+                            if (!(eq_s_b(1, "o")))
+                            {
+                                goto lab3_brk;
+                            }
+                            goto lab2_brk;
+                        }
+                        while (false);
 
 lab3_brk: ;
-						
-						cursor = limit - v_5;
-					}
-					return false;
-				}
-				while (false);
+                        
+                        cursor = limit - v_5;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab2_brk: ;
-				
-				cursor = limit - v_4;
-			}
-			return true;
-		}
-		
-		private bool r_M()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			// (, line 33
-			// test, line 33
-			v_1 = limit - cursor;
-			// hop, line 33
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// not, line 33
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 33
-					if (!(eq_s_b(1, "a")))
-					{
-						goto lab4_brk;
-					}
-					return false;
-				}
-				while (false);
+                
+                cursor = limit - v_4;
+            }
+            return true;
+        }
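r_L nests one "not" inside another: after the hop, the stem may not end in 'u' or 'x', and may not end in 's' unless that 's' is preceded by 'o' (so "...os" passes). Restated as a sketch, again assuming limit_backward == 0 and a hypothetical helper:

    // r_L restated (sketch; assumes limit_backward == 0):
    private static bool R_L_Equivalent(string word, int cursor)
    {
        if (cursor < 2)
            return false;                              // hop 2
        char last = word[cursor - 1];
        if (last == 'u' || last == 'x')
            return false;                              // "not 'u'", "not 'x'"
        if (last == 's' && word[cursor - 2] != 'o')
            return false;                              // 's' allowed only after 'o'
        return true;
    }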
+        
+        private bool r_M()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            // (, line 33
+            // test, line 33
+            v_1 = limit - cursor;
+            // hop, line 33
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // not, line 33
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 33
+                    if (!(eq_s_b(1, "a")))
+                    {
+                        goto lab4_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab4_brk: ;
-				
-				cursor = limit - v_2;
-			}
-			// not, line 33
-			{
-				v_3 = limit - cursor;
-				do 
-				{
-					// literal, line 33
-					if (!(eq_s_b(1, "c")))
-					{
-						goto lab4_brk;
-					}
-					return false;
-				}
-				while (false);
+                
+                cursor = limit - v_2;
+            }
+            // not, line 33
+            {
+                v_3 = limit - cursor;
+                do 
+                {
+                    // literal, line 33
+                    if (!(eq_s_b(1, "c")))
+                    {
+                        goto lab4_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab4_brk: ;
-				
-				cursor = limit - v_3;
-			}
-			// not, line 33
-			{
-				v_4 = limit - cursor;
-				do 
-				{
-					// literal, line 33
-					if (!(eq_s_b(1, "e")))
-					{
-						goto lab4_brk;
-					}
-					return false;
-				}
-				while (false);
+                
+                cursor = limit - v_3;
+            }
+            // not, line 33
+            {
+                v_4 = limit - cursor;
+                do 
+                {
+                    // literal, line 33
+                    if (!(eq_s_b(1, "e")))
+                    {
+                        goto lab4_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab4_brk: ;
-				
-				cursor = limit - v_4;
-			}
-			// not, line 33
-			{
-				v_5 = limit - cursor;
-				do 
-				{
-					// literal, line 33
-					if (!(eq_s_b(1, "m")))
-					{
-						goto lab4_brk;
-					}
-					return false;
-				}
-				while (false);
+                
+                cursor = limit - v_4;
+            }
+            // not, line 33
+            {
+                v_5 = limit - cursor;
+                do 
+                {
+                    // literal, line 33
+                    if (!(eq_s_b(1, "m")))
+                    {
+                        goto lab4_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab4_brk: ;
-				
-				cursor = limit - v_5;
-			}
-			return true;
-		}
-		
-		private bool r_N()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			// (, line 34
-			// test, line 34
-			v_1 = limit - cursor;
-			// hop, line 34
-			{
-				int c = cursor - 3;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// (, line 34
-			// hop, line 34
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			// or, line 34
-			do 
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// not, line 34
-					{
-						v_3 = limit - cursor;
-						do 
-						{
-							// literal, line 34
-							if (!(eq_s_b(1, "s")))
-							{
-								goto lab2_brk;
-							}
-							goto lab1_brk;
-						}
-						while (false);
+                
+                cursor = limit - v_5;
+            }
+            return true;
+        }
+        
+        private bool r_N()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            // (, line 34
+            // test, line 34
+            v_1 = limit - cursor;
+            // hop, line 34
+            {
+                int c = cursor - 3;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // (, line 34
+            // hop, line 34
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            // or, line 34
+            do 
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // not, line 34
+                    {
+                        v_3 = limit - cursor;
+                        do 
+                        {
+                            // literal, line 34
+                            if (!(eq_s_b(1, "s")))
+                            {
+                                goto lab2_brk;
+                            }
+                            goto lab1_brk;
+                        }
+                        while (false);
 
 lab2_brk: ;
-						
-						cursor = limit - v_3;
-					}
-					goto lab0_brk;
-				}
-				while (false);
+                        
+                        cursor = limit - v_3;
+                    }
+                    goto lab0_brk;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = limit - v_2;
-				// hop, line 34
-				{
-					int c = cursor - 2;
-					if (limit_backward > c || c > limit)
-					{
-						return false;
-					}
-					cursor = c;
-				}
-			}
-			while (false);
+                
+                cursor = limit - v_2;
+                // hop, line 34
+                {
+                    int c = cursor - 2;
+                    if (limit_backward > c || c > limit)
+                    {
+                        return false;
+                    }
+                    cursor = c;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
 
-			return true;
-		}
-		
-		private bool r_O()
-		{
-			int v_1;
-			int v_2;
-			// (, line 35
-			// test, line 35
-			v_1 = limit - cursor;
-			// hop, line 35
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// or, line 35
+            return true;
+        }
+        
+        private bool r_O()
+        {
+            int v_1;
+            int v_2;
+            // (, line 35
+            // test, line 35
+            v_1 = limit - cursor;
+            // hop, line 35
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // or, line 35
 lab4: 
-			do 
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 35
-					if (!(eq_s_b(1, "l")))
-					{
-						goto lab4_brk;
-					}
-					goto lab4_brk;
-				}
-				while (false);
+            do 
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 35
+                    if (!(eq_s_b(1, "l")))
+                    {
+                        goto lab4_brk;
+                    }
+                    goto lab4_brk;
+                }
+                while (false);
 
 lab4_brk: ;
-				
-				cursor = limit - v_2;
-				// literal, line 35
-				if (!(eq_s_b(1, "i")))
-				{
-					return false;
-				}
-			}
-			while (false);
-			return true;
-		}
-		
-		private bool r_P()
-		{
-			int v_1;
-			int v_2;
-			// (, line 36
-			// test, line 36
-			v_1 = limit - cursor;
-			// hop, line 36
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// not, line 36
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 36
-					if (!(eq_s_b(1, "c")))
-					{
-						goto lab4_brk;
-					}
-					return false;
-				}
-				while (false);
+                
+                cursor = limit - v_2;
+                // literal, line 35
+                if (!(eq_s_b(1, "i")))
+                {
+                    return false;
+                }
+            }
+            while (false);
+            return true;
+        }
+        
+        private bool r_P()
+        {
+            int v_1;
+            int v_2;
+            // (, line 36
+            // test, line 36
+            v_1 = limit - cursor;
+            // hop, line 36
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // not, line 36
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 36
+                    if (!(eq_s_b(1, "c")))
+                    {
+                        goto lab4_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab4_brk: ;
-				
-				cursor = limit - v_2;
-			}
-			return true;
-		}
-		
-		private bool r_Q()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			// (, line 37
-			// test, line 37
-			v_1 = limit - cursor;
-			// hop, line 37
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// test, line 37
-			v_2 = limit - cursor;
-			// hop, line 37
-			{
-				int c = cursor - 3;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_2;
-			// not, line 37
-			{
-				v_3 = limit - cursor;
-				do 
-				{
-					// literal, line 37
-					if (!(eq_s_b(1, "l")))
-					{
-						goto lab4_brk;
-					}
-					return false;
-				}
-				while (false);
+                
+                cursor = limit - v_2;
+            }
+            return true;
+        }
+        
+        private bool r_Q()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            // (, line 37
+            // test, line 37
+            v_1 = limit - cursor;
+            // hop, line 37
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // test, line 37
+            v_2 = limit - cursor;
+            // hop, line 37
+            {
+                int c = cursor - 3;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_2;
+            // not, line 37
+            {
+                v_3 = limit - cursor;
+                do 
+                {
+                    // literal, line 37
+                    if (!(eq_s_b(1, "l")))
+                    {
+                        goto lab4_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab4_brk: ;
-				
-				cursor = limit - v_3;
-			}
-			// not, line 37
-			{
-				v_4 = limit - cursor;
-				do 
-				{
-					// literal, line 37
-					if (!(eq_s_b(1, "n")))
-					{
-						goto lab4_brk;
-					}
-					return false;
-				}
-				while (false);
+                
+                cursor = limit - v_3;
+            }
+            // not, line 37
+            {
+                v_4 = limit - cursor;
+                do 
+                {
+                    // literal, line 37
+                    if (!(eq_s_b(1, "n")))
+                    {
+                        goto lab4_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab4_brk: ;
-				
-				cursor = limit - v_4;
-			}
-			return true;
-		}
-		
-		private bool r_R()
-		{
-			int v_1;
-			int v_2;
-			// (, line 38
-			// test, line 38
-			v_1 = limit - cursor;
-			// hop, line 38
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// or, line 38
+                
+                cursor = limit - v_4;
+            }
+            return true;
+        }
+        
+        private bool r_R()
+        {
+            int v_1;
+            int v_2;
+            // (, line 38
+            // test, line 38
+            v_1 = limit - cursor;
+            // hop, line 38
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // or, line 38
 lab4: 
-			do 
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 38
-					if (!(eq_s_b(1, "n")))
-					{
-						goto lab4_brk;
-					}
-					goto lab4_brk;
-				}
-				while (false);
+            do 
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 38
+                    if (!(eq_s_b(1, "n")))
+                    {
+                        goto lab4_brk;
+                    }
+                    goto lab4_brk;
+                }
+                while (false);
 
 lab4_brk: ;
-				
-				cursor = limit - v_2;
-				// literal, line 38
-				if (!(eq_s_b(1, "r")))
-				{
-					return false;
-				}
-			}
-			while (false);
-			return true;
-		}
-		
-		private bool r_S()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			// (, line 39
-			// test, line 39
-			v_1 = limit - cursor;
-			// hop, line 39
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// or, line 39
-			do 
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 39
-					if (!(eq_s_b(2, "dr")))
-					{
-						goto lab1_brk;
-					}
-					goto lab0_brk;
-				}
-				while (false);
+                
+                cursor = limit - v_2;
+                // literal, line 38
+                if (!(eq_s_b(1, "r")))
+                {
+                    return false;
+                }
+            }
+            while (false);
+            return true;
+        }
+        
+        private bool r_S()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            // (, line 39
+            // test, line 39
+            v_1 = limit - cursor;
+            // hop, line 39
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // or, line 39
+            do 
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 39
+                    if (!(eq_s_b(2, "dr")))
+                    {
+                        goto lab1_brk;
+                    }
+                    goto lab0_brk;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = limit - v_2;
-				// (, line 39
-				// literal, line 39
-				if (!(eq_s_b(1, "t")))
-				{
-					return false;
-				}
-				// not, line 39
-				{
-					v_3 = limit - cursor;
-					do 
-					{
-						// literal, line 39
-						if (!(eq_s_b(1, "t")))
-						{
-							goto lab2_brk;
-						}
-						return false;
-					}
-					while (false);
+                
+                cursor = limit - v_2;
+                // (, line 39
+                // literal, line 39
+                if (!(eq_s_b(1, "t")))
+                {
+                    return false;
+                }
+                // not, line 39
+                {
+                    v_3 = limit - cursor;
+                    do 
+                    {
+                        // literal, line 39
+                        if (!(eq_s_b(1, "t")))
+                        {
+                            goto lab2_brk;
+                        }
+                        return false;
+                    }
+                    while (false);
 
 lab2_brk: ;
-					
-					cursor = limit - v_3;
-				}
-			}
-			while (false);
+                    
+                    cursor = limit - v_3;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
 
-			return true;
-		}
-		
-		private bool r_T()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			// (, line 40
-			// test, line 40
-			v_1 = limit - cursor;
-			// hop, line 40
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// or, line 40
-			do 
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 40
-					if (!(eq_s_b(1, "s")))
-					{
-						goto lab1_brk;
-					}
-					goto lab0_brk;
-				}
-				while (false);
+            return true;
+        }
+        
+        private bool r_T()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            // (, line 40
+            // test, line 40
+            v_1 = limit - cursor;
+            // hop, line 40
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // or, line 40
+            do 
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 40
+                    if (!(eq_s_b(1, "s")))
+                    {
+                        goto lab1_brk;
+                    }
+                    goto lab0_brk;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = limit - v_2;
-				// (, line 40
-				// literal, line 40
-				if (!(eq_s_b(1, "t")))
-				{
-					return false;
-				}
-				// not, line 40
-				{
-					v_3 = limit - cursor;
-					do 
-					{
-						// literal, line 40
-						if (!(eq_s_b(1, "o")))
-						{
-							goto lab2_brk;
-						}
-						return false;
-					}
-					while (false);
+                
+                cursor = limit - v_2;
+                // (, line 40
+                // literal, line 40
+                if (!(eq_s_b(1, "t")))
+                {
+                    return false;
+                }
+                // not, line 40
+                {
+                    v_3 = limit - cursor;
+                    do 
+                    {
+                        // literal, line 40
+                        if (!(eq_s_b(1, "o")))
+                        {
+                            goto lab2_brk;
+                        }
+                        return false;
+                    }
+                    while (false);
 
 lab2_brk: ;
-					
-					cursor = limit - v_3;
-				}
-			}
-			while (false);
+                    
+                    cursor = limit - v_3;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
 
-			return true;
-		}
-		
-		private bool r_U()
-		{
-			int v_1;
-			int v_2;
-			// (, line 41
-			// test, line 41
-			v_1 = limit - cursor;
-			// hop, line 41
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// or, line 41
-			do 
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 41
-					if (!(eq_s_b(1, "l")))
-					{
-						goto lab1_brk;
-					}
-					goto lab0_brk;
-				}
-				while (false);
+            return true;
+        }
+        
+        private bool r_U()
+        {
+            int v_1;
+            int v_2;
+            // (, line 41
+            // test, line 41
+            v_1 = limit - cursor;
+            // hop, line 41
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // or, line 41
+            do 
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 41
+                    if (!(eq_s_b(1, "l")))
+                    {
+                        goto lab1_brk;
+                    }
+                    goto lab0_brk;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = limit - v_2;
-				do 
-				{
-					// literal, line 41
-					if (!(eq_s_b(1, "m")))
-					{
-						goto lab2_brk;
-					}
-					goto lab0_brk;
-				}
-				while (false);
+                
+                cursor = limit - v_2;
+                do 
+                {
+                    // literal, line 41
+                    if (!(eq_s_b(1, "m")))
+                    {
+                        goto lab2_brk;
+                    }
+                    goto lab0_brk;
+                }
+                while (false);
 
 lab2_brk: ;
-				
-				cursor = limit - v_2;
-				do 
-				{
-					// literal, line 41
-					if (!(eq_s_b(1, "n")))
-					{
-						goto lab3_brk;
-					}
-					goto lab0_brk;
-				}
-				while (false);
+                
+                cursor = limit - v_2;
+                do 
+                {
+                    // literal, line 41
+                    if (!(eq_s_b(1, "n")))
+                    {
+                        goto lab3_brk;
+                    }
+                    goto lab0_brk;
+                }
+                while (false);
 
 lab3_brk: ;
-				
-				cursor = limit - v_2;
-				// literal, line 41
-				if (!(eq_s_b(1, "r")))
-				{
-					return false;
-				}
-			}
-			while (false);
+                
+                cursor = limit - v_2;
+                // literal, line 41
+                if (!(eq_s_b(1, "r")))
+                {
+                    return false;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
 
-			return true;
-		}
-		
-		private bool r_V()
-		{
-			int v_1;
-			// (, line 42
-			// test, line 42
-			v_1 = limit - cursor;
-			// hop, line 42
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// literal, line 42
-			if (!(eq_s_b(1, "c")))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_W()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			// (, line 43
-			// test, line 43
-			v_1 = limit - cursor;
-			// hop, line 43
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// not, line 43
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 43
-					if (!(eq_s_b(1, "s")))
-					{
-						goto lab4_brk;
-					}
-					return false;
-				}
-				while (false);
+            return true;
+        }
+        
+        private bool r_V()
+        {
+            int v_1;
+            // (, line 42
+            // test, line 42
+            v_1 = limit - cursor;
+            // hop, line 42
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // literal, line 42
+            if (!(eq_s_b(1, "c")))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_W()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            // (, line 43
+            // test, line 43
+            v_1 = limit - cursor;
+            // hop, line 43
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // not, line 43
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 43
+                    if (!(eq_s_b(1, "s")))
+                    {
+                        goto lab4_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab4_brk: ;
-				
-				cursor = limit - v_2;
-			}
-			// not, line 43
-			{
-				v_3 = limit - cursor;
-				do 
-				{
-					// literal, line 43
-					if (!(eq_s_b(1, "u")))
-					{
-						goto lab4_brk;
-					}
-					return false;
-				}
-				while (false);
+                
+                cursor = limit - v_2;
+            }
+            // not, line 43
+            {
+                v_3 = limit - cursor;
+                do 
+                {
+                    // literal, line 43
+                    if (!(eq_s_b(1, "u")))
+                    {
+                        goto lab4_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab4_brk: ;
-				
-				cursor = limit - v_3;
-			}
-			return true;
-		}
-		
-		private bool r_X()
-		{
-			int v_1;
-			int v_2;
-			// (, line 44
-			// test, line 44
-			v_1 = limit - cursor;
-			// hop, line 44
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// or, line 44
-			do 
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 44
-					if (!(eq_s_b(1, "l")))
-					{
-						goto lab1_brk;
-					}
-					goto lab0_brk;
-				}
-				while (false);
+                
+                cursor = limit - v_3;
+            }
+            return true;
+        }
+        
+        private bool r_X()
+        {
+            int v_1;
+            int v_2;
+            // (, line 44
+            // test, line 44
+            v_1 = limit - cursor;
+            // hop, line 44
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // or, line 44
+            do 
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 44
+                    if (!(eq_s_b(1, "l")))
+                    {
+                        goto lab1_brk;
+                    }
+                    goto lab0_brk;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = limit - v_2;
-				do 
-				{
-					// literal, line 44
-					if (!(eq_s_b(1, "i")))
-					{
-						goto lab2_brk;
-					}
-					goto lab0_brk;
-				}
-				while (false);
+                
+                cursor = limit - v_2;
+                do 
+                {
+                    // literal, line 44
+                    if (!(eq_s_b(1, "i")))
+                    {
+                        goto lab2_brk;
+                    }
+                    goto lab0_brk;
+                }
+                while (false);
 
 lab2_brk: ;
-				
-				cursor = limit - v_2;
-				// (, line 44
-				// literal, line 44
-				if (!(eq_s_b(1, "e")))
-				{
-					return false;
-				}
-				// next, line 44
-				if (cursor <= limit_backward)
-				{
-					return false;
-				}
-				cursor--;
-				// literal, line 44
-				if (!(eq_s_b(1, "u")))
-				{
-					return false;
-				}
-			}
-			while (false);
+                
+                cursor = limit - v_2;
+                // (, line 44
+                // literal, line 44
+                if (!(eq_s_b(1, "e")))
+                {
+                    return false;
+                }
+                // next, line 44
+                if (cursor <= limit_backward)
+                {
+                    return false;
+                }
+                cursor--;
+                // literal, line 44
+                if (!(eq_s_b(1, "u")))
+                {
+                    return false;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
 
-			return true;
-		}
-		
-		private bool r_Y()
-		{
-			int v_1;
-			// (, line 45
-			// test, line 45
-			v_1 = limit - cursor;
-			// hop, line 45
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// literal, line 45
-			if (!(eq_s_b(2, "in")))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_Z()
-		{
-			int v_1;
-			int v_2;
-			// (, line 46
-			// test, line 46
-			v_1 = limit - cursor;
-			// hop, line 46
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// not, line 46
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 46
-					if (!(eq_s_b(1, "f")))
-					{
-						goto lab4_brk;
-					}
-					return false;
-				}
-				while (false);
+            return true;
+        }
+        
+        private bool r_Y()
+        {
+            int v_1;
+            // (, line 45
+            // test, line 45
+            v_1 = limit - cursor;
+            // hop, line 45
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // literal, line 45
+            if (!(eq_s_b(2, "in")))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_Z()
+        {
+            int v_1;
+            int v_2;
+            // (, line 46
+            // test, line 46
+            v_1 = limit - cursor;
+            // hop, line 46
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // not, line 46
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 46
+                    if (!(eq_s_b(1, "f")))
+                    {
+                        goto lab4_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab4_brk: ;
-				
-				cursor = limit - v_2;
-			}
-			return true;
-		}
-		
-		private bool r_AA()
-		{
-			int v_1;
-			// (, line 47
-			// test, line 47
-			v_1 = limit - cursor;
-			// hop, line 47
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// among, line 47
-			if (find_among_b(a_0, 9) == 0)
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_BB()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			// (, line 49
-			// test, line 49
-			v_1 = limit - cursor;
-			// hop, line 49
-			{
-				int c = cursor - 3;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// not, line 49
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 49
-					if (!(eq_s_b(3, "met")))
-					{
-						goto lab4_brk;
-					}
-					return false;
-				}
-				while (false);
+                
+                cursor = limit - v_2;
+            }
+            return true;
+        }
+        
+        private bool r_AA()
+        {
+            int v_1;
+            // (, line 47
+            // test, line 47
+            v_1 = limit - cursor;
+            // hop, line 47
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // among, line 47
+            if (find_among_b(a_0, 9) == 0)
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_BB()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            // (, line 49
+            // test, line 49
+            v_1 = limit - cursor;
+            // hop, line 49
+            {
+                int c = cursor - 3;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // not, line 49
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 49
+                    if (!(eq_s_b(3, "met")))
+                    {
+                        goto lab4_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab4_brk: ;
-				
-				cursor = limit - v_2;
-			}
-			// not, line 49
-			{
-				v_3 = limit - cursor;
-				do 
-				{
-					// literal, line 49
-					if (!(eq_s_b(4, "ryst")))
-					{
-						goto lab4_brk;
-					}
-					return false;
-				}
-				while (false);
+                
+                cursor = limit - v_2;
+            }
+            // not, line 49
+            {
+                v_3 = limit - cursor;
+                do 
+                {
+                    // literal, line 49
+                    if (!(eq_s_b(4, "ryst")))
+                    {
+                        goto lab4_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab4_brk: ;
-				
-				cursor = limit - v_3;
-			}
-			return true;
-		}
-		
-		private bool r_CC()
-		{
-			int v_1;
-			// (, line 50
-			// test, line 50
-			v_1 = limit - cursor;
-			// hop, line 50
-			{
-				int c = cursor - 2;
-				if (limit_backward > c || c > limit)
-				{
-					return false;
-				}
-				cursor = c;
-			}
-			cursor = limit - v_1;
-			// literal, line 50
-			if (!(eq_s_b(1, "l")))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_endings()
-		{
-			int among_var;
-			// (, line 55
-			// [, line 56
-			ket = cursor;
-			// substring, line 56
-			among_var = find_among_b(a_1, 294);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 56
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 145
-					// delete, line 145
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_undouble()
-		{
-			int v_1;
-			// (, line 151
-			// test, line 152
-			v_1 = limit - cursor;
-			// substring, line 152
-			if (find_among_b(a_2, 10) == 0)
-			{
-				return false;
-			}
-			cursor = limit - v_1;
-			// [, line 154
-			ket = cursor;
-			// next, line 154
-			if (cursor <= limit_backward)
-			{
-				return false;
-			}
-			cursor--;
-			// ], line 154
-			bra = cursor;
-			// delete, line 154
-			slice_del();
-			return true;
-		}
-		
-		private bool r_respell()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			int v_6;
-			int v_7;
-			int v_8;
-			// (, line 159
-			// [, line 160
-			ket = cursor;
-			// substring, line 160
-			among_var = find_among_b(a_3, 34);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 160
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 161
-					// <-, line 161
-					slice_from("ief");
-					break;
-				
-				case 2: 
-					// (, line 162
-					// <-, line 162
-					slice_from("uc");
-					break;
-				
-				case 3: 
-					// (, line 163
-					// <-, line 163
-					slice_from("um");
-					break;
-				
-				case 4: 
-					// (, line 164
-					// <-, line 164
-					slice_from("rb");
-					break;
-				
-				case 5: 
-					// (, line 165
-					// <-, line 165
-					slice_from("ur");
-					break;
-				
-				case 6: 
-					// (, line 166
-					// <-, line 166
-					slice_from("ister");
-					break;
-				
-				case 7: 
-					// (, line 167
-					// <-, line 167
-					slice_from("meter");
-					break;
-				
-				case 8: 
-					// (, line 168
-					// <-, line 168
-					slice_from("olut");
-					break;
-				
-				case 9: 
-					// (, line 169
-					// not, line 169
-					{
-						v_1 = limit - cursor;
-						do 
-						{
-							// literal, line 169
-							if (!(eq_s_b(1, "a")))
-							{
-								goto lab4_brk;
-							}
-							return false;
-						}
-						while (false);
+                
+                cursor = limit - v_3;
+            }
+            return true;
+        }
+        
+        private bool r_CC()
+        {
+            int v_1;
+            // (, line 50
+            // test, line 50
+            v_1 = limit - cursor;
+            // hop, line 50
+            {
+                int c = cursor - 2;
+                if (limit_backward > c || c > limit)
+                {
+                    return false;
+                }
+                cursor = c;
+            }
+            cursor = limit - v_1;
+            // literal, line 50
+            if (!(eq_s_b(1, "l")))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_endings()
+        {
+            int among_var;
+            // (, line 55
+            // [, line 56
+            ket = cursor;
+            // substring, line 56
+            among_var = find_among_b(a_1, 294);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 56
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 


<TRUNCATED>
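
A note on the control flow that recurs throughout these generated stemmers: Snowball's or/and/not combinators compile down to single-pass do { ... } while (false) blocks, where goto labN_brk acts as a structured failure exit and a saved cursor offset is restored before each alternative is tried. Below is a minimal sketch of the "or" shape, with hypothetical MatchA/MatchB standing in for the generated eq_s_b(...) tests (illustrative only, not code from this patch):

using System;

class OrPatternSketch
{
    // Hypothetical stand-ins for the generated eq_s_b(...) tests.
    private int cursor = 0;
    private bool MatchA() { return false; }            // pretend alternative A fails
    private bool MatchB() { cursor++; return true; }   // alternative B succeeds

    public bool TryOr()
    {
        int v = cursor;                    // snapshot, like v_2 = limit - cursor
        do
        {
            if (!MatchA()) goto lab1_brk;  // A failed: abandon this alternative
            goto lab0_brk;                 // A succeeded: skip the remaining ones
        }
        while (false);

lab1_brk:
        cursor = v;                        // restore the position and try B
        if (!MatchB()) return false;

lab0_brk:
        return true;
    }

    static void Main()
    {
        Console.WriteLine(new OrPatternSketch().TryOr()); // True (B matched)
    }
}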

[34/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/SwedishStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/SwedishStemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/SwedishStemmer.cs
index c47fd36..03f53fc 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/SwedishStemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/SwedishStemmer.cs
@@ -23,336 +23,336 @@ namespace SF.Snowball.Ext
 {
 #pragma warning disable 162,164
 
-	/// <summary> Generated class implementing code defined by a snowball script.</summary>
-	public class SwedishStemmer : SnowballProgram
-	{
-		public SwedishStemmer()
-		{
-			InitBlock();
-		}
-		private void  InitBlock()
-		{
-			a_0 = new Among[]{new Among("a", - 1, 1, "", this), new Among("arna", 0, 1, "", this), new Among("erna", 0, 1, "", this), new Among("heterna", 2, 1, "", this), new Among("orna", 0, 1, "", this), new Among("ad", - 1, 1, "", this), new Among("e", - 1, 1, "", this), new Among("ade", 6, 1, "", this), new Among("ande", 6, 1, "", this), new Among("arne", 6, 1, "", this), new Among("are", 6, 1, "", this), new Among("aste", 6, 1, "", this), new Among("en", - 1, 1, "", this), new Among("anden", 12, 1, "", this), new Among("aren", 12, 1, "", this), new Among("heten", 12, 1, "", this), new Among("ern", - 1, 1, "", this), new Among("ar", - 1, 1, "", this), new Among("er", - 1, 1, "", this), new Among("heter", 18, 1, "", this), new Among("or", - 1, 1, "", this), new Among("s", - 1, 2, "", this), new Among("as", 21, 1, "", this), new Among("arnas", 22, 1, "", this), new Among("ernas", 22, 1, "", this), new Among("ornas", 22, 1, "", this), new Among("es", 21, 1, "", this), new Among("ades", 26, 1, "", this), new Among("andes", 26, 1, "", this), new Among("ens", 21, 1, "", this), new Among("arens", 29, 1, "", this), new Among("hetens", 29, 1, "", this), new Among("erns", 21, 1, "", this), new Among("at", - 1, 1, "", this), new Among("andet", - 1, 1, "", this), new Among("het", - 1, 1, "", this), new Among("ast", - 1, 1, "", this)};
-			a_1 = new Among[]{new Among("dd", - 1, - 1, "", this), new Among("gd", - 1, - 1, "", this), new Among("nn", - 1, - 1, "", this), new Among("dt", - 1, - 1, "", this), new Among("gt", - 1, - 1, "", this), new Among("kt", - 1, - 1, "", this), new Among("tt", - 1, - 1, "", this)};
-			a_2 = new Among[]{new Among("ig", - 1, 1, "", this), new Among("lig", 0, 1, "", this), new Among("els", - 1, 1, "", this), new Among("fullt", - 1, 3, "", this), new Among("l\u00F6st", - 1, 2, "", this)};
-		}
-		
-		private Among[] a_0;
-		private Among[] a_1;
-		private Among[] a_2;
-		private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (24), (char) (0), (char) (32)};
-		private static readonly char[] g_s_ending = new char[]{(char) (119), (char) (127), (char) (149)};
-		
-		private int I_p1;
-		
-		protected internal virtual void  copy_from(SwedishStemmer other)
-		{
-			I_p1 = other.I_p1;
-			base.copy_from(other);
-		}
-		
-		private bool r_mark_regions()
-		{
-			int v_1;
-			// (, line 26
-			I_p1 = limit;
-			// goto, line 30
-			while (true)
-			{
-				v_1 = cursor;
-				do 
-				{
-					if (!(in_grouping(g_v, 97, 246)))
-					{
-						goto lab1_brk;
-					}
-					cursor = v_1;
-					goto golab0_brk;
-				}
-				while (false);
+    /// <summary> Generated class implementing code defined by a snowball script.</summary>
+    public class SwedishStemmer : SnowballProgram
+    {
+        public SwedishStemmer()
+        {
+            InitBlock();
+        }
+        private void  InitBlock()
+        {
+            a_0 = new Among[]{new Among("a", - 1, 1, "", this), new Among("arna", 0, 1, "", this), new Among("erna", 0, 1, "", this), new Among("heterna", 2, 1, "", this), new Among("orna", 0, 1, "", this), new Among("ad", - 1, 1, "", this), new Among("e", - 1, 1, "", this), new Among("ade", 6, 1, "", this), new Among("ande", 6, 1, "", this), new Among("arne", 6, 1, "", this), new Among("are", 6, 1, "", this), new Among("aste", 6, 1, "", this), new Among("en", - 1, 1, "", this), new Among("anden", 12, 1, "", this), new Among("aren", 12, 1, "", this), new Among("heten", 12, 1, "", this), new Among("ern", - 1, 1, "", this), new Among("ar", - 1, 1, "", this), new Among("er", - 1, 1, "", this), new Among("heter", 18, 1, "", this), new Among("or", - 1, 1, "", this), new Among("s", - 1, 2, "", this), new Among("as", 21, 1, "", this), new Among("arnas", 22, 1, "", this), new Among("ernas", 22, 1, "", this), new Among("ornas", 22, 1, "", this), new Among("es", 21, 1, "", this), new Among("ades", 26, 1, "", this), new Among("andes", 26, 1, "", this), new Among("ens", 21, 1, "", this), new Among("arens", 29, 1, "", this), new Among("hetens", 29, 1, "", this), new Among("erns", 21, 1, "", this), new Among("at", - 1, 1, "", this), new Among("andet", - 1, 1, "", this), new Among("het", - 1, 1, "", this), new Among("ast", - 1, 1, "", this)};
+            a_1 = new Among[]{new Among("dd", - 1, - 1, "", this), new Among("gd", - 1, - 1, "", this), new Among("nn", - 1, - 1, "", this), new Among("dt", - 1, - 1, "", this), new Among("gt", - 1, - 1, "", this), new Among("kt", - 1, - 1, "", this), new Among("tt", - 1, - 1, "", this)};
+            a_2 = new Among[]{new Among("ig", - 1, 1, "", this), new Among("lig", 0, 1, "", this), new Among("els", - 1, 1, "", this), new Among("fullt", - 1, 3, "", this), new Among("l\u00F6st", - 1, 2, "", this)};
+        }
+        
+        private Among[] a_0;
+        private Among[] a_1;
+        private Among[] a_2;
+        private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (24), (char) (0), (char) (32)};
+        private static readonly char[] g_s_ending = new char[]{(char) (119), (char) (127), (char) (149)};
+        
+        private int I_p1;
+        
+        protected internal virtual void  copy_from(SwedishStemmer other)
+        {
+            I_p1 = other.I_p1;
+            base.copy_from(other);
+        }
+        
+        private bool r_mark_regions()
+        {
+            int v_1;
+            // (, line 26
+            I_p1 = limit;
+            // goto, line 30
+            while (true)
+            {
+                v_1 = cursor;
+                do 
+                {
+                    if (!(in_grouping(g_v, 97, 246)))
+                    {
+                        goto lab1_brk;
+                    }
+                    cursor = v_1;
+                    goto golab0_brk;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = v_1;
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                cursor = v_1;
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab0_brk: ;
-			
-			// gopast, line 30
-			while (true)
-			{
-				do 
-				{
-					if (!(out_grouping(g_v, 97, 246)))
-					{
-						goto lab3_brk;
-					}
-					goto golab2_brk;
-				}
-				while (false);
+            
+            // gopast, line 30
+            while (true)
+            {
+                do 
+                {
+                    if (!(out_grouping(g_v, 97, 246)))
+                    {
+                        goto lab3_brk;
+                    }
+                    goto golab2_brk;
+                }
+                while (false);
 
 lab3_brk: ;
-				
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab2_brk: ;
-			
-			// setmark p1, line 30
-			I_p1 = cursor;
-			// try, line 31
-			do 
-			{
-				// (, line 31
-				if (!(I_p1 < 3))
-				{
-					goto lab4_brk;
-				}
-				I_p1 = 3;
-			}
-			while (false);
+            
+            // setmark p1, line 30
+            I_p1 = cursor;
+            // try, line 31
+            do 
+            {
+                // (, line 31
+                if (!(I_p1 < 3))
+                {
+                    goto lab4_brk;
+                }
+                I_p1 = 3;
+            }
+            while (false);
 
 lab4_brk: ;
-			
-			return true;
-		}
-		
-		private bool r_main_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			// (, line 36
-			// setlimit, line 37
-			v_1 = limit - cursor;
-			// tomark, line 37
-			if (cursor < I_p1)
-			{
-				return false;
-			}
-			cursor = I_p1;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 37
-			// [, line 37
-			ket = cursor;
-			// substring, line 37
-			among_var = find_among_b(a_0, 37);
-			if (among_var == 0)
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			// ], line 37
-			bra = cursor;
-			limit_backward = v_2;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 44
-					// delete, line 44
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 46
-					if (!(in_grouping_b(g_s_ending, 98, 121)))
-					{
-						return false;
-					}
-					// delete, line 46
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_consonant_pair()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			// setlimit, line 50
-			v_1 = limit - cursor;
-			// tomark, line 50
-			if (cursor < I_p1)
-			{
-				return false;
-			}
-			cursor = I_p1;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 50
-			// and, line 52
-			v_3 = limit - cursor;
-			// among, line 51
-			if (find_among_b(a_1, 7) == 0)
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			cursor = limit - v_3;
-			// (, line 52
-			// [, line 52
-			ket = cursor;
-			// next, line 52
-			if (cursor <= limit_backward)
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			cursor--;
-			// ], line 52
-			bra = cursor;
-			// delete, line 52
-			slice_del();
-			limit_backward = v_2;
-			return true;
-		}
-		
-		private bool r_other_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			// setlimit, line 55
-			v_1 = limit - cursor;
-			// tomark, line 55
-			if (cursor < I_p1)
-			{
-				return false;
-			}
-			cursor = I_p1;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 55
-			// [, line 56
-			ket = cursor;
-			// substring, line 56
-			among_var = find_among_b(a_2, 5);
-			if (among_var == 0)
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			// ], line 56
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					limit_backward = v_2;
-					return false;
-				
-				case 1: 
-					// (, line 57
-					// delete, line 57
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 58
-					// <-, line 58
-					slice_from("l\u00F6s");
-					break;
-				
-				case 3: 
-					// (, line 59
-					// <-, line 59
-					slice_from("full");
-					break;
-				}
-			limit_backward = v_2;
-			return true;
-		}
-		
-		public override bool Stem()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			// (, line 64
-			// do, line 66
-			v_1 = cursor;
-			do 
-			{
-				// call mark_regions, line 66
-				if (!r_mark_regions())
-				{
-					goto lab0_brk;
-				}
-			}
-			while (false);
+            
+            return true;
+        }
+        
+        private bool r_main_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            // (, line 36
+            // setlimit, line 37
+            v_1 = limit - cursor;
+            // tomark, line 37
+            if (cursor < I_p1)
+            {
+                return false;
+            }
+            cursor = I_p1;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 37
+            // [, line 37
+            ket = cursor;
+            // substring, line 37
+            among_var = find_among_b(a_0, 37);
+            if (among_var == 0)
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            // ], line 37
+            bra = cursor;
+            limit_backward = v_2;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 44
+                    // delete, line 44
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 46
+                    if (!(in_grouping_b(g_s_ending, 98, 121)))
+                    {
+                        return false;
+                    }
+                    // delete, line 46
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_consonant_pair()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            // setlimit, line 50
+            v_1 = limit - cursor;
+            // tomark, line 50
+            if (cursor < I_p1)
+            {
+                return false;
+            }
+            cursor = I_p1;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 50
+            // and, line 52
+            v_3 = limit - cursor;
+            // among, line 51
+            if (find_among_b(a_1, 7) == 0)
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            cursor = limit - v_3;
+            // (, line 52
+            // [, line 52
+            ket = cursor;
+            // next, line 52
+            if (cursor <= limit_backward)
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            cursor--;
+            // ], line 52
+            bra = cursor;
+            // delete, line 52
+            slice_del();
+            limit_backward = v_2;
+            return true;
+        }
+        
+        private bool r_other_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            // setlimit, line 55
+            v_1 = limit - cursor;
+            // tomark, line 55
+            if (cursor < I_p1)
+            {
+                return false;
+            }
+            cursor = I_p1;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 55
+            // [, line 56
+            ket = cursor;
+            // substring, line 56
+            among_var = find_among_b(a_2, 5);
+            if (among_var == 0)
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            // ], line 56
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    limit_backward = v_2;
+                    return false;
+                
+                case 1: 
+                    // (, line 57
+                    // delete, line 57
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 58
+                    // <-, line 58
+                    slice_from("l\u00F6s");
+                    break;
+                
+                case 3: 
+                    // (, line 59
+                    // <-, line 59
+                    slice_from("full");
+                    break;
+                }
+            limit_backward = v_2;
+            return true;
+        }
+        
+        public override bool Stem()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            // (, line 64
+            // do, line 66
+            v_1 = cursor;
+            do 
+            {
+                // call mark_regions, line 66
+                if (!r_mark_regions())
+                {
+                    goto lab0_brk;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = v_1;
-			// backwards, line 67
-			limit_backward = cursor; cursor = limit;
-			// (, line 67
-			// do, line 68
-			v_2 = limit - cursor;
-			do 
-			{
-				// call main_suffix, line 68
-				if (!r_main_suffix())
-				{
-					goto lab1_brk;
-				}
-			}
-			while (false);
+            
+            cursor = v_1;
+            // backwards, line 67
+            limit_backward = cursor; cursor = limit;
+            // (, line 67
+            // do, line 68
+            v_2 = limit - cursor;
+            do 
+            {
+                // call main_suffix, line 68
+                if (!r_main_suffix())
+                {
+                    goto lab1_brk;
+                }
+            }
+            while (false);
 
 lab1_brk: ;
-			
-			cursor = limit - v_2;
-			// do, line 69
-			v_3 = limit - cursor;
-			do 
-			{
-				// call consonant_pair, line 69
-				if (!r_consonant_pair())
-				{
-					goto lab2_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_2;
+            // do, line 69
+            v_3 = limit - cursor;
+            do 
+            {
+                // call consonant_pair, line 69
+                if (!r_consonant_pair())
+                {
+                    goto lab2_brk;
+                }
+            }
+            while (false);
 
 lab2_brk: ;
-			
-			cursor = limit - v_3;
-			// do, line 70
-			v_4 = limit - cursor;
-			do 
-			{
-				// call other_suffix, line 70
-				if (!r_other_suffix())
-				{
-					goto lab3_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_3;
+            // do, line 70
+            v_4 = limit - cursor;
+            do 
+            {
+                // call other_suffix, line 70
+                if (!r_other_suffix())
+                {
+                    goto lab3_brk;
+                }
+            }
+            while (false);
 
 lab3_brk: ;
-			
-			cursor = limit - v_4;
-			cursor = limit_backward; return true;
-		}
-	}
+            
+            cursor = limit - v_4;
+            cursor = limit_backward; return true;
+        }
+    }
 }
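
For orientation, each generated stemmer above is driven through the small SnowballProgram surface that appears later in this patch (SetCurrent/Stem/GetCurrent). A minimal usage sketch, assuming the SF.Snowball.Ext namespace as laid out here:

using System;
using SF.Snowball.Ext;

class StemDemo
{
    static void Main()
    {
        var stemmer = new SwedishStemmer();
        stemmer.SetCurrent("heterna");            // load the word into the buffer
        stemmer.Stem();                           // apply the generated rules in place
        Console.WriteLine(stemmer.GetCurrent());  // expected stem: "het"
    }
}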

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/TurkishStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/TurkishStemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/TurkishStemmer.cs
index 403647f..29a0d26 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/TurkishStemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/TurkishStemmer.cs
@@ -56,205 +56,205 @@ using Among = SF.Snowball.Among;
 using SnowballProgram = SF.Snowball.SnowballProgram;
 namespace SF.Snowball.Ext
 {
-    	/* Generated class implementing code defined by a snowball script.
-	*
-	*/
+        /* Generated class implementing code defined by a snowball script.
+    *
+    */
     public class TurkishStemmer : SnowballProgram
     {
 
         public TurkishStemmer()
         {
             a_0 = new Among[] {
-				new Among("m", -1, -1, "", null),
-				new Among("n", -1, -1, "", null),
-				new Among("miz", -1, -1, "", null),
-				new Among("niz", -1, -1, "", null),
-				new Among("muz", -1, -1, "", null),
-				new Among("nuz", -1, -1, "", null),
-				new Among("m\u00FCz", -1, -1, "", null),
-				new Among("n\u00FCz", -1, -1, "", null),
-				new Among("m\u0131z", -1, -1, "", null),
-				new Among("n\u0131z", -1, -1, "", null)
-			};
+                new Among("m", -1, -1, "", null),
+                new Among("n", -1, -1, "", null),
+                new Among("miz", -1, -1, "", null),
+                new Among("niz", -1, -1, "", null),
+                new Among("muz", -1, -1, "", null),
+                new Among("nuz", -1, -1, "", null),
+                new Among("m\u00FCz", -1, -1, "", null),
+                new Among("n\u00FCz", -1, -1, "", null),
+                new Among("m\u0131z", -1, -1, "", null),
+                new Among("n\u0131z", -1, -1, "", null)
+            };
 
             a_1 = new Among[] {
-				new Among("leri", -1, -1, "", null),
-				new Among("lar\u0131", -1, -1, "", null)
-			};
+                new Among("leri", -1, -1, "", null),
+                new Among("lar\u0131", -1, -1, "", null)
+            };
 
             a_2 = new Among[] {
-				new Among("ni", -1, -1, "", null),
-				new Among("nu", -1, -1, "", null),
-				new Among("n\u00FC", -1, -1, "", null),
-				new Among("n\u0131", -1, -1, "", null)
-			};
+                new Among("ni", -1, -1, "", null),
+                new Among("nu", -1, -1, "", null),
+                new Among("n\u00FC", -1, -1, "", null),
+                new Among("n\u0131", -1, -1, "", null)
+            };
 
             a_3 = new Among[] {
-				new Among("in", -1, -1, "", null),
-				new Among("un", -1, -1, "", null),
-				new Among("\u00FCn", -1, -1, "", null),
-				new Among("\u0131n", -1, -1, "", null)
-			};
+                new Among("in", -1, -1, "", null),
+                new Among("un", -1, -1, "", null),
+                new Among("\u00FCn", -1, -1, "", null),
+                new Among("\u0131n", -1, -1, "", null)
+            };
 
             a_4 = new Among[] {
-				new Among("a", -1, -1, "", null),
-				new Among("e", -1, -1, "", null)
-			};
+                new Among("a", -1, -1, "", null),
+                new Among("e", -1, -1, "", null)
+            };
 
             a_5 = new Among[] {
-				new Among("na", -1, -1, "", null),
-				new Among("ne", -1, -1, "", null)
-			};
+                new Among("na", -1, -1, "", null),
+                new Among("ne", -1, -1, "", null)
+            };
 
             a_6 = new Among[] {
-				new Among("da", -1, -1, "", null),
-				new Among("ta", -1, -1, "", null),
-				new Among("de", -1, -1, "", null),
-				new Among("te", -1, -1, "", null)
-			};
+                new Among("da", -1, -1, "", null),
+                new Among("ta", -1, -1, "", null),
+                new Among("de", -1, -1, "", null),
+                new Among("te", -1, -1, "", null)
+            };
 
             a_7 = new Among[] {
-				new Among("nda", -1, -1, "", null),
-				new Among("nde", -1, -1, "", null)
-			};
+                new Among("nda", -1, -1, "", null),
+                new Among("nde", -1, -1, "", null)
+            };
 
             a_8 = new Among[] {
-				new Among("dan", -1, -1, "", null),
-				new Among("tan", -1, -1, "", null),
-				new Among("den", -1, -1, "", null),
-				new Among("ten", -1, -1, "", null)
-			};
+                new Among("dan", -1, -1, "", null),
+                new Among("tan", -1, -1, "", null),
+                new Among("den", -1, -1, "", null),
+                new Among("ten", -1, -1, "", null)
+            };
 
             a_9 = new Among[] {
-				new Among("ndan", -1, -1, "", null),
-				new Among("nden", -1, -1, "", null)
-			};
+                new Among("ndan", -1, -1, "", null),
+                new Among("nden", -1, -1, "", null)
+            };
 
             a_10 = new Among[] {
-				new Among("la", -1, -1, "", null),
-				new Among("le", -1, -1, "", null)
-			};
+                new Among("la", -1, -1, "", null),
+                new Among("le", -1, -1, "", null)
+            };
 
             a_11 = new Among[] {
-				new Among("ca", -1, -1, "", null),
-				new Among("ce", -1, -1, "", null)
-			};
+                new Among("ca", -1, -1, "", null),
+                new Among("ce", -1, -1, "", null)
+            };
 
             a_12 = new Among[] {
-				new Among("im", -1, -1, "", null),
-				new Among("um", -1, -1, "", null),
-				new Among("\u00FCm", -1, -1, "", null),
-				new Among("\u0131m", -1, -1, "", null)
-			};
+                new Among("im", -1, -1, "", null),
+                new Among("um", -1, -1, "", null),
+                new Among("\u00FCm", -1, -1, "", null),
+                new Among("\u0131m", -1, -1, "", null)
+            };
 
             a_13 = new Among[] {
-				new Among("sin", -1, -1, "", null),
-				new Among("sun", -1, -1, "", null),
-				new Among("s\u00FCn", -1, -1, "", null),
-				new Among("s\u0131n", -1, -1, "", null)
-			};
+                new Among("sin", -1, -1, "", null),
+                new Among("sun", -1, -1, "", null),
+                new Among("s\u00FCn", -1, -1, "", null),
+                new Among("s\u0131n", -1, -1, "", null)
+            };
 
             a_14 = new Among[] {
-				new Among("iz", -1, -1, "", null),
-				new Among("uz", -1, -1, "", null),
-				new Among("\u00FCz", -1, -1, "", null),
-				new Among("\u0131z", -1, -1, "", null)
-			};
+                new Among("iz", -1, -1, "", null),
+                new Among("uz", -1, -1, "", null),
+                new Among("\u00FCz", -1, -1, "", null),
+                new Among("\u0131z", -1, -1, "", null)
+            };
 
             a_15 = new Among[] {
-				new Among("siniz", -1, -1, "", null),
-				new Among("sunuz", -1, -1, "", null),
-				new Among("s\u00FCn\u00FCz", -1, -1, "", null),
-				new Among("s\u0131n\u0131z", -1, -1, "", null)
-			};
+                new Among("siniz", -1, -1, "", null),
+                new Among("sunuz", -1, -1, "", null),
+                new Among("s\u00FCn\u00FCz", -1, -1, "", null),
+                new Among("s\u0131n\u0131z", -1, -1, "", null)
+            };
 
             a_16 = new Among[] {
-				new Among("lar", -1, -1, "", null),
-				new Among("ler", -1, -1, "", null)
-			};
+                new Among("lar", -1, -1, "", null),
+                new Among("ler", -1, -1, "", null)
+            };
 
             a_17 = new Among[] {
-				new Among("niz", -1, -1, "", null),
-				new Among("nuz", -1, -1, "", null),
-				new Among("n\u00FCz", -1, -1, "", null),
-				new Among("n\u0131z", -1, -1, "", null)
-			};
+                new Among("niz", -1, -1, "", null),
+                new Among("nuz", -1, -1, "", null),
+                new Among("n\u00FCz", -1, -1, "", null),
+                new Among("n\u0131z", -1, -1, "", null)
+            };
 
             a_18 = new Among[] {
-				new Among("dir", -1, -1, "", null),
-				new Among("tir", -1, -1, "", null),
-				new Among("dur", -1, -1, "", null),
-				new Among("tur", -1, -1, "", null),
-				new Among("d\u00FCr", -1, -1, "", null),
-				new Among("t\u00FCr", -1, -1, "", null),
-				new Among("d\u0131r", -1, -1, "", null),
-				new Among("t\u0131r", -1, -1, "", null)
-			};
+                new Among("dir", -1, -1, "", null),
+                new Among("tir", -1, -1, "", null),
+                new Among("dur", -1, -1, "", null),
+                new Among("tur", -1, -1, "", null),
+                new Among("d\u00FCr", -1, -1, "", null),
+                new Among("t\u00FCr", -1, -1, "", null),
+                new Among("d\u0131r", -1, -1, "", null),
+                new Among("t\u0131r", -1, -1, "", null)
+            };
 
             a_19 = new Among[] {
-				new Among("cas\u0131na", -1, -1, "", null),
-				new Among("cesine", -1, -1, "", null)
-			};
+                new Among("cas\u0131na", -1, -1, "", null),
+                new Among("cesine", -1, -1, "", null)
+            };
 
             a_20 = new Among[] {
-				new Among("di", -1, -1, "", null),
-				new Among("ti", -1, -1, "", null),
-				new Among("dik", -1, -1, "", null),
-				new Among("tik", -1, -1, "", null),
-				new Among("duk", -1, -1, "", null),
-				new Among("tuk", -1, -1, "", null),
-				new Among("d\u00FCk", -1, -1, "", null),
-				new Among("t\u00FCk", -1, -1, "", null),
-				new Among("d\u0131k", -1, -1, "", null),
-				new Among("t\u0131k", -1, -1, "", null),
-				new Among("dim", -1, -1, "", null),
-				new Among("tim", -1, -1, "", null),
-				new Among("dum", -1, -1, "", null),
-				new Among("tum", -1, -1, "", null),
-				new Among("d\u00FCm", -1, -1, "", null),
-				new Among("t\u00FCm", -1, -1, "", null),
-				new Among("d\u0131m", -1, -1, "", null),
-				new Among("t\u0131m", -1, -1, "", null),
-				new Among("din", -1, -1, "", null),
-				new Among("tin", -1, -1, "", null),
-				new Among("dun", -1, -1, "", null),
-				new Among("tun", -1, -1, "", null),
-				new Among("d\u00FCn", -1, -1, "", null),
-				new Among("t\u00FCn", -1, -1, "", null),
-				new Among("d\u0131n", -1, -1, "", null),
-				new Among("t\u0131n", -1, -1, "", null),
-				new Among("du", -1, -1, "", null),
-				new Among("tu", -1, -1, "", null),
-				new Among("d\u00FC", -1, -1, "", null),
-				new Among("t\u00FC", -1, -1, "", null),
-				new Among("d\u0131", -1, -1, "", null),
-				new Among("t\u0131", -1, -1, "", null)
-			};
+                new Among("di", -1, -1, "", null),
+                new Among("ti", -1, -1, "", null),
+                new Among("dik", -1, -1, "", null),
+                new Among("tik", -1, -1, "", null),
+                new Among("duk", -1, -1, "", null),
+                new Among("tuk", -1, -1, "", null),
+                new Among("d\u00FCk", -1, -1, "", null),
+                new Among("t\u00FCk", -1, -1, "", null),
+                new Among("d\u0131k", -1, -1, "", null),
+                new Among("t\u0131k", -1, -1, "", null),
+                new Among("dim", -1, -1, "", null),
+                new Among("tim", -1, -1, "", null),
+                new Among("dum", -1, -1, "", null),
+                new Among("tum", -1, -1, "", null),
+                new Among("d\u00FCm", -1, -1, "", null),
+                new Among("t\u00FCm", -1, -1, "", null),
+                new Among("d\u0131m", -1, -1, "", null),
+                new Among("t\u0131m", -1, -1, "", null),
+                new Among("din", -1, -1, "", null),
+                new Among("tin", -1, -1, "", null),
+                new Among("dun", -1, -1, "", null),
+                new Among("tun", -1, -1, "", null),
+                new Among("d\u00FCn", -1, -1, "", null),
+                new Among("t\u00FCn", -1, -1, "", null),
+                new Among("d\u0131n", -1, -1, "", null),
+                new Among("t\u0131n", -1, -1, "", null),
+                new Among("du", -1, -1, "", null),
+                new Among("tu", -1, -1, "", null),
+                new Among("d\u00FC", -1, -1, "", null),
+                new Among("t\u00FC", -1, -1, "", null),
+                new Among("d\u0131", -1, -1, "", null),
+                new Among("t\u0131", -1, -1, "", null)
+            };
 
             a_21 = new Among[] {
-				new Among("sa", -1, -1, "", null),
-				new Among("se", -1, -1, "", null),
-				new Among("sak", -1, -1, "", null),
-				new Among("sek", -1, -1, "", null),
-				new Among("sam", -1, -1, "", null),
-				new Among("sem", -1, -1, "", null),
-				new Among("san", -1, -1, "", null),
-				new Among("sen", -1, -1, "", null)
-			};
+                new Among("sa", -1, -1, "", null),
+                new Among("se", -1, -1, "", null),
+                new Among("sak", -1, -1, "", null),
+                new Among("sek", -1, -1, "", null),
+                new Among("sam", -1, -1, "", null),
+                new Among("sem", -1, -1, "", null),
+                new Among("san", -1, -1, "", null),
+                new Among("sen", -1, -1, "", null)
+            };
 
             a_22 = new Among[] {
-				new Among("mi\u015F", -1, -1, "", null),
-				new Among("mu\u015F", -1, -1, "", null),
-				new Among("m\u00FC\u015F", -1, -1, "", null),
-				new Among("m\u0131\u015F", -1, -1, "", null)
-			};
+                new Among("mi\u015F", -1, -1, "", null),
+                new Among("mu\u015F", -1, -1, "", null),
+                new Among("m\u00FC\u015F", -1, -1, "", null),
+                new Among("m\u0131\u015F", -1, -1, "", null)
+            };
 
             a_23 = new Among[] {
-				new Among("b", -1, 1, "", null),
-				new Among("c", -1, 2, "", null),
-				new Among("d", -1, 3, "", null),
-				new Among("\u011F", -1, 4, "", null)
-			};
+                new Among("b", -1, 1, "", null),
+                new Among("c", -1, 2, "", null),
+                new Among("d", -1, 3, "", null),
+                new Among("\u011F", -1, 4, "", null)
+            };
 
         }
 

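One detail worth noting before the SnowballProgram.cs diff below: grouping tables such as g_v in SwedishStemmer above are packed bitsets over the char range [min, max], and in_grouping tests bit (ch - min) with s[bit >> 3] & (1 << (bit & 7)). A small self-contained check against the Swedish vowel table (demo code, not part of the patch):

using System;

class GroupingDemo
{
    // Swedish vowel grouping copied from SwedishStemmer (min = 'a' = 97, max = '\u00F6' = 246).
    static readonly char[] g_v = new char[]{(char) 17, (char) 65, (char) 16, (char) 1,
        (char) 0, (char) 0, (char) 0, (char) 0, (char) 0, (char) 0, (char) 0, (char) 0,
        (char) 0, (char) 0, (char) 0, (char) 0, (char) 24, (char) 0, (char) 32};

    // Same membership test as SnowballProgram.in_grouping, minus the cursor bookkeeping.
    static bool InGroup(char[] s, int min, int max, char ch)
    {
        if (ch < min || ch > max) return false;
        int bit = ch - min;
        return (s[bit >> 3] & (1 << (bit & 7))) != 0;
    }

    static void Main()
    {
        Console.WriteLine(InGroup(g_v, 97, 246, 'a'));       // True  (vowel)
        Console.WriteLine(InGroup(g_v, 97, 246, 'b'));       // False (consonant)
        Console.WriteLine(InGroup(g_v, 97, 246, '\u00F6'));  // True  ('ö' is a vowel)
    }
}
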
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/SnowballProgram.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/SnowballProgram.cs b/src/contrib/Snowball/SF/Snowball/SnowballProgram.cs
index e3b7fa1..b181dde 100644
--- a/src/contrib/Snowball/SF/Snowball/SnowballProgram.cs
+++ b/src/contrib/Snowball/SF/Snowball/SnowballProgram.cs
@@ -19,33 +19,33 @@ using System.Text;
 
 namespace SF.Snowball
 {
-	/// <summary>
-	/// This is the rev 500 of the snowball SVN trunk,
-	/// but modified:
-	/// made abstract and introduced abstract method stem to avoid expensive reflection in filter class
-	/// </summary>
-	public abstract class SnowballProgram
-	{
-		protected internal SnowballProgram()
-		{
-			current = new System.Text.StringBuilder();
-			SetCurrent("");
-		}
+    /// <summary>
+    /// This is the rev 500 of the snowball SVN trunk,
+    /// but modified:
+    /// made abstract and introduced abstract method stem to avoid expensive reflection in filter class
+    /// </summary>
+    public abstract class SnowballProgram
+    {
+        protected internal SnowballProgram()
+        {
+            current = new System.Text.StringBuilder();
+            SetCurrent("");
+        }
 
-	    public abstract bool Stem();
+        public abstract bool Stem();
 
-		/// <summary> Set the current string.</summary>
-		public virtual void  SetCurrent(System.String value)
-		{
-			//// current.Replace(current.ToString(0, current.Length - 0), value_Renamed, 0, current.Length - 0);
+        /// <summary> Set the current string.</summary>
+        public virtual void  SetCurrent(System.String value)
+        {
+            //// current.Replace(current.ToString(0, current.Length - 0), value_Renamed, 0, current.Length - 0);
             current.Remove(0, current.Length);
             current.Append(value);
-			cursor = 0;
-			limit = current.Length;
-			limit_backward = 0;
-			bra = cursor;
-			ket = limit;
-		}
+            cursor = 0;
+            limit = current.Length;
+            limit_backward = 0;
+            bra = cursor;
+            ket = limit;
+        }
 
         /// <summary> Get the current string.</summary>
         virtual public System.String GetCurrent()
@@ -61,458 +61,458 @@ namespace SF.Snowball
             return result;
         }
 
-		// current string
-		protected internal System.Text.StringBuilder current;
-		
-		protected internal int cursor;
-		protected internal int limit;
-		protected internal int limit_backward;
-		protected internal int bra;
-		protected internal int ket;
-		
-		protected internal virtual void  copy_from(SnowballProgram other)
-		{
-			current = other.current;
-			cursor = other.cursor;
-			limit = other.limit;
-			limit_backward = other.limit_backward;
-			bra = other.bra;
-			ket = other.ket;
-		}
-		
-		protected internal virtual bool in_grouping(char[] s, int min, int max)
-		{
-			if (cursor >= limit)
-				return false;
-			char ch = current[cursor];
-			if (ch > max || ch < min)
-				return false;
-			ch -= (char) (min);
-			if ((s[ch >> 3] & (0x1 << (ch & 0x7))) == 0)
-				return false;
-			cursor++;
-			return true;
-		}
-		
-		protected internal virtual bool in_grouping_b(char[] s, int min, int max)
-		{
-			if (cursor <= limit_backward)
-				return false;
-			char ch = current[cursor - 1];
-			if (ch > max || ch < min)
-				return false;
-			ch -= (char) (min);
-			if ((s[ch >> 3] & (0x1 << (ch & 0x7))) == 0)
-				return false;
-			cursor--;
-			return true;
-		}
-		
-		protected internal virtual bool out_grouping(char[] s, int min, int max)
-		{
-			if (cursor >= limit)
-				return false;
-			char ch = current[cursor];
-			if (ch > max || ch < min)
-			{
-				cursor++;
-				return true;
-			}
-			ch -= (char) (min);
-			if ((s[ch >> 3] & (0x1 << (ch & 0x7))) == 0)
-			{
-				cursor++;
-				return true;
-			}
-			return false;
-		}
-		
-		protected internal virtual bool out_grouping_b(char[] s, int min, int max)
-		{
-			if (cursor <= limit_backward)
-				return false;
-			char ch = current[cursor - 1];
-			if (ch > max || ch < min)
-			{
-				cursor--;
-				return true;
-			}
-			ch -= (char) (min);
-			if ((s[ch >> 3] & (0x1 << (ch & 0x7))) == 0)
-			{
-				cursor--;
-				return true;
-			}
-			return false;
-		}
-		
-		protected internal virtual bool in_range(int min, int max)
-		{
-			if (cursor >= limit)
-				return false;
-			char ch = current[cursor];
-			if (ch > max || ch < min)
-				return false;
-			cursor++;
-			return true;
-		}
-		
-		protected internal virtual bool in_range_b(int min, int max)
-		{
-			if (cursor <= limit_backward)
-				return false;
-			char ch = current[cursor - 1];
-			if (ch > max || ch < min)
-				return false;
-			cursor--;
-			return true;
-		}
-		
-		protected internal virtual bool out_range(int min, int max)
-		{
-			if (cursor >= limit)
-				return false;
-			char ch = current[cursor];
-			if (!(ch > max || ch < min))
-				return false;
-			cursor++;
-			return true;
-		}
-		
-		protected internal virtual bool out_range_b(int min, int max)
-		{
-			if (cursor <= limit_backward)
-				return false;
-			char ch = current[cursor - 1];
-			if (!(ch > max || ch < min))
-				return false;
-			cursor--;
-			return true;
-		}
-		
-		protected internal virtual bool eq_s(int s_size, System.String s)
-		{
-			if (limit - cursor < s_size)
-				return false;
-			int i;
-			for (i = 0; i != s_size; i++)
-			{
-				if (current[cursor + i] != s[i])
-					return false;
-			}
-			cursor += s_size;
-			return true;
-		}
-		
-		protected internal virtual bool eq_s_b(int s_size, System.String s)
-		{
-			if (cursor - limit_backward < s_size)
-				return false;
-			int i;
-			for (i = 0; i != s_size; i++)
-			{
-				if (current[cursor - s_size + i] != s[i])
-					return false;
-			}
-			cursor -= s_size;
-			return true;
-		}
-		
-		protected internal virtual bool eq_v(System.Text.StringBuilder s)
-		{
-			return eq_s(s.Length, s.ToString());
-		}
-		
-		protected internal virtual bool eq_v_b(System.Text.StringBuilder s)
-		{
-			return eq_s_b(s.Length, s.ToString());
-		}
-		
-		protected internal virtual int find_among(Among[] v, int v_size)
-		{
-			int i = 0;
-			int j = v_size;
-			
-			int c = cursor;
-			int l = limit;
-			
-			int common_i = 0;
-			int common_j = 0;
-			
-			bool first_key_inspected = false;
-			
-			while (true)
-			{
-				int k = i + ((j - i) >> 1);
-				int diff = 0;
-				int common = common_i < common_j?common_i:common_j; // smaller
-				Among w = v[k];
-				int i2;
-				for (i2 = common; i2 < w.s_size; i2++)
-				{
-					if (c + common == l)
-					{
-						diff = - 1;
-						break;
-					}
-					diff = current[c + common] - w.s[i2];
-					if (diff != 0)
-						break;
-					common++;
-				}
-				if (diff < 0)
-				{
-					j = k;
-					common_j = common;
-				}
-				else
-				{
-					i = k;
-					common_i = common;
-				}
-				if (j - i <= 1)
-				{
-					if (i > 0)
-						break; // v->s has been inspected
-					if (j == i)
-						break; // only one item in v
-					
-					// - but now we need to go round once more to get
-					// v->s inspected. This looks messy, but is actually
-					// the optimal approach.
-					
-					if (first_key_inspected)
-						break;
-					first_key_inspected = true;
-				}
-			}
-			while (true)
-			{
-				Among w = v[i];
-				if (common_i >= w.s_size)
-				{
-					cursor = c + w.s_size;
-					if (w.method == null)
-						return w.result;
-					bool res;
-					try
-					{
-						System.Object resobj = w.method.Invoke(w.methodobject, (System.Object[]) new System.Object[0]);
-						// {{Aroush}} UPGRADE_TODO: The equivalent in .NET for method 'java.lang.Object.toString' may return a different value. 'ms-help://MS.VSCC.2003/commoner/redir/redirect.htm?keyword="jlca1043_3"'
-						res = resobj.ToString().Equals("true");
-					}
-					catch (System.Reflection.TargetInvocationException)
-					{
-						res = false;
-						// FIXME - debug message
-					}
-					catch (System.UnauthorizedAccessException)
-					{
-						res = false;
-						// FIXME - debug message
-					}
-					cursor = c + w.s_size;
-					if (res)
-						return w.result;
-				}
-				i = w.substring_i;
-				if (i < 0)
-					return 0;
-			}
-		}
-		
-		// find_among_b is for backwards processing. Same comments apply
-		protected internal virtual int find_among_b(Among[] v, int v_size)
-		{
-			int i = 0;
-			int j = v_size;
-			
-			int c = cursor;
-			int lb = limit_backward;
-			
-			int common_i = 0;
-			int common_j = 0;
-			
-			bool first_key_inspected = false;
-			
-			while (true)
-			{
-				int k = i + ((j - i) >> 1);
-				int diff = 0;
-				int common = common_i < common_j?common_i:common_j;
-				Among w = v[k];
-				int i2;
-				for (i2 = w.s_size - 1 - common; i2 >= 0; i2--)
-				{
-					if (c - common == lb)
-					{
-						diff = - 1;
-						break;
-					}
-					diff = current[c - 1 - common] - w.s[i2];
-					if (diff != 0)
-						break;
-					common++;
-				}
-				if (diff < 0)
-				{
-					j = k;
-					common_j = common;
-				}
-				else
-				{
-					i = k;
-					common_i = common;
-				}
-				if (j - i <= 1)
-				{
-					if (i > 0)
-						break;
-					if (j == i)
-						break;
-					if (first_key_inspected)
-						break;
-					first_key_inspected = true;
-				}
-			}
-			while (true)
-			{
-				Among w = v[i];
-				if (common_i >= w.s_size)
-				{
-					cursor = c - w.s_size;
-					if (w.method == null)
-						return w.result;
-					
-					bool res;
-					try
-					{
-						System.Object resobj = w.method.Invoke(w.methodobject, (System.Object[]) new System.Object[0]);
-						// {{Aroush}} UPGRADE_TODO: The equivalent in .NET for method 'java.lang.Object.toString' may return a different value. 'ms-help://MS.VSCC.2003/commoner/redir/redirect.htm?keyword="jlca1043_3"'
-						res = resobj.ToString().Equals("true");
-					}
-					catch (System.Reflection.TargetInvocationException)
-					{
-						res = false;
-						// FIXME - debug message
-					}
-					catch (System.UnauthorizedAccessException)
-					{
-						res = false;
-						// FIXME - debug message
-					}
-					cursor = c - w.s_size;
-					if (res)
-						return w.result;
-				}
-				i = w.substring_i;
-				if (i < 0)
-					return 0;
-			}
-		}
-		
-		/* to replace chars between c_bra and c_ket in current by the
-		* chars in s.
-		*/
-		protected internal virtual int replace_s(int c_bra, int c_ket, System.String s)
-		{
-			int adjustment = s.Length - (c_ket - c_bra);
+        // current string
+        protected internal System.Text.StringBuilder current;
+        
+        protected internal int cursor;
+        protected internal int limit;
+        protected internal int limit_backward;
+        protected internal int bra;
+        protected internal int ket;
+        
+        protected internal virtual void  copy_from(SnowballProgram other)
+        {
+            current = other.current;
+            cursor = other.cursor;
+            limit = other.limit;
+            limit_backward = other.limit_backward;
+            bra = other.bra;
+            ket = other.ket;
+        }
+        
+        protected internal virtual bool in_grouping(char[] s, int min, int max)
+        {
+            if (cursor >= limit)
+                return false;
+            char ch = current[cursor];
+            if (ch > max || ch < min)
+                return false;
+            ch -= (char) (min);
+            if ((s[ch >> 3] & (0x1 << (ch & 0x7))) == 0)
+                return false;
+            cursor++;
+            return true;
+        }
+        
+        protected internal virtual bool in_grouping_b(char[] s, int min, int max)
+        {
+            if (cursor <= limit_backward)
+                return false;
+            char ch = current[cursor - 1];
+            if (ch > max || ch < min)
+                return false;
+            ch -= (char) (min);
+            if ((s[ch >> 3] & (0x1 << (ch & 0x7))) == 0)
+                return false;
+            cursor--;
+            return true;
+        }
+        
+        protected internal virtual bool out_grouping(char[] s, int min, int max)
+        {
+            if (cursor >= limit)
+                return false;
+            char ch = current[cursor];
+            if (ch > max || ch < min)
+            {
+                cursor++;
+                return true;
+            }
+            ch -= (char) (min);
+            if ((s[ch >> 3] & (0x1 << (ch & 0x7))) == 0)
+            {
+                cursor++;
+                return true;
+            }
+            return false;
+        }
+        
+        protected internal virtual bool out_grouping_b(char[] s, int min, int max)
+        {
+            if (cursor <= limit_backward)
+                return false;
+            char ch = current[cursor - 1];
+            if (ch > max || ch < min)
+            {
+                cursor--;
+                return true;
+            }
+            ch -= (char) (min);
+            if ((s[ch >> 3] & (0x1 << (ch & 0x7))) == 0)
+            {
+                cursor--;
+                return true;
+            }
+            return false;
+        }
+        
+        protected internal virtual bool in_range(int min, int max)
+        {
+            if (cursor >= limit)
+                return false;
+            char ch = current[cursor];
+            if (ch > max || ch < min)
+                return false;
+            cursor++;
+            return true;
+        }
+        
+        protected internal virtual bool in_range_b(int min, int max)
+        {
+            if (cursor <= limit_backward)
+                return false;
+            char ch = current[cursor - 1];
+            if (ch > max || ch < min)
+                return false;
+            cursor--;
+            return true;
+        }
+        
+        protected internal virtual bool out_range(int min, int max)
+        {
+            if (cursor >= limit)
+                return false;
+            char ch = current[cursor];
+            if (!(ch > max || ch < min))
+                return false;
+            cursor++;
+            return true;
+        }
+        
+        protected internal virtual bool out_range_b(int min, int max)
+        {
+            if (cursor <= limit_backward)
+                return false;
+            char ch = current[cursor - 1];
+            if (!(ch > max || ch < min))
+                return false;
+            cursor--;
+            return true;
+        }
+        
+        protected internal virtual bool eq_s(int s_size, System.String s)
+        {
+            if (limit - cursor < s_size)
+                return false;
+            int i;
+            for (i = 0; i != s_size; i++)
+            {
+                if (current[cursor + i] != s[i])
+                    return false;
+            }
+            cursor += s_size;
+            return true;
+        }
+        
+        protected internal virtual bool eq_s_b(int s_size, System.String s)
+        {
+            if (cursor - limit_backward < s_size)
+                return false;
+            int i;
+            for (i = 0; i != s_size; i++)
+            {
+                if (current[cursor - s_size + i] != s[i])
+                    return false;
+            }
+            cursor -= s_size;
+            return true;
+        }
+        
+        protected internal virtual bool eq_v(System.Text.StringBuilder s)
+        {
+            return eq_s(s.Length, s.ToString());
+        }
+        
+        protected internal virtual bool eq_v_b(System.Text.StringBuilder s)
+        {
+            return eq_s_b(s.Length, s.ToString());
+        }
+        
+        protected internal virtual int find_among(Among[] v, int v_size)
+        {
+            int i = 0;
+            int j = v_size;
+            
+            int c = cursor;
+            int l = limit;
+            
+            int common_i = 0;
+            int common_j = 0;
+            
+            bool first_key_inspected = false;
+            
+            while (true)
+            {
+                int k = i + ((j - i) >> 1);
+                int diff = 0;
+                int common = common_i < common_j?common_i:common_j; // smaller
+                Among w = v[k];
+                int i2;
+                for (i2 = common; i2 < w.s_size; i2++)
+                {
+                    if (c + common == l)
+                    {
+                        diff = - 1;
+                        break;
+                    }
+                    diff = current[c + common] - w.s[i2];
+                    if (diff != 0)
+                        break;
+                    common++;
+                }
+                if (diff < 0)
+                {
+                    j = k;
+                    common_j = common;
+                }
+                else
+                {
+                    i = k;
+                    common_i = common;
+                }
+                if (j - i <= 1)
+                {
+                    if (i > 0)
+                        break; // v->s has been inspected
+                    if (j == i)
+                        break; // only one item in v
+                    
+                    // - but now we need to go round once more to get
+                    // v->s inspected. This looks messy, but is actually
+                    // the optimal approach.
+                    
+                    if (first_key_inspected)
+                        break;
+                    first_key_inspected = true;
+                }
+            }
+            while (true)
+            {
+                Among w = v[i];
+                if (common_i >= w.s_size)
+                {
+                    cursor = c + w.s_size;
+                    if (w.method == null)
+                        return w.result;
+                    bool res;
+                    try
+                    {
+                        System.Object resobj = w.method.Invoke(w.methodobject, (System.Object[]) new System.Object[0]);
+                        // {{Aroush}} UPGRADE_TODO: The equivalent in .NET for method 'java.lang.Object.toString' may return a different value. 'ms-help://MS.VSCC.2003/commoner/redir/redirect.htm?keyword="jlca1043_3"'
+                        res = resobj.ToString().Equals("true");
+                    }
+                    catch (System.Reflection.TargetInvocationException)
+                    {
+                        res = false;
+                        // FIXME - debug message
+                    }
+                    catch (System.UnauthorizedAccessException)
+                    {
+                        res = false;
+                        // FIXME - debug message
+                    }
+                    cursor = c + w.s_size;
+                    if (res)
+                        return w.result;
+                }
+                i = w.substring_i;
+                if (i < 0)
+                    return 0;
+            }
+        }
+        
+        // find_among_b is for backwards processing. Same comments apply
+        protected internal virtual int find_among_b(Among[] v, int v_size)
+        {
+            int i = 0;
+            int j = v_size;
+            
+            int c = cursor;
+            int lb = limit_backward;
+            
+            int common_i = 0;
+            int common_j = 0;
+            
+            bool first_key_inspected = false;
+            
+            while (true)
+            {
+                int k = i + ((j - i) >> 1);
+                int diff = 0;
+                int common = common_i < common_j?common_i:common_j;
+                Among w = v[k];
+                int i2;
+                for (i2 = w.s_size - 1 - common; i2 >= 0; i2--)
+                {
+                    if (c - common == lb)
+                    {
+                        diff = - 1;
+                        break;
+                    }
+                    diff = current[c - 1 - common] - w.s[i2];
+                    if (diff != 0)
+                        break;
+                    common++;
+                }
+                if (diff < 0)
+                {
+                    j = k;
+                    common_j = common;
+                }
+                else
+                {
+                    i = k;
+                    common_i = common;
+                }
+                if (j - i <= 1)
+                {
+                    if (i > 0)
+                        break;
+                    if (j == i)
+                        break;
+                    if (first_key_inspected)
+                        break;
+                    first_key_inspected = true;
+                }
+            }
+            while (true)
+            {
+                Among w = v[i];
+                if (common_i >= w.s_size)
+                {
+                    cursor = c - w.s_size;
+                    if (w.method == null)
+                        return w.result;
+                    
+                    bool res;
+                    try
+                    {
+                        System.Object resobj = w.method.Invoke(w.methodobject, (System.Object[]) new System.Object[0]);
+                        // {{Aroush}} UPGRADE_TODO: The equivalent in .NET for method 'java.lang.Object.toString' may return a different value. 'ms-help://MS.VSCC.2003/commoner/redir/redirect.htm?keyword="jlca1043_3"'
+                        res = resobj.ToString().Equals("true");
+                    }
+                    catch (System.Reflection.TargetInvocationException)
+                    {
+                        res = false;
+                        // FIXME - debug message
+                    }
+                    catch (System.UnauthorizedAccessException)
+                    {
+                        res = false;
+                        // FIXME - debug message
+                    }
+                    cursor = c - w.s_size;
+                    if (res)
+                        return w.result;
+                }
+                i = w.substring_i;
+                if (i < 0)
+                    return 0;
+            }
+        }
+        
+        /* to replace chars between c_bra and c_ket in current by the
+        * chars in s.
+        */
+        protected internal virtual int replace_s(int c_bra, int c_ket, System.String s)
+        {
+            int adjustment = s.Length - (c_ket - c_bra);
             if (current.Length > c_bra)
-    			current.Replace(current.ToString(c_bra, c_ket - c_bra), s, c_bra, c_ket - c_bra);
+                current.Replace(current.ToString(c_bra, c_ket - c_bra), s, c_bra, c_ket - c_bra);
             else
                 current.Append(s);
-			limit += adjustment;
-			if (cursor >= c_ket)
-				cursor += adjustment;
-			else if (cursor > c_bra)
-				cursor = c_bra;
-			return adjustment;
-		}
-		
-		protected internal virtual void  slice_check()
-		{
-			if (bra < 0 || bra > ket || ket > limit || limit > current.Length)
-			// this line could be removed
-			{
-				System.Console.Error.WriteLine("faulty slice operation");
-				// FIXME: report error somehow.
-				/*
-				fprintf(stderr, "faulty slice operation:\n");
-				debug(z, -1, 0);
-				exit(1);
-				*/
-			}
-		}
-		
-		protected internal virtual void  slice_from(System.String s)
-		{
-			slice_check();
-			replace_s(bra, ket, s);
-		}
-		
-		protected internal virtual void  slice_from(System.Text.StringBuilder s)
-		{
-			slice_from(s.ToString());
-		}
-		
-		protected internal virtual void  slice_del()
-		{
-			slice_from("");
-		}
-		
-		protected internal virtual void  insert(int c_bra, int c_ket, System.String s)
-		{
-			int adjustment = replace_s(c_bra, c_ket, s);
-			if (c_bra <= bra)
-				bra += adjustment;
-			if (c_bra <= ket)
-				ket += adjustment;
-		}
-		
-		protected internal virtual void  insert(int c_bra, int c_ket, System.Text.StringBuilder s)
-		{
-			insert(c_bra, c_ket, s.ToString());
-		}
-		
-		/* Copy the slice into the supplied StringBuffer */
-		protected internal virtual System.Text.StringBuilder slice_to(System.Text.StringBuilder s)
-		{
-			slice_check();
-			int len = ket - bra;
-			//// s.Replace(s.ToString(0, s.Length - 0), current.ToString(bra, ket), 0, s.Length - 0);
-			s.Remove(0, s.Length);
+            limit += adjustment;
+            if (cursor >= c_ket)
+                cursor += adjustment;
+            else if (cursor > c_bra)
+                cursor = c_bra;
+            return adjustment;
+        }
+        
+        protected internal virtual void  slice_check()
+        {
+            if (bra < 0 || bra > ket || ket > limit || limit > current.Length)
+            // this line could be removed
+            {
+                System.Console.Error.WriteLine("faulty slice operation");
+                // FIXME: report error somehow.
+                /*
+                fprintf(stderr, "faulty slice operation:\n");
+                debug(z, -1, 0);
+                exit(1);
+                */
+            }
+        }
+        
+        protected internal virtual void  slice_from(System.String s)
+        {
+            slice_check();
+            replace_s(bra, ket, s);
+        }
+        
+        protected internal virtual void  slice_from(System.Text.StringBuilder s)
+        {
+            slice_from(s.ToString());
+        }
+        
+        protected internal virtual void  slice_del()
+        {
+            slice_from("");
+        }
+        
+        protected internal virtual void  insert(int c_bra, int c_ket, System.String s)
+        {
+            int adjustment = replace_s(c_bra, c_ket, s);
+            if (c_bra <= bra)
+                bra += adjustment;
+            if (c_bra <= ket)
+                ket += adjustment;
+        }
+        
+        protected internal virtual void  insert(int c_bra, int c_ket, System.Text.StringBuilder s)
+        {
+            insert(c_bra, c_ket, s.ToString());
+        }
+        
+        /* Copy the slice into the supplied StringBuffer */
+        protected internal virtual System.Text.StringBuilder slice_to(System.Text.StringBuilder s)
+        {
+            slice_check();
+            int len = ket - bra;
+            //// s.Replace(s.ToString(0, s.Length - 0), current.ToString(bra, ket), 0, s.Length - 0);
+            s.Remove(0, s.Length);
             s.Append(current.ToString(bra, len));
-			return s;
-		}
-		
-		protected internal virtual System.Text.StringBuilder assign_to(System.Text.StringBuilder s)
-		{
-			//// s.Replace(s.ToString(0, s.Length - 0), current.ToString(0, limit), 0, s.Length - 0);
-			s.Remove(0, s.Length);
+            return s;
+        }
+        
+        protected internal virtual System.Text.StringBuilder assign_to(System.Text.StringBuilder s)
+        {
+            //// s.Replace(s.ToString(0, s.Length - 0), current.ToString(0, limit), 0, s.Length - 0);
+            s.Remove(0, s.Length);
             s.Append(current.ToString(0, limit));
-			return s;
-		}
-		
-		/*
-		extern void debug(struct SN_env * z, int number, int line_count)
-		{   int i;
-		int limit = SIZE(z->p);
-		//if (number >= 0) printf("%3d (line %4d): '", number, line_count);
-		if (number >= 0) printf("%3d (line %4d): [%d]'", number, line_count,limit);
-		for (i = 0; i <= limit; i++)
-		{   if (z->lb == i) printf("{");
-		if (z->bra == i) printf("[");
-		if (z->c == i) printf("|");
-		if (z->ket == i) printf("]");
-		if (z->l == i) printf("}");
-		if (i < limit)
-		{   int ch = z->p[i];
-		if (ch == 0) ch = '#';
-		printf("%c", ch);
-		}
-		}
-		printf("'\n");
-		}*/
-	}
-	
+            return s;
+        }
+        
+        /*
+        extern void debug(struct SN_env * z, int number, int line_count)
+        {   int i;
+        int limit = SIZE(z->p);
+        //if (number >= 0) printf("%3d (line %4d): '", number, line_count);
+        if (number >= 0) printf("%3d (line %4d): [%d]'", number, line_count,limit);
+        for (i = 0; i <= limit; i++)
+        {   if (z->lb == i) printf("{");
+        if (z->bra == i) printf("[");
+        if (z->c == i) printf("|");
+        if (z->ket == i) printf("]");
+        if (z->l == i) printf("}");
+        if (i < limit)
+        {   int ch = z->p[i];
+        if (ch == 0) ch = '#';
+        printf("%c", ch);
+        }
+        }
+        printf("'\n");
+        }*/
+    }
+    
 }
\ No newline at end of file
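
SnowballProgram keeps all stemming state in the (current, cursor, limit, limit_backward, bra, ket) fields: the forward primitives move cursor toward limit, the *_b variants move it back toward limit_backward, and bra/ket delimit the slice that slice_from/slice_del rewrite through replace_s. The adjustment replace_s returns, s.Length - (c_ket - c_bra), is the amount by which every later position shifts; for example, bra=2, ket=5 and slice_from("xy") yield 2 - 3 = -1, so limit and any cursor at or beyond ket drop by one. The in_grouping family tests character-class membership against a packed bitset in which each cell of the grouping array holds eight membership bits. A self-contained sketch mirroring that bit arithmetic (names are illustrative):

    // Mirrors in_grouping's membership test: bit (ch - min) of the packed
    // grouping array g decides whether ch belongs to the character class.
    static bool InGroup(char[] g, int min, int max, char ch)
    {
        if (ch < min || ch > max) return false;
        int bit = ch - min;                              // offset into the bitset
        return (g[bit >> 3] & (1 << (bit & 0x7))) != 0;  // 8 bits per cell
    }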

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/TestApp.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/TestApp.cs b/src/contrib/Snowball/SF/Snowball/TestApp.cs
index f757685..0977156 100644
--- a/src/contrib/Snowball/SF/Snowball/TestApp.cs
+++ b/src/contrib/Snowball/SF/Snowball/TestApp.cs
@@ -17,79 +17,79 @@
 using System;
 namespace SF.Snowball
 {
-	
-	public class TestApp
-	{
-		[STAThread]
-		public static void  Main(System.String[] args)
-		{
-			
-			if (args.Length < 2)
-			{
-				ExitWithUsage();
-			}
-			
-			System.Type stemClass = System.Type.GetType("SF.Snowball.Ext." + args[0] + "Stemmer");
-			SnowballProgram stemmer = (SnowballProgram) System.Activator.CreateInstance(stemClass);
-			System.Reflection.MethodInfo stemMethod = stemClass.GetMethod("stem", (new System.Type[0] == null)?new System.Type[0]:(System.Type[]) new System.Type[0]);
-			
-			System.IO.StreamReader reader;
-			reader = new System.IO.StreamReader(new System.IO.FileStream(args[1], System.IO.FileMode.Open, System.IO.FileAccess.Read), System.Text.Encoding.Default);
-			reader = new System.IO.StreamReader(reader.BaseStream, reader.CurrentEncoding);
-			
-			System.Text.StringBuilder input = new System.Text.StringBuilder();
-			
-			System.IO.Stream outstream = System.Console.OpenStandardOutput();
-			
-			if (args.Length > 2 && args[2].Equals("-o"))
-			{
-				outstream = new System.IO.FileStream(args[3], System.IO.FileMode.Create);
-			}
-			else if (args.Length > 2)
-			{
-				ExitWithUsage();
-			}
-			
-			System.IO.StreamWriter output = new System.IO.StreamWriter(outstream, System.Text.Encoding.Default);
-			output = new System.IO.StreamWriter(output.BaseStream, output.Encoding);
-			
-			int repeat = 1;
-			if (args.Length > 4)
-			{
-				repeat = System.Int32.Parse(args[4]);
-			}
-			
-			System.Object[] emptyArgs = new System.Object[0];
-			int character;
-			while ((character = reader.Read()) != - 1)
-			{
-				char ch = (char) character;
-				if (System.Char.IsWhiteSpace(ch))
-				{
-					if (input.Length > 0)
-					{
-						stemmer.SetCurrent(input.ToString());
-						for (int i = repeat; i != 0; i--)
-						{
-							stemMethod.Invoke(stemmer, (System.Object[]) emptyArgs);
-						}
-						output.Write(stemmer.GetCurrent());
-						output.Write('\n');
-						input.Remove(0, input.Length - 0);
-					}
-				}
-				else
-				{
-					input.Append(System.Char.ToLower(ch));
-				}
-			}
-			output.Flush();
-		}
-		
-		private static void  ExitWithUsage()
-		{
-			System.Console.Error.WriteLine("Usage: TestApp <stemmer name> <input file> [-o <output file>]");
-			System.Environment.Exit(1);
-		}
-	}
+    
+    public class TestApp
+    {
+        [STAThread]
+        public static void  Main(System.String[] args)
+        {
+            
+            if (args.Length < 2)
+            {
+                ExitWithUsage();
+            }
+            
+            System.Type stemClass = System.Type.GetType("SF.Snowball.Ext." + args[0] + "Stemmer");
+            SnowballProgram stemmer = (SnowballProgram) System.Activator.CreateInstance(stemClass);
+            System.Reflection.MethodInfo stemMethod = stemClass.GetMethod("stem", (new System.Type[0] == null)?new System.Type[0]:(System.Type[]) new System.Type[0]);
+            
+            System.IO.StreamReader reader;
+            reader = new System.IO.StreamReader(new System.IO.FileStream(args[1], System.IO.FileMode.Open, System.IO.FileAccess.Read), System.Text.Encoding.Default);
+            reader = new System.IO.StreamReader(reader.BaseStream, reader.CurrentEncoding);
+            
+            System.Text.StringBuilder input = new System.Text.StringBuilder();
+            
+            System.IO.Stream outstream = System.Console.OpenStandardOutput();
+            
+            if (args.Length > 2 && args[2].Equals("-o"))
+            {
+                outstream = new System.IO.FileStream(args[3], System.IO.FileMode.Create);
+            }
+            else if (args.Length > 2)
+            {
+                ExitWithUsage();
+            }
+            
+            System.IO.StreamWriter output = new System.IO.StreamWriter(outstream, System.Text.Encoding.Default);
+            output = new System.IO.StreamWriter(output.BaseStream, output.Encoding);
+            
+            int repeat = 1;
+            if (args.Length > 4)
+            {
+                repeat = System.Int32.Parse(args[4]);
+            }
+            
+            System.Object[] emptyArgs = new System.Object[0];
+            int character;
+            while ((character = reader.Read()) != - 1)
+            {
+                char ch = (char) character;
+                if (System.Char.IsWhiteSpace(ch))
+                {
+                    if (input.Length > 0)
+                    {
+                        stemmer.SetCurrent(input.ToString());
+                        for (int i = repeat; i != 0; i--)
+                        {
+                            stemMethod.Invoke(stemmer, (System.Object[]) emptyArgs);
+                        }
+                        output.Write(stemmer.GetCurrent());
+                        output.Write('\n');
+                        input.Remove(0, input.Length - 0);
+                    }
+                }
+                else
+                {
+                    input.Append(System.Char.ToLower(ch));
+                }
+            }
+            output.Flush();
+        }
+        
+        private static void  ExitWithUsage()
+        {
+            System.Console.Error.WriteLine("Usage: TestApp <stemmer name> <input file> [-o <output file>]");
+            System.Environment.Exit(1);
+        }
+    }
 }
\ No newline at end of file
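
TestApp resolves a stemmer type by name, feeds it whitespace-delimited, lower-cased tokens from the input file, and writes one stem per line. Note that it still locates the stem method reflectively under the lowercase name "stem", although the base class now exposes the abstract Stem() precisely to make that reflection unnecessary. A minimal direct-call sketch (the EnglishStemmer class name is an assumption; any generated stemmer in SF.Snowball.Ext would be used the same way):

    // Hypothetical direct use of a generated stemmer, bypassing reflection:
    var stemmer = new SF.Snowball.Ext.EnglishStemmer(); // assumed generated class
    stemmer.SetCurrent("running");
    stemmer.Stem();                                     // abstract on SnowballProgram
    System.Console.WriteLine(stemmer.GetCurrent());     // expected: "run"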

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/BBox/AreaSimilarity.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/BBox/AreaSimilarity.cs b/src/contrib/Spatial/BBox/AreaSimilarity.cs
index a4e25d8..2fd3314 100644
--- a/src/contrib/Spatial/BBox/AreaSimilarity.cs
+++ b/src/contrib/Spatial/BBox/AreaSimilarity.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -22,7 +22,7 @@ using Spatial4n.Core.Shapes;
 
 namespace Lucene.Net.Spatial.BBox
 {
-	/// <summary>
+    /// <summary>
     /// The algorithm is implemented as envelope on envelope overlays rather than
     /// complex polygon on complex polygon overlays.
     /// <p/>
@@ -46,186 +46,186 @@ namespace Lucene.Net.Spatial.BBox
     ///   SpatialRankingValueSource</a>.
     ///
     /// @lucene.experimental
-	/// </summary>
-	public class AreaSimilarity : BBoxSimilarity
-	{
-	   /*
-		* Properties associated with the query envelope
-		*/
-		private readonly Rectangle queryExtent;
-		private readonly double queryArea;
-
-		private readonly double targetPower;
-		private readonly double queryPower;
-
-		public AreaSimilarity(Rectangle queryExtent, double queryPower, double targetPower)
-		{
-			this.queryExtent = queryExtent;
-			this.queryArea = queryExtent.GetArea(null);
-
-			this.queryPower = queryPower;
-			this.targetPower = targetPower;
-
-			//  if (this.qryMinX > queryExtent.getMaxX()) {
-			//    this.qryCrossedDateline = true;
-			//    this.qryArea = Math.abs(qryMaxX + 360.0 - qryMinX) * Math.abs(qryMaxY - qryMinY);
-			//  } else {
-			//    this.qryArea = Math.abs(qryMaxX - qryMinX) * Math.abs(qryMaxY - qryMinY);
-			//  }
-		}
-
-		public AreaSimilarity(Rectangle queryExtent)
-			: this(queryExtent, 2.0, 0.5)
-		{
-		}
-
-		public String GetDelimiterQueryParameters()
-		{
-			return queryExtent + ";" + queryPower + ";" + targetPower;
-		}
-
-		public double Score(Rectangle target, Explanation exp)
-		{
-			if (target == null || queryArea <= 0)
-			{
-				return 0;
-			}
-			double targetArea = target.GetArea(null);
-			if (targetArea <= 0)
-			{
-				return 0;
-			}
-			double score = 0;
-
-			double top = Math.Min(queryExtent.GetMaxY(), target.GetMaxY());
-			double bottom = Math.Max(queryExtent.GetMinY(), target.GetMinY());
-			double height = top - bottom;
-			double width = 0;
-
-			// queries that cross the date line
-			if (queryExtent.GetCrossesDateLine())
-			{
-				// documents that cross the date line
-				if (target.GetCrossesDateLine())
-				{
-					double left = Math.Max(queryExtent.GetMinX(), target.GetMinX());
-					double right = Math.Min(queryExtent.GetMaxX(), target.GetMaxX());
-					width = right + 360.0 - left;
-				}
-				else
-				{
-					double qryWestLeft = Math.Max(queryExtent.GetMinX(), target.GetMaxX());
-					double qryWestRight = Math.Min(target.GetMaxX(), 180.0);
-					double qryWestWidth = qryWestRight - qryWestLeft;
-					if (qryWestWidth > 0)
-					{
-						width = qryWestWidth;
-					}
-					else
-					{
-						double qryEastLeft = Math.Max(target.GetMaxX(), -180.0);
-						double qryEastRight = Math.Min(queryExtent.GetMaxX(), target.GetMaxX());
-						double qryEastWidth = qryEastRight - qryEastLeft;
-						if (qryEastWidth > 0)
-						{
-							width = qryEastWidth;
-						}
-					}
-				}
-			}
-			else
-			{ // queries that do not cross the date line
-
-				if (target.GetCrossesDateLine())
-				{
-					double tgtWestLeft = Math.Max(queryExtent.GetMinX(), target.GetMinX());
-					double tgtWestRight = Math.Min(queryExtent.GetMaxX(), 180.0);
-					double tgtWestWidth = tgtWestRight - tgtWestLeft;
-					if (tgtWestWidth > 0)
-					{
-						width = tgtWestWidth;
-					}
-					else
-					{
-						double tgtEastLeft = Math.Max(queryExtent.GetMinX(), -180.0);
-						double tgtEastRight = Math.Min(queryExtent.GetMaxX(), target.GetMaxX());
-						double tgtEastWidth = tgtEastRight - tgtEastLeft;
-						if (tgtEastWidth > 0)
-						{
-							width = tgtEastWidth;
-						}
-					}
-				}
-				else
-				{
-					double left = Math.Max(queryExtent.GetMinX(), target.GetMinX());
-					double right = Math.Min(queryExtent.GetMaxX(), target.GetMaxX());
-					width = right - left;
-				}
-			}
-
-
-			// calculate the score
-			if ((width > 0) && (height > 0))
-			{
-				double intersectionArea = width * height;
-				double queryRatio = intersectionArea / queryArea;
-				double targetRatio = intersectionArea / targetArea;
-				double queryFactor = Math.Pow(queryRatio, queryPower);
-				double targetFactor = Math.Pow(targetRatio, targetPower);
-				score = queryFactor * targetFactor * 10000.0;
-
-				if (exp != null)
-				{
-					//        StringBuilder sb = new StringBuilder();
-					//        sb.append("\nscore=").append(score);
-					//        sb.append("\n  query=").append();
-					//        sb.append("\n  target=").append(target.toString());
-					//        sb.append("\n  intersectionArea=").append(intersectionArea);
-					//        
-					//        sb.append(" queryArea=").append(queryArea).append(" targetArea=").append(targetArea);
-					//        sb.append("\n  queryRatio=").append(queryRatio).append(" targetRatio=").append(targetRatio);
-					//        sb.append("\n  queryFactor=").append(queryFactor).append(" targetFactor=").append(targetFactor);
-					//        sb.append(" (queryPower=").append(queryPower).append(" targetPower=").append(targetPower).append(")");
-
-					exp.Value = (float) score;
-					exp.Description = GetType().Name;
-
-					Explanation e = null;
-
-					exp.AddDetail(e = new Explanation((float)intersectionArea, "IntersectionArea"));
-					e.AddDetail(new Explanation((float)width, "width; Query: " + queryExtent));
-					e.AddDetail(new Explanation((float)height, "height; Target: " + target));
-
-					exp.AddDetail(e = new Explanation((float)queryFactor, "Query"));
-					e.AddDetail(new Explanation((float)queryArea, "area"));
-					e.AddDetail(new Explanation((float)queryRatio, "ratio"));
-					e.AddDetail(new Explanation((float)queryPower, "power"));
-
-					exp.AddDetail(e = new Explanation((float)targetFactor, "Target"));
-					e.AddDetail(new Explanation((float)targetArea, "area"));
-					e.AddDetail(new Explanation((float)targetRatio, "ratio"));
-					e.AddDetail(new Explanation((float)targetPower, "power"));
-				}
-			}
-			else if (exp != null)
-			{
-				exp.Value = 0;
-				exp.Description = "Shape does not intersect";
-			}
-			return score;
-		}
-
-		public override bool Equals(object obj)
-		{
-			var other = obj as AreaSimilarity;
-			if (other == null) return false;
-			return GetDelimiterQueryParameters().Equals(other.GetDelimiterQueryParameters());
-		}
-
-		public override int GetHashCode()
-		{
-			return GetDelimiterQueryParameters().GetHashCode();
-		} 
-	}
+    /// </summary>
+    public class AreaSimilarity : BBoxSimilarity
+    {
+       /*
+        * Properties associated with the query envelope
+        */
+        private readonly Rectangle queryExtent;
+        private readonly double queryArea;
+
+        private readonly double targetPower;
+        private readonly double queryPower;
+
+        public AreaSimilarity(Rectangle queryExtent, double queryPower, double targetPower)
+        {
+            this.queryExtent = queryExtent;
+            this.queryArea = queryExtent.GetArea(null);
+
+            this.queryPower = queryPower;
+            this.targetPower = targetPower;
+
+            //  if (this.qryMinX > queryExtent.getMaxX()) {
+            //    this.qryCrossedDateline = true;
+            //    this.qryArea = Math.abs(qryMaxX + 360.0 - qryMinX) * Math.abs(qryMaxY - qryMinY);
+            //  } else {
+            //    this.qryArea = Math.abs(qryMaxX - qryMinX) * Math.abs(qryMaxY - qryMinY);
+            //  }
+        }
+
+        public AreaSimilarity(Rectangle queryExtent)
+            : this(queryExtent, 2.0, 0.5)
+        {
+        }
+
+        public String GetDelimiterQueryParameters()
+        {
+            return queryExtent + ";" + queryPower + ";" + targetPower;
+        }
+
+        public double Score(Rectangle target, Explanation exp)
+        {
+            if (target == null || queryArea <= 0)
+            {
+                return 0;
+            }
+            double targetArea = target.GetArea(null);
+            if (targetArea <= 0)
+            {
+                return 0;
+            }
+            double score = 0;
+
+            double top = Math.Min(queryExtent.GetMaxY(), target.GetMaxY());
+            double bottom = Math.Max(queryExtent.GetMinY(), target.GetMinY());
+            double height = top - bottom;
+            double width = 0;
+
+            // queries that cross the date line
+            if (queryExtent.GetCrossesDateLine())
+            {
+                // documents that cross the date line
+                if (target.GetCrossesDateLine())
+                {
+                    double left = Math.Max(queryExtent.GetMinX(), target.GetMinX());
+                    double right = Math.Min(queryExtent.GetMaxX(), target.GetMaxX());
+                    width = right + 360.0 - left;
+                }
+                else
+                {
+                    double qryWestLeft = Math.Max(queryExtent.GetMinX(), target.GetMaxX());
+                    double qryWestRight = Math.Min(target.GetMaxX(), 180.0);
+                    double qryWestWidth = qryWestRight - qryWestLeft;
+                    if (qryWestWidth > 0)
+                    {
+                        width = qryWestWidth;
+                    }
+                    else
+                    {
+                        double qryEastLeft = Math.Max(target.GetMaxX(), -180.0);
+                        double qryEastRight = Math.Min(queryExtent.GetMaxX(), target.GetMaxX());
+                        double qryEastWidth = qryEastRight - qryEastLeft;
+                        if (qryEastWidth > 0)
+                        {
+                            width = qryEastWidth;
+                        }
+                    }
+                }
+            }
+            else
+            { // queries that do not cross the date line
+
+                if (target.GetCrossesDateLine())
+                {
+                    double tgtWestLeft = Math.Max(queryExtent.GetMinX(), target.GetMinX());
+                    double tgtWestRight = Math.Min(queryExtent.GetMaxX(), 180.0);
+                    double tgtWestWidth = tgtWestRight - tgtWestLeft;
+                    if (tgtWestWidth > 0)
+                    {
+                        width = tgtWestWidth;
+                    }
+                    else
+                    {
+                        double tgtEastLeft = Math.Max(queryExtent.GetMinX(), -180.0);
+                        double tgtEastRight = Math.Min(queryExtent.GetMaxX(), target.GetMaxX());
+                        double tgtEastWidth = tgtEastRight - tgtEastLeft;
+                        if (tgtEastWidth > 0)
+                        {
+                            width = tgtEastWidth;
+                        }
+                    }
+                }
+                else
+                {
+                    double left = Math.Max(queryExtent.GetMinX(), target.GetMinX());
+                    double right = Math.Min(queryExtent.GetMaxX(), target.GetMaxX());
+                    width = right - left;
+                }
+            }
+
+
+            // calculate the score
+            if ((width > 0) && (height > 0))
+            {
+                double intersectionArea = width * height;
+                double queryRatio = intersectionArea / queryArea;
+                double targetRatio = intersectionArea / targetArea;
+                double queryFactor = Math.Pow(queryRatio, queryPower);
+                double targetFactor = Math.Pow(targetRatio, targetPower);
+                score = queryFactor * targetFactor * 10000.0;
+
+                if (exp != null)
+                {
+                    //        StringBuilder sb = new StringBuilder();
+                    //        sb.append("\nscore=").append(score);
+                    //        sb.append("\n  query=").append();
+                    //        sb.append("\n  target=").append(target.toString());
+                    //        sb.append("\n  intersectionArea=").append(intersectionArea);
+                    //        
+                    //        sb.append(" queryArea=").append(queryArea).append(" targetArea=").append(targetArea);
+                    //        sb.append("\n  queryRatio=").append(queryRatio).append(" targetRatio=").append(targetRatio);
+                    //        sb.append("\n  queryFactor=").append(queryFactor).append(" targetFactor=").append(targetFactor);
+                    //        sb.append(" (queryPower=").append(queryPower).append(" targetPower=").append(targetPower).append(")");
+
+                    exp.Value = (float) score;
+                    exp.Description = GetType().Name;
+
+                    Explanation e = null;
+
+                    exp.AddDetail(e = new Explanation((float)intersectionArea, "IntersectionArea"));
+                    e.AddDetail(new Explanation((float)width, "width; Query: " + queryExtent));
+                    e.AddDetail(new Explanation((float)height, "height; Target: " + target));
+
+                    exp.AddDetail(e = new Explanation((float)queryFactor, "Query"));
+                    e.AddDetail(new Explanation((float)queryArea, "area"));
+                    e.AddDetail(new Explanation((float)queryRatio, "ratio"));
+                    e.AddDetail(new Explanation((float)queryPower, "power"));
+
+                    exp.AddDetail(e = new Explanation((float)targetFactor, "Target"));
+                    e.AddDetail(new Explanation((float)targetArea, "area"));
+                    e.AddDetail(new Explanation((float)targetRatio, "ratio"));
+                    e.AddDetail(new Explanation((float)targetPower, "power"));
+                }
+            }
+            else if (exp != null)
+            {
+                exp.Value = 0;
+                exp.Description = "Shape does not intersect";
+            }
+            return score;
+        }
+
+        public override bool Equals(object obj)
+        {
+            var other = obj as AreaSimilarity;
+            if (other == null) return false;
+            return GetDelimiterQueryParameters().Equals(other.GetDelimiterQueryParameters());
+        }
+
+        public override int GetHashCode()
+        {
+            return GetDelimiterQueryParameters().GetHashCode();
+        } 
+    }
 }
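
The score computed above reduces to (intersectionArea/queryArea)^queryPower * (intersectionArea/targetArea)^targetPower * 10000, so with the default powers (2.0, 0.5) poor coverage of the query box is penalized quadratically while coverage of the target box is dampened. A worked example with illustrative numbers:

    // Illustrative numbers only; mirrors the formula in Score() above.
    double queryArea = 100, targetArea = 50, intersectionArea = 25;
    double queryRatio  = intersectionArea / queryArea;   // 0.25
    double targetRatio = intersectionArea / targetArea;  // 0.50
    double score = System.Math.Pow(queryRatio, 2.0)      // default queryPower
                 * System.Math.Pow(targetRatio, 0.5)     // default targetPower
                 * 10000.0;                              // ~441.9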

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/BBox/BBoxSimilarity.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/BBox/BBoxSimilarity.cs b/src/contrib/Spatial/BBox/BBoxSimilarity.cs
index 2f347c8..a18baa4 100644
--- a/src/contrib/Spatial/BBox/BBoxSimilarity.cs
+++ b/src/contrib/Spatial/BBox/BBoxSimilarity.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -23,8 +23,8 @@ namespace Lucene.Net.Spatial.BBox
     /// <summary>
     /// Abstraction of the calculation used to determine how similar two Bounding Boxes are.
     /// </summary>
-	public interface BBoxSimilarity
-	{
-		double Score(Rectangle extent, Explanation exp);
-	}
+    public interface BBoxSimilarity
+    {
+        double Score(Rectangle extent, Explanation exp);
+    }
 }
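
Any box-scoring strategy can be plugged in through this interface; the Explanation argument may be null, and when present it is populated the way AreaSimilarity does above. A minimal hypothetical implementation, for illustration only:

    // Hypothetical constant scorer implementing the interface above.
    public class ConstantBBoxSimilarity : BBoxSimilarity
    {
        public double Score(Rectangle extent, Explanation exp)
        {
            if (exp != null)
            {
                exp.Value = 1f;                        // setters as used by AreaSimilarity
                exp.Description = "constant score";
            }
            return 1.0;
        }
    }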

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/BBox/BBoxSimilarityValueSource.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/BBox/BBoxSimilarityValueSource.cs b/src/contrib/Spatial/BBox/BBoxSimilarityValueSource.cs
index 093482b..6939274 100644
--- a/src/contrib/Spatial/BBox/BBoxSimilarityValueSource.cs
+++ b/src/contrib/Spatial/BBox/BBoxSimilarityValueSource.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -24,41 +24,41 @@ using Spatial4n.Core.Shapes.Impl;
 
 namespace Lucene.Net.Spatial.BBox
 {
-	public class BBoxSimilarityValueSource : ValueSource
-	{
-		private readonly BBoxStrategy strategy;
-		private readonly BBoxSimilarity similarity;
+    public class BBoxSimilarityValueSource : ValueSource
+    {
+        private readonly BBoxStrategy strategy;
+        private readonly BBoxSimilarity similarity;
 
-		public BBoxSimilarityValueSource(BBoxStrategy strategy, BBoxSimilarity similarity)
-		{
-			this.strategy = strategy;
-			this.similarity = similarity;
-		}
+        public BBoxSimilarityValueSource(BBoxStrategy strategy, BBoxSimilarity similarity)
+        {
+            this.strategy = strategy;
+            this.similarity = similarity;
+        }
 
-		private class BBoxSimilarityValueSourceDocValues : DocValues
-		{
-			private readonly BBoxSimilarityValueSource _enclosingInstance;
-		    private readonly Rectangle rect;
-		    private readonly double[] minX;
-			private readonly double[] minY;
-			private readonly double[] maxX;
-			private readonly double[] maxY;
+        private class BBoxSimilarityValueSourceDocValues : DocValues
+        {
+            private readonly BBoxSimilarityValueSource _enclosingInstance;
+            private readonly Rectangle rect;
+            private readonly double[] minX;
+            private readonly double[] minY;
+            private readonly double[] maxX;
+            private readonly double[] maxY;
 
-			private readonly IBits validMinX, validMaxX;
+            private readonly IBits validMinX, validMaxX;
 
-			public BBoxSimilarityValueSourceDocValues(IndexReader reader, BBoxSimilarityValueSource enclosingInstance)
-			{
-				_enclosingInstance = enclosingInstance;
+            public BBoxSimilarityValueSourceDocValues(IndexReader reader, BBoxSimilarityValueSource enclosingInstance)
+            {
+                _enclosingInstance = enclosingInstance;
                 rect = _enclosingInstance.strategy.GetSpatialContext().MakeRectangle(0, 0, 0, 0); //reused
 
-			    minX = FieldCache_Fields.DEFAULT.GetDoubles(reader, enclosingInstance.strategy.field_minX/*, true*/);
-				minY = FieldCache_Fields.DEFAULT.GetDoubles(reader, enclosingInstance.strategy.field_minY/*, true*/);
-				maxX = FieldCache_Fields.DEFAULT.GetDoubles(reader, enclosingInstance.strategy.field_maxX/*, true*/);
-				maxY = FieldCache_Fields.DEFAULT.GetDoubles(reader, enclosingInstance.strategy.field_maxY/*, true*/);
+                minX = FieldCache_Fields.DEFAULT.GetDoubles(reader, enclosingInstance.strategy.field_minX/*, true*/);
+                minY = FieldCache_Fields.DEFAULT.GetDoubles(reader, enclosingInstance.strategy.field_minY/*, true*/);
+                maxX = FieldCache_Fields.DEFAULT.GetDoubles(reader, enclosingInstance.strategy.field_maxX/*, true*/);
+                maxY = FieldCache_Fields.DEFAULT.GetDoubles(reader, enclosingInstance.strategy.field_maxY/*, true*/);
 
-				validMinX = FieldCache_Fields.DEFAULT.GetDocsWithField(reader, enclosingInstance.strategy.field_minX);
-				validMaxX = FieldCache_Fields.DEFAULT.GetDocsWithField(reader, enclosingInstance.strategy.field_maxX);
-			}
+                validMinX = FieldCache_Fields.DEFAULT.GetDocsWithField(reader, enclosingInstance.strategy.field_minX);
+                validMaxX = FieldCache_Fields.DEFAULT.GetDocsWithField(reader, enclosingInstance.strategy.field_maxX);
+            }
 
             public override float FloatVal(int doc)
             {
@@ -76,47 +76,47 @@ namespace Lucene.Net.Spatial.BBox
                 }
             }
 
-		    public override Explanation Explain(int doc)
-			{
-				// make sure it has minX and area
-				if (validMinX.Get(doc) && validMaxX.Get(doc))
-				{
-					rect.Reset(
-						minX[doc], maxX[doc],
-						minY[doc], maxY[doc]);
-					var exp = new Explanation();
-					_enclosingInstance.similarity.Score(rect, exp);
-					return exp;
-				}
-				return new Explanation(0, "No BBox");
-			}
+            public override Explanation Explain(int doc)
+            {
+                // make sure it has minX and area
+                if (validMinX.Get(doc) && validMaxX.Get(doc))
+                {
+                    rect.Reset(
+                        minX[doc], maxX[doc],
+                        minY[doc], maxY[doc]);
+                    var exp = new Explanation();
+                    _enclosingInstance.similarity.Score(rect, exp);
+                    return exp;
+                }
+                return new Explanation(0, "No BBox");
+            }
 
-			public override string ToString(int doc)
-			{
-				return _enclosingInstance.Description() + "=" + FloatVal(doc);
-			}
-		}
+            public override string ToString(int doc)
+            {
+                return _enclosingInstance.Description() + "=" + FloatVal(doc);
+            }
+        }
 
-		public override DocValues GetValues(IndexReader reader)
-		{
-			return new BBoxSimilarityValueSourceDocValues(reader, this);
-		}
+        public override DocValues GetValues(IndexReader reader)
+        {
+            return new BBoxSimilarityValueSourceDocValues(reader, this);
+        }
 
-		public override string Description()
-		{
-			return "BBoxSimilarityValueSource(" + similarity + ")";
-		}
+        public override string Description()
+        {
+            return "BBoxSimilarityValueSource(" + similarity + ")";
+        }
 
-		public override bool Equals(object o)
-		{
-			var other = o as BBoxSimilarityValueSource;
-			if (other == null) return false;
-			return similarity.Equals(other.similarity);
-		}
+        public override bool Equals(object o)
+        {
+            var other = o as BBoxSimilarityValueSource;
+            if (other == null) return false;
+            return similarity.Equals(other.similarity);
+        }
 
-		public override int GetHashCode()
-		{
-			return typeof(BBoxSimilarityValueSource).GetHashCode() + similarity.GetHashCode();
-		}
-	}
+        public override int GetHashCode()
+        {
+            return typeof(BBoxSimilarityValueSource).GetHashCode() + similarity.GetHashCode();
+        }
+    }
 }


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/DistributedSearch/Distributed/Configuration/CurrentIndex.cs
----------------------------------------------------------------------
diff --git a/src/contrib/DistributedSearch/Distributed/Configuration/CurrentIndex.cs b/src/contrib/DistributedSearch/Distributed/Configuration/CurrentIndex.cs
index eee771d..325207e 100644
--- a/src/contrib/DistributedSearch/Distributed/Configuration/CurrentIndex.cs
+++ b/src/contrib/DistributedSearch/Distributed/Configuration/CurrentIndex.cs
@@ -31,12 +31,12 @@ using Lucene.Net.Distributed;
 
 namespace Lucene.Net.Distributed.Configuration
 {
-	public enum IndexSetting
-	{
-		NoSetting	= 0,
-		IndexA		= 1,
-		IndexB		= 2
-	}
+    public enum IndexSetting
+    {
+        NoSetting    = 0,
+        IndexA        = 1,
+        IndexB        = 2
+    }
 
     /// <summary>
     /// Definition of current index information managed by the 
@@ -61,280 +61,280 @@ namespace Lucene.Net.Distributed.Configuration
     /// </code>
     /// </summary>
     public class CurrentIndex
-	{
-		#region Variables
-		private static readonly string CURRENTINDEX = "currentIndex";
-		private static readonly string INDEX_A = "indexA";
-		private static readonly string INDEX_B = "indexB";
-		private static readonly string TOGGLE = "toggle";
-		private string _strLocalPath;
-		private string _strStatusDir;
-		private string _strIndexAPath;
-		private string _strIndexBPath;
-		private bool _bIndexChanged=false;
+    {
+        #region Variables
+        private static readonly string CURRENTINDEX = "currentIndex";
+        private static readonly string INDEX_A = "indexA";
+        private static readonly string INDEX_B = "indexB";
+        private static readonly string TOGGLE = "toggle";
+        private string _strLocalPath;
+        private string _strStatusDir;
+        private string _strIndexAPath;
+        private string _strIndexBPath;
+        private bool _bIndexChanged=false;
 
         private int _mergeFactor = (ConfigurationManager.AppSettings["IndexMergeFactor"] != null ? Convert.ToInt32(ConfigurationManager.AppSettings["IndexMergeFactor"]) : 5);
         private int _maxMergeDocs = (ConfigurationManager.AppSettings["IndexMaxMergeDocs"] != null ? Convert.ToInt32(ConfigurationManager.AppSettings["IndexMaxMergeDocs"]) : 9999999);
 
-		#endregion
-
-		#region Constructors
-		/// <summary>
-		/// Constructs a new CurrentIndex using the XmlNode value (from IndexSetConfigurationHandler configuration)
-		/// </summary>
-		/// <param name="node">XmlNode containing configuration information</param>
-		/// <param name="strLocalPath">Local filesystem path to source index</param>
-		public CurrentIndex(XmlNode node, string strLocalPath)
-		{
-			this._strLocalPath=strLocalPath;
-			this.LoadValues(node);
-		}
-
-		/// <summary>
-		/// Constructs a shell CurrentIndex. Use this constructor to interact 
-		/// with the underlying status and toggle files ONLY.
-		/// </summary>
-		/// <param name="sStatusDir">Filesystem path to the status and toggle files for an index</param>
-		public CurrentIndex(string sStatusDir)
-		{
-			this._strStatusDir=sStatusDir;
-		}
-		#endregion
-
-		#region Internals
+        #endregion
+
+        #region Constructors
+        /// <summary>
+        /// Constructs a new CurrentIndex using the XmlNode value (from IndexSetConfigurationHandler configuration)
+        /// </summary>
+        /// <param name="node">XmlNode containing configuration information</param>
+        /// <param name="strLocalPath">Local filesystem path to source index</param>
+        public CurrentIndex(XmlNode node, string strLocalPath)
+        {
+            this._strLocalPath=strLocalPath;
+            this.LoadValues(node);
+        }
+
+        /// <summary>
+        /// Constructs a shell CurrentIndex. Use this constructor to interact 
+        /// with the underlying status and toggle files ONLY.
+        /// </summary>
+        /// <param name="sStatusDir">Filesystem path to the status and toggle files for an index</param>
+        public CurrentIndex(string sStatusDir)
+        {
+            this._strStatusDir=sStatusDir;
+        }
+        #endregion
+
+        #region Internals
         /// <summary>
         /// Internal routine for use by constructor that accepts a configuration
         /// entry structured as XmlNode.
         /// </summary>
         /// <param name="node">XmlNode containing configuration information</param>
-		internal void LoadValues(XmlNode node)
-		{
-			foreach (XmlNode c in node.ChildNodes)
-			{
-				if (c.Name.ToLower()=="targetpath")
-				{
-					this._strIndexAPath = c.Attributes["indexA"].Value;
-					this._strIndexBPath = c.Attributes["indexB"].Value;
-				}
-				else if (c.Name.ToLower()=="statusdir")
-				{
-					this._strStatusDir = c.Attributes["value"].Value;
-				}
-			}
+        internal void LoadValues(XmlNode node)
+        {
+            foreach (XmlNode c in node.ChildNodes)
+            {
+                if (c.Name.ToLower()=="targetpath")
+                {
+                    this._strIndexAPath = c.Attributes["indexA"].Value;
+                    this._strIndexBPath = c.Attributes["indexB"].Value;
+                }
+                else if (c.Name.ToLower()=="statusdir")
+                {
+                    this._strStatusDir = c.Attributes["value"].Value;
+                }
+            }
             this.CheckValidConfiguration(node);
-		}
-		#endregion
-
-		#region Public properties
-		/// <summary>
-		/// Filesystem path to the local source for an index; this is the path to the master index.
-		/// </summary>
-		public string LocalPath
-		{
-			get {return this._strLocalPath;}
-		}
-		/// <summary>
-		/// Filesystem path to a LuceneServer's status and toggle file for a given IndexSet
-		/// </summary>
-		public string StatusDirectory
-		{
-			get {return this._strStatusDir;}
-		}
-
-		/// <summary>
-		/// Indicates the current index directory (IndexSetting enum) in use (the online set)
-		/// </summary>
-		public IndexSetting IndexSetting
-		{
-			get
-			{
-				string input=(this.GetCurrentIndex());
-				return (input==CurrentIndex.INDEX_A ? IndexSetting.IndexA : (input==CurrentIndex.INDEX_B ? IndexSetting.IndexB : IndexSetting.IndexA));
-			}
-		}
-
-		/// <summary>
-		/// Indicates the index directory to be used in any index searcher refresh
-		/// by determining if any updates have been applied
-		/// </summary>
-		public IndexSetting IndexSettingRefresh
-		{
-			get
-			{
-				if (this.HasChanged)
-				{
-					return (this.IndexSetting==IndexSetting.IndexA ? IndexSetting.IndexB : (this.IndexSetting==IndexSetting.IndexB ? IndexSetting.IndexA : IndexSetting.IndexB ));
-				}
-				else
-				{
-					return this.IndexSetting;
-				}
-			}
-		}
-
-		/// <summary>
-		/// Indicates if the current index permits updated indexes to be copied to CopyTargetPath
-		/// </summary>
-		public bool CanCopy
-		{
-			get {return (!this.GetToggle() && (this.LocalIndexVersion!=this.TargetIndexVersion));}
-		}
-
-		/// <summary>
-		/// Indicates if the current index has pending updates (in the offline directory) to be used by an index searcher
+        }
+        #endregion
+
+        #region Public properties
+        /// <summary>
+        /// Filesystem path to the local source for an index; this is the path to the master index.
+        /// </summary>
+        public string LocalPath
+        {
+            get {return this._strLocalPath;}
+        }
+        /// <summary>
+        /// Filesystem path to a LuceneServer's status and toggle file for a given IndexSet
+        /// </summary>
+        public string StatusDirectory
+        {
+            get {return this._strStatusDir;}
+        }
+
+        /// <summary>
+        /// Indicates the current index directory (IndexSetting enum) in use (the online set)
+        /// </summary>
+        public IndexSetting IndexSetting
+        {
+            get
+            {
+                string input=(this.GetCurrentIndex());
+                return (input == CurrentIndex.INDEX_B ? IndexSetting.IndexB : IndexSetting.IndexA);
+            }
+        }
+
+        /// <summary>
+        /// Indicates the index directory to be used in any index searcher refresh
+        /// by determining if any updates have been applied
+        /// </summary>
+        public IndexSetting IndexSettingRefresh
+        {
+            get
+            {
+                if (this.HasChanged)
+                {
+                    return (this.IndexSetting == IndexSetting.IndexA ? IndexSetting.IndexB : IndexSetting.IndexA);
+                }
+                else
+                {
+                    return this.IndexSetting;
+                }
+            }
+        }
+
+        /// <summary>
+        /// Indicates if the current index permits updated indexes to be copied to CopyTargetPath
+        /// </summary>
+        public bool CanCopy
+        {
+            get {return (!this.GetToggle() && (this.LocalIndexVersion!=this.TargetIndexVersion));}
+        }
+
+        /// <summary>
+        /// Indicates if the current index has pending updates (in the offline directory) to be used by an index searcher
         /// in a refresh evaluation
-		/// </summary>
-		public bool HasChanged
-		{
-			get {return this.GetToggle();}
-		}
-
-		/// <summary>
-		/// The target directory path to be used when updating the offline index
-		/// </summary>
-		public string CopyTargetPath
-		{
-			get {return (this.IndexSetting==IndexSetting.IndexA ? this._strIndexBPath : (this.IndexSetting==IndexSetting.IndexB ? this._strIndexAPath : ""));}
-		}
-		#endregion
-
-		#region Public methods
-		/// <summary>
-		/// Method that executes a filesystem copy of all directory files from a local path to 
+        /// </summary>
+        public bool HasChanged
+        {
+            get {return this.GetToggle();}
+        }
+
+        /// <summary>
+        /// The target directory path to be used when updating the offline index
+        /// </summary>
+        public string CopyTargetPath
+        {
+            get {return (this.IndexSetting==IndexSetting.IndexA ? this._strIndexBPath : (this.IndexSetting==IndexSetting.IndexB ? this._strIndexAPath : ""));}
+        }
+        #endregion
+
+        #region Public methods
+        /// <summary>
+        /// Method that executes a filesystem copy of all directory files from a local path to 
         /// the proper offline index.  This method ensures no conflicts occur with the online index.
-		/// </summary>
-		/// <returns>bool</returns>
-		public bool Copy()
-		{
-			try
-			{
-				if (this.CanCopy && this.CopyTargetPath!="")
-				{
-					this.DeleteDirectoryFiles(this.CopyTargetPath);
-					this.CopyDirectory(this._strLocalPath, this.CopyTargetPath);
-					return true;
-				}
-				else
-				{
-					return false;
-				}
-			}
-			catch (Exception e)
-			{
+        /// </summary>
+        /// <returns>bool</returns>
+        public bool Copy()
+        {
+            try
+            {
+                if (this.CanCopy && this.CopyTargetPath!="")
+                {
+                    this.DeleteDirectoryFiles(this.CopyTargetPath);
+                    this.CopyDirectory(this._strLocalPath, this.CopyTargetPath);
+                    return true;
+                }
+                else
+                {
+                    return false;
+                }
+            }
+            catch (Exception e)
+            {
                 //Do something with e
-				return false;
-			}
-		}
+                return false;
+            }
+        }
 
         /// <summary>
         /// Method that executes a filesystem copy of updated or new files from a local path to 
         /// the proper offline index.  This method ensures no conflicts occur with the online index.
         /// </summary>
         /// <returns></returns>
-		public bool CopyIncremental()
-		{
-			try
-			{
-				if (this.CanCopy && this.CopyTargetPath!="")
-				{
-					this.CopyDirectoryIncremental(this._strLocalPath, this.CopyTargetPath);
-					return true;
-				}
-				else
-				{
-					return false;
-				}
-			}
-			catch (Exception e)
-			{
+        public bool CopyIncremental()
+        {
+            try
+            {
+                if (this.CanCopy && this.CopyTargetPath!="")
+                {
+                    this.CopyDirectoryIncremental(this._strLocalPath, this.CopyTargetPath);
+                    return true;
+                }
+                else
+                {
+                    return false;
+                }
+            }
+            catch (Exception e)
+            {
                 //Do something with e
                 return false;
-			}
-		}
-
-		/// <summary>
-		/// Takes a name/value pair collection to be used in updating an index. 
-		/// Deletes are necessary to ensure no duplication occurs within the index.
-		/// </summary>
-		/// <param name="nvcDeleteCollection">Set of record IDs (with underlying field name) to be applied for index updating</param>
-		public void ProcessLocalIndexDeletes(NameValueCollection nvcDeleteCollection)
-		{
-			if (IndexReader.IndexExists(this._strLocalPath) && nvcDeleteCollection.Count>0)
-			{
-				IndexReader idxDeleter = IndexReader.Open(this._strLocalPath);
-				string[] arKeys = nvcDeleteCollection.AllKeys;
-				int xDelete=0;
-				for (int k=0;k<arKeys.Length;k++)
-				{
-					string[] arKeyValues=nvcDeleteCollection.GetValues(arKeys[k]);
-					for (int v=0;v<arKeyValues.Length;v++)
-						xDelete=idxDeleter.DeleteDocuments(new Term(arKeys[k].ToString(),arKeyValues[v].ToString()));
-				}
-				idxDeleter.Close();
-			}
-		}
-
-		/// <summary>
-		/// Executes a loop on the Documents arraylist, adding each one to the index with the associated analyzer.
-		/// </summary>
-		/// <param name="oAnalyzer">Analyzer to be used in index document addition</param>
-		/// <param name="alAddDocuments">Arraylist of Lucene Document objects to be inserted in the index</param>
-		/// <param name="bCompoundFile">Setting to dictate if the index should use compound format</param>
-		public void ProcessLocalIndexAdditions(Analyzer oAnalyzer, Hashtable htAddDocuments, bool bCompoundFile)
-		{
-			IndexWriter idxWriter = this.GetIndexWriter(this._strLocalPath, oAnalyzer, bCompoundFile);
+            }
+        }
+
+        /// <summary>
+        /// Takes a name/value pair collection to be used in updating an index. 
+        /// Deletes are necessary to ensure no duplication occurs within the index.
+        /// </summary>
+        /// <param name="nvcDeleteCollection">Set of record IDs (with underlying field name) to be applied for index updating</param>
+        public void ProcessLocalIndexDeletes(NameValueCollection nvcDeleteCollection)
+        {
+            if (IndexReader.IndexExists(this._strLocalPath) && nvcDeleteCollection.Count>0)
+            {
+                IndexReader idxDeleter = IndexReader.Open(this._strLocalPath);
+                string[] arKeys = nvcDeleteCollection.AllKeys;
+                int xDelete=0;
+                for (int k=0;k<arKeys.Length;k++)
+                {
+                    string[] arKeyValues=nvcDeleteCollection.GetValues(arKeys[k]);
+                    for (int v=0;v<arKeyValues.Length;v++)
+                        xDelete = idxDeleter.DeleteDocuments(new Term(arKeys[k], arKeyValues[v]));
+                }
+                idxDeleter.Close();
+            }
+        }
+
+        /// <summary>
+        /// Iterates the supplied Document/Analyzer hashtable, adding each document to the index with its associated analyzer.
+        /// </summary>
+        /// <param name="oAnalyzer">Analyzer to be used in index document addition</param>
+        /// <param name="alAddDocuments">Arraylist of Lucene Document objects to be inserted in the index</param>
+        /// <param name="bCompoundFile">Setting to dictate if the index should use compound format</param>
+        public void ProcessLocalIndexAdditions(Analyzer oAnalyzer, Hashtable htAddDocuments, bool bCompoundFile)
+        {
+            IndexWriter idxWriter = this.GetIndexWriter(this._strLocalPath, oAnalyzer, bCompoundFile);
             idxWriter.SetMergeFactor(this._mergeFactor);
             idxWriter.SetMaxMergeDocs(this._maxMergeDocs);
 
-			foreach (DictionaryEntry de in htAddDocuments)
-			{
-				Document d = (Document)de.Key;
-				Analyzer a = (Analyzer)de.Value;
-				idxWriter.AddDocument(d,a);
-			}
-			idxWriter.Close();
-		}
-
-		/// <summary>
-		/// Single method to be used by a searchhost to indicate an index refresh has completed.
-		/// </summary>
-		public void IndexRefresh()
-		{
-			if (this.HasChanged)
-			{
-				this.SetCurrentIndex(this.IndexSettingRefresh);
-				this.SetToggle(false);
-			}
-		}
-
-		/// <summary>
-		/// Single method to be used by an index updater to indicate an index update has completed.
-		/// </summary>
-		public void UpdateRefresh()
-		{
-			this.SetToggle(true);
-		}
-		#endregion
-
-		#region Private properties
-		/// <summary>
-		/// The filesystem path to the underlying index status file
-		/// </summary>
-		private string CurrentIndexFile
-		{
-			get {return (this._strStatusDir+(this._strStatusDir.EndsWith(@"\") ? "" : @"\")+CURRENTINDEX);}
-		}
-		/// <summary>
-		/// The filesystem path to the underlying index toggle file
-		/// </summary>
-		private string ToggleFile
-		{
-			get {return (this._strStatusDir+(this._strStatusDir.EndsWith(@"\") ? "" : @"\")+TOGGLE);}
-		}
-
-		#endregion
-
-		#region Private methods
+            foreach (DictionaryEntry de in htAddDocuments)
+            {
+                Document d = (Document)de.Key;
+                Analyzer a = (Analyzer)de.Value;
+                idxWriter.AddDocument(d,a);
+            }
+            idxWriter.Close();
+        }
+
+        /// <summary>
+        /// Single method to be used by a search host to indicate an index refresh has completed.
+        /// </summary>
+        public void IndexRefresh()
+        {
+            if (this.HasChanged)
+            {
+                this.SetCurrentIndex(this.IndexSettingRefresh);
+                this.SetToggle(false);
+            }
+        }
+
+        /// <summary>
+        /// Single method to be used by an index updater to indicate an index update has completed.
+        /// </summary>
+        public void UpdateRefresh()
+        {
+            this.SetToggle(true);
+        }
+        #endregion
+
+        #region Private properties
+        /// <summary>
+        /// The filesystem path to the underlying index status file
+        /// </summary>
+        private string CurrentIndexFile
+        {
+            get {return (this._strStatusDir+(this._strStatusDir.EndsWith(@"\") ? "" : @"\")+CURRENTINDEX);}
+        }
+        /// <summary>
+        /// The filesystem path to the underlying index toggle file
+        /// </summary>
+        private string ToggleFile
+        {
+            get {return (this._strStatusDir+(this._strStatusDir.EndsWith(@"\") ? "" : @"\")+TOGGLE);}
+        }
+
+        #endregion
+
+        #region Private methods
 
         /// <summary>
         /// Validation routine to ensure all required values were present within xml configuration node
@@ -349,222 +349,222 @@ namespace Lucene.Net.Distributed.Configuration
             if (this._strIndexBPath == null) throw new ConfigurationErrorsException("CurrentIndex indexB invalid: " + Environment.NewLine + node.OuterXml);
         }
 
-		/// <summary>
-		/// Returns the current toggle file setting
-		/// </summary>
-		/// <returns>bool</returns>
-		private bool GetToggle()
-		{
-			bool bValue=false;
-			string input="";
-			try
-			{
-				if (!File.Exists(this.ToggleFile))
-				{
-					this.SetToggle(false);
-				}
-				else
-				{
-					StreamReader sr = File.OpenText(this.ToggleFile);
-					input = sr.ReadLine();
-					sr.Close();
-					bValue = (input.ToLower()=="true" ? true : false);
-				}
-			}
-			catch (Exception ex)
-			{
-				//Do something with ex
-			}
-			return bValue;
-		}
-
-		/// <summary>
-		/// Returns the current status file setting
-		/// </summary>
-		/// <returns>string</returns>
-		private string GetCurrentIndex()
-		{
-			string input="";
-			try
-			{
-				if (!File.Exists(this.CurrentIndexFile))
-				{
-					this.SetCurrentIndex(IndexSetting.IndexA);
-					input=IndexSetting.IndexA.ToString();
-				}
-				else
-				{
-					StreamReader sr = File.OpenText(this.CurrentIndexFile);
-					input = sr.ReadLine();
-					sr.Close();
-				}
-			}
-			catch (Exception ex)
-			{
-				//Do something with ex
-			}
-			return input;
-		}
-
-		/// <summary>
-		/// Updates the status file with the IndexSetting value parameter
-		/// </summary>
-		/// <param name="eIndexSetting">Setting to be applied to the status file</param>
-		private void SetCurrentIndex(IndexSetting eIndexSetting)
-		{
-			try
-			{
-				StreamWriter sw = File.CreateText(this.CurrentIndexFile);
-				sw.WriteLine((eIndexSetting==IndexSetting.IndexA ? CurrentIndex.INDEX_A : CurrentIndex.INDEX_B));
-				sw.Close();
-			}
-			catch (Exception ex)
-			{
-				//Do something with ex
-			}
-		}
-
-		/// <summary>
-		/// IndexWriter that can be used to apply updates to an index
-		/// </summary>
-		/// <param name="indexPath">File system path to the target index</param>
-		/// <param name="oAnalyzer">Lucene Analyzer to be used by the underlying IndexWriter</param>
-		/// <param name="bCompoundFile">Setting to dictate if the index should use compound format</param>
-		/// <returns></returns>
-		private IndexWriter GetIndexWriter(string indexPath, Analyzer oAnalyzer, bool bCompoundFile)
-		{
-			bool bExists = System.IO.Directory.Exists(indexPath);
-			if (bExists==false)
-				System.IO.Directory.CreateDirectory(indexPath);
-			bExists=IndexReader.IndexExists(FSDirectory.GetDirectory(indexPath, false));
-			IndexWriter idxWriter = new IndexWriter(indexPath, oAnalyzer, !bExists);
-			idxWriter.SetUseCompoundFile(bCompoundFile);
-			return idxWriter;
-		}
-
-		/// <summary>
-		/// Updates the toggle file with the bool value parameter
-		/// </summary>
-		/// <param name="bValue">Bool to be applied to the toggle file</param>
-		private void SetToggle(bool bValue)
-		{
-			try
-			{
-				StreamWriter sw = File.CreateText(this.ToggleFile);
-				sw.WriteLine(bValue.ToString());
-				sw.Close();
-				this._bIndexChanged=bValue;
-			}
-			catch (Exception ex)
-			{
-				//Do something with ex
-			}
-		}
-
-		/// <summary>
-		/// Returns the numeric index version (using Lucene objects) for the index located at LocalPath
-		/// </summary>
-		private long LocalIndexVersion
-		{
-			get {return IndexReader.GetCurrentVersion(this.LocalPath);}
-		}
-		/// <summary>
-		/// Returns the numeric index version (using Lucene objects) for the index located at CopyTargetPath
-		/// </summary>
-		private long TargetIndexVersion
-		{
-			get {return (IndexReader.IndexExists(this.CopyTargetPath) ? IndexReader.GetCurrentVersion(this.CopyTargetPath) : 0);}
-		}
-
-		/// <summary>
-		/// Deletes index files at the filesystem directoryPath location
-		/// </summary>
-		/// <param name="directoryPath">Filesystem path</param>
-		private void DeleteDirectoryFiles(string directoryPath)
-		{
-			try
-			{
-				if(!System.IO.Directory.Exists(directoryPath))
-					return;
-				DirectoryInfo di = new DirectoryInfo(directoryPath);
-				FileInfo[] arFi = di.GetFiles();
-				foreach(FileInfo fi in arFi)
-					fi.Delete();
-			}
-			catch(Exception e)
-			{
-				//Do something with e
-			}
-		}
-
-		/// <summary>
-		/// Copy all index files from the sourceDirPath to the destDirPath
-		/// </summary>
-		/// <param name="sourceDirPath">Filesystem path</param>
-		/// <param name="destDirPath">Filesystem path</param>
-		private void CopyDirectory(string sourceDirPath, string destDirPath)
-		{
-			string[] Files;
-
-			if(destDirPath[destDirPath.Length-1]!=Path.DirectorySeparatorChar) 
-				destDirPath+=Path.DirectorySeparatorChar;
-			if(!System.IO.Directory.Exists(destDirPath)) System.IO.Directory.CreateDirectory(destDirPath);
-			Files=System.IO.Directory.GetFileSystemEntries(sourceDirPath);
-			foreach(string Element in Files)
-			{
-				// Sub directories
-				if(System.IO.Directory.Exists(Element)) 
-					CopyDirectory(Element,destDirPath+Path.GetFileName(Element));
-					// Files in directory
-				else 
-					File.Copy(Element,destDirPath+Path.GetFileName(Element),true);
-			}
-
-		}
+        /// <summary>
+        /// Returns the current toggle file setting
+        /// </summary>
+        /// <returns>bool</returns>
+        private bool GetToggle()
+        {
+            bool bValue=false;
+            string input="";
+            try
+            {
+                if (!File.Exists(this.ToggleFile))
+                {
+                    this.SetToggle(false);
+                }
+                else
+                {
+                    StreamReader sr = File.OpenText(this.ToggleFile);
+                    input = sr.ReadLine();
+                    sr.Close();
+                    bValue = (input != null && input.ToLower() == "true");
+                }
+            }
+            catch (Exception ex)
+            {
+                //Do something with ex
+            }
+            return bValue;
+        }
+
+        /// <summary>
+        /// Returns the current status file setting
+        /// </summary>
+        /// <returns>string</returns>
+        private string GetCurrentIndex()
+        {
+            string input="";
+            try
+            {
+                if (!File.Exists(this.CurrentIndexFile))
+                {
+                    this.SetCurrentIndex(IndexSetting.IndexA);
+                    input=IndexSetting.IndexA.ToString();
+                }
+                else
+                {
+                    StreamReader sr = File.OpenText(this.CurrentIndexFile);
+                    input = sr.ReadLine();
+                    sr.Close();
+                }
+            }
+            catch (Exception ex)
+            {
+                //Do something with ex
+            }
+            return input;
+        }
+
+        /// <summary>
+        /// Updates the status file with the IndexSetting value parameter
+        /// </summary>
+        /// <param name="eIndexSetting">Setting to be applied to the status file</param>
+        private void SetCurrentIndex(IndexSetting eIndexSetting)
+        {
+            try
+            {
+                StreamWriter sw = File.CreateText(this.CurrentIndexFile);
+                sw.WriteLine((eIndexSetting==IndexSetting.IndexA ? CurrentIndex.INDEX_A : CurrentIndex.INDEX_B));
+                sw.Close();
+            }
+            catch (Exception ex)
+            {
+                //Do something with ex
+            }
+        }
+
+        /// <summary>
+        /// IndexWriter that can be used to apply updates to an index
+        /// </summary>
+        /// <param name="indexPath">File system path to the target index</param>
+        /// <param name="oAnalyzer">Lucene Analyzer to be used by the underlying IndexWriter</param>
+        /// <param name="bCompoundFile">Setting to dictate if the index should use compound format</param>
+        /// <returns></returns>
+        private IndexWriter GetIndexWriter(string indexPath, Analyzer oAnalyzer, bool bCompoundFile)
+        {
+            bool bExists = System.IO.Directory.Exists(indexPath);
+            if (bExists==false)
+                System.IO.Directory.CreateDirectory(indexPath);
+            bExists=IndexReader.IndexExists(FSDirectory.GetDirectory(indexPath, false));
+            IndexWriter idxWriter = new IndexWriter(indexPath, oAnalyzer, !bExists);
+            idxWriter.SetUseCompoundFile(bCompoundFile);
+            return idxWriter;
+        }
+
+        /// <summary>
+        /// Updates the toggle file with the bool value parameter
+        /// </summary>
+        /// <param name="bValue">Bool to be applied to the toggle file</param>
+        private void SetToggle(bool bValue)
+        {
+            try
+            {
+                StreamWriter sw = File.CreateText(this.ToggleFile);
+                sw.WriteLine(bValue.ToString());
+                sw.Close();
+                this._bIndexChanged=bValue;
+            }
+            catch (Exception ex)
+            {
+                //Do something with ex
+            }
+        }
+
+        /// <summary>
+        /// Returns the numeric index version (using Lucene objects) for the index located at LocalPath
+        /// </summary>
+        private long LocalIndexVersion
+        {
+            get {return IndexReader.GetCurrentVersion(this.LocalPath);}
+        }
+        /// <summary>
+        /// Returns the numeric index version (using Lucene objects) for the index located at CopyTargetPath
+        /// </summary>
+        private long TargetIndexVersion
+        {
+            get {return (IndexReader.IndexExists(this.CopyTargetPath) ? IndexReader.GetCurrentVersion(this.CopyTargetPath) : 0);}
+        }
+
+        /// <summary>
+        /// Deletes index files at the filesystem directoryPath location
+        /// </summary>
+        /// <param name="directoryPath">Filesystem path</param>
+        private void DeleteDirectoryFiles(string directoryPath)
+        {
+            try
+            {
+                if(!System.IO.Directory.Exists(directoryPath))
+                    return;
+                DirectoryInfo di = new DirectoryInfo(directoryPath);
+                FileInfo[] arFi = di.GetFiles();
+                foreach(FileInfo fi in arFi)
+                    fi.Delete();
+            }
+            catch(Exception e)
+            {
+                //Do something with e
+            }
+        }
+
+        /// <summary>
+        /// Copy all index files from the sourceDirPath to the destDirPath
+        /// </summary>
+        /// <param name="sourceDirPath">Filesystem path</param>
+        /// <param name="destDirPath">Filesystem path</param>
+        private void CopyDirectory(string sourceDirPath, string destDirPath)
+        {
+            string[] Files;
+
+            if(destDirPath[destDirPath.Length-1]!=Path.DirectorySeparatorChar) 
+                destDirPath+=Path.DirectorySeparatorChar;
+            if(!System.IO.Directory.Exists(destDirPath)) System.IO.Directory.CreateDirectory(destDirPath);
+            Files=System.IO.Directory.GetFileSystemEntries(sourceDirPath);
+            foreach(string Element in Files)
+            {
+                // sub-directory: recurse into it
+                if (System.IO.Directory.Exists(Element))
+                    CopyDirectory(Element, destDirPath + Path.GetFileName(Element));
+                // file: copy it, overwriting any stale copy at the target
+                else
+                    File.Copy(Element, destDirPath + Path.GetFileName(Element), true);
+            }
+
+        }
 
         /// <summary>
         /// Copy only new and updated index files from the sourceDirPath to the destDirPath
         /// </summary>
         /// <param name="sourceDirPath">Filesystem path</param>
         /// <param name="destDirPath">Filesystem path</param>
-		private void CopyDirectoryIncremental(string sourceDirPath, string destDirPath)
-		{
-			string[] Files;
-
-			if(destDirPath[destDirPath.Length-1]!=Path.DirectorySeparatorChar) 
-				destDirPath+=Path.DirectorySeparatorChar;
-			Files=System.IO.Directory.GetFileSystemEntries(sourceDirPath);
-			if(!System.IO.Directory.Exists(destDirPath))
-			{
-				System.IO.Directory.CreateDirectory(destDirPath);
-				foreach(string Element in Files)
-				{
-					// Sub directories
-					if(System.IO.Directory.Exists(Element)) 
-						CopyDirectory(Element,destDirPath+Path.GetFileName(Element));
-						// Files in directory
-					else 
-						File.Copy(Element,destDirPath+Path.GetFileName(Element),true);
-				}
-			}
-			else
-			{
-				foreach(string Element in Files)
-				{
-					if(System.IO.Directory.Exists(Element))
-					{
-						CopyDirectoryIncremental(Element,destDirPath+Path.GetFileName(Element));
-					}
-					else
-					{
-						if (System.IO.File.Exists(destDirPath+Path.GetFileName(Element)))
-							this.CopyFileIncremental(Element, destDirPath+Path.GetFileName(Element));
-						else
-							File.Copy(Element,destDirPath+Path.GetFileName(Element),true);
-					}
-				}
-			}
-		}
+        private void CopyDirectoryIncremental(string sourceDirPath, string destDirPath)
+        {
+            string[] Files;
+
+            if(destDirPath[destDirPath.Length-1]!=Path.DirectorySeparatorChar) 
+                destDirPath+=Path.DirectorySeparatorChar;
+            Files=System.IO.Directory.GetFileSystemEntries(sourceDirPath);
+            if(!System.IO.Directory.Exists(destDirPath))
+            {
+                System.IO.Directory.CreateDirectory(destDirPath);
+                foreach(string Element in Files)
+                {
+                    // sub-directory: fresh destination, so do a full recursive copy
+                    if (System.IO.Directory.Exists(Element))
+                        CopyDirectory(Element, destDirPath + Path.GetFileName(Element));
+                    // file: copy it into the newly created destination
+                    else
+                        File.Copy(Element, destDirPath + Path.GetFileName(Element), true);
+                }
+            }
+            else
+            {
+                foreach(string Element in Files)
+                {
+                    if(System.IO.Directory.Exists(Element))
+                    {
+                        CopyDirectoryIncremental(Element,destDirPath+Path.GetFileName(Element));
+                    }
+                    else
+                    {
+                        if (System.IO.File.Exists(destDirPath+Path.GetFileName(Element)))
+                            this.CopyFileIncremental(Element, destDirPath+Path.GetFileName(Element));
+                        else
+                            File.Copy(Element,destDirPath+Path.GetFileName(Element),true);
+                    }
+                }
+            }
+        }
 
         /// <summary>
         /// Evaluates the LastWriteTime and Length properties of two files to determine
@@ -573,13 +573,13 @@ namespace Lucene.Net.Distributed.Configuration
         /// <param name="filepath1">Filesystem path</param>
         /// <param name="filepath2">Filesystem path</param>
         private void CopyFileIncremental(string filepath1, string filepath2)
-		{
-			FileInfo fi1 = new FileInfo(filepath1);
-			FileInfo fi2 = new FileInfo(filepath2);
-			if ((fi1.LastWriteTime!=fi2.LastWriteTime)||(fi1.Length!=fi2.Length))
-				File.Copy(filepath1,filepath2,true);
-		}
-		#endregion
+        {
+            FileInfo fi1 = new FileInfo(filepath1);
+            FileInfo fi2 = new FileInfo(filepath2);
+            if ((fi1.LastWriteTime!=fi2.LastWriteTime)||(fi1.Length!=fi2.Length))
+                File.Copy(filepath1,filepath2,true);
+        }
+        #endregion
 
         #region Static methods
         /// <summary>

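Taken together, CurrentIndex implements a two-directory (A/B) online/offline swap coordinated through the small status and toggle files. A rough sketch of the searcher-side refresh cycle, using only members defined in this file; the status-directory path is hypothetical, and the updater side (an instance built from the XmlNode constructor) would call Copy() or CopyIncremental() followed by UpdateRefresh():

    var idx = new CurrentIndex(@"c:\indexes\status");    // shell ctor: status/toggle interaction only
    if (idx.HasChanged)                                  // toggle file reports pending updates
    {
        IndexSetting target = idx.IndexSettingRefresh;   // the offline set holding those updates
        // ... reopen the IndexSearcher against the directories mapped to target ...
        idx.IndexRefresh();                              // persists the swap and clears the toggle
    }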
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/DistributedSearch/Distributed/Configuration/DistributedSearcher.cs
----------------------------------------------------------------------
diff --git a/src/contrib/DistributedSearch/Distributed/Configuration/DistributedSearcher.cs b/src/contrib/DistributedSearch/Distributed/Configuration/DistributedSearcher.cs
index 500b606..13f0d35 100644
--- a/src/contrib/DistributedSearch/Distributed/Configuration/DistributedSearcher.cs
+++ b/src/contrib/DistributedSearch/Distributed/Configuration/DistributedSearcher.cs
@@ -24,7 +24,7 @@ using Lucene.Net.Distributed;
 
 namespace Lucene.Net.Distributed.Configuration
 {
-	/// <summary>
+    /// <summary>
     /// Definition of a configurable set of search indexes made accessible by the 
     /// LuceneServer windows service for a consuming application. These search indexes 
     /// are defined in the configuration file of an application. The locations defined 
@@ -40,18 +40,18 @@ namespace Lucene.Net.Distributed.Configuration
     /// </code>
     /// </summary>
     public class DistributedSearcher
-	{
+    {
         private int _id;
-		private SearchMethod _eSearchMethod;
-		private string _strLocation;
+        private SearchMethod _eSearchMethod;
+        private string _strLocation;
 
         /// <summary>
         /// Public constructor for DistributedSearcher. A DistributedSearcher is defined
         /// in XML configuration and is loaded via a custom configuration handler.
         /// </summary>
         /// <param name="xSection">The Xml definition in the configuration file</param>
-		public DistributedSearcher(XmlNode xSection)
-		{
+        public DistributedSearcher(XmlNode xSection)
+        {
             
             XmlAttributeCollection attributeCollection = xSection.Attributes;
             if (attributeCollection == null)
@@ -95,7 +95,7 @@ namespace Lucene.Net.Distributed.Configuration
                 //exec ping check if needed
             }
 
-		}
+        }
 
         /// <summary>
         /// Unique Id value assigned to this DistributedSearcher. Not required for any processing,
@@ -109,18 +109,18 @@ namespace Lucene.Net.Distributed.Configuration
         /// <summary>
         /// Enumeration value specifying the locality of the index -- local or remote
         /// </summary>
-		public SearchMethod SearchMethod
-		{
-			get {return this._eSearchMethod;}
-		}
+        public SearchMethod SearchMethod
+        {
+            get {return this._eSearchMethod;}
+        }
         /// <summary>
         /// Reference path to the DistributedSearcher. If SearchMethod is Local, this is a local
         /// file-system path, e.g. "c:\local\index". If SearchMethod is Distributed, this is the 
         /// URI of the server-activated service type, e.g. "tcp://192.168.1.100:1089/RemoteIndex".
         /// </summary>
-		public string Location
-		{
-			get {return this._strLocation;}
-		}
-	}
+        public string Location
+        {
+            get {return this._strLocation;}
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/DistributedSearch/Distributed/Configuration/DistributedSearcherConfigurationHandler.cs
----------------------------------------------------------------------
diff --git a/src/contrib/DistributedSearch/Distributed/Configuration/DistributedSearcherConfigurationHandler.cs b/src/contrib/DistributedSearch/Distributed/Configuration/DistributedSearcherConfigurationHandler.cs
index 49adbd3..5816a8f 100644
--- a/src/contrib/DistributedSearch/Distributed/Configuration/DistributedSearcherConfigurationHandler.cs
+++ b/src/contrib/DistributedSearch/Distributed/Configuration/DistributedSearcherConfigurationHandler.cs
@@ -28,15 +28,15 @@ namespace Lucene.Net.Distributed.Configuration
     /// by the LuceneServer windows service.
     /// </summary>
     public class DistributedSearcherConfigurationHandler : IConfigurationSectionHandler
-	{
+    {
         /// <summary>
         /// Empty public constructor for the configuration handler.
         /// </summary>
-		public DistributedSearcherConfigurationHandler()
-		{
-		}
+        public DistributedSearcherConfigurationHandler()
+        {
+        }
 
-		#region IConfigurationSectionHandler Members
+        #region IConfigurationSectionHandler Members
 
         /// <summary>
         /// Required implementation of IConfigurationSectionHandler.
@@ -45,12 +45,12 @@ namespace Lucene.Net.Distributed.Configuration
         /// <param name="configContext">Configuration context object</param>
         /// <param name="section">Xml configuration in the application configuration file</param>
         /// <returns></returns>
-		public object Create(object parent, object configContext, XmlNode section)
-		{
-			DistributedSearchers wsConfig = new DistributedSearchers(section);
-			return wsConfig;
-		}
+        public object Create(object parent, object configContext, XmlNode section)
+        {
+            DistributedSearchers wsConfig = new DistributedSearchers(section);
+            return wsConfig;
+        }
 
-		#endregion
-	}
+        #endregion
+    }
 }

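The handler above follows the standard IConfigurationSectionHandler pattern: the runtime instantiates it for the named section and Create returns the parsed DistributedSearchers graph. A sketch of the wiring; the section name matches the GetConfig call later in this patch, while the assembly name in the type attribute is an assumption:

    // app.config (assembly name assumed):
    // <configSections>
    //   <section name="DistributedSearchers"
    //            type="Lucene.Net.Distributed.Configuration.DistributedSearcherConfigurationHandler, Lucene.Net.Distributed" />
    // </configSections>

    // requires: using System.Configuration;
    var searchers = (DistributedSearchers)ConfigurationManager.GetSection("DistributedSearchers");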
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/DistributedSearch/Distributed/Configuration/DistributedSearchers.cs
----------------------------------------------------------------------
diff --git a/src/contrib/DistributedSearch/Distributed/Configuration/DistributedSearchers.cs b/src/contrib/DistributedSearch/Distributed/Configuration/DistributedSearchers.cs
index 5bdaff2..28aa1f2 100644
--- a/src/contrib/DistributedSearch/Distributed/Configuration/DistributedSearchers.cs
+++ b/src/contrib/DistributedSearch/Distributed/Configuration/DistributedSearchers.cs
@@ -37,16 +37,16 @@ namespace Lucene.Net.Distributed.Configuration
     /// </code>
     /// </summary>
     public class DistributedSearchers
-	{
-		private DistributedSearcher[] _arDistributedSearcherArray;
+    {
+        private DistributedSearcher[] _arDistributedSearcherArray;
 
         /// <summary>
         /// Accessor method for the configurable DistributedSearchers.
         /// </summary>
-		public static DistributedSearchers GetConfig
-		{
+        public static DistributedSearchers GetConfig
+        {
             get { return (DistributedSearchers)ConfigurationManager.GetSection("DistributedSearchers"); }
-		}
+        }
 
         /// <summary>
         /// Public constructor for DistributedSearchers. A DistributedSearcher is defined
@@ -54,29 +54,29 @@ namespace Lucene.Net.Distributed.Configuration
         /// </summary>
         /// <param name="xSection">The Xml definition in the configuration file</param>
         public DistributedSearchers(XmlNode xSection)
-		{
-			this._arDistributedSearcherArray = new DistributedSearcher[xSection.ChildNodes.Count];
-			int x=0;
+        {
+            this._arDistributedSearcherArray = new DistributedSearcher[xSection.ChildNodes.Count];
+            int x=0;
 
-			foreach (XmlNode c in xSection.ChildNodes)
-			{
-				if (c.Name.ToLower()=="DistributedSearcher")
-				{
-					DistributedSearcher ws = new DistributedSearcher(c);
-					this._arDistributedSearcherArray[x] = ws;
-					x++;
-				}
-			}
-		}
+            foreach (XmlNode c in xSection.ChildNodes)
+            {
+                if (c.Name.ToLower()=="DistributedSearcher")
+                {
+                    DistributedSearcher ws = new DistributedSearcher(c);
+                    this._arDistributedSearcherArray[x] = ws;
+                    x++;
+                }
+            }
+        }
 
         /// <summary>
         /// Strongly-typed array of DistributedSearcher objects as defined in 
         /// a configuration section.
         /// </summary>
         public DistributedSearcher[] DistributedSearcherArray
-		{
-			get {return this._arDistributedSearcherArray;}
-		}
+        {
+            get {return this._arDistributedSearcherArray;}
+        }
 
-	}
+    }
 }

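Consuming code would typically pull the parsed set through the static accessor and branch on each searcher's locality, per the SearchMethod/Location contract documented above. A minimal sketch (no real paths implied):

    foreach (DistributedSearcher ds in DistributedSearchers.GetConfig.DistributedSearcherArray)
    {
        if (ds.SearchMethod == SearchMethod.Local)
        {
            // ds.Location is a local file-system path to the index
        }
        else if (ds.SearchMethod == SearchMethod.Distributed)
        {
            // ds.Location is the URI of the server-activated remoting type
        }
    }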
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/DistributedSearch/Distributed/Configuration/LuceneServerIndex.cs
----------------------------------------------------------------------
diff --git a/src/contrib/DistributedSearch/Distributed/Configuration/LuceneServerIndex.cs b/src/contrib/DistributedSearch/Distributed/Configuration/LuceneServerIndex.cs
index caaa28f..2ec1084 100644
--- a/src/contrib/DistributedSearch/Distributed/Configuration/LuceneServerIndex.cs
+++ b/src/contrib/DistributedSearch/Distributed/Configuration/LuceneServerIndex.cs
@@ -23,7 +23,7 @@ using System.Xml;
 
 namespace Lucene.Net.Distributed.Configuration
 {
-	/// <summary>
+    /// <summary>
     /// Definition of a configurable search index made accessible by the 
     /// LuceneServer windows service.
     /// 
@@ -41,13 +41,13 @@ namespace Lucene.Net.Distributed.Configuration
     ///   </LuceneServerIndexes>
     /// </code>
     /// </summary>
-	public class LuceneServerIndex
-	{
-		private string _strObjectUri;
-		private int _intPort;
-		private DirectoryInfo[] _arIndexADirectories;
-		private DirectoryInfo[] _arIndexBDirectories;
-		private DirectoryInfo[] _arRefreshDirectories;
+    public class LuceneServerIndex
+    {
+        private string _strObjectUri;
+        private int _intPort;
+        private DirectoryInfo[] _arIndexADirectories;
+        private DirectoryInfo[] _arIndexBDirectories;
+        private DirectoryInfo[] _arRefreshDirectories;
 
         /// <summary>
         /// Public constructor for LuceneServerIndex. A LuceneServerIndex is defined
@@ -56,9 +56,9 @@ namespace Lucene.Net.Distributed.Configuration
         /// <param name="xSection">The Xml definition in the configuration file</param>
         /// <param name="defaultPort">The default Port value, as defined in the contained 
         /// LuceneServerIndexes configuration</param>
-		public LuceneServerIndex(XmlNode xSection, int defaultPort)
-		{
-			XmlAttributeCollection attributeCollection = xSection.Attributes;
+        public LuceneServerIndex(XmlNode xSection, int defaultPort)
+        {
+            XmlAttributeCollection attributeCollection = xSection.Attributes;
             try
             {
                 this._strObjectUri = attributeCollection["ObjectUri"].Value;
@@ -80,16 +80,16 @@ namespace Lucene.Net.Distributed.Configuration
             if (xSection.ChildNodes.Count == 0)
                 throw new ConfigurationErrorsException("LuceneServerIndex configuration missing: " + Environment.NewLine + xSection.OuterXml);
 
-			_arIndexADirectories = new DirectoryInfo[xSection.ChildNodes.Count];
-			_arIndexBDirectories = new DirectoryInfo[xSection.ChildNodes.Count];
-			DirectoryInfo diA;
-			DirectoryInfo diB;
-			int x=0;
+            _arIndexADirectories = new DirectoryInfo[xSection.ChildNodes.Count];
+            _arIndexBDirectories = new DirectoryInfo[xSection.ChildNodes.Count];
+            DirectoryInfo diA;
+            DirectoryInfo diB;
+            int x=0;
 
-			foreach (XmlNode c in xSection.ChildNodes)
-			{
-				if (c.Name.ToLower()=="directory")
-				{
+            foreach (XmlNode c in xSection.ChildNodes)
+            {
+                if (c.Name.ToLower()=="directory")
+                {
                     try
                     {
                         diA = new DirectoryInfo(c.Attributes["indexA"].Value);
@@ -113,10 +113,10 @@ namespace Lucene.Net.Distributed.Configuration
                     {
                         throw new ConfigurationErrorsException("LuceneServerIndex configuration Directory error: indexB=" + c.Attributes["indexB"].Value + Environment.NewLine + xSection.OuterXml);
                     }
-					x++;
-				}
-			}
-		}
+                    x++;
+                }
+            }
+        }
 
         /// <summary>
         /// The published Uri name for a collective set of indexes. The ObjectUri
@@ -126,35 +126,35 @@ namespace Lucene.Net.Distributed.Configuration
         /// <para>This value is required in configuration.</para>
         /// </summary>
         public string ObjectUri
-		{
-			get {return this._strObjectUri;}
-		}
+        {
+            get {return this._strObjectUri;}
+        }
 
         /// <summary>
         /// A definable port number for the published Uri. Use this value to override the default
         /// Port setting for all published URIs.
         /// <para>This value is optional in configuration.</para>
         /// </summary>
-		public int Port
-		{
-			get {return this._intPort;}
-		}
+        public int Port
+        {
+            get {return this._intPort;}
+        }
 
         /// <summary>
         /// File-system path to the "IndexA" location of the index files.
         /// </summary>
-		public DirectoryInfo[] IndexADirectories
-		{
-			get {return this._arIndexADirectories;}
-		}
+        public DirectoryInfo[] IndexADirectories
+        {
+            get {return this._arIndexADirectories;}
+        }
 
         /// <summary>
         /// File-system path to the "IndexB" location of the index files.
         /// </summary>
-		public DirectoryInfo[] IndexBDirectories
-		{
-			get {return this._arIndexBDirectories;}
-		}
+        public DirectoryInfo[] IndexBDirectories
+        {
+            get {return this._arIndexBDirectories;}
+        }
 
         /// <summary>
         /// Instance method that returns an array of directory paths associated
@@ -162,14 +162,14 @@ namespace Lucene.Net.Distributed.Configuration
         /// </summary>
         /// <param name="oIndexSettingRefresh">IndexSetting enumeration value</param>
         /// <returns>DirectoryInfo[] of directory paths</returns>
-		public DirectoryInfo[] RefreshDirectories(IndexSetting oIndexSettingRefresh)
-		{
-			this._arRefreshDirectories=null;
-			if (oIndexSettingRefresh==IndexSetting.IndexA)
-				this._arRefreshDirectories = this._arIndexADirectories;
-			else if (oIndexSettingRefresh==IndexSetting.IndexB)
-				this._arRefreshDirectories = this._arIndexBDirectories;
-			return this._arRefreshDirectories;
-		}
-	}
+        public DirectoryInfo[] RefreshDirectories(IndexSetting oIndexSettingRefresh)
+        {
+            this._arRefreshDirectories=null;
+            if (oIndexSettingRefresh==IndexSetting.IndexA)
+                this._arRefreshDirectories = this._arIndexADirectories;
+            else if (oIndexSettingRefresh==IndexSetting.IndexB)
+                this._arRefreshDirectories = this._arIndexBDirectories;
+            return this._arRefreshDirectories;
+        }
+    }
 }

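RefreshDirectories is the bridge between the toggle state and the physical paths: handed the IndexSetting that holds pending updates, it returns the matching A or B directory set. A sketch, where serverIndex and currentIndex are assumed to be instances obtained from configuration:

    IndexSetting pending = currentIndex.IndexSettingRefresh;           // offline set with updates
    DirectoryInfo[] dirs = serverIndex.RefreshDirectories(pending);    // IndexA or IndexB paths
    foreach (DirectoryInfo di in dirs)
    {
        // reopen each searcher against di.FullName
    }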
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/DistributedSearch/Distributed/Configuration/LuceneServerIndexConfigurationHandler.cs
----------------------------------------------------------------------
diff --git a/src/contrib/DistributedSearch/Distributed/Configuration/LuceneServerIndexConfigurationHandler.cs b/src/contrib/DistributedSearch/Distributed/Configuration/LuceneServerIndexConfigurationHandler.cs
index 13362bc..9d54022 100644
--- a/src/contrib/DistributedSearch/Distributed/Configuration/LuceneServerIndexConfigurationHandler.cs
+++ b/src/contrib/DistributedSearch/Distributed/Configuration/LuceneServerIndexConfigurationHandler.cs
@@ -21,23 +21,23 @@ using System.Xml;
 
 namespace Lucene.Net.Distributed.Configuration
 {
-	/// <summary>
-	/// Implementation of custom configuration handler for the definition of search indexes
+    /// <summary>
+    /// Implementation of custom configuration handler for the definition of search indexes
     /// made accessible by the LuceneServer windows service.
-	/// </summary>
-	public class LuceneServerIndexConfigurationHandler: IConfigurationSectionHandler
-	{
+    /// </summary>
+    public class LuceneServerIndexConfigurationHandler: IConfigurationSectionHandler
+    {
         public LuceneServerIndexConfigurationHandler()
-		{
-		}
-		#region IConfigurationSectionHandler Members
+        {
+        }
+        #region IConfigurationSectionHandler Members
 
-		public object Create(object parent, object configContext, XmlNode section)
-		{
-			LuceneServerIndexes rsConfig = new LuceneServerIndexes(section);
-			return rsConfig;
-		}
+        public object Create(object parent, object configContext, XmlNode section)
+        {
+            LuceneServerIndexes rsConfig = new LuceneServerIndexes(section);
+            return rsConfig;
+        }
 
-		#endregion
-	}
+        #endregion
+    }
 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/DistributedSearch/Distributed/Configuration/LuceneServerIndexes.cs
----------------------------------------------------------------------
diff --git a/src/contrib/DistributedSearch/Distributed/Configuration/LuceneServerIndexes.cs b/src/contrib/DistributedSearch/Distributed/Configuration/LuceneServerIndexes.cs
index bce6d3b..19cb856 100644
--- a/src/contrib/DistributedSearch/Distributed/Configuration/LuceneServerIndexes.cs
+++ b/src/contrib/DistributedSearch/Distributed/Configuration/LuceneServerIndexes.cs
@@ -40,17 +40,17 @@ namespace Lucene.Net.Distributed.Configuration
     /// </code>
     /// </summary>
     public class LuceneServerIndexes
-	{
-		private LuceneServerIndex[] _arLuceneServerIndexArray;
-		private int _intPort;
+    {
+        private LuceneServerIndex[] _arLuceneServerIndexArray;
+        private int _intPort;
 
         /// <summary>
         /// Accessor method for the configurable search indexes.
         /// </summary>
-		public static LuceneServerIndexes GetConfig
-		{
-			get {return (LuceneServerIndexes)ConfigurationManager.GetSection("LuceneServerIndexes");}
-		}
+        public static LuceneServerIndexes GetConfig
+        {
+            get {return (LuceneServerIndexes)ConfigurationManager.GetSection("LuceneServerIndexes");}
+        }
 
         /// <summary>
         /// Public constructor for LuceneServerIndexes. A LuceneServerIndex is defined
@@ -58,8 +58,8 @@ namespace Lucene.Net.Distributed.Configuration
         /// </summary>
         /// <param name="xSection">The Xml definition in the configuration file</param>
         public LuceneServerIndexes(XmlNode xSection)
-		{
-			XmlAttributeCollection attributeCollection = xSection.Attributes;
+        {
+            XmlAttributeCollection attributeCollection = xSection.Attributes;
 
             try
             {
@@ -73,37 +73,37 @@ namespace Lucene.Net.Distributed.Configuration
             if (xSection.ChildNodes.Count==0)
                 throw new ConfigurationErrorsException("LuceneServerIndexes configuration missing: " + Environment.NewLine + xSection.OuterXml);
 
-			this._arLuceneServerIndexArray = new LuceneServerIndex[xSection.ChildNodes.Count];
-			int x=0;
+            this._arLuceneServerIndexArray = new LuceneServerIndex[xSection.ChildNodes.Count];
+            int x=0;
 
-			foreach (XmlNode c in xSection.ChildNodes)
-			{
-				if (c.Name.ToLower()=="luceneserverindex")
-				{
-					LuceneServerIndex rs = new LuceneServerIndex(c, _intPort);
-					this._arLuceneServerIndexArray[x] = rs;
-					x++;
-				}
+            foreach (XmlNode c in xSection.ChildNodes)
+            {
+                if (c.Name.ToLower()=="luceneserverindex")
+                {
+                    LuceneServerIndex rs = new LuceneServerIndex(c, _intPort);
+                    this._arLuceneServerIndexArray[x] = rs;
+                    x++;
+                }
 
-			}
-		}
+            }
+        }
 
         /// <summary>
         /// Strongly-typed array of LuceneServerIndex objects as defined in 
         /// a configuration section.
         /// </summary>
-		public LuceneServerIndex[] LuceneServerIndexArray
-		{
-			get {return this._arLuceneServerIndexArray;}
-		}
+        public LuceneServerIndex[] LuceneServerIndexArray
+        {
+            get {return this._arLuceneServerIndexArray;}
+        }
 
         /// <summary>
         /// A default Port to be assigned to all defined LuceneServerIndex objects.
         /// This value can be overridden for a specific LuceneServerIndex.
         /// </summary>
-		public int Port
-		{
-			get {return this._intPort;}
-		}
-	}
+        public int Port
+        {
+            get {return this._intPort;}
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/DistributedSearch/Distributed/Enumerations.cs
----------------------------------------------------------------------
diff --git a/src/contrib/DistributedSearch/Distributed/Enumerations.cs b/src/contrib/DistributedSearch/Distributed/Enumerations.cs
index 7e86646..805800e 100644
--- a/src/contrib/DistributedSearch/Distributed/Enumerations.cs
+++ b/src/contrib/DistributedSearch/Distributed/Enumerations.cs
@@ -23,23 +23,23 @@ namespace Lucene.Net.Distributed
     /// <summary>
     /// Specifies the location of a DistributedSearcher
     /// </summary>
-	public enum SearchMethod
-	{
-		Local		= 0,
-		Distributed	= 1,
-		Undefined	= 2
-	}
+    public enum SearchMethod
+    {
+        Local       = 0,
+        Distributed = 1,
+        Undefined   = 2
+    }
 
     /// <summary>
     /// Specifies the type of Field in an IndexDocument
     /// </summary>
-	public enum FieldStorageType
-	{
-		Keyword		= 1,
-		UnIndexed	= 2,
-		UnStored	= 3,
-		Text		= 4
-	}
+    public enum FieldStorageType
+    {
+        Keyword   = 1,
+        UnIndexed = 2,
+        UnStored  = 3,
+        Text      = 4
+    }
 
     /// <summary>
     /// Specifies the type of action for an IndexSet to take when applying changes to an index
@@ -54,12 +54,12 @@ namespace Lucene.Net.Distributed
     /// <summary>
     /// Specifies the type of Analyzer to use in creation of an IndexDocument
     /// </summary>
-	public enum AnalyzerType
-	{
-		StandardAnalyzer			= 0,
-		SimpleAnalyzer				= 1,
-		WhitespaceAnalyzer			= 2,
-		StopAnalyzer				= 3
-	}
+    public enum AnalyzerType
+    {
+        StandardAnalyzer   = 0,
+        SimpleAnalyzer     = 1,
+        WhitespaceAnalyzer = 2,
+        StopAnalyzer       = 3
+    }
 
 }

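AnalyzerType mirrors four stock Lucene.Net analyzers by name. A hypothetical mapping helper is sketched below; treat the instantiations as illustrative, since analyzer constructor signatures (required Version or stop-word arguments) differ across Lucene.Net releases:

    // illustrative only; adjust constructors to the Lucene.Net version in use
    public static Analyzer CreateAnalyzer(AnalyzerType type)
    {
        switch (type)
        {
            case AnalyzerType.SimpleAnalyzer:     return new SimpleAnalyzer();
            case AnalyzerType.WhitespaceAnalyzer: return new WhitespaceAnalyzer();
            case AnalyzerType.StopAnalyzer:       return new StopAnalyzer();
            default:                              return new StandardAnalyzer();
        }
    }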
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/DistributedSearch/Distributed/Indexing/DeleteIndexDocument.cs
----------------------------------------------------------------------
diff --git a/src/contrib/DistributedSearch/Distributed/Indexing/DeleteIndexDocument.cs b/src/contrib/DistributedSearch/Distributed/Indexing/DeleteIndexDocument.cs
index b3d40f6..8186b36 100644
--- a/src/contrib/DistributedSearch/Distributed/Indexing/DeleteIndexDocument.cs
+++ b/src/contrib/DistributedSearch/Distributed/Indexing/DeleteIndexDocument.cs
@@ -22,14 +22,14 @@ using Lucene.Net.Distributed;
 
 namespace Lucene.Net.Distributed.Indexing
 {
-	[Serializable]
-	public class DeleteIndexDocument: IndexDocument
-	{
+    [Serializable]
+    public class DeleteIndexDocument: IndexDocument
+    {
 
-		public DeleteIndexDocument(int iRecordId)
-			: base(iRecordId)
-		{
-		}
+        public DeleteIndexDocument(int iRecordId)
+            : base(iRecordId)
+        {
+        }
 
-	}
+    }
 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/DistributedSearch/Distributed/Indexing/FileNameComparer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/DistributedSearch/Distributed/Indexing/FileNameComparer.cs b/src/contrib/DistributedSearch/Distributed/Indexing/FileNameComparer.cs
index 379d195..24eb329 100644
--- a/src/contrib/DistributedSearch/Distributed/Indexing/FileNameComparer.cs
+++ b/src/contrib/DistributedSearch/Distributed/Indexing/FileNameComparer.cs
@@ -21,24 +21,24 @@ using System.IO;
 
 namespace Lucene.Net.Distributed.Indexing
 {
-	/// <summary>
-	/// Summary description for FileNameComparer.
-	/// </summary>
-	public class FileNameComparer : IComparer
-	{
+    /// <summary>
+    /// Summary description for FileNameComparer.
+    /// </summary>
+    public class FileNameComparer : IComparer
+    {
 
-		public int Compare(object x, object y)
-		{
-			if ((x is FileInfo) && (y is FileInfo))
-			{
-				FileInfo fX = (FileInfo)x;
-				FileInfo fY = (FileInfo)y;
-				return fX.Name.CompareTo(fY.Name);
-			}
-			else
-			{
-				return 0;
-			}
-		}
-	}
+        public int Compare(object x, object y)
+        {
+            if ((x is FileInfo) && (y is FileInfo))
+            {
+                FileInfo fX = (FileInfo)x;
+                FileInfo fY = (FileInfo)y;
+                return fX.Name.CompareTo(fY.Name);
+            }
+            else
+            {
+                return 0;
+            }
+        }
+    }
 }

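Usage sketch: FileNameComparer gives the pending .bin files (written by IndexDocument.Save, shown in the next file) a deterministic processing order. The "*.bin" pattern and directory layout are assumptions:

    using System;
    using System.IO;
    using Lucene.Net.Distributed.Indexing;

    public static class ComparerSketch
    {
        public static FileInfo[] SortPending(string directory)
        {
            FileInfo[] files = new DirectoryInfo(directory).GetFiles("*.bin");
            // The non-generic Array.Sort overload takes the IComparer implemented above.
            Array.Sort(files, new FileNameComparer());
            return files;
        }
    }
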
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/DistributedSearch/Distributed/Indexing/IndexDocument.cs
----------------------------------------------------------------------
diff --git a/src/contrib/DistributedSearch/Distributed/Indexing/IndexDocument.cs b/src/contrib/DistributedSearch/Distributed/Indexing/IndexDocument.cs
index 83a7295..a8c20c8 100644
--- a/src/contrib/DistributedSearch/Distributed/Indexing/IndexDocument.cs
+++ b/src/contrib/DistributedSearch/Distributed/Indexing/IndexDocument.cs
@@ -28,119 +28,119 @@ using Lucene.Net.Distributed;
 namespace Lucene.Net.Distributed.Indexing
 {
 
-	/// <summary>
-	/// Base class representing a record to be added to a Lucene index.
+    /// <summary>
+    /// Base class representing a record to be added to a Lucene index.
     /// <para>
     /// IndexDocument contains a RecordId and a Lucene.Net.Document. The RecordId
     /// is interrogated to determine which index to add the associated 
     /// Lucene.Net.Document.
     /// </para>
-	/// </summary>
-	[Serializable]
-	public abstract class IndexDocument
-	{
-		#region Variables
-		protected Document _oDocument;
-		protected int _intRecordId;
-		public static BinaryFormatter Formatter = new BinaryFormatter();
+    /// </summary>
+    [Serializable]
+    public abstract class IndexDocument
+    {
+        #region Variables
+        protected Document _oDocument;
+        protected int _intRecordId;
+        public static BinaryFormatter Formatter = new BinaryFormatter();
         private static string filepath = (ConfigurationManager.AppSettings["IndexDocumentPath"] != null ? ConfigurationManager.AppSettings["IndexDocumentPath"] : "");
-		private static string endwhack = (filepath.EndsWith(@"\") ? "" : @"\");
-		private DateTime _eDateTime;
-		#endregion
+        private static string endwhack = (filepath.EndsWith(@"\") ? "" : @"\");
+        private DateTime _eDateTime;
+        #endregion
 
-		#region Constructors
+        #region Constructors
         /// <summary>
         /// Empty public constructor.
         /// </summary>
-		public IndexDocument()
-		{
-		}
+        public IndexDocument()
+        {
+        }
 
         /// <summary>
         /// Base constructor accepting only a RecordId. Useful for classes that 
         /// will have no associated Document, i.e. deletes.
         /// </summary>
         /// <param name="iRecordId">The source recordId (see also <seealso cref="#">IndexSet.IdColumn</seealso>) </param>
-		public IndexDocument(int iRecordId)
-		{
-			this._intRecordId = iRecordId;
-			this._oDocument = new Document();
-			this._eDateTime = DateTime.Now;
-		}
+        public IndexDocument(int iRecordId)
+        {
+            this._intRecordId = iRecordId;
+            this._oDocument = new Document();
+            this._eDateTime = DateTime.Now;
+        }
 
-		public IndexDocument(Document oDocument, int iRecordId)
-		{
-			this._oDocument = oDocument;
-			this._intRecordId = iRecordId;
-			this._eDateTime = DateTime.Now;
-		}
+        public IndexDocument(Document oDocument, int iRecordId)
+        {
+            this._oDocument = oDocument;
+            this._intRecordId = iRecordId;
+            this._eDateTime = DateTime.Now;
+        }
 
-		#endregion
+        #endregion
 
-		#region Properties
-		public Document Document
-		{
-			get {return this._oDocument;}
-		}
+        #region Properties
+        public Document Document
+        {
+            get {return this._oDocument;}
+        }
 
-		public int RecordId
-		{
-			get {return this._intRecordId;}
-		}
+        public int RecordId
+        {
+            get {return this._intRecordId;}
+        }
 
         public virtual Analyzer GetAnalyzer()
         {
             return null;
         }
 
-		public string FileName
-		{
-			get { return Environment.MachineName + "_" + this.GetType().ToString() + "_" + this.RecordId.ToString() + "_" + this.DateTime.Ticks.ToString() + ".bin"; }
-		}
-		private DateTime DateTime
-		{
-			get { return this._eDateTime; }
-		}
+        public string FileName
+        {
+            get { return Environment.MachineName + "_" + this.GetType().ToString() + "_" + this.RecordId.ToString() + "_" + this.DateTime.Ticks.ToString() + ".bin"; }
+        }
+        private DateTime DateTime
+        {
+            get { return this._eDateTime; }
+        }
 
-		#endregion
+        #endregion
 
-		#region Methods
-		public void Save()
-		{
-			try
-			{
-				FileStream fs = File.Open(filepath + endwhack + this.FileName, FileMode.Create, FileAccess.ReadWrite);
-				IndexDocument.Formatter.Serialize(fs, this);
-				fs.Close();
-			}
-			catch (SerializationException se)
-			{
-				throw (se);
-			}
-			catch (NullReferenceException nre)
-			{
-				throw (nre);
-			}
-		}
-		public void Save(string filePath)
-		{
-			try
-			{
-				FileStream fs = File.Open(filePath + endwhack + this.FileName, FileMode.Create, FileAccess.ReadWrite);
-				IndexDocument.Formatter.Serialize(fs, this);
-				fs.Close();
-			}
-			catch (SerializationException se)
-			{
-				throw (se);
-			}
-			catch (NullReferenceException nre)
-			{
-				throw (nre);
-			}
-		}
-		#endregion
+        #region Methods
+        public void Save()
+        {
+            try
+            {
+                FileStream fs = File.Open(filepath + endwhack + this.FileName, FileMode.Create, FileAccess.ReadWrite);
+                IndexDocument.Formatter.Serialize(fs, this);
+                fs.Close();
+            }
+            catch (SerializationException se)
+            {
+                throw (se);
+            }
+            catch (NullReferenceException nre)
+            {
+                throw (nre);
+            }
+        }
+        public void Save(string filePath)
+        {
+            try
+            {
+                FileStream fs = File.Open(filePath + endwhack + this.FileName, FileMode.Create, FileAccess.ReadWrite);
+                IndexDocument.Formatter.Serialize(fs, this);
+                fs.Close();
+            }
+            catch (SerializationException se)
+            {
+                throw (se);
+            }
+            catch (NullReferenceException nre)
+            {
+                throw (nre);
+            }
+        }
+        #endregion
 
 
-	}
+    }
 }

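A sketch of how IndexDocument is meant to be subclassed (the class name, field name, and storage flags below are illustrative, not from this patch). Save() then serializes the instance to IndexDocumentPath under the MachineName_Type_RecordId_Ticks.bin name produced by the FileName property above:

    using System;
    using Lucene.Net.Documents;
    using Lucene.Net.Distributed.Indexing;

    [Serializable]
    public class ProductIndexDocument : IndexDocument
    {
        public ProductIndexDocument(int recordId, string name)
            : base(new Document(), recordId)
        {
            // Field storage choices here are examples only.
            this.Document.Add(new Field("name", name, Field.Store.YES, Field.Index.ANALYZED));
        }
    }

    // e.g. new ProductIndexDocument(7, "widget").Save();
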
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/DistributedSearch/Distributed/Indexing/IndexSet.cs
----------------------------------------------------------------------
diff --git a/src/contrib/DistributedSearch/Distributed/Indexing/IndexSet.cs b/src/contrib/DistributedSearch/Distributed/Indexing/IndexSet.cs
index 01dac8e..3638051 100644
--- a/src/contrib/DistributedSearch/Distributed/Indexing/IndexSet.cs
+++ b/src/contrib/DistributedSearch/Distributed/Indexing/IndexSet.cs
@@ -52,43 +52,43 @@ namespace Lucene.Net.Distributed.Indexing
     /// </code>
     /// </summary>
     public class IndexSet
-	{
-		#region Variables
-		private int _intId = -1;
-		private string _strLocalPath;
-		private string _strIdColumn;
-		private int _intBottomId;
-		private int _intTopId;
-		private CurrentIndex _oCurrentIndex;
-		private IndexAction _eIndexAction=IndexAction.NoAction;
-		private AnalyzerType _eAnalyzerType=AnalyzerType.StandardAnalyzer;
-		private Hashtable _htDocuments = new Hashtable();
-		private Hashtable _htIndexDocuments = new Hashtable();
-		private List<string> _alFileSystemDocuments = new List<string>();
-		#endregion
+    {
+        #region Variables
+        private int _intId = -1;
+        private string _strLocalPath;
+        private string _strIdColumn;
+        private int _intBottomId;
+        private int _intTopId;
+        private CurrentIndex _oCurrentIndex;
+        private IndexAction _eIndexAction=IndexAction.NoAction;
+        private AnalyzerType _eAnalyzerType=AnalyzerType.StandardAnalyzer;
+        private Hashtable _htDocuments = new Hashtable();
+        private Hashtable _htIndexDocuments = new Hashtable();
+        private List<string> _alFileSystemDocuments = new List<string>();
+        #endregion
 
-		#region Constructors
+        #region Constructors
         /// <summary>
         /// Public constructor for IndexSet. An IndexSet is defined in XML configuration 
         /// and is loaded via a custom configuration handler.
         /// </summary>
         /// <param name="node">XmlNode definition for a given IndexSet</param>
         public IndexSet(XmlNode node)
-		{
-			this.LoadValues(node);
-		}
+        {
+            this.LoadValues(node);
+        }
 
-		#endregion
+        #endregion
 
-		#region Internal voids
+        #region Internal voids
         /// <summary>
         /// Internal load method called from the constructor. Loads underlying values
         /// based on Xml configuration.
         /// </summary>
         /// <param name="node">XmlNode definition for a given IndexSet</param>
-		internal void LoadValues(XmlNode node)
-		{
-			XmlAttributeCollection attributeCollection = node.Attributes;
+        internal void LoadValues(XmlNode node)
+        {
+            XmlAttributeCollection attributeCollection = node.Attributes;
             try
             {
                 this._intId = Convert.ToInt32(attributeCollection["id"].Value);
@@ -120,19 +120,19 @@ namespace Lucene.Net.Distributed.Indexing
             if (node.ChildNodes.Count==0)
                 throw new ConfigurationErrorsException("IndexSet " + this._intId.ToString() + " configuration missing " + Environment.NewLine + node.OuterXml);
 
-			foreach (XmlNode c in node.ChildNodes)
-			{
-				if (!c.HasChildNodes)
-				{
-					switch (c.Attributes["key"].Value.ToLower())
-					{
-						case "localpath":
-							this._strLocalPath = c.Attributes["value"].Value;
-							break;
-						case "idcolumn":
-							this._strIdColumn = c.Attributes["value"].Value;
-							break;
-						case "bottomid":
+            foreach (XmlNode c in node.ChildNodes)
+            {
+                if (!c.HasChildNodes)
+                {
+                    switch (c.Attributes["key"].Value.ToLower())
+                    {
+                        case "localpath":
+                            this._strLocalPath = c.Attributes["value"].Value;
+                            break;
+                        case "idcolumn":
+                            this._strIdColumn = c.Attributes["value"].Value;
+                            break;
+                        case "bottomid":
                             try
                             {
                                 this._intBottomId = Convert.ToInt32(c.Attributes["value"].Value);
@@ -141,8 +141,8 @@ namespace Lucene.Net.Distributed.Indexing
                             {
                                 throw new ConfigurationErrorsException("IndexSet " + this._intId.ToString() + " bottomid invalid: " + Environment.NewLine + node.OuterXml);
                             }
-							break;
-						case "topid":
+                            break;
+                        case "topid":
                             try
                             {
                                 this._intTopId = Convert.ToInt32(c.Attributes["value"].Value);
@@ -152,34 +152,34 @@ namespace Lucene.Net.Distributed.Indexing
                                 throw new ConfigurationErrorsException("IndexSet " + this._intId.ToString() + " topid invalid: " + Environment.NewLine + node.OuterXml);
                             }
                             break;
-					}
-				}
-				else
-				{
-					switch(c.Name.ToLower())
-					{
-						case "copy":
-							if (this._strLocalPath!=null)
-								LoadCopy(c,this._strLocalPath);
-							else
-								LoadCopy(c,node);
-							break;
-					}
-				}
-			}
+                    }
+                }
+                else
+                {
+                    switch(c.Name.ToLower())
+                    {
+                        case "copy":
+                            if (this._strLocalPath!=null)
+                                LoadCopy(c,this._strLocalPath);
+                            else
+                                LoadCopy(c,node);
+                            break;
+                    }
+                }
+            }
             this.CheckValidSet(node);
 
-		}
+        }
 
-		internal void LoadCopy(XmlNode node, string localpath)
-		{
-			this._oCurrentIndex = new CurrentIndex(node,localpath);
-		}
+        internal void LoadCopy(XmlNode node, string localpath)
+        {
+            this._oCurrentIndex = new CurrentIndex(node,localpath);
+        }
 
-		internal void LoadCopy(XmlNode node, XmlNode masternode)
-		{
-			foreach (XmlNode c in node.ChildNodes)
-			{
+        internal void LoadCopy(XmlNode node, XmlNode masternode)
+        {
+            foreach (XmlNode c in node.ChildNodes)
+            {
                 if (c.Attributes["key"] != null)
                 {
                     switch (c.Attributes["key"].Value.ToLower())
@@ -189,8 +189,8 @@ namespace Lucene.Net.Distributed.Indexing
                             break;
                     }
                 }
-			}
-		}
+            }
+        }
 
         private void CheckValidSet(XmlNode node)
         {
@@ -199,82 +199,82 @@ namespace Lucene.Net.Distributed.Indexing
             if (this._strIdColumn==null) throw new ConfigurationErrorsException("IndexSet " + this._intId.ToString() + " IdColumn undefined: " + Environment.NewLine + node.OuterXml);
         }
 
-		#endregion
+        #endregion
 
-		#region Properties
+        #region Properties
         /// <summary>
         /// Unique identifier for an IndexSet within a configuration of multiple IndexSet objects
         /// </summary>
-		public int Id
-		{
-			get {return this._intId;}
-		}
+        public int Id
+        {
+            get {return this._intId;}
+        }
 
         /// <summary>
         /// Enumeration dictating the type of updates to be applied to the underlying master index
         /// </summary>
-		public IndexAction IndexAction
-		{
-			get {return this._eIndexAction;}
-		}
+        public IndexAction IndexAction
+        {
+            get {return this._eIndexAction;}
+        }
 
         /// <summary>
         /// Enumeration dictating the type of Analyzer to be applied to IndexDocuments in update scenarios
         /// </summary>
-		public AnalyzerType AnalyzerType
-		{
-			get {return this._eAnalyzerType;}
-		}
+        public AnalyzerType AnalyzerType
+        {
+            get {return this._eAnalyzerType;}
+        }
 
         /// <summary>
         /// The Analyzer object used in application of IndexDocument updates 
         /// </summary>
-		public Analyzer Analyzer
-		{
-			get {return CurrentIndex.GetAnalyzer(this._eAnalyzerType);}
-		}
+        public Analyzer Analyzer
+        {
+            get {return CurrentIndex.GetAnalyzer(this._eAnalyzerType);}
+        }
 
         /// <summary>
         /// Filesystem path to the master index
         /// </summary>
-		public string LocalPath
-		{
-			get {return this._strLocalPath;}
-		}
+        public string LocalPath
+        {
+            get {return this._strLocalPath;}
+        }
 
         /// <summary>
         /// String name representing the unique key for the given record in the index
         /// </summary>
-		public string IdColumn
-		{
-			get {return this._strIdColumn;}
-		}
+        public string IdColumn
+        {
+            get {return this._strIdColumn;}
+        }
 
         /// <summary>
         /// Minimum IdColumn value for a record in this index
         /// </summary>
-		public int BottomId
-		{
-			get {return this._intBottomId;}
-		}
+        public int BottomId
+        {
+            get {return this._intBottomId;}
+        }
 
         /// <summary>
         /// Maximum IdColumn value for a record in this index
         /// </summary>
-		public int TopId
-		{
-			get {return this._intTopId;}
-		}
+        public int TopId
+        {
+            get {return this._intTopId;}
+        }
 
         /// <summary>
         /// CurrentIndex object associated with this IndexSet.  The CurrentIndex is used
         /// in determining index settings and maintenance as well as managing physical file updates
         /// for index updates.
         /// </summary>
-		public CurrentIndex CurrentIndex
-		{
-			get {return this._oCurrentIndex;}
-		}
+        public CurrentIndex CurrentIndex
+        {
+            get {return this._oCurrentIndex;}
+        }
 
         /// <summary>
         /// List of filesystem paths representing files for the master index
@@ -318,21 +318,21 @@ namespace Lucene.Net.Distributed.Indexing
         /// </summary>
         /// <returns></returns>
         public NameValueCollection GetDeletionCollection()
-		{
-			NameValueCollection nvc = new NameValueCollection(this._htDocuments.Count);
-			foreach(DictionaryEntry de in this._htIndexDocuments)
-				nvc.Add(this.IdColumn, ((IndexDocument)de.Value).RecordId.ToString());
-			return nvc;
-		}
+        {
+            NameValueCollection nvc = new NameValueCollection(this._htDocuments.Count);
+            foreach(DictionaryEntry de in this._htIndexDocuments)
+                nvc.Add(this.IdColumn, ((IndexDocument)de.Value).RecordId.ToString());
+            return nvc;
+        }
 
         /// <summary>
         /// Clears the contents of Documents and IndexDocuments
         /// </summary>
-		public void Reset()
-		{
-			this._htIndexDocuments.Clear();
-			this._htDocuments.Clear();
-		}
+        public void Reset()
+        {
+            this._htIndexDocuments.Clear();
+            this._htDocuments.Clear();
+        }
 
         /// <summary>
         /// Executes a Lucene.Net optimization against the referenced index.

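For reference, LoadValues above implies a configuration node shaped roughly like the following. Only the "id" attribute, the localpath/idcolumn/bottomid/topid keys, and the <copy> child are visible in this hunk; the other attribute names are assumptions (the values NoAction and StandardAnalyzer are the enum defaults shown above):

    <IndexSet id="1" action="NoAction" analyzer="StandardAnalyzer">
      <add key="localpath" value="c:\lucene\index-a" />
      <add key="idcolumn" value="RecordId" />
      <add key="bottomid" value="1" />
      <add key="topid" value="500000" />
      <copy>
        <!-- CurrentIndex settings; parsed by LoadCopy above -->
      </copy>
    </IndexSet>
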
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/DistributedSearch/Distributed/Indexing/IndexSetConfigurationHandler.cs
----------------------------------------------------------------------
diff --git a/src/contrib/DistributedSearch/Distributed/Indexing/IndexSetConfigurationHandler.cs b/src/contrib/DistributedSearch/Distributed/Indexing/IndexSetConfigurationHandler.cs
index dab927b..2ae90fd 100644
--- a/src/contrib/DistributedSearch/Distributed/Indexing/IndexSetConfigurationHandler.cs
+++ b/src/contrib/DistributedSearch/Distributed/Indexing/IndexSetConfigurationHandler.cs
@@ -27,20 +27,20 @@ namespace Lucene.Net.Distributed.Indexing
     /// as managed by the LuceneUpdater windows service.
     /// </summary>
     public class IndexSetConfigurationHandler : IConfigurationSectionHandler
-	{
-		public IndexSetConfigurationHandler()
-		{
-		}
+    {
+        public IndexSetConfigurationHandler()
+        {
+        }
 
-		#region IConfigurationSectionHandler Members
+        #region IConfigurationSectionHandler Members
 
-		public object Create(object parent, object configContext, XmlNode section)
-		{
+        public object Create(object parent, object configContext, XmlNode section)
+        {
             IndexSets isConfig = new IndexSets();
             isConfig.LoadIndexSetArray(section);
             return isConfig;
-		}
+        }
 
-		#endregion
-	}
+        #endregion
+    }
 }


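To close the loop on the handler above: IConfigurationSectionHandler implementations are wired up through <configSections> and retrieved with ConfigurationManager.GetSection. A sketch, assuming the section is registered under the name "IndexSets" and the assembly name shown in the comment (neither is visible in this patch):

    // app.config (assumed registration):
    //   <configSections>
    //     <section name="IndexSets"
    //              type="Lucene.Net.Distributed.Indexing.IndexSetConfigurationHandler, Lucene.Net.Contrib.DistributedSearch" />
    //   </configSections>
    using System.Configuration;
    using Lucene.Net.Distributed.Indexing;

    public static class ConfigSketch
    {
        public static IndexSets Load()
        {
            // Invokes IndexSetConfigurationHandler.Create with the <IndexSets> node.
            return (IndexSets)ConfigurationManager.GetSection("IndexSets");
        }
    }
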
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/ASCIIFoldingFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/ASCIIFoldingFilter.cs b/src/core/Analysis/ASCIIFoldingFilter.cs
index 6133870..aaf023e 100644
--- a/src/core/Analysis/ASCIIFoldingFilter.cs
+++ b/src/core/Analysis/ASCIIFoldingFilter.cs
@@ -1,4 +1,4 @@
-/* 
+/* 
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -20,16 +20,16 @@ using ArrayUtil = Lucene.Net.Util.ArrayUtil;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> This class converts alphabetic, numeric, and symbolic Unicode characters
-	/// which are not in the first 127 ASCII characters (the "Basic Latin" Unicode
-	/// block) into their ASCII equivalents, if one exists.
-	/// 
-	/// Characters from the following Unicode blocks are converted; however, only
-	/// those characters with reasonable ASCII alternatives are converted:
-	/// 
-	/// <list type="bullet">
-	/// <item>C1 Controls and Latin-1 Supplement: <a href="http://www.unicode.org/charts/PDF/U0080.pdf">http://www.unicode.org/charts/PDF/U0080.pdf</a></item>
+    
+    /// <summary> This class converts alphabetic, numeric, and symbolic Unicode characters
+    /// which are not in the first 127 ASCII characters (the "Basic Latin" Unicode
+    /// block) into their ASCII equivalents, if one exists.
+    /// 
+    /// Characters from the following Unicode blocks are converted; however, only
+    /// those characters with reasonable ASCII alternatives are converted:
+    /// 
+    /// <list type="bullet">
+    /// <item>C1 Controls and Latin-1 Supplement: <a href="http://www.unicode.org/charts/PDF/U0080.pdf">http://www.unicode.org/charts/PDF/U0080.pdf</a></item>
     /// <item>Latin Extended-A: <a href="http://www.unicode.org/charts/PDF/U0100.pdf">http://www.unicode.org/charts/PDF/U0100.pdf</a></item>
     /// <item>Latin Extended-B: <a href="http://www.unicode.org/charts/PDF/U0180.pdf">http://www.unicode.org/charts/PDF/U0180.pdf</a></item>
     /// <item>Latin Extended Additional: <a href="http://www.unicode.org/charts/PDF/U1E00.pdf">http://www.unicode.org/charts/PDF/U1E00.pdf</a></item>
@@ -45,3241 +45,3241 @@ namespace Lucene.Net.Analysis
     /// <item>Supplemental Punctuation: <a href="http://www.unicode.org/charts/PDF/U2E00.pdf">http://www.unicode.org/charts/PDF/U2E00.pdf</a></item>
     /// <item>Alphabetic Presentation Forms: <a href="http://www.unicode.org/charts/PDF/UFB00.pdf">http://www.unicode.org/charts/PDF/UFB00.pdf</a></item>
     /// <item>Halfwidth and Fullwidth Forms: <a href="http://www.unicode.org/charts/PDF/UFF00.pdf">http://www.unicode.org/charts/PDF/UFF00.pdf</a></item>
-	/// </list>
-	/// 
-	/// See: <a href="http://en.wikipedia.org/wiki/Latin_characters_in_Unicode">http://en.wikipedia.org/wiki/Latin_characters_in_Unicode</a>
-	/// 
-	/// The set of character conversions supported by this class is a superset of
-	/// those supported by Lucene's <see cref="ISOLatin1AccentFilter" /> which strips
-	/// accents from Latin1 characters.  For example, '&#192;' will be replaced by
-	/// 'a'.
-	/// </summary>
-	public sealed class ASCIIFoldingFilter : TokenFilter
-	{
-		public ASCIIFoldingFilter(TokenStream input):base(input)
-		{
+    /// </list>
+    /// 
+    /// See: <a href="http://en.wikipedia.org/wiki/Latin_characters_in_Unicode">http://en.wikipedia.org/wiki/Latin_characters_in_Unicode</a>
+    /// 
+    /// The set of character conversions supported by this class is a superset of
+    /// those supported by Lucene's <see cref="ISOLatin1AccentFilter" /> which strips
+    /// accents from Latin1 characters.  For example, '&#192;' will be replaced by
+    /// 'a'.
+    /// </summary>
+    public sealed class ASCIIFoldingFilter : TokenFilter
+    {
+        public ASCIIFoldingFilter(TokenStream input):base(input)
+        {
             termAtt = AddAttribute<ITermAttribute>();
-		}
-		
-		private char[] output = new char[512];
-		private int outputPos;
-		private ITermAttribute termAtt;
-		
-		public override bool IncrementToken()
-		{
-			if (input.IncrementToken())
-			{
-				char[] buffer = termAtt.TermBuffer();
-				int length = termAtt.TermLength();
-				
-				// If no characters actually require rewriting then we
-				// just return token as-is:
-				for (int i = 0; i < length; ++i)
-				{
-					char c = buffer[i];
-					if (c >= '\u0080')
-					{
-						FoldToASCII(buffer, length);
-						termAtt.SetTermBuffer(output, 0, outputPos);
-						break;
-					}
-				}
-				return true;
-			}
-			else
-			{
-				return false;
-			}
-		}
-		
-		/// <summary> Converts characters above ASCII to their ASCII equivalents.  For example,
-		/// accents are removed from accented characters.
-		/// </summary>
-		/// <param name="input">The string to fold
-		/// </param>
-		/// <param name="length">The number of characters in the input string
-		/// </param>
-		public void  FoldToASCII(char[] input, int length)
-		{
-			// Worst-case length required:
-			int maxSizeNeeded = 4 * length;
-			if (output.Length < maxSizeNeeded)
-			{
-				output = new char[ArrayUtil.GetNextSize(maxSizeNeeded)];
-			}
-			
-			outputPos = 0;
-			
-			for (int pos = 0; pos < length; ++pos)
-			{
-				char c = input[pos];
-				
-				// Quick test: if it's not in range then just keep current character
-				if (c < '\u0080')
-				{
-					output[outputPos++] = c;
-				}
-				else
-				{
-					switch (c)
-					{
-						
-						case '\u00C0': 
-						// À  [LATIN CAPITAL LETTER A WITH GRAVE]
-						case '\u00C1': 
-						// Á  [LATIN CAPITAL LETTER A WITH ACUTE]
-						case '\u00C2': 
-						// Â  [LATIN CAPITAL LETTER A WITH CIRCUMFLEX]
-						case '\u00C3': 
-						// Ã  [LATIN CAPITAL LETTER A WITH TILDE]
-						case '\u00C4': 
-						// Ä  [LATIN CAPITAL LETTER A WITH DIAERESIS]
-						case '\u00C5': 
-						// Å  [LATIN CAPITAL LETTER A WITH RING ABOVE]
-						case '\u0100': 
-						// Ā  [LATIN CAPITAL LETTER A WITH MACRON]
-						case '\u0102': 
-						// Ă  [LATIN CAPITAL LETTER A WITH BREVE]
-						case '\u0104': 
-						// Ą  [LATIN CAPITAL LETTER A WITH OGONEK]
-						case '\u018F': 
-						// Ə  http://en.wikipedia.org/wiki/Schwa  [LATIN CAPITAL LETTER SCHWA]
-						case '\u01CD': 
-						// Ǎ  [LATIN CAPITAL LETTER A WITH CARON]
-						case '\u01DE': 
-						// Ǟ  [LATIN CAPITAL LETTER A WITH DIAERESIS AND MACRON]
-						case '\u01E0': 
-						// Ǡ  [LATIN CAPITAL LETTER A WITH DOT ABOVE AND MACRON]
-						case '\u01FA': 
-						// Ǻ  [LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE]
-						case '\u0200': 
-						// Ȁ  [LATIN CAPITAL LETTER A WITH DOUBLE GRAVE]
-						case '\u0202': 
-						// Ȃ  [LATIN CAPITAL LETTER A WITH INVERTED BREVE]
-						case '\u0226': 
-						// Ȧ  [LATIN CAPITAL LETTER A WITH DOT ABOVE]
-						case '\u023A': 
-						// Ⱥ  [LATIN CAPITAL LETTER A WITH STROKE]
-						case '\u1D00': 
-						// ᴀ  [LATIN LETTER SMALL CAPITAL A]
-						case '\u1E00': 
-						// Ḁ  [LATIN CAPITAL LETTER A WITH RING BELOW]
-						case '\u1EA0': 
-						// Ạ  [LATIN CAPITAL LETTER A WITH DOT BELOW]
-						case '\u1EA2': 
-						// Ả  [LATIN CAPITAL LETTER A WITH HOOK ABOVE]
-						case '\u1EA4': 
-						// Ấ  [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND ACUTE]
-						case '\u1EA6': 
-						// Ầ  [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND GRAVE]
-						case '\u1EA8': 
-						// Ẩ  [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE]
-						case '\u1EAA': 
-						// Ẫ  [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND TILDE]
-						case '\u1EAC': 
-						// Ậ  [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND DOT BELOW]
-						case '\u1EAE': 
-						// Ắ  [LATIN CAPITAL LETTER A WITH BREVE AND ACUTE]
-						case '\u1EB0': 
-						// Ằ  [LATIN CAPITAL LETTER A WITH BREVE AND GRAVE]
-						case '\u1EB2': 
-						// Ẳ  [LATIN CAPITAL LETTER A WITH BREVE AND HOOK ABOVE]
-						case '\u1EB4': 
-						// Ẵ  [LATIN CAPITAL LETTER A WITH BREVE AND TILDE]
-						case '\u1EB6': 
-						// Ặ  [LATIN CAPITAL LETTER A WITH BREVE AND DOT BELOW]
-						case '\u24B6': 
-						// Ⓐ  [CIRCLED LATIN CAPITAL LETTER A]
-						case '\uFF21':  // Ａ  [FULLWIDTH LATIN CAPITAL LETTER A]
-							output[outputPos++] = 'A';
-							break;
-						
-						case '\u00E0': 
-						// à  [LATIN SMALL LETTER A WITH GRAVE]
-						case '\u00E1': 
-						// á  [LATIN SMALL LETTER A WITH ACUTE]
-						case '\u00E2': 
-						// â  [LATIN SMALL LETTER A WITH CIRCUMFLEX]
-						case '\u00E3': 
-						// ã  [LATIN SMALL LETTER A WITH TILDE]
-						case '\u00E4': 
-						// ä  [LATIN SMALL LETTER A WITH DIAERESIS]
-						case '\u00E5': 
-						// å  [LATIN SMALL LETTER A WITH RING ABOVE]
-						case '\u0101': 
-						// ā  [LATIN SMALL LETTER A WITH MACRON]
-						case '\u0103': 
-						// ă  [LATIN SMALL LETTER A WITH BREVE]
-						case '\u0105': 
-						// ą  [LATIN SMALL LETTER A WITH OGONEK]
-						case '\u01CE': 
-						// ǎ  [LATIN SMALL LETTER A WITH CARON]
-						case '\u01DF': 
-						// ǟ  [LATIN SMALL LETTER A WITH DIAERESIS AND MACRON]
-						case '\u01E1': 
-						// ǡ  [LATIN SMALL LETTER A WITH DOT ABOVE AND MACRON]
-						case '\u01FB': 
-						// ǻ  [LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE]
-						case '\u0201': 
-						// ȁ  [LATIN SMALL LETTER A WITH DOUBLE GRAVE]
-						case '\u0203': 
-						// ȃ  [LATIN SMALL LETTER A WITH INVERTED BREVE]
-						case '\u0227': 
-						// ȧ  [LATIN SMALL LETTER A WITH DOT ABOVE]
-						case '\u0250': 
-						// ɐ  [LATIN SMALL LETTER TURNED A]
-						case '\u0259': 
-						// ə  [LATIN SMALL LETTER SCHWA]
-						case '\u025A': 
-						// ɚ  [LATIN SMALL LETTER SCHWA WITH HOOK]
-						case '\u1D8F': 
-						// ᶏ  [LATIN SMALL LETTER A WITH RETROFLEX HOOK]
-						case '\u1D95': 
-						// ᶕ  [LATIN SMALL LETTER SCHWA WITH RETROFLEX HOOK]
-						case '\u1E01': 
-						// ḁ  [LATIN SMALL LETTER A WITH RING BELOW]
-						case '\u1E9A': 
-						// ẚ  [LATIN SMALL LETTER A WITH RIGHT HALF RING]
-						case '\u1EA1': 
-						// ạ  [LATIN SMALL LETTER A WITH DOT BELOW]
-						case '\u1EA3': 
-						// ả  [LATIN SMALL LETTER A WITH HOOK ABOVE]
-						case '\u1EA5': 
-						// ấ  [LATIN SMALL LETTER A WITH CIRCUMFLEX AND ACUTE]
-						case '\u1EA7': 
-						// ầ  [LATIN SMALL LETTER A WITH CIRCUMFLEX AND GRAVE]
-						case '\u1EA9': 
-						// ẩ  [LATIN SMALL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE]
-						case '\u1EAB': 
-						// ẫ  [LATIN SMALL LETTER A WITH CIRCUMFLEX AND TILDE]
-						case '\u1EAD': 
-						// ậ  [LATIN SMALL LETTER A WITH CIRCUMFLEX AND DOT BELOW]
-						case '\u1EAF': 
-						// ắ  [LATIN SMALL LETTER A WITH BREVE AND ACUTE]
-						case '\u1EB1': 
-						// ằ  [LATIN SMALL LETTER A WITH BREVE AND GRAVE]
-						case '\u1EB3': 
-						// ẳ  [LATIN SMALL LETTER A WITH BREVE AND HOOK ABOVE]
-						case '\u1EB5': 
-						// ẵ  [LATIN SMALL LETTER A WITH BREVE AND TILDE]
-						case '\u1EB7': 
-						// ặ  [LATIN SMALL LETTER A WITH BREVE AND DOT BELOW]
-						case '\u2090': 
-						// ₐ  [LATIN SUBSCRIPT SMALL LETTER A]
-						case '\u2094': 
-						// ₔ  [LATIN SUBSCRIPT SMALL LETTER SCHWA]
-						case '\u24D0': 
-						// ⓐ  [CIRCLED LATIN SMALL LETTER A]
-						case '\u2C65': 
-						// ⱥ  [LATIN SMALL LETTER A WITH STROKE]
-						case '\u2C6F': 
-						// Ɐ  [LATIN CAPITAL LETTER TURNED A]
-						case '\uFF41':  // ａ  [FULLWIDTH LATIN SMALL LETTER A]
-							output[outputPos++] = 'a';
-							break;
-						
-						case '\uA732':  // Ꜳ  [LATIN CAPITAL LETTER AA]
-							output[outputPos++] = 'A';
-							output[outputPos++] = 'A';
-							break;
-						
-						case '\u00C6': 
-						// Æ  [LATIN CAPITAL LETTER AE]
-						case '\u01E2': 
-						// Ǣ  [LATIN CAPITAL LETTER AE WITH MACRON]
-						case '\u01FC': 
-						// Ǽ  [LATIN CAPITAL LETTER AE WITH ACUTE]
-						case '\u1D01':  // ᴁ  [LATIN LETTER SMALL CAPITAL AE]
-							output[outputPos++] = 'A';
-							output[outputPos++] = 'E';
-							break;
-						
-						case '\uA734':  // Ꜵ  [LATIN CAPITAL LETTER AO]
-							output[outputPos++] = 'A';
-							output[outputPos++] = 'O';
-							break;
-						
-						case '\uA736':  // Ꜷ  [LATIN CAPITAL LETTER AU]
-							output[outputPos++] = 'A';
-							output[outputPos++] = 'U';
-							break;
-						
-						case '\uA738': 
-						// Ꜹ  [LATIN CAPITAL LETTER AV]
-						case '\uA73A':  // Ꜻ  [LATIN CAPITAL LETTER AV WITH HORIZONTAL BAR]
-							output[outputPos++] = 'A';
-							output[outputPos++] = 'V';
-							break;
-						
-						case '\uA73C':  // Ꜽ  [LATIN CAPITAL LETTER AY]
-							output[outputPos++] = 'A';
-							output[outputPos++] = 'Y';
-							break;
-						
-						case '\u249C':  // ⒜  [PARENTHESIZED LATIN SMALL LETTER A]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'a';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\uA733':  // ꜳ  [LATIN SMALL LETTER AA]
-							output[outputPos++] = 'a';
-							output[outputPos++] = 'a';
-							break;
-						
-						case '\u00E6': 
-						// æ  [LATIN SMALL LETTER AE]
-						case '\u01E3': 
-						// ǣ  [LATIN SMALL LETTER AE WITH MACRON]
-						case '\u01FD': 
-						// ǽ  [LATIN SMALL LETTER AE WITH ACUTE]
-						case '\u1D02':  // ᴂ  [LATIN SMALL LETTER TURNED AE]
-							output[outputPos++] = 'a';
-							output[outputPos++] = 'e';
-							break;
-						
-						case '\uA735':  // ꜵ  [LATIN SMALL LETTER AO]
-							output[outputPos++] = 'a';
-							output[outputPos++] = 'o';
-							break;
-						
-						case '\uA737':  // ꜷ  [LATIN SMALL LETTER AU]
-							output[outputPos++] = 'a';
-							output[outputPos++] = 'u';
-							break;
-						
-						case '\uA739': 
-						// ꜹ  [LATIN SMALL LETTER AV]
-						case '\uA73B':  // ꜻ  [LATIN SMALL LETTER AV WITH HORIZONTAL BAR]
-							output[outputPos++] = 'a';
-							output[outputPos++] = 'v';
-							break;
-						
-						case '\uA73D':  // ꜽ  [LATIN SMALL LETTER AY]
-							output[outputPos++] = 'a';
-							output[outputPos++] = 'y';
-							break;
-						
-						case '\u0181': 
-						// Ɓ  [LATIN CAPITAL LETTER B WITH HOOK]
-						case '\u0182': 
-						// Ƃ  [LATIN CAPITAL LETTER B WITH TOPBAR]
-						case '\u0243': 
-						// Ƀ  [LATIN CAPITAL LETTER B WITH STROKE]
-						case '\u0299': 
-						// ʙ  [LATIN LETTER SMALL CAPITAL B]
-						case '\u1D03': 
-						// ᴃ  [LATIN LETTER SMALL CAPITAL BARRED B]
-						case '\u1E02': 
-						// Ḃ  [LATIN CAPITAL LETTER B WITH DOT ABOVE]
-						case '\u1E04': 
-						// Ḅ  [LATIN CAPITAL LETTER B WITH DOT BELOW]
-						case '\u1E06': 
-						// Ḇ  [LATIN CAPITAL LETTER B WITH LINE BELOW]
-						case '\u24B7': 
-						// Ⓑ  [CIRCLED LATIN CAPITAL LETTER B]
-						case '\uFF22':  // Ｂ  [FULLWIDTH LATIN CAPITAL LETTER B]
-							output[outputPos++] = 'B';
-							break;
-						
-						case '\u0180': 
-						// ƀ  [LATIN SMALL LETTER B WITH STROKE]
-						case '\u0183': 
-						// ƃ  [LATIN SMALL LETTER B WITH TOPBAR]
-						case '\u0253': 
-						// ɓ  [LATIN SMALL LETTER B WITH HOOK]
-						case '\u1D6C': 
-						// ᵬ  [LATIN SMALL LETTER B WITH MIDDLE TILDE]
-						case '\u1D80': 
-						// ᶀ  [LATIN SMALL LETTER B WITH PALATAL HOOK]
-						case '\u1E03': 
-						// ḃ  [LATIN SMALL LETTER B WITH DOT ABOVE]
-						case '\u1E05': 
-						// ḅ  [LATIN SMALL LETTER B WITH DOT BELOW]
-						case '\u1E07': 
-						// ḇ  [LATIN SMALL LETTER B WITH LINE BELOW]
-						case '\u24D1': 
-						// ⓑ  [CIRCLED LATIN SMALL LETTER B]
-						case '\uFF42':  // ｂ  [FULLWIDTH LATIN SMALL LETTER B]
-							output[outputPos++] = 'b';
-							break;
-						
-						case '\u249D':  // ⒝  [PARENTHESIZED LATIN SMALL LETTER B]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'b';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u00C7': 
-						// Ç  [LATIN CAPITAL LETTER C WITH CEDILLA]
-						case '\u0106': 
-						// Ć  [LATIN CAPITAL LETTER C WITH ACUTE]
-						case '\u0108': 
-						// Ĉ  [LATIN CAPITAL LETTER C WITH CIRCUMFLEX]
-						case '\u010A': 
-						// Ċ  [LATIN CAPITAL LETTER C WITH DOT ABOVE]
-						case '\u010C': 
-						// Č  [LATIN CAPITAL LETTER C WITH CARON]
-						case '\u0187': 
-						// Ƈ  [LATIN CAPITAL LETTER C WITH HOOK]
-						case '\u023B': 
-						// Ȼ  [LATIN CAPITAL LETTER C WITH STROKE]
-						case '\u0297': 
-						// ʗ  [LATIN LETTER STRETCHED C]
-						case '\u1D04': 
-						// ᴄ  [LATIN LETTER SMALL CAPITAL C]
-						case '\u1E08': 
-						// Ḉ  [LATIN CAPITAL LETTER C WITH CEDILLA AND ACUTE]
-						case '\u24B8': 
-						// Ⓒ  [CIRCLED LATIN CAPITAL LETTER C]
-						case '\uFF23':  // Ｃ  [FULLWIDTH LATIN CAPITAL LETTER C]
-							output[outputPos++] = 'C';
-							break;
-						
-						case '\u00E7': 
-						// ç  [LATIN SMALL LETTER C WITH CEDILLA]
-						case '\u0107': 
-						// ć  [LATIN SMALL LETTER C WITH ACUTE]
-						case '\u0109': 
-						// ĉ  [LATIN SMALL LETTER C WITH CIRCUMFLEX]
-						case '\u010B': 
-						// ċ  [LATIN SMALL LETTER C WITH DOT ABOVE]
-						case '\u010D': 
-						// č  [LATIN SMALL LETTER C WITH CARON]
-						case '\u0188': 
-						// ƈ  [LATIN SMALL LETTER C WITH HOOK]
-						case '\u023C': 
-						// ȼ  [LATIN SMALL LETTER C WITH STROKE]
-						case '\u0255': 
-						// ɕ  [LATIN SMALL LETTER C WITH CURL]
-						case '\u1E09': 
-						// ḉ  [LATIN SMALL LETTER C WITH CEDILLA AND ACUTE]
-						case '\u2184': 
-						// ↄ  [LATIN SMALL LETTER REVERSED C]
-						case '\u24D2': 
-						// ⓒ  [CIRCLED LATIN SMALL LETTER C]
-						case '\uA73E': 
-						// Ꜿ  [LATIN CAPITAL LETTER REVERSED C WITH DOT]
-						case '\uA73F': 
-						// ꜿ  [LATIN SMALL LETTER REVERSED C WITH DOT]
-						case '\uFF43':  // ｃ  [FULLWIDTH LATIN SMALL LETTER C]
-							output[outputPos++] = 'c';
-							break;
-						
-						case '\u249E':  // ⒞  [PARENTHESIZED LATIN SMALL LETTER C]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'c';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u00D0': 
-						// Ð  [LATIN CAPITAL LETTER ETH]
-						case '\u010E': 
-						// Ď  [LATIN CAPITAL LETTER D WITH CARON]
-						case '\u0110': 
-						// Đ  [LATIN CAPITAL LETTER D WITH STROKE]
-						case '\u0189': 
-						// Ɖ  [LATIN CAPITAL LETTER AFRICAN D]
-						case '\u018A': 
-						// Ɗ  [LATIN CAPITAL LETTER D WITH HOOK]
-						case '\u018B': 
-						// Ƌ  [LATIN CAPITAL LETTER D WITH TOPBAR]
-						case '\u1D05': 
-						// ᴅ  [LATIN LETTER SMALL CAPITAL D]
-						case '\u1D06': 
-						// ᴆ  [LATIN LETTER SMALL CAPITAL ETH]
-						case '\u1E0A': 
-						// Ḋ  [LATIN CAPITAL LETTER D WITH DOT ABOVE]
-						case '\u1E0C': 
-						// Ḍ  [LATIN CAPITAL LETTER D WITH DOT BELOW]
-						case '\u1E0E': 
-						// Ḏ  [LATIN CAPITAL LETTER D WITH LINE BELOW]
-						case '\u1E10': 
-						// Ḑ  [LATIN CAPITAL LETTER D WITH CEDILLA]
-						case '\u1E12': 
-						// Ḓ  [LATIN CAPITAL LETTER D WITH CIRCUMFLEX BELOW]
-						case '\u24B9': 
-						// Ⓓ  [CIRCLED LATIN CAPITAL LETTER D]
-						case '\uA779': 
-						// Ꝺ  [LATIN CAPITAL LETTER INSULAR D]
-						case '\uFF24':  // Ｄ  [FULLWIDTH LATIN CAPITAL LETTER D]
-							output[outputPos++] = 'D';
-							break;
-						
-						case '\u00F0': 
-						// ð  [LATIN SMALL LETTER ETH]
-						case '\u010F': 
-						// ď  [LATIN SMALL LETTER D WITH CARON]
-						case '\u0111': 
-						// đ  [LATIN SMALL LETTER D WITH STROKE]
-						case '\u018C': 
-						// ƌ  [LATIN SMALL LETTER D WITH TOPBAR]
-						case '\u0221': 
-						// ȡ  [LATIN SMALL LETTER D WITH CURL]
-						case '\u0256': 
-						// ɖ  [LATIN SMALL LETTER D WITH TAIL]
-						case '\u0257': 
-						// ɗ  [LATIN SMALL LETTER D WITH HOOK]
-						case '\u1D6D': 
-						// ᵭ  [LATIN SMALL LETTER D WITH MIDDLE TILDE]
-						case '\u1D81': 
-						// ᶁ  [LATIN SMALL LETTER D WITH PALATAL HOOK]
-						case '\u1D91': 
-						// ᶑ  [LATIN SMALL LETTER D WITH HOOK AND TAIL]
-						case '\u1E0B': 
-						// ḋ  [LATIN SMALL LETTER D WITH DOT ABOVE]
-						case '\u1E0D': 
-						// ḍ  [LATIN SMALL LETTER D WITH DOT BELOW]
-						case '\u1E0F': 
-						// ḏ  [LATIN SMALL LETTER D WITH LINE BELOW]
-						case '\u1E11': 
-						// ḑ  [LATIN SMALL LETTER D WITH CEDILLA]
-						case '\u1E13': 
-						// ḓ  [LATIN SMALL LETTER D WITH CIRCUMFLEX BELOW]
-						case '\u24D3': 
-						// ⓓ  [CIRCLED LATIN SMALL LETTER D]
-						case '\uA77A': 
-						// ꝺ  [LATIN SMALL LETTER INSULAR D]
-						case '\uFF44':  // ｄ  [FULLWIDTH LATIN SMALL LETTER D]
-							output[outputPos++] = 'd';
-							break;
-						
-						case '\u01C4': 
-						// Ǆ  [LATIN CAPITAL LETTER DZ WITH CARON]
-						case '\u01F1':  // Ǳ  [LATIN CAPITAL LETTER DZ]
-							output[outputPos++] = 'D';
-							output[outputPos++] = 'Z';
-							break;
-						
-						case '\u01C5': 
-						// ǅ  [LATIN CAPITAL LETTER D WITH SMALL LETTER Z WITH CARON]
-						case '\u01F2':  // ǲ  [LATIN CAPITAL LETTER D WITH SMALL LETTER Z]
-							output[outputPos++] = 'D';
-							output[outputPos++] = 'z';
-							break;
-						
-						case '\u249F':  // ⒟  [PARENTHESIZED LATIN SMALL LETTER D]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'd';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u0238':  // ȸ  [LATIN SMALL LETTER DB DIGRAPH]
-							output[outputPos++] = 'd';
-							output[outputPos++] = 'b';
-							break;
-						
-						case '\u01C6': 
-						// ǆ  [LATIN SMALL LETTER DZ WITH CARON]
-						case '\u01F3': 
-						// ǳ  [LATIN SMALL LETTER DZ]
-						case '\u02A3': 
-						// ʣ  [LATIN SMALL LETTER DZ DIGRAPH]
-						case '\u02A5':  // ʥ  [LATIN SMALL LETTER DZ DIGRAPH WITH CURL]
-							output[outputPos++] = 'd';
-							output[outputPos++] = 'z';
-							break;
-						
-						case '\u00C8': 
-						// È  [LATIN CAPITAL LETTER E WITH GRAVE]
-						case '\u00C9': 
-						// É  [LATIN CAPITAL LETTER E WITH ACUTE]
-						case '\u00CA': 
-						// Ê  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX]
-						case '\u00CB': 
-						// Ë  [LATIN CAPITAL LETTER E WITH DIAERESIS]
-						case '\u0112': 
-						// Ē  [LATIN CAPITAL LETTER E WITH MACRON]
-						case '\u0114': 
-						// Ĕ  [LATIN CAPITAL LETTER E WITH BREVE]
-						case '\u0116': 
-						// Ė  [LATIN CAPITAL LETTER E WITH DOT ABOVE]
-						case '\u0118': 
-						// Ę  [LATIN CAPITAL LETTER E WITH OGONEK]
-						case '\u011A': 
-						// Ě  [LATIN CAPITAL LETTER E WITH CARON]
-						case '\u018E': 
-						// Ǝ  [LATIN CAPITAL LETTER REVERSED E]
-						case '\u0190': 
-						// Ɛ  [LATIN CAPITAL LETTER OPEN E]
-						case '\u0204': 
-						// Ȅ  [LATIN CAPITAL LETTER E WITH DOUBLE GRAVE]
-						case '\u0206': 
-						// Ȇ  [LATIN CAPITAL LETTER E WITH INVERTED BREVE]
-						case '\u0228': 
-						// Ȩ  [LATIN CAPITAL LETTER E WITH CEDILLA]
-						case '\u0246': 
-						// Ɇ  [LATIN CAPITAL LETTER E WITH STROKE]
-						case '\u1D07': 
-						// ᴇ  [LATIN LETTER SMALL CAPITAL E]
-						case '\u1E14': 
-						// Ḕ  [LATIN CAPITAL LETTER E WITH MACRON AND GRAVE]
-						case '\u1E16': 
-						// Ḗ  [LATIN CAPITAL LETTER E WITH MACRON AND ACUTE]
-						case '\u1E18': 
-						// Ḙ  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX BELOW]
-						case '\u1E1A': 
-						// Ḛ  [LATIN CAPITAL LETTER E WITH TILDE BELOW]
-						case '\u1E1C': 
-						// Ḝ  [LATIN CAPITAL LETTER E WITH CEDILLA AND BREVE]
-						case '\u1EB8': 
-						// Ẹ  [LATIN CAPITAL LETTER E WITH DOT BELOW]
-						case '\u1EBA': 
-						// Ẻ  [LATIN CAPITAL LETTER E WITH HOOK ABOVE]
-						case '\u1EBC': 
-						// Ẽ  [LATIN CAPITAL LETTER E WITH TILDE]
-						case '\u1EBE': 
-						// Ế  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND ACUTE]
-						case '\u1EC0': 
-						// Ề  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND GRAVE]
-						case '\u1EC2': 
-						// Ể  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE]
-						case '\u1EC4': 
-						// Ễ  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND TILDE]
-						case '\u1EC6': 
-						// Ệ  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND DOT BELOW]
-						case '\u24BA': 
-						// Ⓔ  [CIRCLED LATIN CAPITAL LETTER E]
-						case '\u2C7B': 
-						// ⱻ  [LATIN LETTER SMALL CAPITAL TURNED E]
-						case '\uFF25':  // Ｅ  [FULLWIDTH LATIN CAPITAL LETTER E]
-							output[outputPos++] = 'E';
-							break;
-						
-						case '\u00E8': 
-						// è  [LATIN SMALL LETTER E WITH GRAVE]
-						case '\u00E9': 
-						// é  [LATIN SMALL LETTER E WITH ACUTE]
-						case '\u00EA': 
-						// ê  [LATIN SMALL LETTER E WITH CIRCUMFLEX]
-						case '\u00EB': 
-						// ë  [LATIN SMALL LETTER E WITH DIAERESIS]
-						case '\u0113': 
-						// ē  [LATIN SMALL LETTER E WITH MACRON]
-						case '\u0115': 
-						// ĕ  [LATIN SMALL LETTER E WITH BREVE]
-						case '\u0117': 
-						// ė  [LATIN SMALL LETTER E WITH DOT ABOVE]
-						case '\u0119': 
-						// ę  [LATIN SMALL LETTER E WITH OGONEK]
-						case '\u011B': 
-						// ě  [LATIN SMALL LETTER E WITH CARON]
-						case '\u01DD': 
-						// ǝ  [LATIN SMALL LETTER TURNED E]
-						case '\u0205': 
-						// ȅ  [LATIN SMALL LETTER E WITH DOUBLE GRAVE]
-						case '\u0207': 
-						// ȇ  [LATIN SMALL LETTER E WITH INVERTED BREVE]
-						case '\u0229': 
-						// ȩ  [LATIN SMALL LETTER E WITH CEDILLA]
-						case '\u0247': 
-						// ɇ  [LATIN SMALL LETTER E WITH STROKE]
-						case '\u0258': 
-						// ɘ  [LATIN SMALL LETTER REVERSED E]
-						case '\u025B': 
-						// ɛ  [LATIN SMALL LETTER OPEN E]
-						case '\u025C': 
-						// ɜ  [LATIN SMALL LETTER REVERSED OPEN E]
-						case '\u025D': 
-						// ɝ  [LATIN SMALL LETTER REVERSED OPEN E WITH HOOK]
-						case '\u025E': 
-						// ɞ  [LATIN SMALL LETTER CLOSED REVERSED OPEN E]
-						case '\u029A': 
-						// ʚ  [LATIN SMALL LETTER CLOSED OPEN E]
-						case '\u1D08': 
-						// ᴈ  [LATIN SMALL LETTER TURNED OPEN E]
-						case '\u1D92': 
-						// ᶒ  [LATIN SMALL LETTER E WITH RETROFLEX HOOK]
-						case '\u1D93': 
-						// ᶓ  [LATIN SMALL LETTER OPEN E WITH RETROFLEX HOOK]
-						case '\u1D94': 
-						// ᶔ  [LATIN SMALL LETTER REVERSED OPEN E WITH RETROFLEX HOOK]
-						case '\u1E15': 
-						// ḕ  [LATIN SMALL LETTER E WITH MACRON AND GRAVE]
-						case '\u1E17': 
-						// ḗ  [LATIN SMALL LETTER E WITH MACRON AND ACUTE]
-						case '\u1E19': 
-						// ḙ  [LATIN SMALL LETTER E WITH CIRCUMFLEX BELOW]
-						case '\u1E1B': 
-						// ḛ  [LATIN SMALL LETTER E WITH TILDE BELOW]
-						case '\u1E1D': 
-						// ḝ  [LATIN SMALL LETTER E WITH CEDILLA AND BREVE]
-						case '\u1EB9': 
-						// ẹ  [LATIN SMALL LETTER E WITH DOT BELOW]
-						case '\u1EBB': 
-						// ẻ  [LATIN SMALL LETTER E WITH HOOK ABOVE]
-						case '\u1EBD': 
-						// ẽ  [LATIN SMALL LETTER E WITH TILDE]
-						case '\u1EBF': 
-						// ế  [LATIN SMALL LETTER E WITH CIRCUMFLEX AND ACUTE]
-						case '\u1EC1': 
-						// ề  [LATIN SMALL LETTER E WITH CIRCUMFLEX AND GRAVE]
-						case '\u1EC3': 
-						// ể  [LATIN SMALL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE]
-						case '\u1EC5': 
-						// ễ  [LATIN SMALL LETTER E WITH CIRCUMFLEX AND TILDE]
-						case '\u1EC7': 
-						// ệ  [LATIN SMALL LETTER E WITH CIRCUMFLEX AND DOT BELOW]
-						case '\u2091': 
-						// ₑ  [LATIN SUBSCRIPT SMALL LETTER E]
-						case '\u24D4': 
-						// ⓔ  [CIRCLED LATIN SMALL LETTER E]
-						case '\u2C78': 
-						// ⱸ  [LATIN SMALL LETTER E WITH NOTCH]
-						case '\uFF45':  // ｅ  [FULLWIDTH LATIN SMALL LETTER E]
-							output[outputPos++] = 'e';
-							break;
-						
-						case '\u24A0':  // ⒠  [PARENTHESIZED LATIN SMALL LETTER E]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'e';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u0191': 
-						// Ƒ  [LATIN CAPITAL LETTER F WITH HOOK]
-						case '\u1E1E': 
-						// Ḟ  [LATIN CAPITAL LETTER F WITH DOT ABOVE]
-						case '\u24BB': 
-						// Ⓕ  [CIRCLED LATIN CAPITAL LETTER F]
-						case '\uA730': 
-						// ꜰ  [LATIN LETTER SMALL CAPITAL F]
-						case '\uA77B': 
-						// Ꝼ  [LATIN CAPITAL LETTER INSULAR F]
-						case '\uA7FB': 
-						// ꟻ  [LATIN EPIGRAPHIC LETTER REVERSED F]
-						case '\uFF26':  // Ｆ  [FULLWIDTH LATIN CAPITAL LETTER F]
-							output[outputPos++] = 'F';
-							break;
-						
-						case '\u0192': 
-						// ƒ  [LATIN SMALL LETTER F WITH HOOK]
-						case '\u1D6E': 
-						// ᵮ  [LATIN SMALL LETTER F WITH MIDDLE TILDE]
-						case '\u1D82': 
-						// ᶂ  [LATIN SMALL LETTER F WITH PALATAL HOOK]
-						case '\u1E1F': 
-						// ḟ  [LATIN SMALL LETTER F WITH DOT ABOVE]
-						case '\u1E9B': 
-						// ẛ  [LATIN SMALL LETTER LONG S WITH DOT ABOVE]
-						case '\u24D5': 
-						// ⓕ  [CIRCLED LATIN SMALL LETTER F]
-						case '\uA77C': 
-						// ꝼ  [LATIN SMALL LETTER INSULAR F]
-						case '\uFF46':  // ｆ  [FULLWIDTH LATIN SMALL LETTER F]
-							output[outputPos++] = 'f';
-							break;
-						
-						case '\u24A1':  // ⒡  [PARENTHESIZED LATIN SMALL LETTER F]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'f';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\uFB00':  // ﬀ  [LATIN SMALL LIGATURE FF]
-							output[outputPos++] = 'f';
-							output[outputPos++] = 'f';
-							break;
-						
-						case '\uFB03':  // ﬃ  [LATIN SMALL LIGATURE FFI]
-							output[outputPos++] = 'f';
-							output[outputPos++] = 'f';
-							output[outputPos++] = 'i';
-							break;
-						
-						case '\uFB04':  // ﬄ  [LATIN SMALL LIGATURE FFL]
-							output[outputPos++] = 'f';
-							output[outputPos++] = 'f';
-							output[outputPos++] = 'l';
-							break;
-						
-						case '\uFB01':  // ﬁ  [LATIN SMALL LIGATURE FI]
-							output[outputPos++] = 'f';
-							output[outputPos++] = 'i';
-							break;
-						
-						case '\uFB02':  // ﬂ  [LATIN SMALL LIGATURE FL]
-							output[outputPos++] = 'f';
-							output[outputPos++] = 'l';
-							break;
-						
-						case '\u011C': 
-						// Ĝ  [LATIN CAPITAL LETTER G WITH CIRCUMFLEX]
-						case '\u011E': 
-						// Ğ  [LATIN CAPITAL LETTER G WITH BREVE]
-						case '\u0120': 
-						// Ġ  [LATIN CAPITAL LETTER G WITH DOT ABOVE]
-						case '\u0122': 
-						// Ģ  [LATIN CAPITAL LETTER G WITH CEDILLA]
-						case '\u0193': 
-						// Ɠ  [LATIN CAPITAL LETTER G WITH HOOK]
-						case '\u01E4': 
-						// Ǥ  [LATIN CAPITAL LETTER G WITH STROKE]
-						case '\u01E5': 
-						// ǥ  [LATIN SMALL LETTER G WITH STROKE]
-						case '\u01E6': 
-						// Ǧ  [LATIN CAPITAL LETTER G WITH CARON]
-						case '\u01E7': 
-						// ǧ  [LATIN SMALL LETTER G WITH CARON]
-						case '\u01F4': 
-						// Ǵ  [LATIN CAPITAL LETTER G WITH ACUTE]
-						case '\u0262': 
-						// ɢ  [LATIN LETTER SMALL CAPITAL G]
-						case '\u029B': 
-						// ʛ  [LATIN LETTER SMALL CAPITAL G WITH HOOK]
-						case '\u1E20': 
-						// Ḡ  [LATIN CAPITAL LETTER G WITH MACRON]
-						case '\u24BC': 
-						// Ⓖ  [CIRCLED LATIN CAPITAL LETTER G]
-						case '\uA77D': 
-						// Ᵹ  [LATIN CAPITAL LETTER INSULAR G]
-						case '\uA77E': 
-						// Ꝿ  [LATIN CAPITAL LETTER TURNED INSULAR G]
-						case '\uFF27':  // Ｇ  [FULLWIDTH LATIN CAPITAL LETTER G]
-							output[outputPos++] = 'G';
-							break;
-						
-						case '\u011D': 
-						// ĝ  [LATIN SMALL LETTER G WITH CIRCUMFLEX]
-						case '\u011F': 
-						// ğ  [LATIN SMALL LETTER G WITH BREVE]
-						case '\u0121': 
-						// ġ  [LATIN SMALL LETTER G WITH DOT ABOVE]
-						case '\u0123': 
-						// ģ  [LATIN SMALL LETTER G WITH CEDILLA]
-						case '\u01F5': 
-						// ǵ  [LATIN SMALL LETTER G WITH ACUTE]
-						case '\u0260': 
-						// ɠ  [LATIN SMALL LETTER G WITH HOOK]
-						case '\u0261': 
-						// ɡ  [LATIN SMALL LETTER SCRIPT G]
-						case '\u1D77': 
-						// ᵷ  [LATIN SMALL LETTER TURNED G]
-						case '\u1D79': 
-						// ᵹ  [LATIN SMALL LETTER INSULAR G]
-						case '\u1D83': 
-						// ᶃ  [LATIN SMALL LETTER G WITH PALATAL HOOK]
-						case '\u1E21': 
-						// ḡ  [LATIN SMALL LETTER G WITH MACRON]
-						case '\u24D6': 
-						// ⓖ  [CIRCLED LATIN SMALL LETTER G]
-						case '\uA77F': 
-						// ꝿ  [LATIN SMALL LETTER TURNED INSULAR G]
-						case '\uFF47':  // ｇ  [FULLWIDTH LATIN SMALL LETTER G]
-							output[outputPos++] = 'g';
-							break;
-						
-						case '\u24A2':  // ⒢  [PARENTHESIZED LATIN SMALL LETTER G]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'g';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u0124': 
-						// Ĥ  [LATIN CAPITAL LETTER H WITH CIRCUMFLEX]
-						case '\u0126': 
-						// Ħ  [LATIN CAPITAL LETTER H WITH STROKE]
-						case '\u021E': 
-						// Ȟ  [LATIN CAPITAL LETTER H WITH CARON]
-						case '\u029C': 
-						// ʜ  [LATIN LETTER SMALL CAPITAL H]
-						case '\u1E22': 
-						// Ḣ  [LATIN CAPITAL LETTER H WITH DOT ABOVE]
-						case '\u1E24': 
-						// Ḥ  [LATIN CAPITAL LETTER H WITH DOT BELOW]
-						case '\u1E26': 
-						// Ḧ  [LATIN CAPITAL LETTER H WITH DIAERESIS]
-						case '\u1E28': 
-						// Ḩ  [LATIN CAPITAL LETTER H WITH CEDILLA]
-						case '\u1E2A': 
-						// Ḫ  [LATIN CAPITAL LETTER H WITH BREVE BELOW]
-						case '\u24BD': 
-						// Ⓗ  [CIRCLED LATIN CAPITAL LETTER H]
-						case '\u2C67': 
-						// Ⱨ  [LATIN CAPITAL LETTER H WITH DESCENDER]
-						case '\u2C75': 
-						// Ⱶ  [LATIN CAPITAL LETTER HALF H]
-						case '\uFF28':  // Ｈ  [FULLWIDTH LATIN CAPITAL LETTER H]
-							output[outputPos++] = 'H';
-							break;
-						
-						case '\u0125': 
-						// ĥ  [LATIN SMALL LETTER H WITH CIRCUMFLEX]
-						case '\u0127': 
-						// ħ  [LATIN SMALL LETTER H WITH STROKE]
-						case '\u021F': 
-						// ȟ  [LATIN SMALL LETTER H WITH CARON]
-						case '\u0265': 
-						// ɥ  [LATIN SMALL LETTER TURNED H]
-						case '\u0266': 
-						// ɦ  [LATIN SMALL LETTER H WITH HOOK]
-						case '\u02AE': 
-						// ʮ  [LATIN SMALL LETTER TURNED H WITH FISHHOOK]
-						case '\u02AF': 
-						// ʯ  [LATIN SMALL LETTER TURNED H WITH FISHHOOK AND TAIL]
-						case '\u1E23': 
-						// ḣ  [LATIN SMALL LETTER H WITH DOT ABOVE]
-						case '\u1E25': 
-						// ḥ  [LATIN SMALL LETTER H WITH DOT BELOW]
-						case '\u1E27': 
-						// ḧ  [LATIN SMALL LETTER H WITH DIAERESIS]
-						case '\u1E29': 
-						// ḩ  [LATIN SMALL LETTER H WITH CEDILLA]
-						case '\u1E2B': 
-						// ḫ  [LATIN SMALL LETTER H WITH BREVE BELOW]
-						case '\u1E96': 
-						// ẖ  [LATIN SMALL LETTER H WITH LINE BELOW]
-						case '\u24D7': 
-						// ⓗ  [CIRCLED LATIN SMALL LETTER H]
-						case '\u2C68': 
-						// ⱨ  [LATIN SMALL LETTER H WITH DESCENDER]
-						case '\u2C76': 
-						// ⱶ  [LATIN SMALL LETTER HALF H]
-						case '\uFF48':  // ｈ  [FULLWIDTH LATIN SMALL LETTER H]
-							output[outputPos++] = 'h';
-							break;
-						
-						case '\u01F6':  // Ƕ  http://en.wikipedia.org/wiki/Hwair  [LATIN CAPITAL LETTER HWAIR]
-							output[outputPos++] = 'H';
-							output[outputPos++] = 'V';
-							break;
-						
-						case '\u24A3':  // ⒣  [PARENTHESIZED LATIN SMALL LETTER H]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'h';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u0195':  // ƕ  [LATIN SMALL LETTER HV]
-							output[outputPos++] = 'h';
-							output[outputPos++] = 'v';
-							break;
-						
-						case '\u00CC': 
-						// Ì  [LATIN CAPITAL LETTER I WITH GRAVE]
-						case '\u00CD': 
-						// Í  [LATIN CAPITAL LETTER I WITH ACUTE]
-						case '\u00CE': 
-						// Î  [LATIN CAPITAL LETTER I WITH CIRCUMFLEX]
-						case '\u00CF': 
-						// Ï  [LATIN CAPITAL LETTER I WITH DIAERESIS]
-						case '\u0128': 
-						// Ĩ  [LATIN CAPITAL LETTER I WITH TILDE]
-						case '\u012A': 
-						// Ī  [LATIN CAPITAL LETTER I WITH MACRON]
-						case '\u012C': 
-						// Ĭ  [LATIN CAPITAL LETTER I WITH BREVE]
-						case '\u012E': 
-						// Į  [LATIN CAPITAL LETTER I WITH OGONEK]
-						case '\u0130': 
-						// İ  [LATIN CAPITAL LETTER I WITH DOT ABOVE]
-						case '\u0196': 
-						// Ɩ  [LATIN CAPITAL LETTER IOTA]
-						case '\u0197': 
-						// Ɨ  [LATIN CAPITAL LETTER I WITH STROKE]
-						case '\u01CF': 
-						// Ǐ  [LATIN CAPITAL LETTER I WITH CARON]
-						case '\u0208': 
-						// Ȉ  [LATIN CAPITAL LETTER I WITH DOUBLE GRAVE]
-						case '\u020A': 
-						// Ȋ  [LATIN CAPITAL LETTER I WITH INVERTED BREVE]
-						case '\u026A': 
-						// ɪ  [LATIN LETTER SMALL CAPITAL I]
-						case '\u1D7B': 
-						// ᵻ  [LATIN SMALL CAPITAL LETTER I WITH STROKE]
-						case '\u1E2C': 
-						// Ḭ  [LATIN CAPITAL LETTER I WITH TILDE BELOW]
-						case '\u1E2E': 
-						// Ḯ  [LATIN CAPITAL LETTER I WITH DIAERESIS AND ACUTE]
-						case '\u1EC8': 
-						// Ỉ  [LATIN CAPITAL LETTER I WITH HOOK ABOVE]
-						case '\u1ECA': 
-						// Ị  [LATIN CAPITAL LETTER I WITH DOT BELOW]
-						case '\u24BE': 
-						// Ⓘ  [CIRCLED LATIN CAPITAL LETTER I]
-						case '\uA7FE': 
-						// ꟾ  [LATIN EPIGRAPHIC LETTER I LONGA]
-						case '\uFF29':  // Ｉ  [FULLWIDTH LATIN CAPITAL LETTER I]
-							output[outputPos++] = 'I';
-							break;
-						
-						case '\u00EC': 
-						// ì  [LATIN SMALL LETTER I WITH GRAVE]
-						case '\u00ED': 
-						// í  [LATIN SMALL LETTER I WITH ACUTE]
-						case '\u00EE': 
-						// î  [LATIN SMALL LETTER I WITH CIRCUMFLEX]
-						case '\u00EF': 
-						// ï  [LATIN SMALL LETTER I WITH DIAERESIS]
-						case '\u0129': 
-						// ĩ  [LATIN SMALL LETTER I WITH TILDE]
-						case '\u012B': 
-						// ī  [LATIN SMALL LETTER I WITH MACRON]
-						case '\u012D': 
-						// ĭ  [LATIN SMALL LETTER I WITH BREVE]
-						case '\u012F': 
-						// į  [LATIN SMALL LETTER I WITH OGONEK]
-						case '\u0131': 
-						// ı  [LATIN SMALL LETTER DOTLESS I]
-						case '\u01D0': 
-						// ǐ  [LATIN SMALL LETTER I WITH CARON]
-						case '\u0209': 
-						// ȉ  [LATIN SMALL LETTER I WITH DOUBLE GRAVE]
-						case '\u020B': 
-						// ȋ  [LATIN SMALL LETTER I WITH INVERTED BREVE]
-						case '\u0268': 
-						// ɨ  [LATIN SMALL LETTER I WITH STROKE]
-						case '\u1D09': 
-						// ᴉ  [LATIN SMALL LETTER TURNED I]
-						case '\u1D62': 
-						// ᵢ  [LATIN SUBSCRIPT SMALL LETTER I]
-						case '\u1D7C': 
-						// ᵼ  [LATIN SMALL LETTER IOTA WITH STROKE]
-						case '\u1D96': 
-						// ᶖ  [LATIN SMALL LETTER I WITH RETROFLEX HOOK]
-						case '\u1E2D': 
-						// ḭ  [LATIN SMALL LETTER I WITH TILDE BELOW]
-						case '\u1E2F': 
-						// ḯ  [LATIN SMALL LETTER I WITH DIAERESIS AND ACUTE]
-						case '\u1EC9': 
-						// ỉ  [LATIN SMALL LETTER I WITH HOOK ABOVE]
-						case '\u1ECB': 
-						// ị  [LATIN SMALL LETTER I WITH DOT BELOW]
-						case '\u2071': 
-						// ⁱ  [SUPERSCRIPT LATIN SMALL LETTER I]
-						case '\u24D8': 
-						// ⓘ  [CIRCLED LATIN SMALL LETTER I]
-						case '\uFF49':  // ｉ  [FULLWIDTH LATIN SMALL LETTER I]
-							output[outputPos++] = 'i';
-							break;
-						
-						case '\u0132':  // Ĳ  [LATIN CAPITAL LIGATURE IJ]
-							output[outputPos++] = 'I';
-							output[outputPos++] = 'J';
-							break;
-						
-						case '\u24A4':  // ⒤  [PARENTHESIZED LATIN SMALL LETTER I]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'i';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u0133':  // ĳ  [LATIN SMALL LIGATURE IJ]
-							output[outputPos++] = 'i';
-							output[outputPos++] = 'j';
-							break;
-						
-						case '\u0134': 
-						// Ĵ  [LATIN CAPITAL LETTER J WITH CIRCUMFLEX]
-						case '\u0248': 
-						// Ɉ  [LATIN CAPITAL LETTER J WITH STROKE]
-						case '\u1D0A': 
-						// ᴊ  [LATIN LETTER SMALL CAPITAL J]
-						case '\u24BF': 
-						// Ⓙ  [CIRCLED LATIN CAPITAL LETTER J]
-						case '\uFF2A':  // Ｊ  [FULLWIDTH LATIN CAPITAL LETTER J]
-							output[outputPos++] = 'J';
-							break;
-						
-						case '\u0135': 
-						// ĵ  [LATIN SMALL LETTER J WITH CIRCUMFLEX]
-						case '\u01F0': 
-						// Ç°  [LATIN SMALL LETTER J WITH CARON]
-						case '\u0237': 
-						// È·  [LATIN SMALL LETTER DOTLESS J]
-						case '\u0249': 
-						// ɉ  [LATIN SMALL LETTER J WITH STROKE]
-						case '\u025F': 
-						// ÉŸ  [LATIN SMALL LETTER DOTLESS J WITH STROKE]
-						case '\u0284': 
-						// Ê„  [LATIN SMALL LETTER DOTLESS J WITH STROKE AND HOOK]
-						case '\u029D': 
-						// �  [LATIN SMALL LETTER J WITH CROSSED-TAIL]
-						case '\u24D9': 
-						// â“™  [CIRCLED LATIN SMALL LETTER J]
-						case '\u2C7C': 
-						// â±¼  [LATIN SUBSCRIPT SMALL LETTER J]
-						case '\uFF4A':  // j  [FULLWIDTH LATIN SMALL LETTER J]
-							output[outputPos++] = 'j';
-							break;
-						
-						case '\u24A5':  // â’¥  [PARENTHESIZED LATIN SMALL LETTER J]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'j';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u0136': 
-						// Ķ  [LATIN CAPITAL LETTER K WITH CEDILLA]
-						case '\u0198': 
-						// Ƙ  [LATIN CAPITAL LETTER K WITH HOOK]
-						case '\u01E8': 
-						// Ǩ  [LATIN CAPITAL LETTER K WITH CARON]
-						case '\u1D0B': 
-						// á´‹  [LATIN LETTER SMALL CAPITAL K]
-						case '\u1E30': 
-						// Ḱ  [LATIN CAPITAL LETTER K WITH ACUTE]
-						case '\u1E32': 
-						// Ḳ  [LATIN CAPITAL LETTER K WITH DOT BELOW]
-						case '\u1E34': 
-						// Ḵ  [LATIN CAPITAL LETTER K WITH LINE BELOW]
-						case '\u24C0': 
-						// â“€  [CIRCLED LATIN CAPITAL LETTER K]
-						case '\u2C69': 
-						// Ⱪ  [LATIN CAPITAL LETTER K WITH DESCENDER]
-						case '\uA740': 
-						// �  [LATIN CAPITAL LETTER K WITH STROKE]
-						case '\uA742': 
-						// �  [LATIN CAPITAL LETTER K WITH DIAGONAL STROKE]
-						case '\uA744': 
-						// �  [LATIN CAPITAL LETTER K WITH STROKE AND DIAGONAL STROKE]
-						case '\uFF2B':  // K  [FULLWIDTH LATIN CAPITAL LETTER K]
-							output[outputPos++] = 'K';
-							break;
-						
-						case '\u0137': 
-						// Ä·  [LATIN SMALL LETTER K WITH CEDILLA]
-						case '\u0199': 
-						// Æ™  [LATIN SMALL LETTER K WITH HOOK]
-						case '\u01E9': 
-						// Ç©  [LATIN SMALL LETTER K WITH CARON]
-						case '\u029E': 
-						// Êž  [LATIN SMALL LETTER TURNED K]
-						case '\u1D84': 
-						// ᶄ  [LATIN SMALL LETTER K WITH PALATAL HOOK]
-						case '\u1E31': 
-						// ḱ  [LATIN SMALL LETTER K WITH ACUTE]
-						case '\u1E33': 
-						// ḳ  [LATIN SMALL LETTER K WITH DOT BELOW]
-						case '\u1E35': 
-						// ḵ  [LATIN SMALL LETTER K WITH LINE BELOW]
-						case '\u24DA': 
-						// â“š  [CIRCLED LATIN SMALL LETTER K]
-						case '\u2C6A': 
-						// ⱪ  [LATIN SMALL LETTER K WITH DESCENDER]
-						case '\uA741': 
-						// �  [LATIN SMALL LETTER K WITH STROKE]
-						case '\uA743': 
-						// �  [LATIN SMALL LETTER K WITH DIAGONAL STROKE]
-						case '\uA745': 
-						// �  [LATIN SMALL LETTER K WITH STROKE AND DIAGONAL STROKE]
-						case '\uFF4B':  // k  [FULLWIDTH LATIN SMALL LETTER K]
-							output[outputPos++] = 'k';
-							break;
-						
-						case '\u24A6':  // â’¦  [PARENTHESIZED LATIN SMALL LETTER K]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'k';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u0139': 
-						// Ĺ  [LATIN CAPITAL LETTER L WITH ACUTE]
-						case '\u013B': 
-						// Ä»  [LATIN CAPITAL LETTER L WITH CEDILLA]
-						case '\u013D': 
-						// Ľ  [LATIN CAPITAL LETTER L WITH CARON]
-						case '\u013F': 
-						// Ä¿  [LATIN CAPITAL LETTER L WITH MIDDLE DOT]
-						case '\u0141': 
-						// �  [LATIN CAPITAL LETTER L WITH STROKE]
-						case '\u023D': 
-						// Ƚ  [LATIN CAPITAL LETTER L WITH BAR]
-						case '\u029F': 
-						// ÊŸ  [LATIN LETTER SMALL CAPITAL L]
-						case '\u1D0C': 
-						// ᴌ  [LATIN LETTER SMALL CAPITAL L WITH STROKE]
-						case '\u1E36': 
-						// Ḷ  [LATIN CAPITAL LETTER L WITH DOT BELOW]
-						case '\u1E38': 
-						// Ḹ  [LATIN CAPITAL LETTER L WITH DOT BELOW AND MACRON]
-						case '\u1E3A': 
-						// Ḻ  [LATIN CAPITAL LETTER L WITH LINE BELOW]
-						case '\u1E3C': 
-						// Ḽ  [LATIN CAPITAL LETTER L WITH CIRCUMFLEX BELOW]
-						case '\u24C1': 
-						// �  [CIRCLED LATIN CAPITAL LETTER L]
-						case '\u2C60': 
-						// â±   [LATIN CAPITAL LETTER L WITH DOUBLE BAR]
-						case '\u2C62': 
-						// â±¢  [LATIN CAPITAL LETTER L WITH MIDDLE TILDE]
-						case '\uA746': 
-						// �  [LATIN CAPITAL LETTER BROKEN L]
-						case '\uA748': 
-						// �  [LATIN CAPITAL LETTER L WITH HIGH STROKE]
-						case '\uA780': 
-						// Ꞁ  [LATIN CAPITAL LETTER TURNED L]
-						case '\uFF2C':  // L  [FULLWIDTH LATIN CAPITAL LETTER L]
-							output[outputPos++] = 'L';
-							break;
-						
-						case '\u013A': 
-						// ĺ  [LATIN SMALL LETTER L WITH ACUTE]
-						case '\u013C': 
-						// ļ  [LATIN SMALL LETTER L WITH CEDILLA]
-						case '\u013E': 
-						// ľ  [LATIN SMALL LETTER L WITH CARON]
-						case '\u0140': 
-						// ŀ  [LATIN SMALL LETTER L WITH MIDDLE DOT]
-						case '\u0142': 
-						// Å‚  [LATIN SMALL LETTER L WITH STROKE]
-						case '\u019A': 
-						// Æš  [LATIN SMALL LETTER L WITH BAR]
-						case '\u0234': 
-						// È´  [LATIN SMALL LETTER L WITH CURL]
-						case '\u026B': 
-						// É«  [LATIN SMALL LETTER L WITH MIDDLE TILDE]
-						case '\u026C': 
-						// ɬ  [LATIN SMALL LETTER L WITH BELT]
-						case '\u026D': 
-						// É­  [LATIN SMALL LETTER L WITH RETROFLEX HOOK]
-						case '\u1D85': 
-						// ᶅ  [LATIN SMALL LETTER L WITH PALATAL HOOK]
-						case '\u1E37': 
-						// ḷ  [LATIN SMALL LETTER L WITH DOT BELOW]
-						case '\u1E39': 
-						// ḹ  [LATIN SMALL LETTER L WITH DOT BELOW AND MACRON]
-						case '\u1E3B': 
-						// ḻ  [LATIN SMALL LETTER L WITH LINE BELOW]
-						case '\u1E3D': 
-						// ḽ  [LATIN SMALL LETTER L WITH CIRCUMFLEX BELOW]
-						case '\u24DB': 
-						// â“›  [CIRCLED LATIN SMALL LETTER L]
-						case '\u2C61': 
-						// ⱡ  [LATIN SMALL LETTER L WITH DOUBLE BAR]
-						case '\uA747': 
-						// �  [LATIN SMALL LETTER BROKEN L]
-						case '\uA749': 
-						// �  [LATIN SMALL LETTER L WITH HIGH STROKE]
-						case '\uA781': 
-						// �  [LATIN SMALL LETTER TURNED L]
-						case '\uFF4C':  // l  [FULLWIDTH LATIN SMALL LETTER L]
-							output[outputPos++] = 'l';
-							break;
-						
-						case '\u01C7':  // LJ  [LATIN CAPITAL LETTER LJ]
-							output[outputPos++] = 'L';
-							output[outputPos++] = 'J';
-							break;
-						
-						case '\u1EFA':  // Ỻ  [LATIN CAPITAL LETTER MIDDLE-WELSH LL]
-							output[outputPos++] = 'L';
-							output[outputPos++] = 'L';
-							break;
-						
-						case '\u01C8':  // Lj  [LATIN CAPITAL LETTER L WITH SMALL LETTER J]
-							output[outputPos++] = 'L';
-							output[outputPos++] = 'j';
-							break;
-						
-						case '\u24A7':  // â’§  [PARENTHESIZED LATIN SMALL LETTER L]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'l';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u01C9':  // lj  [LATIN SMALL LETTER LJ]
-							output[outputPos++] = 'l';
-							output[outputPos++] = 'j';
-							break;
-						
-						case '\u1EFB':  // á»»  [LATIN SMALL LETTER MIDDLE-WELSH LL]
-							output[outputPos++] = 'l';
-							output[outputPos++] = 'l';
-							break;
-						
-						case '\u02AA':  // ʪ  [LATIN SMALL LETTER LS DIGRAPH]
-							output[outputPos++] = 'l';
-							output[outputPos++] = 's';
-							break;
-						
-						case '\u02AB':  // Ê«  [LATIN SMALL LETTER LZ DIGRAPH]
-							output[outputPos++] = 'l';
-							output[outputPos++] = 'z';
-							break;
-						
-						case '\u019C': 
-						// Ɯ  [LATIN CAPITAL LETTER TURNED M]
-						case '\u1D0D': 
-						// á´�  [LATIN LETTER SMALL CAPITAL M]
-						case '\u1E3E': 
-						// Ḿ  [LATIN CAPITAL LETTER M WITH ACUTE]
-						case '\u1E40': 
-						// á¹€  [LATIN CAPITAL LETTER M WITH DOT ABOVE]
-						case '\u1E42': 
-						// Ṃ  [LATIN CAPITAL LETTER M WITH DOT BELOW]
-						case '\u24C2': 
-						// â“‚  [CIRCLED LATIN CAPITAL LETTER M]
-						case '\u2C6E': 
-						// â±®  [LATIN CAPITAL LETTER M WITH HOOK]
-						case '\uA7FD': 
-						// ꟽ  [LATIN EPIGRAPHIC LETTER INVERTED M]
-						case '\uA7FF': 
-						// ꟿ  [LATIN EPIGRAPHIC LETTER ARCHAIC M]
-						case '\uFF2D':  // ï¼­  [FULLWIDTH LATIN CAPITAL LETTER M]
-							output[outputPos++] = 'M';
-							break;
-						
-						case '\u026F': 
-						// ɯ  [LATIN SMALL LETTER TURNED M]
-						case '\u0270': 
-						// É°  [LATIN SMALL LETTER TURNED M WITH LONG LEG]
-						case '\u0271': 
-						// ɱ  [LATIN SMALL LETTER M WITH HOOK]
-						case '\u1D6F': 
-						// ᵯ  [LATIN SMALL LETTER M WITH MIDDLE TILDE]
-						case '\u1D86': 
-						// ᶆ  [LATIN SMALL LETTER M WITH PALATAL HOOK]
-						case '\u1E3F': 
-						// ḿ  [LATIN SMALL LETTER M WITH ACUTE]
-						case '\u1E41': 
-						// �  [LATIN SMALL LETTER M WITH DOT ABOVE]
-						case '\u1E43': 
-						// ṃ  [LATIN SMALL LETTER M WITH DOT BELOW]
-						case '\u24DC': 
-						// ⓜ  [CIRCLED LATIN SMALL LETTER M]
-						case '\uFF4D':  // �  [FULLWIDTH LATIN SMALL LETTER M]
-							output[outputPos++] = 'm';
-							break;
-						
-						case '\u24A8':  // â’¨  [PARENTHESIZED LATIN SMALL LETTER M]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'm';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u00D1': 
-						// Ñ  [LATIN CAPITAL LETTER N WITH TILDE]
-						case '\u0143': 
-						// Ã…Æ’  [LATIN CAPITAL LETTER N WITH ACUTE]
-						case '\u0145': 
-						// Å…  [LATIN CAPITAL LETTER N WITH CEDILLA]
-						case '\u0147': 
-						// Ň  [LATIN CAPITAL LETTER N WITH CARON]
-						case '\u014A': 
-						// Ã…Å   http://en.wikipedia.org/wiki/Eng_(letter)  [LATIN CAPITAL LETTER ENG]
-						case '\u019D': 
-						// �  [LATIN CAPITAL LETTER N WITH LEFT HOOK]
-						case '\u01F8': 
-						// Ǹ  [LATIN CAPITAL LETTER N WITH GRAVE]
-						case '\u0220': 
-						// È   [LATIN CAPITAL LETTER N WITH LONG RIGHT LEG]
-						case '\u0274': 
-						// É´  [LATIN LETTER SMALL CAPITAL N]
-						case '\u1D0E': 
-						// á´Ž  [LATIN LETTER SMALL CAPITAL REVERSED N]
-						case '\u1E44': 
-						// Ṅ  [LATIN CAPITAL LETTER N WITH DOT ABOVE]
-						case '\u1E46': 
-						// Ṇ  [LATIN CAPITAL LETTER N WITH DOT BELOW]
-						case '\u1E48': 
-						// Ṉ  [LATIN CAPITAL LETTER N WITH LINE BELOW]
-						case '\u1E4A': 
-						// Ṋ  [LATIN CAPITAL LETTER N WITH CIRCUMFLEX BELOW]
-						case '\u24C3': 
-						// Ⓝ  [CIRCLED LATIN CAPITAL LETTER N]
-						case '\uFF2E':  // ï¼®  [FULLWIDTH LATIN CAPITAL LETTER N]
-							output[outputPos++] = 'N';
-							break;
-						
-						case '\u00F1': 
-						// ñ  [LATIN SMALL LETTER N WITH TILDE]
-						case '\u0144': 
-						// Å„  [LATIN SMALL LETTER N WITH ACUTE]
-						case '\u0146': 
-						// ņ  [LATIN SMALL LETTER N WITH CEDILLA]
-						case '\u0148': 
-						// ň  [LATIN SMALL LETTER N WITH CARON]
-						case '\u0149': 
-						// ʼn  [LATIN SMALL LETTER N PRECEDED BY APOSTROPHE]
-						case '\u014B': 
-						// Å‹  http://en.wikipedia.org/wiki/Eng_(letter)  [LATIN SMALL LETTER ENG]
-						case '\u019E': 
-						// Æž  [LATIN SMALL LETTER N WITH LONG RIGHT LEG]
-						case '\u01F9': 
-						// ǹ  [LATIN SMALL LETTER N WITH GRAVE]
-						case '\u0235': 
-						// ȵ  [LATIN SMALL LETTER N WITH CURL]
-						case '\u0272': 
-						// ɲ  [LATIN SMALL LETTER N WITH LEFT HOOK]
-						case '\u0273': 
-						// ɳ  [LATIN SMALL LETTER N WITH RETROFLEX HOOK]
-						case '\u1D70': 
-						// áµ°  [LATIN SMALL LETTER N WITH MIDDLE TILDE]
-						case '\u1D87': 
-						// ᶇ  [LATIN SMALL LETTER N WITH PALATAL HOOK]
-						case '\u1E45': 
-						// á¹…  [LATIN SMALL LETTER N WITH DOT ABOVE]
-						case '\u1E47': 
-						// ṇ  [LATIN SMALL LETTER N WITH DOT BELOW]
-						case '\u1E49': 
-						// ṉ  [LATIN SMALL LETTER N WITH LINE BELOW]
-						case '\u1E4B': 
-						// ṋ  [LATIN SMALL LETTER N WITH CIRCUMFLEX BELOW]
-						case '\u207F': 
-						// �  [SUPERSCRIPT LATIN SMALL LETTER N]
-						case '\u24DD': 
-						// �  [CIRCLED LATIN SMALL LETTER N]
-						case '\uFF4E':  // n  [FULLWIDTH LATIN SMALL LETTER N]
-							output[outputPos++] = 'n';
-							break;
-						
-						case '\u01CA':  // ÇŠ  [LATIN CAPITAL LETTER NJ]
-							output[outputPos++] = 'N';
-							output[outputPos++] = 'J';
-							break;
-						
-						case '\u01CB':  // Ç‹  [LATIN CAPITAL LETTER N WITH SMALL LETTER J]
-							output[outputPos++] = 'N';
-							output[outputPos++] = 'j';
-							break;
-						
-						case '\u24A9':  // â’©  [PARENTHESIZED LATIN SMALL LETTER N]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'n';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u01CC':  // nj  [LATIN SMALL LETTER NJ]
-							output[outputPos++] = 'n';
-							output[outputPos++] = 'j';
-							break;
-						
-						case '\u00D2': 
-						// Ã’  [LATIN CAPITAL LETTER O WITH GRAVE]
-						case '\u00D3': 
-						// Ó  [LATIN CAPITAL LETTER O WITH ACUTE]
-						case '\u00D4': 
-						// �?  [LATIN CAPITAL LETTER O WITH CIRCUMFLEX]
-						case '\u00D5': 
-						// Õ  [LATIN CAPITAL LETTER O WITH TILDE]
-						case '\u00D6': 
-						// Ö  [LATIN CAPITAL LETTER O WITH DIAERESIS]
-						case '\u00D8': 
-						// Ø  [LATIN CAPITAL LETTER O WITH STROKE]
-						case '\u014C': 
-						// Ã…Å’  [LATIN CAPITAL LETTER O WITH MACRON]
-						case '\u014E': 
-						// ÅŽ  [LATIN CAPITAL LETTER O WITH BREVE]
-						case '\u0150': 
-						// �  [LATIN CAPITAL LETTER O WITH DOUBLE ACUTE]
-						case '\u0186': 
-						// Ɔ  [LATIN CAPITAL LETTER OPEN O]
-						case '\u019F': 
-						// ÆŸ  [LATIN CAPITAL LETTER O WITH MIDDLE TILDE]
-						case '\u01A0': 
-						// Æ   [LATIN CAPITAL LETTER O WITH HORN]
-						case '\u01D1': 
-						// Ç‘  [LATIN CAPITAL LETTER O WITH CARON]
-						case '\u01EA': 
-						// Ǫ  [LATIN CAPITAL LETTER O WITH OGONEK]
-						case '\u01EC': 
-						// Ǭ  [LATIN CAPITAL LETTER O WITH OGONEK AND MACRON]
-						case '\u01FE': 
-						// Ǿ  [LATIN CAPITAL LETTER O WITH STROKE AND ACUTE]
-						case '\u020C': 
-						// Ȍ  [LATIN CAPITAL LETTER O WITH DOUBLE GRAVE]
-						case '\u020E': 
-						// ÈŽ  [LATIN CAPITAL LETTER O WITH INVERTED BREVE]
-						case '\u022A': 
-						// Ȫ  [LATIN CAPITAL LETTER O WITH DIAERESIS AND MACRON]
-						case '\u022C': 
-						// Ȭ  [LATIN CAPITAL LETTER O WITH TILDE AND MACRON]
-						case '\u022E': 
-						// È®  [LATIN CAPITAL LETTER O WITH DOT ABOVE]
-						case '\u0230': 
-						// È°  [LATIN CAPITAL LETTER O WITH DOT ABOVE AND MACRON]
-						case '\u1D0F': 
-						// á´�  [LATIN LETTER SMALL CAPITAL O]
-						case '\u1D10': 
-						// á´�  [LATIN LETTER SMALL CAPITAL OPEN O]
-						case '\u1E4C': 
-						// Ṍ  [LATIN CAPITAL LETTER O WITH TILDE AND ACUTE]
-						case '\u1E4E': 
-						// Ṏ  [LATIN CAPITAL LETTER O WITH TILDE AND DIAERESIS]
-						case '\u1E50': 
-						// �  [LATIN CAPITAL LETTER O WITH MACRON AND GRAVE]
-						case '\u1E52': 
-						// á¹’  [LATIN CAPITAL LETTER O WITH MACRON AND ACUTE]
-						case '\u1ECC': 
-						// Ọ  [LATIN CAPITAL LETTER O WITH DOT BELOW]
-						case '\u1ECE': 
-						// Ỏ  [LATIN CAPITAL LETTER O WITH HOOK ABOVE]
-						case '\u1ED0': 
-						// �  [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND ACUTE]
-						case '\u1ED2': 
-						// á»’  [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND GRAVE]
-						case '\u1ED4': 
-						// �?  [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE]
-						case '\u1ED6': 
-						// á»–  [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND TILDE]
-						case '\u1ED8': 
-						// Ộ  [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND DOT BELOW]
-						case '\u1EDA': 
-						// Ớ  [LATIN CAPITAL LETTER O WITH HORN AND ACUTE]
-						case '\u1EDC': 
-						// Ờ  [LATIN CAPITAL LETTER O WITH HORN AND GRAVE]
-						case '\u1EDE': 
-						// Ở  [LATIN CAPITAL LETTER O WITH HORN AND HOOK ABOVE]
-						case '\u1EE0': 
-						// á»   [LATIN CAPITAL LETTER O WITH HORN AND TILDE]
-						case '\u1EE2': 
-						// Ợ  [LATIN CAPITAL LETTER O WITH HORN AND DOT BELOW]
-						case '\u24C4': 
-						// â“„  [CIRCLED LATIN CAPITAL LETTER O]
-						case '\uA74A': 
-						// �  [LATIN CAPITAL LETTER O WITH LONG STROKE OVERLAY]
-						case '\uA74C': 
-						// �  [LATIN CAPITAL LETTER O WITH LOOP]
-						case '\uFF2F':  // O  [FULLWIDTH LATIN CAPITAL LETTER O]
-							output[outputPos++] = 'O';
-							break;
-						
-						case '\u00F2': 
-						// ò  [LATIN SMALL LETTER O WITH GRAVE]
-						case '\u00F3': 
-						// ó  [LATIN SMALL LETTER O WITH ACUTE]
-						case '\u00F4': 
-						// ô  [LATIN SMALL LETTER O WITH CIRCUMFLEX]
-						case '\u00F5': 
-						// õ  [LATIN SMALL LETTER O WITH TILDE]
-						case '\u00F6': 
-						// ö  [LATIN SMALL LETTER O WITH DIAERESIS]
-						case '\u00F8': 
-						// ø  [LATIN SMALL LETTER O WITH STROKE]
-						case '\u014D': 
-						// �  [LATIN SMALL LETTER O WITH MACRON]
-						case '\u014F': 
-						// �  [LATIN SMALL LETTER O WITH BREVE]
-						case '\u0151': 
-						// Å‘  [LATIN SMALL LETTER O WITH DOUBLE ACUTE]
-						case '\u01A1': 
-						// Æ¡  [LATIN SMALL LETTER O WITH HORN]
-						case '\u01D2': 
-						// Ç’  [LATIN SMALL LETTER O WITH CARON]
-						case '\u01EB': 
-						// Ç«  [LATIN SMALL LETTER O WITH OGONEK]
-						case '\u01ED': 
-						// Ç­  [LATIN SMALL LETTER O WITH OGONEK AND MACRON]
-						case '\u01FF': 
-						// Ç¿  [LATIN SMALL LETTER O WITH STROKE AND ACUTE]
-						case '\u020D': 
-						// �  [LATIN SMALL LETTER O WITH DOUBLE GRAVE]
-						case '\u020F': 
-						// �  [LATIN SMALL LETTER O WITH INVERTED BREVE]
-						case '\u022B': 
-						// È«  [LATIN SMALL LETTER O WITH DIAERESIS AND MACRON]
-						case '\u022D': 
-						// È­  [LATIN SMALL LETTER O WITH TILDE AND MACRON]
-						case '\u022F': 
-						// ȯ  [LATIN SMALL LETTER O WITH DOT ABOVE]
-						case '\u0231': 
-						// ȱ  [LATIN SMALL LETTER O WITH DOT ABOVE AND MACRON]
-						case '\u0254': 
-						// �?  [LATIN SMALL LETTER OPEN O]
-						case '\u0275': 
-						// ɵ  [LATIN SMALL LETTER BARRED O]
-						case '\u1D16': 
-						// á´–  [LATIN SMALL LETTER TOP HALF O]
-						case '\u1D17': 
-						// á´—  [LATIN SMALL LETTER BOTTOM HALF O]
-						case '\u1D97': 
-						// ᶗ  [LATIN SMALL LETTER OPEN O WITH RETROFLEX HOOK]
-						case '\u1E4D': 
-						// �  [LATIN SMALL LETTER O WITH TILDE AND ACUTE]
-						case '\u1E4F': 
-						// �  [LATIN SMALL LETTER O WITH TILDE AND DIAERESIS]
-						case '\u1E51': 
-						// ṑ  [LATIN SMALL LETTER O WITH MACRON AND GRAVE]
-						case '\u1E53': 
-						// ṓ  [LATIN SMALL LETTER O WITH MACRON AND ACUTE]
-						case '\u1ECD': 
-						// �  [LATIN SMALL LETTER O WITH DOT BELOW]
-						case '\u1ECF': 
-						// �  [LATIN SMALL LETTER O WITH HOOK ABOVE]
-						case '\u1ED1': 
-						// ố  [LATIN SMALL LETTER O WITH CIRCUMFLEX AND ACUTE]
-						case '\u1ED3': 
-						// ồ  [LATIN SMALL LETTER O WITH CIRCUMFLEX AND GRAVE]
-						case '\u1ED5': 
-						// ổ  [LATIN SMALL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE]
-						case '\u1ED7': 
-						// á»—  [LATIN SMALL LETTER O WITH CIRCUMFLEX AND TILDE]
-						case '\u1ED9': 
-						// á»™  [LATIN SMALL LETTER O WITH CIRCUMFLEX AND DOT BELOW]
-						case '\u1EDB': 
-						// á»›  [LATIN SMALL LETTER O WITH HORN AND ACUTE]
-						case '\u1EDD': 
-						// �  [LATIN SMALL LETTER O WITH HORN AND GRAVE]
-						case '\u1EDF': 
-						// ở  [LATIN SMALL LETTER O WITH HORN AND HOOK ABOVE]
-						case '\u1EE1': 
-						// ỡ  [LATIN SMALL LETTER O WITH HORN AND TILDE]
-						case '\u1EE3': 
-						// ợ  [LATIN SMALL LETTER O WITH HORN AND DOT BELOW]
-						case '\u2092': 
-						// â‚’  [LATIN SUBSCRIPT SMALL LETTER O]
-						case '\u24DE': 
-						// â“ž  [CIRCLED LATIN SMALL LETTER O]
-						case '\u2C7A': 
-						// ⱺ  [LATIN SMALL LETTER O WITH LOW RING INSIDE]
-						case '\uA74B': 
-						// �  [LATIN SMALL LETTER O WITH LONG STROKE OVERLAY]
-						case '\uA74D': 
-						// �  [LATIN SMALL LETTER O WITH LOOP]
-						case '\uFF4F':  // �  [FULLWIDTH LATIN SMALL LETTER O]
-							output[outputPos++] = 'o';
-							break;
-						
-						case '\u0152': 
-						// Å’  [LATIN CAPITAL LIGATURE OE]
-						case '\u0276':  // ɶ  [LATIN LETTER SMALL CAPITAL OE]
-							output[outputPos++] = 'O';
-							output[outputPos++] = 'E';
-							break;
-						
-						case '\uA74E':  // �  [LATIN CAPITAL LETTER OO]
-							output[outputPos++] = 'O';
-							output[outputPos++] = 'O';
-							break;
-						
-						case '\u0222': 
-						// Ȣ  http://en.wikipedia.org/wiki/OU  [LATIN CAPITAL LETTER OU]
-						case '\u1D15':  // á´•  [LATIN LETTER SMALL CAPITAL OU]
-							output[outputPos++] = 'O';
-							output[outputPos++] = 'U';
-							break;
-						
-						case '\u24AA':  // â’ª  [PARENTHESIZED LATIN SMALL LETTER O]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'o';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u0153': 
-						// Å“  [LATIN SMALL LIGATURE OE]
-						case '\u1D14':  // á´�?  [LATIN SMALL LETTER TURNED OE]
-							output[outputPos++] = 'o';
-							output[outputPos++] = 'e';
-							break;
-						
-						case '\uA74F':  // �  [LATIN SMALL LETTER OO]
-							output[outputPos++] = 'o';
-							output[outputPos++] = 'o';
-							break;
-						
-						case '\u0223':  // ȣ  http://en.wikipedia.org/wiki/OU  [LATIN SMALL LETTER OU]
-							output[outputPos++] = 'o';
-							output[outputPos++] = 'u';
-							break;
-						
-						case '\u01A4': 
-						// Ƥ  [LATIN CAPITAL LETTER P WITH HOOK]
-						case '\u1D18': 
-						// á´˜  [LATIN LETTER SMALL CAPITAL P]
-						case '\u1E54': 
-						// �?  [LATIN CAPITAL LETTER P WITH ACUTE]
-						case '\u1E56': 
-						// á¹–  [LATIN CAPITAL LETTER P WITH DOT ABOVE]
-						case '\u24C5': 
-						// â“…  [CIRCLED LATIN CAPITAL LETTER P]
-						case '\u2C63': 
-						// â±£  [LATIN CAPITAL LETTER P WITH STROKE]
-						case '\uA750': 
-						// �  [LATIN CAPITAL LETTER P WITH STROKE THROUGH DESCENDER]
-						case '\uA752': 
-						// �  [LATIN CAPITAL LETTER P WITH FLOURISH]
-						case '\uA754': 
-						// �?  [LATIN CAPITAL LETTER P WITH SQUIRREL TAIL]
-						case '\uFF30':  // ï¼°  [FULLWIDTH LATIN CAPITAL LETTER P]
-							output[outputPos++] = 'P';
-							break;
-						
-						case '\u01A5': 
-						// ƥ  [LATIN SMALL LETTER P WITH HOOK]
-						case '\u1D71': 
-						// áµ±  [LATIN SMALL LETTER P WITH MIDDLE TILDE]
-						case '\u1D7D': 
-						// áµ½  [LATIN SMALL LETTER P WITH STROKE]
-						case '\u1D88': 
-						// ᶈ  [LATIN SMALL LETTER P WITH PALATAL HOOK]
-						case '\u1E55': 
-						// ṕ  [LATIN SMALL LETTER P WITH ACUTE]
-						case '\u1E57': 
-						// á¹—  [LATIN SMALL LETTER P WITH DOT ABOVE]
-						case '\u24DF': 
-						// â“Ÿ  [CIRCLED LATIN SMALL LETTER P]
-						case '\uA751': 
-						// �  [LATIN SMALL LETTER P WITH STROKE THROUGH DESCENDER]
-						case '\uA753': 
-						// �  [LATIN SMALL LETTER P WITH FLOURISH]
-						case '\uA755': 
-						// �  [LATIN SMALL LETTER P WITH SQUIRREL TAIL]
-						case '\uA7FC': 
-						// ꟼ  [LATIN EPIGRAPHIC LETTER REVERSED P]
-						case '\uFF50':  // �  [FULLWIDTH LATIN SMALL LETTER P]
-							output[outputPos++] = 'p';
-							break;
-						
-						case '\u24AB':  // â’«  [PARENTHESIZED LATIN SMALL LETTER P]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'p';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u024A': 
-						// ÉŠ  [LATIN CAPITAL LETTER SMALL Q WITH HOOK TAIL]
-						case '\u24C6': 
-						// Ⓠ  [CIRCLED LATIN CAPITAL LETTER Q]
-						case '\uA756': 
-						// �  [LATIN CAPITAL LETTER Q WITH STROKE THROUGH DESCENDER]
-						case '\uA758': 
-						// �  [LATIN CAPITAL LETTER Q WITH DIAGONAL STROKE]
-						case '\uFF31':  // ï¼±  [FULLWIDTH LATIN CAPITAL LETTER Q]
-							output[outputPos++] = 'Q';
-							break;
-						
-						case '\u0138': 
-						// ĸ  http://en.wikipedia.org/wiki/Kra_(letter)  [LATIN SMALL LETTER KRA]
-						case '\u024B': 
-						// É‹  [LATIN SMALL LETTER Q WITH HOOK TAIL]
-						case '\u02A0': 
-						// Ê   [LATIN SMALL LETTER Q WITH HOOK]
-						case '\u24E0': 
-						// â“   [CIRCLED LATIN SMALL LETTER Q]
-						case '\uA757': 
-						// �  [LATIN SMALL LETTER Q WITH STROKE THROUGH DESCENDER]
-						case '\uA759': 
-						// �  [LATIN SMALL LETTER Q WITH DIAGONAL STROKE]
-						case '\uFF51':  // q  [FULLWIDTH LATIN SMALL LETTER Q]
-							output[outputPos++] = 'q';
-							break;
-						
-						case '\u24AC':  // â’¬  [PARENTHESIZED LATIN SMALL LETTER Q]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'q';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u0239':  // ȹ  [LATIN SMALL LETTER QP DIGRAPH]
-							output[outputPos++] = 'q';
-							output[outputPos++] = 'p';
-							break;
-						
-						case '\u0154': 
-						// �?  [LATIN CAPITAL LETTER R WITH ACUTE]
-						case '\u0156': 
-						// Å–  [LATIN CAPITAL LETTER R WITH CEDILLA]
-						case '\u0158': 
-						// Ř  [LATIN CAPITAL LETTER R WITH CARON]
-						case '\u0210': 
-						// È’  [LATIN CAPITAL LETTER R WITH DOUBLE GRAVE]
-						case '\u0212': 
-						// È’  [LATIN CAPITAL LETTER R WITH INVERTED BREVE]
-						case '\u024C': 
-						// Ɍ  [LATIN CAPITAL LETTER R WITH STROKE]
-						case '\u0280': 
-						// ʀ  [LATIN LETTER SMALL CAPITAL R]
-						case '\u0281': 
-						// �  [LATIN LETTER SMALL CAPITAL INVERTED R]
-						case '\u1D19': 
-						// á´™  [LATIN LETTER SMALL CAPITAL REVERSED R]
-						case '\u1D1A': 
-						// á´š  [LATIN LETTER SMALL CAPITAL TURNED R]
-						case '\u1E58': 
-						// Ṙ  [LATIN CAPITAL LETTER R WITH DOT ABOVE]
-						case '\u1E5A': 
-						// Ṛ  [LATIN CAPITAL LETTER R WITH DOT BELOW]
-						case '\u1E5C': 
-						// Ṝ  [LATIN CAPITAL LETTER R WITH DOT BELOW AND MACRON]
-						case '\u1E5E': 
-						// Ṟ  [LATIN CAPITAL LETTER R WITH LINE BELOW]
-						case '\u24C7': 
-						// Ⓡ  [CIRCLED LATIN CAPITAL LETTER R]
-						case '\u2C64': 
-						// Ɽ  [LATIN CAPITAL LETTER R WITH TAIL]
-						case '\uA75A': 
-						// �  [LATIN CAPITAL LETTER R ROTUNDA]
-						case '\uA782': 
-						// êž‚  [LATIN CAPITAL LETTER INSULAR R]
-						case '\uFF32':  // ï¼²  [FULLWIDTH LATIN CAPITAL LETTER R]
-							output[outputPos++] = 'R';
-							break;
-						
-						case '\u0155': 
-						// Å•  [LATIN SMALL LETTER R WITH ACUTE]
-						case '\u0157': 
-						// Å—  [LATIN SMALL LETTER R WITH CEDILLA]
-						case '\u0159': 
-						// Ã…â„¢  [LATIN SMALL LETTER R WITH CARON]
-						case '\u0211': 
-						// È‘  [LATIN SMALL LETTER R WITH DOUBLE GRAVE]
-						case '\u0213': 
-						// È“  [LATIN SMALL LETTER R WITH INVERTED BREVE]
-						case '\u024D': 
-						// �  [LATIN SMALL LETTER R WITH STROKE]
-						case '\u027C': 
-						// ɼ  [LATIN SMALL LETTER R WITH LONG LEG]
-						case '\u027D': 
-						// ɽ  [LATIN SMALL LETTER R WITH TAIL]
-						case '\u027E': 
-						// ɾ  [LATIN SMALL LETTER R WITH FISHHOOK]
-						case '\u027F': 
-						// É¿  [LATIN SMALL LETTER REVERSED R WITH FISHHOOK]
-						case '\u1D63': 
-						// áµ£  [LATIN SUBSCRIPT SMALL LETTER R]
-						case '\u1D72': 
-						// áµ²  [LATIN SMALL LETTER R WITH MIDDLE TILDE]
-						case '\u1D73': 
-						// áµ³  [LATIN SMALL LETTER R WITH FISHHOOK AND MIDDLE TILDE]
-						case '\u1D89': 
-						// ᶉ  [LATIN SMALL LETTER R WITH PALATAL HOOK]
-						case '\u1E59': 
-						// á¹™  [LATIN SMALL LETTER R WITH DOT ABOVE]
-						case '\u1E5B': 
-						// á¹›  [LATIN SMALL LETTER R WITH DOT BELOW]
-						case '\u1E5D': 
-						// �  [LATIN SMALL LETTER R WITH DOT BELOW AND MACRON]
-						case '\u1E5F': 
-						// ṟ  [LATIN SMALL LETTER R WITH LINE BELOW]
-						case '\u24E1': 
-						// â“¡  [CIRCLED LATIN SMALL LETTER R]
-						case '\uA75B': 
-						// �  [LATIN SMALL LETTER R ROTUNDA]
-						case '\uA783': 
-						// ꞃ  [LATIN SMALL LETTER INSULAR R]
-						case '\uFF52':  // ï½’  [FULLWIDTH LATIN SMALL LETTER R]
-							output[outputPos++] = 'r';
-							break;
-						
-						case '\u24AD':  // â’­  [PARENTHESIZED LATIN SMALL LETTER R]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'r';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u015A': 
-						// Ã…Å¡  [LATIN CAPITAL LETTER S WITH ACUTE]
-						case '\u015C': 
-						// Ã…Å“  [LATIN CAPITAL LETTER S WITH CIRCUMFLEX]
-						case '\u015E': 
-						// Åž  [LATIN CAPITAL LETTER S WITH CEDILLA]
-						case '\u0160': 
-						// Å   [LATIN CAPITAL LETTER S WITH CARON]
-						case '\u0218': 
-						// Ș  [LATIN CAPITAL LETTER S WITH COMMA BELOW]
-						case '\u1E60': 
-						// á¹   [LATIN CAPITAL LETTER S WITH DOT ABOVE]
-						case '\u1E62': 
-						// á¹¢  [LATIN CAPITAL LETTER S WITH DOT BELOW]
-						case '\u1E64': 
-						// Ṥ  [LATIN CAPITAL LETTER S WITH ACUTE AND DOT ABOVE]
-						case '\u1E66': 
-						// Ṧ  [LATIN CAPITAL LETTER S WITH CARON AND DOT ABOVE]
-						case '\u1E68': 
-						// Ṩ  [LATIN CAPITAL LETTER S WITH DOT BELOW AND DOT ABOVE]
-						case '\u24C8': 
-						// Ⓢ  [CIRCLED LATIN CAPITAL LETTER S]
-						case '\uA731': 
-						// ꜱ  [LATIN LETTER SMALL CAPITAL S]
-						case '\uA785': 
-						// êž…  [LATIN SMALL LETTER INSULAR S]
-						case '\uFF33':  // ï¼³  [FULLWIDTH LATIN CAPITAL LETTER S]
-							output[outputPos++] = 'S';
-							break;
-						
-						case '\u015B': 
-						// Å›  [LATIN SMALL LETTER S WITH ACUTE]
-						case '\u015D': 
-						// �  [LATIN SMALL LETTER S WITH CIRCUMFLEX]
-						case '\u015F': 
-						// ÅŸ  [LATIN SMALL LETTER S WITH CEDILLA]
-						case '\u0161': 
-						// Å¡  [LATIN SMALL LETTER S WITH CARON]
-						case '\u017F': 
-						// Å¿  http://en.wikipedia.org/wiki/Long_S  [LATIN SMALL LETTER LONG S]
-						case '\u0219': 
-						// È™  [LATIN SMALL LETTER S WITH COMMA BELOW]
-						case '\u023F': 
-						// È¿  [LATIN SMALL LETTER S WITH SWASH TAIL]
-						case '\u0282': 
-						// Ê‚  [LATIN SMALL LETTER S WITH HOOK]
-						case '\u1D74': 
-						// áµ´  [LATIN SMALL LETTER S WITH MIDDLE TILDE]
-						case '\u1D8A': 
-						// ᶊ  [LATIN SMALL LETTER S WITH PALATAL HOOK]
-						case '\u1E61': 
-						// ṡ  [LATIN SMALL LETTER S WITH DOT ABOVE]
-						case '\u1E63': 
-						// á¹£  [LATIN SMALL LETTER S WITH DOT BELOW]
-						case '\u1E65': 
-						// á¹¥  [LATIN SMALL LETTER S WITH ACUTE AND DOT ABOVE]
-						case '\u1E67': 
-						// ṧ  [LATIN SMALL LETTER S WITH CARON AND DOT ABOVE]
-						case '\u1E69': 
-						// ṩ  [LATIN SMALL LETTER S WITH DOT BELOW AND DOT ABOVE]
-						case '\u1E9C': 
-						// ẜ  [LATIN SMALL LETTER LONG S WITH DIAGONAL STROKE]
-						case '\u1E9D': 
-						// �  [LATIN SMALL LETTER LONG S WITH HIGH STROKE]
-						case '\u24E2': 
-						// â“¢  [CIRCLED LATIN SMALL LETTER S]
-						case '\uA784': 
-						// êž„  [LATIN CAPITAL LETTER INSULAR S]
-						case '\uFF53':  // s  [FULLWIDTH LATIN SMALL LETTER S]
-							output[outputPos++] = 's';
-							break;
-						
-						case '\u1E9E':  // ẞ  [LATIN CAPITAL LETTER SHARP S]
-							output[outputPos++] = 'S';
-							output[outputPos++] = 'S';
-							break;
-						
-						case '\u24AE':  // â’®  [PARENTHESIZED LATIN SMALL LETTER S]
-							output[outputPos++] = '(';
-							output[outputPos++] = 's';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u00DF':  // ß  [LATIN SMALL LETTER SHARP S]
-							output[outputPos++] = 's';
-							output[outputPos++] = 's';
-							break;
-						
-						case '\uFB06':  // st  [LATIN SMALL LIGATURE ST]
-							output[outputPos++] = 's';
-							output[outputPos++] = 't';
-							break;
-						
-						case '\u0162': 
-						// Ţ  [LATIN CAPITAL LETTER T WITH CEDILLA]
-						case '\u0164': 
-						// Ť  [LATIN CAPITAL LETTER T WITH CARON]
-						case '\u0166': 
-						// Ŧ  [LATIN CAPITAL LETTER T WITH STROKE]
-						case '\u01AC': 
-						// Ƭ  [LATIN CAPITAL LETTER T WITH HOOK]
-						case '\u01AE': 
-						// Æ®  [LATIN CAPITAL LETTER T WITH RETROFLEX HOOK]
-						case '\u021A': 
-						// Èš  [LATIN CAPITAL LETTER T WITH COMMA BELOW]
-						case '\u023E': 
-						// Ⱦ  [LATIN CAPITAL LETTER T WITH DIAGONAL STROKE]
-						case '\u1D1B': 
-						// á´›  [LATIN LETTER SMALL CAPITAL T]
-						case '\u1E6A': 
-						// Ṫ  [LATIN CAPITAL LETTER T WITH DOT ABOVE]
-						case '\u1E6C': 
-						// Ṭ  [LATIN CAPITAL LETTER T WITH DOT BELOW]
-						case '\u1E6E': 
-						// á¹®  [LATIN CAPITAL LETTER T WITH LINE BELOW]
-						case '\u1E70': 
-						// á¹°  [LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW]
-						case '\u24C9': 
-						// Ⓣ  [CIRCLED LATIN CAPITAL LETTER T]
-						case '\uA786': 
-						// Ꞇ  [LATIN CAPITAL LETTER INSULAR T]
-						case '\uFF34':  // ï¼´  [FULLWIDTH LATIN CAPITAL LETTER T]
-							output[outputPos++] = 'T';
-							break;
-						
-						case '\u0163': 
-						// ţ  [LATIN SMALL LETTER T WITH CEDILLA]
-						case '\u0165': 
-						// Ã…Â¥  [LATIN SMALL LETTER T WITH CARON]
-						case '\u0167': 
-						// ŧ  [LATIN SMALL LETTER T WITH STROKE]
-						case '\u01AB': 
-						// Æ«  [LATIN SMALL LETTER T WITH PALATAL HOOK]
-						case '\u01AD': 
-						// Æ­  [LATIN SMALL LETTER T WITH HOOK]
-						case '\u021B': 
-						// È›  [LATIN SMALL LETTER T WITH COMMA BELOW]
-						case '\u0236': 
-						// ȶ  [LATIN SMALL LETTER T WITH CURL]
-						case '\u0287': 
-						// ʇ  [LATIN SMALL LETTER TURNED T]
-						case '\u0288': 
-						// ʈ  [LATIN SMALL LETTER T WITH RETROFLEX HOOK]
-						case '\u1D75': 
-						// áµµ  [LATIN SMALL LETTER T WITH MIDDLE TILDE]
-						case '\u1E6B': 
-						// ṫ  [LATIN SMALL LETTER T WITH DOT ABOVE]
-						case '\u1E6D': 
-						// á¹­  [LATIN SMALL LETTER T WITH DOT BELOW]
-						case '\u1E6F': 
-						// ṯ  [LATIN SMALL LETTER T WITH LINE BELOW]
-						case '\u1E71': 
-						// á¹±  [LATIN SMALL LETTER T WITH CIRCUMFLEX BELOW]
-						case '\u1E97': 
-						// ẗ  [LATIN SMALL LETTER T WITH DIAERESIS]
-						case '\u24E3': 
-						// â“£  [CIRCLED LATIN SMALL LETTER T]
-						case '\u2C66': 
-						// ⱦ  [LATIN SMALL LETTER T WITH DIAGONAL STROKE]
-						case '\uFF54':  // �?  [FULLWIDTH LATIN SMALL LETTER T]
-							output[outputPos++] = 't';
-							break;
-						
-						case '\u00DE': 
-						// Þ  [LATIN CAPITAL LETTER THORN]
-						case '\uA766':  // �  [LATIN CAPITAL LETTER THORN WITH STROKE THROUGH DESCENDER]
-							output[outputPos++] = 'T';
-							output[outputPos++] = 'H';
-							break;
-						
-						case '\uA728':  // Ꜩ  [LATIN CAPITAL LETTER TZ]
-							output[outputPos++] = 'T';
-							output[outputPos++] = 'Z';
-							break;
-						
-						case '\u24AF':  // â’¯  [PARENTHESIZED LATIN SMALL LETTER T]
-							output[outputPos++] = '(';
-							output[outputPos++] = 't';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u02A8':  // ʨ  [LATIN SMALL LETTER TC DIGRAPH WITH CURL]
-							output[outputPos++] = 't';
-							output[outputPos++] = 'c';
-							break;
-						
-						case '\u00FE': 
-						// þ  [LATIN SMALL LETTER THORN]
-						case '\u1D7A': 
-						// ᵺ  [LATIN SMALL LETTER TH WITH STRIKETHROUGH]
-						case '\uA767':  // �  [LATIN SMALL LETTER THORN WITH STROKE THROUGH DESCENDER]
-							output[outputPos++] = 't';
-							output[outputPos++] = 'h';
-							break;
-						
-						case '\u02A6':  // ʦ  [LATIN SMALL LETTER TS DIGRAPH]
-							output[outputPos++] = 't';
-							output[outputPos++] = 's';
-							break;
-						
-						case '\uA729':  // ꜩ  [LATIN SMALL LETTER TZ]
-							output[outputPos++] = 't';
-							output[outputPos++] = 'z';
-							break;
-						
-						case '\u00D9': 
-						// Ù  [LATIN CAPITAL LETTER U WITH GRAVE]
-						case '\u00DA': 
-						// Ú  [LATIN CAPITAL LETTER U WITH ACUTE]
-						case '\u00DB': 
-						// Û  [LATIN CAPITAL LETTER U WITH CIRCUMFLEX]
-						case '\u00DC': 
-						// Ü  [LATIN CAPITAL LETTER U WITH DIAERESIS]
-						case '\u0168': 
-						// Ũ  [LATIN CAPITAL LETTER U WITH TILDE]
-						case '\u016A': 
-						// Ū  [LATIN CAPITAL LETTER U WITH MACRON]
-						case '\u016C': 
-						// Ŭ  [LATIN CAPITAL LETTER U WITH BREVE]
-						case '\u016E': 
-						// Å®  [LATIN CAPITAL LETTER U WITH RING ABOVE]
-						case '\u0170': 
-						// Å°  [LATIN CAPITAL LETTER U WITH DOUBLE ACUTE]
-						case '\u0172': 
-						// Ų  [LATIN CAPITAL LETTER U WITH OGONEK]
-						case '\u01AF': 
-						// Ư  [LATIN CAPITAL LETTER U WITH HORN]
-						case '\u01D3': 
-						// Ç“  [LATIN CAPITAL LETTER U WITH CARON]
-						case '\u01D5': 
-						// Ç•  [LATIN CAPITAL LETTER U WITH DIAERESIS AND MACRON]
-						case '\u01D7': 
-						// Ç—  [LATIN CAPITAL LETTER U WITH DIAERESIS AND ACUTE]
-						case '\u01D9': 
-						// Ç™  [LATIN CAPITAL LETTER U WITH DIAERESIS AND CARON]
-						case '\u01DB': 
-						// Ç›  [LATIN CAPITAL LETTER U WITH DIAERESIS AND GRAVE]
-						case '\u0214': 
-						// �?  [LATIN CAPITAL LETTER U WITH DOUBLE GRAVE]
-						case '\u0216': 
-						// È–  [LATIN CAPITAL LETTER U WITH INVERTED BREVE]
-						case '\u0244': 
-						// É„  [LATIN CAPITAL LETTER U BAR]
-						case '\u1D1C': 
-						// ᴜ  [LATIN LETTER SMALL CAPITAL U]
-						case '\u1D7E': 
-						// áµ¾  [LATIN SMALL CAPITAL LETTER U WITH STROKE]
-						case '\u1E72': 
-						// á¹²  [LATIN CAPITAL LETTER U WITH DIAERESIS BELOW]
-						case '\u1E74': 
-						// á¹´  [LATIN CAPITAL LETTER U WITH TILDE BELOW]
-						case '\u1E76': 
-						// Ṷ  [LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW]
-						case '\u1E78': 
-						// Ṹ  [LATIN CAPITAL LETTER U WITH TILDE AND ACUTE]
-						case '\u1E7A': 
-						// Ṻ  [LATIN CAPITAL LETTER U WITH MACRON AND DIAERESIS]
-						case '\u1EE4': 
-						// Ụ  [LATIN CAPITAL LETTER U WITH DOT BELOW]
-						case '\u1EE6': 
-						// Ủ  [LATIN CAPITAL LETTER U WITH HOOK ABOVE]
-						case '\u1EE8': 
-						// Ứ  [LATIN CAPITAL LETTER U WITH HORN AND ACUTE]
-						case '\u1EEA': 
-						// Ừ  [LATIN CAPITAL LETTER U WITH HORN AND GRAVE]
-						case '\u1EEC': 
-						// Ử  [LATIN CAPITAL LETTER U WITH HORN AND HOOK ABOVE]
-						case '\u1EEE': 
-						// á»®  [LATIN CAPITAL LETTER U WITH HORN AND TILDE]
-						case '\u1EF0': 
-						// á»°  [LATIN CAPITAL LETTER U WITH HORN AND DOT BELOW]
-						case '\u24CA': 
-						// â“Š  [CIRCLED LATIN CAPITAL LETTER U]
-						case '\uFF35':  // ï¼µ  [FULLWIDTH LATIN CAPITAL LETTER U]
-							output[outputPos++] = 'U';
-							break;
-						
-						case '\u00F9': 
-						// ù  [LATIN SMALL LETTER U WITH GRAVE]
-						case '\u00FA': 
-						// ú  [LATIN SMALL LETTER U WITH ACUTE]
-						case '\u00FB': 
-						// û  [LATIN SMALL LETTER U WITH CIRCUMFLEX]
-						case '\u00FC': 
-						// ü  [LATIN SMALL LETTER U WITH DIAERESIS]
-						case '\u0169': 
-						// Å©  [LATIN SMALL LETTER U WITH TILDE]
-						case '\u016B': 
-						// Å«  [LATIN SMALL LETTER U WITH MACRON]
-						case '\u016D': 
-						// Å­  [LATIN SMALL LETTER U WITH BREVE]
-						case '\u016F': 
-						// ů  [LATIN SMALL LETTER U WITH RING ABOVE]
-						case '\u0171': 
-						// ű  [LATIN SMALL LETTER U WITH DOUBLE ACUTE]
-						case '\u0173': 
-						// ų  [LATIN SMALL LETTER U WITH OGONEK]
-						case '\u01B0': 
-						// Æ°  [LATIN SMALL LETTER U WITH HORN]
-						case '\u01D4': 
-						// �?  [LATIN SMALL LETTER U WITH CARON]
-						case '\u01D6': 
-						// Ç–  [LATIN SMALL LETTER U WITH DIAERESIS AND MACRON]
-						case '\u01D8': 
-						// ǘ  [LATIN SMALL LETTER U WITH DIAERESIS AND ACUTE]
-						case '\u01DA': 
-						// Çš  [LATIN SMALL LETTER U WITH DIAERESIS AND CARON]
-						case '\u01DC': 
-						// ǜ  [LATIN SMALL LETTER U WITH DIAERESIS AND GRAVE]
-						case '\u0215': 
-						// È•  [LATIN SMALL LETTER U WITH DOUBLE GRAVE]
-						case '\u0217': 
-						// È—  [LATIN SMALL LETTER U WITH INVERTED BREVE]
-						case '\u0289': 
-						// ʉ  [LATIN SMALL LETTER U BAR]
-						case '\u1D64': 
-						// ᵤ  [LATIN SUBSCRIPT SMALL LETTER U]
-						case '\u1D99': 
-						// ᶙ  [LATIN SMALL LETTER U WITH RETROFLEX HOOK]
-						case '\u1E73': 
-						// á¹³  [LATIN SMALL LETTER U WITH DIAERESIS BELOW]
-						case '\u1E75': 
-						// á¹µ  [LATIN SMALL LETTER U WITH TILDE BELOW]
-						case '\u1E77': 
-						// á¹·  [LATIN SMALL LETTER U WITH CIRCUMFLEX BELOW]
-						case '\u1E79': 
-						// á¹¹  [LATIN SMALL LETTER U WITH TILDE AND ACUTE]
-						case '\u1E7B': 
-						// á¹»  [LATIN SMALL LETTER U WITH MACRON AND DIAERESIS]
-						case '\u1EE5': 
-						// ụ  [LATIN SMALL LETTER U WITH DOT BELOW]
-						case '\u1EE7': 
-						// ủ  [LATIN SMALL LETTER U WITH HOOK ABOVE]
-						case '\u1EE9': 
-						// ứ  [LATIN SMALL LETTER U WITH HORN AND ACUTE]
-						case '\u1EEB': 
-						// ừ  [LATIN SMALL LETTER U WITH HORN AND GRAVE]
-						case '\u1EED': 
-						// á»­  [LATIN SMALL LETTER U WITH HORN AND HOOK ABOVE]
-						case '\u1EEF': 
-						// ữ  [LATIN SMALL LETTER U WITH HORN AND TILDE]
-						case '\u1EF1': 
-						// á»±  [LATIN SMALL LETTER U WITH HORN AND DOT BELOW]
-						case '\u24E4': 
-						// ⓤ  [CIRCLED LATIN SMALL LETTER U]
-						case '\uFF55':  // u  [FULLWIDTH LATIN SMALL LETTER U]
-							output[outputPos++] = 'u';
-							break;
-						
-						case '\u24B0':  // â’°  [PARENTHESIZED LATIN SMALL LETTER U]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'u';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u1D6B':  // ᵫ  [LATIN SMALL LETTER UE]
-							output[outputPos++] = 'u';
-							output[outputPos++] = 'e';
-							break;
-						
-						case '\u01B2': 
-						// Ʋ  [LATIN CAPITAL LETTER V WITH HOOK]
-						case '\u0245': 
-						// É…  [LATIN CAPITAL LETTER TURNED V]
-						case '\u1D20': 
-						// á´   [LATIN LETTER SMALL CAPITAL V]
-						case '\u1E7C': 
-						// á¹¼  [LATIN CAPITAL LETTER V WITH TILDE]
-						case '\u1E7E': 
-						// á¹¾  [LATIN CAPITAL LETTER V WITH DOT BELOW]
-						case '\u1EFC': 
-						// Ỽ  [LATIN CAPITAL LETTER MIDDLE-WELSH V]
-						case '\u24CB': 
-						// â“‹  [CIRCLED LATIN CAPITAL LETTER V]
-						case '\uA75E': 
-						// �  [LATIN CAPITAL LETTER V WITH DIAGONAL STROKE]
-						case '\uA768': 
-						// �  [LATIN CAPITAL LETTER VEND]
-						case '\uFF36':  // V  [FULLWIDTH LATIN CAPITAL LETTER V]
-							output[outputPos++] = 'V';
-							break;
-						
-						case '\u028B': 
-						// Ê‹  [LATIN SMALL LETTER V WITH HOOK]
-						case '\u028C': 
-						// ʌ  [LATIN SMALL LETTER TURNED V]
-						case '\u1D65': 
-						// áµ¥  [LATIN SUBSCRIPT SMALL LETTER V]
-						case '\u1D8C': 
-						// ᶌ  [LATIN SMALL LETTER V WITH PALATAL HOOK]
-						case '\u1E7D': 
-						// á¹½  [LATIN SMALL LETTER V WITH TILDE]
-						case '\u1E7F': 
-						// ṿ  [LATIN SMALL LETTER V WITH DOT BELOW]
-						case '\u24E5': 
-						// â“¥  [CIRCLED LATIN SMALL LETTER V]
-						case '\u2C71': 
-						// â±±  [LATIN SMALL LETTER V WITH RIGHT HOOK]
-						case '\u2C74': 
-						// â±´  [LATIN SMALL LETTER V WITH CURL]
-						case '\uA75F': 
-						// �  [LATIN SMALL LETTER V WITH DIAGONAL STROKE]
-						case '\uFF56':  // ï½–  [FULLWIDTH LATIN SMALL LETTER V]
-							output[outputPos++] = 'v';
-							break;
-						
-						case '\uA760':  // �  [LATIN CAPITAL LETTER VY]
-							output[outputPos++] = 'V';
-							output[outputPos++] = 'Y';
-							break;
-						
-						case '\u24B1':  // â’±  [PARENTHESIZED LATIN SMALL LETTER V]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'v';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\uA761':  // �  [LATIN SMALL LETTER VY]
-							output[outputPos++] = 'v';
-							output[outputPos++] = 'y';
-							break;
-						
-						case '\u0174': 
-						// Å´  [LATIN CAPITAL LETTER W WITH CIRCUMFLEX]
-						case '\u01F7': 
-						// Ç·  http://en.wikipedia.org/wiki/Wynn  [LATIN CAPITAL LETTER WYNN]
-						case '\u1D21': 
-						// á´¡  [LATIN LETTER SMALL CAPITAL W]
-						case '\u1E80': 
-						// Ẁ  [LATIN CAPITAL LETTER W WITH GRAVE]
-						case '\u1E82': 
-						// Ẃ  [LATIN CAPITAL LETTER W WITH ACUTE]
-						case '\u1E84': 
-						// Ẅ  [LATIN CAPITAL LETTER W WITH DIAERESIS]
-						case '\u1E86': 
-						// Ẇ  [LATIN CAPITAL LETTER W WITH DOT ABOVE]
-						case '\u1E88': 
-						// Ẉ  [LATIN CAPITAL LETTER W WITH DOT BELOW]
-						case '\u24CC': 
-						// Ⓦ  [CIRCLED LATIN CAPITAL LETTER W]
-						case '\u2C72': 
-						// â±²  [LATIN CAPITAL LETTER W WITH HOOK]
-						case '\uFF37':  // ï¼·  [FULLWIDTH LATIN CAPITAL LETTER W]
-							output[outputPos++] = 'W';
-							break;
-						
-						case '\u0175': 
-						// ŵ  [LATIN SMALL LETTER W WITH CIRCUMFLEX]
-						case '\u01BF': 
-						// Æ¿  http://en.wikipedia.org/wiki/Wynn  [LATIN LETTER WYNN]
-						case '\u028D': 
-						// �  [LATIN SMALL LETTER TURNED W]
-						case '\u1E81': 
-						// �  [LATIN SMALL LETTER W WITH GRAVE]
-						case '\u1E83': 
-						// ẃ  [LATIN SMALL LETTER W WITH ACUTE]
-						case '\u1E85': 
-						// ẅ  [LATIN SMALL LETTER W WITH DIAERESIS]
-						case '\u1E87': 
-						// ẇ  [LATIN SMALL LETTER W WITH DOT ABOVE]
-						case '\u1E89': 
-						// ẉ  [LATIN SMALL LETTER W WITH DOT BELOW]
-						case '\u1E98': 
-						// ẘ  [LATIN SMALL LETTER W WITH RING ABOVE]
-						case '\u24E6': 
-						// ⓦ  [CIRCLED LATIN SMALL LETTER W]
-						case '\u2C73': 
-						// â±³  [LATIN SMALL LETTER W WITH HOOK]
-						case '\uFF57':  // ï½—  [FULLWIDTH LATIN SMALL LETTER W]
-							output[outputPos++] = 'w';
-							break;
-						
-						case '\u24B2':  // â’²  [PARENTHESIZED LATIN SMALL LETTER W]
-							output[outputPos++] = '(';
-							output[outputPos++] = 'w';
-							output[outputPos++] = ')';
-							break;
-						
-						case '\u1E8A': 
-						// Ẋ  [LATIN CAPITAL LETTER X WITH DOT ABOVE]
-						case '\u1E8C': 
-						// Ẍ  [LATIN CAPITAL LETTER X WITH DIAERESIS]
-						case '\u24CD': 
-						// �  [CIRCLED LATIN CAPITAL LETTER X]
-						case '\uFF38':  // X  [FULLWIDTH LATIN CAPITAL LETTER X]
-							output[outputPos++] = 'X';
-							break;
-						
-						case '\u1D8D': 
-				

<TRUNCATED>
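
For readers skimming this diff: the truncated switch above is the core of
ASCIIFoldingFilter, where each group of case labels maps accented or otherwise
decorated Latin characters onto their plain ASCII equivalents, writing one or
more chars into a shared output buffer. A minimal standalone sketch of the same
folding idea -- using a hypothetical FoldChar helper covering a toy subset, not
the actual Lucene.Net API -- might look like:

    using System;
    using System.Text;

    static class AsciiFoldSketch
    {
        // Toy subset of the mapping; the real filter covers thousands of
        // code points, including multi-char expansions such as ß -> "ss".
        static string FoldChar(char c)
        {
            switch (c)
            {
                case '\u00E8':  // è  [LATIN SMALL LETTER E WITH GRAVE]
                case '\u00E9':  // é  [LATIN SMALL LETTER E WITH ACUTE]
                    return "e";
                case '\u00FC':  // ü  [LATIN SMALL LETTER U WITH DIAERESIS]
                    return "u";
                case '\u00DF':  // ß  [LATIN SMALL LETTER SHARP S]
                    return "ss";
                default:
                    return c.ToString();  // pass everything else through
            }
        }

        static void Main()
        {
            var sb = new StringBuilder();
            foreach (char c in "Tschüß café")
                sb.Append(FoldChar(c));
            Console.WriteLine(sb.ToString());  // prints "Tschuss cafe"
        }
    }

The real filter avoids per-character string allocation by writing directly
into a char[] and bumping outputPos, exactly as the diff shows.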

[46/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/DutchStemmer.cs
----------------------------------------------------------------------
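
A note for readers of the stemmer below: the generated DutchStemmer drives
everything through Among tables (a_0 .. a_5) and find_among, which pick the
longest matching substring at the cursor and return an action id for the
switch that follows. A toy, self-contained version of that longest-suffix
lookup -- mirroring table a_3 in the diff, but not the real SF.Snowball
implementation, which also threads link indices and method callbacks -- is:

    using System;

    static class AmongSketch
    {
        // Suffix -> action id, cf. a_3: "heden" -> 1, "ene"/"en" -> 2, "se"/"s" -> 3.
        static readonly (string Suffix, int Action)[] A3 =
        {
            ("heden", 1), ("ene", 2), ("en", 2), ("se", 3), ("s", 3),
        };

        // Return the action id of the longest suffix of word found in A3,
        // or 0 when nothing matches (like among_var == 0 in the diff).
        static int FindAmong(string word)
        {
            int best = 0, bestLen = 0;
            foreach (var (suffix, action) in A3)
                if (suffix.Length > bestLen &&
                    word.EndsWith(suffix, StringComparison.Ordinal))
                {
                    best = action;
                    bestLen = suffix.Length;
                }
            return best;
        }

        static void Main()
        {
            Console.WriteLine(FindAmong("mogelijkheden"));  // 1: "heden" beats "en"
            Console.WriteLine(FindAmong("huizen"));         // 2: "en"
            Console.WriteLine(FindAmong("boek"));           // 0: no suffix matches
        }
    }
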
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/DutchStemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/DutchStemmer.cs
index 938b24f..07c19c6 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/DutchStemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/DutchStemmer.cs
@@ -24,999 +24,999 @@ namespace SF.Snowball.Ext
 #pragma warning disable 162
     
     /// <summary> Generated class implementing code defined by a snowball script.</summary>
-	public class DutchStemmer : SnowballProgram
-	{
-		public DutchStemmer()
-		{
-			InitBlock();
-		}
-		private void  InitBlock()
-		{
-			a_0 = new Among[]{new Among("", - 1, 6, "", this), new Among("\u00E1", 0, 1, "", this), new Among("\u00E4", 0, 1, "", this), new Among("\u00E9", 0, 2, "", this), new Among("\u00EB", 0, 2, "", this), new Among("\u00ED", 0, 3, "", this), new Among("\u00EF", 0, 3, "", this), new Among("\u00F3", 0, 4, "", this), new Among("\u00F6", 0, 4, "", this), new Among("\u00FA", 0, 5, "", this), new Among("\u00FC", 0, 5, "", this)};
-			a_1 = new Among[]{new Among("", - 1, 3, "", this), new Among("I", 0, 2, "", this), new Among("Y", 0, 1, "", this)};
-			a_2 = new Among[]{new Among("dd", - 1, - 1, "", this), new Among("kk", - 1, - 1, "", this), new Among("tt", - 1, - 1, "", this)};
-			a_3 = new Among[]{new Among("ene", - 1, 2, "", this), new Among("se", - 1, 3, "", this), new Among("en", - 1, 2, "", this), new Among("heden", 2, 1, "", this), new Among("s", - 1, 3, "", this)};
-			a_4 = new Among[]{new Among("end", - 1, 1, "", this), new Among("ig", - 1, 2, "", this), new Among("ing", - 1, 1, "", this), new Among("lijk", - 1, 3, "", this), new Among("baar", - 1, 4, "", this), new Among("bar", - 1, 5, "", this)};
-			a_5 = new Among[]{new Among("aa", - 1, - 1, "", this), new Among("ee", - 1, - 1, "", this), new Among("oo", - 1, - 1, "", this), new Among("uu", - 1, - 1, "", this)};
-		}
-		
-		private Among[] a_0;
-		private Among[] a_1;
-		private Among[] a_2;
-		private Among[] a_3;
-		private Among[] a_4;
-		private Among[] a_5;
-		private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (128)};
-		private static readonly char[] g_v_I = new char[]{(char) (1), (char) (0), (char) (0), (char) (17), (char) (65), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (128)};
-		private static readonly char[] g_v_j = new char[]{(char) (17), (char) (67), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (128)};
-		
-		private int I_p2;
-		private int I_p1;
-		private bool B_e_found;
-		
-		protected internal virtual void  copy_from(DutchStemmer other)
-		{
-			I_p2 = other.I_p2;
-			I_p1 = other.I_p1;
-			B_e_found = other.B_e_found;
-			base.copy_from(other);
-		}
-		
-		private bool r_prelude()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			int v_6;
-			// (, line 41
-			// test, line 42
-			v_1 = cursor;
-			// repeat, line 42
-			while (true)
-			{
-				v_2 = cursor;
-				do 
-				{
-					// (, line 42
-					// [, line 43
-					bra = cursor;
-					// substring, line 43
-					among_var = find_among(a_0, 11);
-					if (among_var == 0)
-					{
-						goto lab1_brk;
-					}
-					// ], line 43
-					ket = cursor;
-					switch (among_var)
-					{
-						
-						case 0: 
-							goto lab1_brk;
-						
-						case 1: 
-							// (, line 45
-							// <-, line 45
-							slice_from("a");
-							break;
-						
-						case 2: 
-							// (, line 47
-							// <-, line 47
-							slice_from("e");
-							break;
-						
-						case 3: 
-							// (, line 49
-							// <-, line 49
-							slice_from("i");
-							break;
-						
-						case 4: 
-							// (, line 51
-							// <-, line 51
-							slice_from("o");
-							break;
-						
-						case 5: 
-							// (, line 53
-							// <-, line 53
-							slice_from("u");
-							break;
-						
-						case 6: 
-							// (, line 54
-							// next, line 54
-							if (cursor >= limit)
-							{
-								goto lab1_brk;
-							}
-							cursor++;
-							break;
-						}
-					goto replab0;
-				}
-				while (false);
+    public class DutchStemmer : SnowballProgram
+    {
+        public DutchStemmer()
+        {
+            InitBlock();
+        }
+        private void  InitBlock()
+        {
+            a_0 = new Among[]{new Among("", - 1, 6, "", this), new Among("\u00E1", 0, 1, "", this), new Among("\u00E4", 0, 1, "", this), new Among("\u00E9", 0, 2, "", this), new Among("\u00EB", 0, 2, "", this), new Among("\u00ED", 0, 3, "", this), new Among("\u00EF", 0, 3, "", this), new Among("\u00F3", 0, 4, "", this), new Among("\u00F6", 0, 4, "", this), new Among("\u00FA", 0, 5, "", this), new Among("\u00FC", 0, 5, "", this)};
+            a_1 = new Among[]{new Among("", - 1, 3, "", this), new Among("I", 0, 2, "", this), new Among("Y", 0, 1, "", this)};
+            a_2 = new Among[]{new Among("dd", - 1, - 1, "", this), new Among("kk", - 1, - 1, "", this), new Among("tt", - 1, - 1, "", this)};
+            a_3 = new Among[]{new Among("ene", - 1, 2, "", this), new Among("se", - 1, 3, "", this), new Among("en", - 1, 2, "", this), new Among("heden", 2, 1, "", this), new Among("s", - 1, 3, "", this)};
+            a_4 = new Among[]{new Among("end", - 1, 1, "", this), new Among("ig", - 1, 2, "", this), new Among("ing", - 1, 1, "", this), new Among("lijk", - 1, 3, "", this), new Among("baar", - 1, 4, "", this), new Among("bar", - 1, 5, "", this)};
+            a_5 = new Among[]{new Among("aa", - 1, - 1, "", this), new Among("ee", - 1, - 1, "", this), new Among("oo", - 1, - 1, "", this), new Among("uu", - 1, - 1, "", this)};
+        }
+        
+        private Among[] a_0;
+        private Among[] a_1;
+        private Among[] a_2;
+        private Among[] a_3;
+        private Among[] a_4;
+        private Among[] a_5;
+        private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (128)};
+        private static readonly char[] g_v_I = new char[]{(char) (1), (char) (0), (char) (0), (char) (17), (char) (65), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (128)};
+        private static readonly char[] g_v_j = new char[]{(char) (17), (char) (67), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (128)};
+        
+        private int I_p2;
+        private int I_p1;
+        private bool B_e_found;
+        
+        protected internal virtual void  copy_from(DutchStemmer other)
+        {
+            I_p2 = other.I_p2;
+            I_p1 = other.I_p1;
+            B_e_found = other.B_e_found;
+            base.copy_from(other);
+        }
+        
+        private bool r_prelude()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            int v_6;
+            // (, line 41
+            // test, line 42
+            v_1 = cursor;
+            // repeat, line 42
+            while (true)
+            {
+                v_2 = cursor;
+                do 
+                {
+                    // (, line 42
+                    // [, line 43
+                    bra = cursor;
+                    // substring, line 43
+                    among_var = find_among(a_0, 11);
+                    if (among_var == 0)
+                    {
+                        goto lab1_brk;
+                    }
+                    // ], line 43
+                    ket = cursor;
+                    switch (among_var)
+                    {
+                        
+                        case 0: 
+                            goto lab1_brk;
+                        
+                        case 1: 
+                            // (, line 45
+                            // <-, line 45
+                            slice_from("a");
+                            break;
+                        
+                        case 2: 
+                            // (, line 47
+                            // <-, line 47
+                            slice_from("e");
+                            break;
+                        
+                        case 3: 
+                            // (, line 49
+                            // <-, line 49
+                            slice_from("i");
+                            break;
+                        
+                        case 4: 
+                            // (, line 51
+                            // <-, line 51
+                            slice_from("o");
+                            break;
+                        
+                        case 5: 
+                            // (, line 53
+                            // <-, line 53
+                            slice_from("u");
+                            break;
+                        
+                        case 6: 
+                            // (, line 54
+                            // next, line 54
+                            if (cursor >= limit)
+                            {
+                                goto lab1_brk;
+                            }
+                            cursor++;
+                            break;
+                        }
+                    goto replab0;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = v_2;
-				goto replab0_brk;
+                
+                cursor = v_2;
+                goto replab0_brk;
 
 replab0: ;
-			}
+            }
 
 replab0_brk: ;
-			
-			cursor = v_1;
-			// try, line 57
-			v_3 = cursor;
-			do 
-			{
-				// (, line 57
-				// [, line 57
-				bra = cursor;
-				// literal, line 57
-				if (!(eq_s(1, "y")))
-				{
-					cursor = v_3;
-					goto lab2_brk;
-				}
-				// ], line 57
-				ket = cursor;
-				// <-, line 57
-				slice_from("Y");
-			}
-			while (false);
+            
+            cursor = v_1;
+            // try, line 57
+            v_3 = cursor;
+            do 
+            {
+                // (, line 57
+                // [, line 57
+                bra = cursor;
+                // literal, line 57
+                if (!(eq_s(1, "y")))
+                {
+                    cursor = v_3;
+                    goto lab2_brk;
+                }
+                // ], line 57
+                ket = cursor;
+                // <-, line 57
+                slice_from("Y");
+            }
+            while (false);
 
 lab2_brk: ;
-			
-			// repeat, line 58
-			while (true)
-			{
-				v_4 = cursor;
-				do 
-				{
-					// goto, line 58
-					while (true)
-					{
-						v_5 = cursor;
-						do 
-						{
-							// (, line 58
-							if (!(in_grouping(g_v, 97, 232)))
-							{
-								goto lab6_brk;
-							}
-							// [, line 59
-							bra = cursor;
-							// or, line 59
-							do 
-							{
-								v_6 = cursor;
-								do 
-								{
-									// (, line 59
-									// literal, line 59
-									if (!(eq_s(1, "i")))
-									{
-										goto lab8_brk;
-									}
-									// ], line 59
-									ket = cursor;
-									if (!(in_grouping(g_v, 97, 232)))
-									{
-										goto lab8_brk;
-									}
-									// <-, line 59
-									slice_from("I");
-									goto lab7_brk;
-								}
-								while (false);
+            
+            // repeat, line 58
+            while (true)
+            {
+                v_4 = cursor;
+                do 
+                {
+                    // goto, line 58
+                    while (true)
+                    {
+                        v_5 = cursor;
+                        do 
+                        {
+                            // (, line 58
+                            if (!(in_grouping(g_v, 97, 232)))
+                            {
+                                goto lab6_brk;
+                            }
+                            // [, line 59
+                            bra = cursor;
+                            // or, line 59
+                            do 
+                            {
+                                v_6 = cursor;
+                                do 
+                                {
+                                    // (, line 59
+                                    // literal, line 59
+                                    if (!(eq_s(1, "i")))
+                                    {
+                                        goto lab8_brk;
+                                    }
+                                    // ], line 59
+                                    ket = cursor;
+                                    if (!(in_grouping(g_v, 97, 232)))
+                                    {
+                                        goto lab8_brk;
+                                    }
+                                    // <-, line 59
+                                    slice_from("I");
+                                    goto lab7_brk;
+                                }
+                                while (false);
 
 lab8_brk: ;
-								
-								cursor = v_6;
-								// (, line 60
-								// literal, line 60
-								if (!(eq_s(1, "y")))
-								{
-									goto lab6_brk;
-								}
-								// ], line 60
-								ket = cursor;
-								// <-, line 60
-								slice_from("Y");
-							}
-							while (false);
+                                
+                                cursor = v_6;
+                                // (, line 60
+                                // literal, line 60
+                                if (!(eq_s(1, "y")))
+                                {
+                                    goto lab6_brk;
+                                }
+                                // ], line 60
+                                ket = cursor;
+                                // <-, line 60
+                                slice_from("Y");
+                            }
+                            while (false);
 
 lab7_brk: ;
-							
-							cursor = v_5;
-							goto golab5_brk;
-						}
-						while (false);
+                            
+                            cursor = v_5;
+                            goto golab5_brk;
+                        }
+                        while (false);
 
 lab6_brk: ;
-						
-						cursor = v_5;
-						if (cursor >= limit)
-						{
-							goto lab4_brk;
-						}
-						cursor++;
-					}
+                        
+                        cursor = v_5;
+                        if (cursor >= limit)
+                        {
+                            goto lab4_brk;
+                        }
+                        cursor++;
+                    }
 
 golab5_brk: ;
-					
-					goto replab3;
-				}
-				while (false);
+                    
+                    goto replab3;
+                }
+                while (false);
 
 lab4_brk: ;
-				
-				cursor = v_4;
-				goto replab3_brk;
+                
+                cursor = v_4;
+                goto replab3_brk;
 
 replab3: ;
-			}
+            }
 
 replab3_brk: ;
-			
-			return true;
-		}
-		
-		private bool r_mark_regions()
-		{
-			// (, line 64
-			I_p1 = limit;
-			I_p2 = limit;
-			// gopast, line 69
-			while (true)
-			{
-				do 
-				{
-					if (!(in_grouping(g_v, 97, 232)))
-					{
-						goto lab3_brk;
-					}
-					goto golab0_brk;
-				}
-				while (false);
+            
+            return true;
+        }
+        
+        private bool r_mark_regions()
+        {
+            // (, line 64
+            I_p1 = limit;
+            I_p2 = limit;
+            // gopast, line 69
+            while (true)
+            {
+                do 
+                {
+                    if (!(in_grouping(g_v, 97, 232)))
+                    {
+                        goto lab3_brk;
+                    }
+                    goto golab0_brk;
+                }
+                while (false);
 
 lab3_brk: ;
-				
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab0_brk: ;
-			
-			// gopast, line 69
-			while (true)
-			{
-				do 
-				{
-					if (!(out_grouping(g_v, 97, 232)))
-					{
-						goto lab3_brk;
-					}
-					goto golab2_brk;
-				}
-				while (false);
+            
+            // gopast, line 69
+            while (true)
+            {
+                do 
+                {
+                    if (!(out_grouping(g_v, 97, 232)))
+                    {
+                        goto lab3_brk;
+                    }
+                    goto golab2_brk;
+                }
+                while (false);
 
 lab3_brk: ;
-				
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab2_brk: ;
-			
-			// setmark p1, line 69
-			I_p1 = cursor;
-			// try, line 70
-			do 
-			{
-				// (, line 70
-				if (!(I_p1 < 3))
-				{
-					goto lab5_brk;
-				}
-				I_p1 = 3;
-			}
-			while (false);
+            
+            // setmark p1, line 69
+            I_p1 = cursor;
+            // try, line 70
+            do 
+            {
+                // (, line 70
+                if (!(I_p1 < 3))
+                {
+                    goto lab5_brk;
+                }
+                I_p1 = 3;
+            }
+            while (false);
 
 lab5_brk: ;
-			
-			// gopast, line 71
-			while (true)
-			{
-				do 
-				{
-					if (!(in_grouping(g_v, 97, 232)))
-					{
-						goto lab9_brk;
-					}
-					goto golab6_brk;
-				}
-				while (false);
+            
+            // gopast, line 71
+            while (true)
+            {
+                do 
+                {
+                    if (!(in_grouping(g_v, 97, 232)))
+                    {
+                        goto lab9_brk;
+                    }
+                    goto golab6_brk;
+                }
+                while (false);
 
 lab9_brk: ;
-				
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab6_brk: ;
-			
-			// gopast, line 71
-			while (true)
-			{
-				do 
-				{
-					if (!(out_grouping(g_v, 97, 232)))
-					{
-						goto lab9_brk;
-					}
-					goto golab7_brk;
-				}
-				while (false);
+            
+            // gopast, line 71
+            while (true)
+            {
+                do 
+                {
+                    if (!(out_grouping(g_v, 97, 232)))
+                    {
+                        goto lab9_brk;
+                    }
+                    goto golab7_brk;
+                }
+                while (false);
 
 lab9_brk: ;
-				
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab7_brk: ;
-			
-			// setmark p2, line 71
-			I_p2 = cursor;
-			return true;
-		}
-		
-		private bool r_postlude()
-		{
-			int among_var;
-			int v_1;
-			// repeat, line 75
-			while (true)
-			{
-				v_1 = cursor;
-				do 
-				{
-					// (, line 75
-					// [, line 77
-					bra = cursor;
-					// substring, line 77
-					among_var = find_among(a_1, 3);
-					if (among_var == 0)
-					{
-						goto lab5_brk;
-					}
-					// ], line 77
-					ket = cursor;
-					switch (among_var)
-					{
-						
-						case 0: 
-							goto lab5_brk;
-						
-						case 1: 
-							// (, line 78
-							// <-, line 78
-							slice_from("y");
-							break;
-						
-						case 2: 
-							// (, line 79
-							// <-, line 79
-							slice_from("i");
-							break;
-						
-						case 3: 
-							// (, line 80
-							// next, line 80
-							if (cursor >= limit)
-							{
-								goto lab5_brk;
-							}
-							cursor++;
-							break;
-						}
-					goto replab1;
-				}
-				while (false);
+            
+            // setmark p2, line 71
+            I_p2 = cursor;
+            return true;
+        }
+        
+        private bool r_postlude()
+        {
+            int among_var;
+            int v_1;
+            // repeat, line 75
+            while (true)
+            {
+                v_1 = cursor;
+                do 
+                {
+                    // (, line 75
+                    // [, line 77
+                    bra = cursor;
+                    // substring, line 77
+                    among_var = find_among(a_1, 3);
+                    if (among_var == 0)
+                    {
+                        goto lab5_brk;
+                    }
+                    // ], line 77
+                    ket = cursor;
+                    switch (among_var)
+                    {
+                        
+                        case 0: 
+                            goto lab5_brk;
+                        
+                        case 1: 
+                            // (, line 78
+                            // <-, line 78
+                            slice_from("y");
+                            break;
+                        
+                        case 2: 
+                            // (, line 79
+                            // <-, line 79
+                            slice_from("i");
+                            break;
+                        
+                        case 3: 
+                            // (, line 80
+                            // next, line 80
+                            if (cursor >= limit)
+                            {
+                                goto lab5_brk;
+                            }
+                            cursor++;
+                            break;
+                        }
+                    goto replab1;
+                }
+                while (false);
 
 lab5_brk: ;
-				
-				cursor = v_1;
-				goto replab1_brk;
+                
+                cursor = v_1;
+                goto replab1_brk;
 
 replab1: ;
-			}
+            }
 
 replab1_brk: ;
-			
-			return true;
-		}
-		
-		private bool r_R1()
-		{
-			if (!(I_p1 <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_R2()
-		{
-			if (!(I_p2 <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_undouble()
-		{
-			int v_1;
-			// (, line 90
-			// test, line 91
-			v_1 = limit - cursor;
-			// among, line 91
-			if (find_among_b(a_2, 3) == 0)
-			{
-				return false;
-			}
-			cursor = limit - v_1;
-			// [, line 91
-			ket = cursor;
-			// next, line 91
-			if (cursor <= limit_backward)
-			{
-				return false;
-			}
-			cursor--;
-			// ], line 91
-			bra = cursor;
-			// delete, line 91
-			slice_del();
-			return true;
-		}
-		
-		private bool r_e_ending()
-		{
-			int v_1;
-			// (, line 94
-			// unset e_found, line 95
-			B_e_found = false;
-			// [, line 96
-			ket = cursor;
-			// literal, line 96
-			if (!(eq_s_b(1, "e")))
-			{
-				return false;
-			}
-			// ], line 96
-			bra = cursor;
-			// call R1, line 96
-			if (!r_R1())
-			{
-				return false;
-			}
-			// test, line 96
-			v_1 = limit - cursor;
-			if (!(out_grouping_b(g_v, 97, 232)))
-			{
-				return false;
-			}
-			cursor = limit - v_1;
-			// delete, line 96
-			slice_del();
-			// set e_found, line 97
-			B_e_found = true;
-			// call undouble, line 98
-			if (!r_undouble())
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_en_ending()
-		{
-			int v_1;
-			int v_2;
-			// (, line 101
-			// call R1, line 102
-			if (!r_R1())
-			{
-				return false;
-			}
-			// and, line 102
-			v_1 = limit - cursor;
-			if (!(out_grouping_b(g_v, 97, 232)))
-			{
-				return false;
-			}
-			cursor = limit - v_1;
-			// not, line 102
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 102
-					if (!(eq_s_b(3, "gem")))
-					{
-						goto lab0_brk;
-					}
-					return false;
-				}
-				while (false);
+            
+            return true;
+        }
+        
+        private bool r_R1()
+        {
+            if (!(I_p1 <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_R2()
+        {
+            if (!(I_p2 <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_undouble()
+        {
+            int v_1;
+            // (, line 90
+            // test, line 91
+            v_1 = limit - cursor;
+            // among, line 91
+            if (find_among_b(a_2, 3) == 0)
+            {
+                return false;
+            }
+            cursor = limit - v_1;
+            // [, line 91
+            ket = cursor;
+            // next, line 91
+            if (cursor <= limit_backward)
+            {
+                return false;
+            }
+            cursor--;
+            // ], line 91
+            bra = cursor;
+            // delete, line 91
+            slice_del();
+            return true;
+        }
+        
+        private bool r_e_ending()
+        {
+            int v_1;
+            // (, line 94
+            // unset e_found, line 95
+            B_e_found = false;
+            // [, line 96
+            ket = cursor;
+            // literal, line 96
+            if (!(eq_s_b(1, "e")))
+            {
+                return false;
+            }
+            // ], line 96
+            bra = cursor;
+            // call R1, line 96
+            if (!r_R1())
+            {
+                return false;
+            }
+            // test, line 96
+            v_1 = limit - cursor;
+            if (!(out_grouping_b(g_v, 97, 232)))
+            {
+                return false;
+            }
+            cursor = limit - v_1;
+            // delete, line 96
+            slice_del();
+            // set e_found, line 97
+            B_e_found = true;
+            // call undouble, line 98
+            if (!r_undouble())
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_en_ending()
+        {
+            int v_1;
+            int v_2;
+            // (, line 101
+            // call R1, line 102
+            if (!r_R1())
+            {
+                return false;
+            }
+            // and, line 102
+            v_1 = limit - cursor;
+            if (!(out_grouping_b(g_v, 97, 232)))
+            {
+                return false;
+            }
+            cursor = limit - v_1;
+            // not, line 102
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 102
+                    if (!(eq_s_b(3, "gem")))
+                    {
+                        goto lab0_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab0_brk: ;
-				
-				cursor = limit - v_2;
-			}
-			// delete, line 102
-			slice_del();
-			// call undouble, line 103
-			if (!r_undouble())
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_standard_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			int v_6;
-			int v_7;
-			int v_8;
-			int v_9;
-			int v_10;
-			// (, line 106
-			// do, line 107
-			v_1 = limit - cursor;
-			do 
-			{
-				// (, line 107
-				// [, line 108
-				ket = cursor;
-				// substring, line 108
-				among_var = find_among_b(a_3, 5);
-				if (among_var == 0)
-				{
-					goto lab0_brk;
-				}
-				// ], line 108
-				bra = cursor;
-				switch (among_var)
-				{
-					
-					case 0: 
-						goto lab0_brk;
-					
-					case 1: 
-						// (, line 110
-						// call R1, line 110
-						if (!r_R1())
-						{
-							goto lab0_brk;
-						}
-						// <-, line 110
-						slice_from("heid");
-						break;
-					
-					case 2: 
-						// (, line 113
-						// call en_ending, line 113
-						if (!r_en_ending())
-						{
-							goto lab0_brk;
-						}
-						break;
-					
-					case 3: 
-						// (, line 116
-						// call R1, line 116
-						if (!r_R1())
-						{
-							goto lab0_brk;
-						}
-						if (!(out_grouping_b(g_v_j, 97, 232)))
-						{
-							goto lab0_brk;
-						}
-						// delete, line 116
-						slice_del();
-						break;
-					}
-			}
-			while (false);
+                
+                cursor = limit - v_2;
+            }
+            // delete, line 102
+            slice_del();
+            // call undouble, line 103
+            if (!r_undouble())
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_standard_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            int v_6;
+            int v_7;
+            int v_8;
+            int v_9;
+            int v_10;
+            // (, line 106
+            // do, line 107
+            v_1 = limit - cursor;
+            do 
+            {
+                // (, line 107
+                // [, line 108
+                ket = cursor;
+                // substring, line 108
+                among_var = find_among_b(a_3, 5);
+                if (among_var == 0)
+                {
+                    goto lab0_brk;
+                }
+                // ], line 108
+                bra = cursor;
+                switch (among_var)
+                {
+                    
+                    case 0: 
+                        goto lab0_brk;
+                    
+                    case 1: 
+                        // (, line 110
+                        // call R1, line 110
+                        if (!r_R1())
+                        {
+                            goto lab0_brk;
+                        }
+                        // <-, line 110
+                        slice_from("heid");
+                        break;
+                    
+                    case 2: 
+                        // (, line 113
+                        // call en_ending, line 113
+                        if (!r_en_ending())
+                        {
+                            goto lab0_brk;
+                        }
+                        break;
+                    
+                    case 3: 
+                        // (, line 116
+                        // call R1, line 116
+                        if (!r_R1())
+                        {
+                            goto lab0_brk;
+                        }
+                        if (!(out_grouping_b(g_v_j, 97, 232)))
+                        {
+                            goto lab0_brk;
+                        }
+                        // delete, line 116
+                        slice_del();
+                        break;
+                    }
+            }
+            while (false);
 
 lab0_brk: ;
 
-			cursor = limit - v_1;
-			// do, line 120
-			v_2 = limit - cursor;
-			do 
-			{
-				// call e_ending, line 120
-				if (!r_e_ending())
-				{
-					goto lab1_brk;
-				}
-			}
-			while (false);
+            cursor = limit - v_1;
+            // do, line 120
+            v_2 = limit - cursor;
+            do 
+            {
+                // call e_ending, line 120
+                if (!r_e_ending())
+                {
+                    goto lab1_brk;
+                }
+            }
+            while (false);
 
 lab1_brk: ;
 
-			cursor = limit - v_2;
-			// do, line 122
-			v_3 = limit - cursor;
-			do 
-			{
-				// (, line 122
-				// [, line 122
-				ket = cursor;
-				// literal, line 122
-				if (!(eq_s_b(4, "heid")))
-				{
-					goto lab2_brk;
-				}
-				// ], line 122
-				bra = cursor;
-				// call R2, line 122
-				if (!r_R2())
-				{
-					goto lab2_brk;
-				}
-				// not, line 122
-				{
-					v_4 = limit - cursor;
-					do 
-					{
-						// literal, line 122
-						if (!(eq_s_b(1, "c")))
-						{
-							goto lab3_brk;
-						}
-						goto lab2_brk;
-					}
-					while (false);
+            cursor = limit - v_2;
+            // do, line 122
+            v_3 = limit - cursor;
+            do 
+            {
+                // (, line 122
+                // [, line 122
+                ket = cursor;
+                // literal, line 122
+                if (!(eq_s_b(4, "heid")))
+                {
+                    goto lab2_brk;
+                }
+                // ], line 122
+                bra = cursor;
+                // call R2, line 122
+                if (!r_R2())
+                {
+                    goto lab2_brk;
+                }
+                // not, line 122
+                {
+                    v_4 = limit - cursor;
+                    do 
+                    {
+                        // literal, line 122
+                        if (!(eq_s_b(1, "c")))
+                        {
+                            goto lab3_brk;
+                        }
+                        goto lab2_brk;
+                    }
+                    while (false);
 
 lab3_brk: ;
-					
-					cursor = limit - v_4;
-				}
-				// delete, line 122
-				slice_del();
-				// [, line 123
-				ket = cursor;
-				// literal, line 123
-				if (!(eq_s_b(2, "en")))
-				{
-					goto lab2_brk;
-				}
-				// ], line 123
-				bra = cursor;
-				// call en_ending, line 123
-				if (!r_en_ending())
-				{
-					goto lab2_brk;
-				}
-			}
-			while (false);
+                    
+                    cursor = limit - v_4;
+                }
+                // delete, line 122
+                slice_del();
+                // [, line 123
+                ket = cursor;
+                // literal, line 123
+                if (!(eq_s_b(2, "en")))
+                {
+                    goto lab2_brk;
+                }
+                // ], line 123
+                bra = cursor;
+                // call en_ending, line 123
+                if (!r_en_ending())
+                {
+                    goto lab2_brk;
+                }
+            }
+            while (false);
 
 lab2_brk: ;
-			
-			cursor = limit - v_3;
-			// do, line 126
-			v_5 = limit - cursor;
-			do 
-			{
-				// (, line 126
-				// [, line 127
-				ket = cursor;
-				// substring, line 127
-				among_var = find_among_b(a_4, 6);
-				if (among_var == 0)
-				{
-					goto lab4_brk;
-				}
-				// ], line 127
-				bra = cursor;
-				switch (among_var)
-				{
-					
-					case 0: 
-						goto lab4_brk;
-					
-					case 1: 
-						// (, line 129
-						// call R2, line 129
-						if (!r_R2())
-						{
-							goto lab4_brk;
-						}
-						// delete, line 129
-						slice_del();
-						// or, line 130
-						do 
-						{
-							v_6 = limit - cursor;
-							do 
-							{
-								// (, line 130
-								// [, line 130
-								ket = cursor;
-								// literal, line 130
-								if (!(eq_s_b(2, "ig")))
-								{
-									goto lab6_brk;
-								}
-								// ], line 130
-								bra = cursor;
-								// call R2, line 130
-								if (!r_R2())
-								{
-									goto lab6_brk;
-								}
-								// not, line 130
-								{
-									v_7 = limit - cursor;
-									do 
-									{
-										// literal, line 130
-										if (!(eq_s_b(1, "e")))
-										{
-											goto lab7_brk;
-										}
-										goto lab6_brk;
-									}
-									while (false);
+            
+            cursor = limit - v_3;
+            // do, line 126
+            v_5 = limit - cursor;
+            do 
+            {
+                // (, line 126
+                // [, line 127
+                ket = cursor;
+                // substring, line 127
+                among_var = find_among_b(a_4, 6);
+                if (among_var == 0)
+                {
+                    goto lab4_brk;
+                }
+                // ], line 127
+                bra = cursor;
+                switch (among_var)
+                {
+                    
+                    case 0: 
+                        goto lab4_brk;
+                    
+                    case 1: 
+                        // (, line 129
+                        // call R2, line 129
+                        if (!r_R2())
+                        {
+                            goto lab4_brk;
+                        }
+                        // delete, line 129
+                        slice_del();
+                        // or, line 130
+                        do 
+                        {
+                            v_6 = limit - cursor;
+                            do 
+                            {
+                                // (, line 130
+                                // [, line 130
+                                ket = cursor;
+                                // literal, line 130
+                                if (!(eq_s_b(2, "ig")))
+                                {
+                                    goto lab6_brk;
+                                }
+                                // ], line 130
+                                bra = cursor;
+                                // call R2, line 130
+                                if (!r_R2())
+                                {
+                                    goto lab6_brk;
+                                }
+                                // not, line 130
+                                {
+                                    v_7 = limit - cursor;
+                                    do 
+                                    {
+                                        // literal, line 130
+                                        if (!(eq_s_b(1, "e")))
+                                        {
+                                            goto lab7_brk;
+                                        }
+                                        goto lab6_brk;
+                                    }
+                                    while (false);
 
 lab7_brk: ;
-									
-									cursor = limit - v_7;
-								}
-								// delete, line 130
-								slice_del();
-								goto lab5_brk;
-							}
-							while (false);
+                                    
+                                    cursor = limit - v_7;
+                                }
+                                // delete, line 130
+                                slice_del();
+                                goto lab5_brk;
+                            }
+                            while (false);
 
 lab6_brk: ;
-							
-							cursor = limit - v_6;
-							// call undouble, line 130
-							if (!r_undouble())
-							{
-								goto lab4_brk;
-							}
-						}
-						while (false);
+                            
+                            cursor = limit - v_6;
+                            // call undouble, line 130
+                            if (!r_undouble())
+                            {
+                                goto lab4_brk;
+                            }
+                        }
+                        while (false);
 
 lab5_brk: ;
-						
-						break;
-					
-					case 2: 
-						// (, line 133
-						// call R2, line 133
-						if (!r_R2())
-						{
-							goto lab4_brk;
-						}
-						// not, line 133
-						{
-							v_8 = limit - cursor;
-							do 
-							{
-								// literal, line 133
-								if (!(eq_s_b(1, "e")))
-								{
-									goto lab8_brk;
-								}
-								goto lab4_brk;
-							}
-							while (false);
+                        
+                        break;
+                    
+                    case 2: 
+                        // (, line 133
+                        // call R2, line 133
+                        if (!r_R2())
+                        {
+                            goto lab4_brk;
+                        }
+                        // not, line 133
+                        {
+                            v_8 = limit - cursor;
+                            do 
+                            {
+                                // literal, line 133
+                                if (!(eq_s_b(1, "e")))
+                                {
+                                    goto lab8_brk;
+                                }
+                                goto lab4_brk;
+                            }
+                            while (false);
 
 lab8_brk: ;
 
-							cursor = limit - v_8;
-						}
-						// delete, line 133
-						slice_del();
-						break;
-					
-					case 3: 
-						// (, line 136
-						// call R2, line 136
-						if (!r_R2())
-						{
-							goto lab4_brk;
-						}
-						// delete, line 136
-						slice_del();
-						// call e_ending, line 136
-						if (!r_e_ending())
-						{
-							goto lab4_brk;
-						}
-						break;
-					
-					case 4: 
-						// (, line 139
-						// call R2, line 139
-						if (!r_R2())
-						{
-							goto lab4_brk;
-						}
-						// delete, line 139
-						slice_del();
-						break;
-					
-					case 5: 
-						// (, line 142
-						// call R2, line 142
-						if (!r_R2())
-						{
-							goto lab4_brk;
-						}
-						// Boolean test e_found, line 142
-						if (!(B_e_found))
-						{
-							goto lab4_brk;
-						}
-						// delete, line 142
-						slice_del();
-						break;
-					}
-			}
-			while (false);
+                            cursor = limit - v_8;
+                        }
+                        // delete, line 133
+                        slice_del();
+                        break;
+                    
+                    case 3: 
+                        // (, line 136
+                        // call R2, line 136
+                        if (!r_R2())
+                        {
+                            goto lab4_brk;
+                        }
+                        // delete, line 136
+                        slice_del();
+                        // call e_ending, line 136
+                        if (!r_e_ending())
+                        {
+                            goto lab4_brk;
+                        }
+                        break;
+                    
+                    case 4: 
+                        // (, line 139
+                        // call R2, line 139
+                        if (!r_R2())
+                        {
+                            goto lab4_brk;
+                        }
+                        // delete, line 139
+                        slice_del();
+                        break;
+                    
+                    case 5: 
+                        // (, line 142
+                        // call R2, line 142
+                        if (!r_R2())
+                        {
+                            goto lab4_brk;
+                        }
+                        // Boolean test e_found, line 142
+                        if (!(B_e_found))
+                        {
+                            goto lab4_brk;
+                        }
+                        // delete, line 142
+                        slice_del();
+                        break;
+                    }
+            }
+            while (false);
 
 lab4_brk: ;
-			
-			cursor = limit - v_5;
-			// do, line 146
-			v_9 = limit - cursor;
-			do 
-			{
-				// (, line 146
-				if (!(out_grouping_b(g_v_I, 73, 232)))
-				{
-					goto lab9_brk;
-				}
-				// test, line 148
-				v_10 = limit - cursor;
-				// (, line 148
-				// among, line 149
-				if (find_among_b(a_5, 4) == 0)
-				{
-					goto lab9_brk;
-				}
-				if (!(out_grouping_b(g_v, 97, 232)))
-				{
-					goto lab9_brk;
-				}
-				cursor = limit - v_10;
-				// [, line 152
-				ket = cursor;
-				// next, line 152
-				if (cursor <= limit_backward)
-				{
-					goto lab9_brk;
-				}
-				cursor--;
-				// ], line 152
-				bra = cursor;
-				// delete, line 152
-				slice_del();
-			}
-			while (false);
+            
+            cursor = limit - v_5;
+            // do, line 146
+            v_9 = limit - cursor;
+            do 
+            {
+                // (, line 146
+                if (!(out_grouping_b(g_v_I, 73, 232)))
+                {
+                    goto lab9_brk;
+                }
+                // test, line 148
+                v_10 = limit - cursor;
+                // (, line 148
+                // among, line 149
+                if (find_among_b(a_5, 4) == 0)
+                {
+                    goto lab9_brk;
+                }
+                if (!(out_grouping_b(g_v, 97, 232)))
+                {
+                    goto lab9_brk;
+                }
+                cursor = limit - v_10;
+                // [, line 152
+                ket = cursor;
+                // next, line 152
+                if (cursor <= limit_backward)
+                {
+                    goto lab9_brk;
+                }
+                cursor--;
+                // ], line 152
+                bra = cursor;
+                // delete, line 152
+                slice_del();
+            }
+            while (false);
 
 lab9_brk: ;
-			
-			cursor = limit - v_9;
-			return true;
-		}
-		
-		public override bool Stem()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			// (, line 157
-			// do, line 159
-			v_1 = cursor;
-			do 
-			{
-				// call prelude, line 159
-				if (!r_prelude())
-				{
-					goto lab0_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_9;
+            return true;
+        }
+        
+        public override bool Stem()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            // (, line 157
+            // do, line 159
+            v_1 = cursor;
+            do 
+            {
+                // call prelude, line 159
+                if (!r_prelude())
+                {
+                    goto lab0_brk;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
 
-			cursor = v_1;
-			// do, line 160
-			v_2 = cursor;
-			do 
-			{
-				// call mark_regions, line 160
-				if (!r_mark_regions())
-				{
-					goto lab1_brk;
-				}
-			}
-			while (false);
+            cursor = v_1;
+            // do, line 160
+            v_2 = cursor;
+            do 
+            {
+                // call mark_regions, line 160
+                if (!r_mark_regions())
+                {
+                    goto lab1_brk;
+                }
+            }
+            while (false);
 
 lab1_brk: ;
-			
-			cursor = v_2;
-			// backwards, line 161
-			limit_backward = cursor; cursor = limit;
-			// do, line 162
-			v_3 = limit - cursor;
-			do 
-			{
-				// call standard_suffix, line 162
-				if (!r_standard_suffix())
-				{
-					goto lab2_brk;
-				}
-			}
-			while (false);
+            
+            cursor = v_2;
+            // backwards, line 161
+            limit_backward = cursor; cursor = limit;
+            // do, line 162
+            v_3 = limit - cursor;
+            do 
+            {
+                // call standard_suffix, line 162
+                if (!r_standard_suffix())
+                {
+                    goto lab2_brk;
+                }
+            }
+            while (false);
 
 lab2_brk: ;
-			
-			cursor = limit - v_3;
-			cursor = limit_backward; // do, line 163
-			v_4 = cursor;
-			do 
-			{
-				// call postlude, line 163
-				if (!r_postlude())
-				{
-					goto lab3_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_3;
+            cursor = limit_backward; // do, line 163
+            v_4 = cursor;
+            do 
+            {
+                // call postlude, line 163
+                if (!r_postlude())
+                {
+                    goto lab3_brk;
+                }
+            }
+            while (false);
 
 lab3_brk: ;
-			
-			cursor = v_4;
-			return true;
-		}
-	}
+            
+            cursor = v_4;
+            return true;
+        }
+    }
 }
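
For reference, every generated stemmer in this contrib is driven the same way: the caller loads a word into the buffer inherited from SnowballProgram, invokes Stem() (which runs prelude, mark_regions, the suffix rules, and postlude as seen above), and reads the result back. A minimal usage sketch, assuming this port keeps the Java-style SetCurrent/GetCurrent members on SnowballProgram and that DutchStemmer sits in SF.Snowball.Ext alongside the HungarianStemmer below; treat the names as illustrative rather than confirmed:

    using System;
    using SF.Snowball.Ext;

    class StemDemo
    {
        static void Main()
        {
            // Hypothetical driver: SetCurrent/GetCurrent are assumed to mirror
            // the Java SnowballProgram API these generated classes are ported from.
            DutchStemmer stemmer = new DutchStemmer();
            stemmer.SetCurrent("lichamelijke");      // load the word to be stemmed
            stemmer.Stem();                          // prelude -> mark_regions -> standard_suffix -> postlude
            Console.WriteLine(stemmer.GetCurrent()); // prints the stemmed form
        }
    }

The stemmer is stateful (cursor, limit, I_p1/I_p2 region marks), so one instance should be reused serially, not shared across threads.
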


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/HungarianStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/HungarianStemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/HungarianStemmer.cs
index 77c251f..6f83828 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/HungarianStemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/HungarianStemmer.cs
@@ -57,230 +57,230 @@ using SnowballProgram = SF.Snowball.SnowballProgram;
 namespace SF.Snowball.Ext
 {
     /*
-	 * Generated class implementing code defined by a snowball script.
-	 */
+     * Generated class implementing code defined by a snowball script.
+     */
     public class HungarianStemmer : SnowballProgram
     {
 
         public HungarianStemmer()
         {
             a_0 = new Among[] {
-				new Among("cs", -1, -1, "", null),
-				new Among("dzs", -1, -1, "", null),
-				new Among("gy", -1, -1, "", null),
-				new Among("ly", -1, -1, "", null),
-				new Among("ny", -1, -1, "", null),
-				new Among("sz", -1, -1, "", null),
-				new Among("ty", -1, -1, "", null),
-				new Among("zs", -1, -1, "", null)
-			};
+                new Among("cs", -1, -1, "", null),
+                new Among("dzs", -1, -1, "", null),
+                new Among("gy", -1, -1, "", null),
+                new Among("ly", -1, -1, "", null),
+                new Among("ny", -1, -1, "", null),
+                new Among("sz", -1, -1, "", null),
+                new Among("ty", -1, -1, "", null),
+                new Among("zs", -1, -1, "", null)
+            };
 
             a_1 = new Among[] {
-				new Among("\u00E1", -1, 1, "", null),
-				new Among("\u00E9", -1, 2, "", null)
-			};
+                new Among("\u00E1", -1, 1, "", null),
+                new Among("\u00E9", -1, 2, "", null)
+            };
 
             a_2 = new Among[] {
-				new Among("bb", -1, -1, "", null),
-				new Among("cc", -1, -1, "", null),
-				new Among("dd", -1, -1, "", null),
-				new Among("ff", -1, -1, "", null),
-				new Among("gg", -1, -1, "", null),
-				new Among("jj", -1, -1, "", null),
-				new Among("kk", -1, -1, "", null),
-				new Among("ll", -1, -1, "", null),
-				new Among("mm", -1, -1, "", null),
-				new Among("nn", -1, -1, "", null),
-				new Among("pp", -1, -1, "", null),
-				new Among("rr", -1, -1, "", null),
-				new Among("ccs", -1, -1, "", null),
-				new Among("ss", -1, -1, "", null),
-				new Among("zzs", -1, -1, "", null),
-				new Among("tt", -1, -1, "", null),
-				new Among("vv", -1, -1, "", null),
-				new Among("ggy", -1, -1, "", null),
-				new Among("lly", -1, -1, "", null),
-				new Among("nny", -1, -1, "", null),
-				new Among("tty", -1, -1, "", null),
-				new Among("ssz", -1, -1, "", null),
-				new Among("zz", -1, -1, "", null)
-			};
+                new Among("bb", -1, -1, "", null),
+                new Among("cc", -1, -1, "", null),
+                new Among("dd", -1, -1, "", null),
+                new Among("ff", -1, -1, "", null),
+                new Among("gg", -1, -1, "", null),
+                new Among("jj", -1, -1, "", null),
+                new Among("kk", -1, -1, "", null),
+                new Among("ll", -1, -1, "", null),
+                new Among("mm", -1, -1, "", null),
+                new Among("nn", -1, -1, "", null),
+                new Among("pp", -1, -1, "", null),
+                new Among("rr", -1, -1, "", null),
+                new Among("ccs", -1, -1, "", null),
+                new Among("ss", -1, -1, "", null),
+                new Among("zzs", -1, -1, "", null),
+                new Among("tt", -1, -1, "", null),
+                new Among("vv", -1, -1, "", null),
+                new Among("ggy", -1, -1, "", null),
+                new Among("lly", -1, -1, "", null),
+                new Among("nny", -1, -1, "", null),
+                new Among("tty", -1, -1, "", null),
+                new Among("ssz", -1, -1, "", null),
+                new Among("zz", -1, -1, "", null)
+            };
 
             a_3 = new Among[] {
-				new Among("al", -1, 1, "", null),
-				new Among("el", -1, 2, "", null)
-			};
+                new Among("al", -1, 1, "", null),
+                new Among("el", -1, 2, "", null)
+            };
 
             a_4 = new Among[] {
-				new Among("ba", -1, -1, "", null),
-				new Among("ra", -1, -1, "", null),
-				new Among("be", -1, -1, "", null),
-				new Among("re", -1, -1, "", null),
-				new Among("ig", -1, -1, "", null),
-				new Among("nak", -1, -1, "", null),
-				new Among("nek", -1, -1, "", null),
-				new Among("val", -1, -1, "", null),
-				new Among("vel", -1, -1, "", null),
-				new Among("ul", -1, -1, "", null),
-				new Among("n\u00E1l", -1, -1, "", null),
-				new Among("n\u00E9l", -1, -1, "", null),
-				new Among("b\u00F3l", -1, -1, "", null),
-				new Among("r\u00F3l", -1, -1, "", null),
-				new Among("t\u00F3l", -1, -1, "", null),
-				new Among("b\u00F5l", -1, -1, "", null),
-				new Among("r\u00F5l", -1, -1, "", null),
-				new Among("t\u00F5l", -1, -1, "", null),
-				new Among("\u00FCl", -1, -1, "", null),
-				new Among("n", -1, -1, "", null),
-				new Among("an", 19, -1, "", null),
-				new Among("ban", 20, -1, "", null),
-				new Among("en", 19, -1, "", null),
-				new Among("ben", 22, -1, "", null),
-				new Among("k\u00E9ppen", 22, -1, "", null),
-				new Among("on", 19, -1, "", null),
-				new Among("\u00F6n", 19, -1, "", null),
-				new Among("k\u00E9pp", -1, -1, "", null),
-				new Among("kor", -1, -1, "", null),
-				new Among("t", -1, -1, "", null),
-				new Among("at", 29, -1, "", null),
-				new Among("et", 29, -1, "", null),
-				new Among("k\u00E9nt", 29, -1, "", null),
-				new Among("ank\u00E9nt", 32, -1, "", null),
-				new Among("enk\u00E9nt", 32, -1, "", null),
-				new Among("onk\u00E9nt", 32, -1, "", null),
-				new Among("ot", 29, -1, "", null),
-				new Among("\u00E9rt", 29, -1, "", null),
-				new Among("\u00F6t", 29, -1, "", null),
-				new Among("hez", -1, -1, "", null),
-				new Among("hoz", -1, -1, "", null),
-				new Among("h\u00F6z", -1, -1, "", null),
-				new Among("v\u00E1", -1, -1, "", null),
-				new Among("v\u00E9", -1, -1, "", null)
-			};
+                new Among("ba", -1, -1, "", null),
+                new Among("ra", -1, -1, "", null),
+                new Among("be", -1, -1, "", null),
+                new Among("re", -1, -1, "", null),
+                new Among("ig", -1, -1, "", null),
+                new Among("nak", -1, -1, "", null),
+                new Among("nek", -1, -1, "", null),
+                new Among("val", -1, -1, "", null),
+                new Among("vel", -1, -1, "", null),
+                new Among("ul", -1, -1, "", null),
+                new Among("n\u00E1l", -1, -1, "", null),
+                new Among("n\u00E9l", -1, -1, "", null),
+                new Among("b\u00F3l", -1, -1, "", null),
+                new Among("r\u00F3l", -1, -1, "", null),
+                new Among("t\u00F3l", -1, -1, "", null),
+                new Among("b\u00F5l", -1, -1, "", null),
+                new Among("r\u00F5l", -1, -1, "", null),
+                new Among("t\u00F5l", -1, -1, "", null),
+                new Among("\u00FCl", -1, -1, "", null),
+                new Among("n", -1, -1, "", null),
+                new Among("an", 19, -1, "", null),
+                new Among("ban", 20, -1, "", null),
+                new Among("en", 19, -1, "", null),
+                new Among("ben", 22, -1, "", null),
+                new Among("k\u00E9ppen", 22, -1, "", null),
+                new Among("on", 19, -1, "", null),
+                new Among("\u00F6n", 19, -1, "", null),
+                new Among("k\u00E9pp", -1, -1, "", null),
+                new Among("kor", -1, -1, "", null),
+                new Among("t", -1, -1, "", null),
+                new Among("at", 29, -1, "", null),
+                new Among("et", 29, -1, "", null),
+                new Among("k\u00E9nt", 29, -1, "", null),
+                new Among("ank\u00E9nt", 32, -1, "", null),
+                new Among("enk\u00E9nt", 32, -1, "", null),
+                new Among("onk\u00E9nt", 32, -1, "", null),
+                new Among("ot", 29, -1, "", null),
+                new Among("\u00E9rt", 29, -1, "", null),
+                new Among("\u00F6t", 29, -1, "", null),
+                new Among("hez", -1, -1, "", null),
+                new Among("hoz", -1, -1, "", null),
+                new Among("h\u00F6z", -1, -1, "", null),
+                new Among("v\u00E1", -1, -1, "", null),
+                new Among("v\u00E9", -1, -1, "", null)
+            };
 
             a_5 = new Among[] {
-				new Among("\u00E1n", -1, 2, "", null),
-				new Among("\u00E9n", -1, 1, "", null),
-				new Among("\u00E1nk\u00E9nt", -1, 3, "", null)
-			};
+                new Among("\u00E1n", -1, 2, "", null),
+                new Among("\u00E9n", -1, 1, "", null),
+                new Among("\u00E1nk\u00E9nt", -1, 3, "", null)
+            };
 
             a_6 = new Among[] {
-				new Among("stul", -1, 2, "", null),
-				new Among("astul", 0, 1, "", null),
-				new Among("\u00E1stul", 0, 3, "", null),
-				new Among("st\u00FCl", -1, 2, "", null),
-				new Among("est\u00FCl", 3, 1, "", null),
-				new Among("\u00E9st\u00FCl", 3, 4, "", null)
-			};
+                new Among("stul", -1, 2, "", null),
+                new Among("astul", 0, 1, "", null),
+                new Among("\u00E1stul", 0, 3, "", null),
+                new Among("st\u00FCl", -1, 2, "", null),
+                new Among("est\u00FCl", 3, 1, "", null),
+                new Among("\u00E9st\u00FCl", 3, 4, "", null)
+            };
 
             a_7 = new Among[] {
-				new Among("\u00E1", -1, 1, "", null),
-				new Among("\u00E9", -1, 2, "", null)
-			};
+                new Among("\u00E1", -1, 1, "", null),
+                new Among("\u00E9", -1, 2, "", null)
+            };
 
             a_8 = new Among[] {
-				new Among("k", -1, 7, "", null),
-				new Among("ak", 0, 4, "", null),
-				new Among("ek", 0, 6, "", null),
-				new Among("ok", 0, 5, "", null),
-				new Among("\u00E1k", 0, 1, "", null),
-				new Among("\u00E9k", 0, 2, "", null),
-				new Among("\u00F6k", 0, 3, "", null)
-			};
+                new Among("k", -1, 7, "", null),
+                new Among("ak", 0, 4, "", null),
+                new Among("ek", 0, 6, "", null),
+                new Among("ok", 0, 5, "", null),
+                new Among("\u00E1k", 0, 1, "", null),
+                new Among("\u00E9k", 0, 2, "", null),
+                new Among("\u00F6k", 0, 3, "", null)
+            };
 
             a_9 = new Among[] {
-				new Among("\u00E9i", -1, 7, "", null),
-				new Among("\u00E1\u00E9i", 0, 6, "", null),
-				new Among("\u00E9\u00E9i", 0, 5, "", null),
-				new Among("\u00E9", -1, 9, "", null),
-				new Among("k\u00E9", 3, 4, "", null),
-				new Among("ak\u00E9", 4, 1, "", null),
-				new Among("ek\u00E9", 4, 1, "", null),
-				new Among("ok\u00E9", 4, 1, "", null),
-				new Among("\u00E1k\u00E9", 4, 3, "", null),
-				new Among("\u00E9k\u00E9", 4, 2, "", null),
-				new Among("\u00F6k\u00E9", 4, 1, "", null),
-				new Among("\u00E9\u00E9", 3, 8, "", null)
-			};
+                new Among("\u00E9i", -1, 7, "", null),
+                new Among("\u00E1\u00E9i", 0, 6, "", null),
+                new Among("\u00E9\u00E9i", 0, 5, "", null),
+                new Among("\u00E9", -1, 9, "", null),
+                new Among("k\u00E9", 3, 4, "", null),
+                new Among("ak\u00E9", 4, 1, "", null),
+                new Among("ek\u00E9", 4, 1, "", null),
+                new Among("ok\u00E9", 4, 1, "", null),
+                new Among("\u00E1k\u00E9", 4, 3, "", null),
+                new Among("\u00E9k\u00E9", 4, 2, "", null),
+                new Among("\u00F6k\u00E9", 4, 1, "", null),
+                new Among("\u00E9\u00E9", 3, 8, "", null)
+            };
 
             a_10 = new Among[] {
-				new Among("a", -1, 18, "", null),
-				new Among("ja", 0, 17, "", null),
-				new Among("d", -1, 16, "", null),
-				new Among("ad", 2, 13, "", null),
-				new Among("ed", 2, 13, "", null),
-				new Among("od", 2, 13, "", null),
-				new Among("\u00E1d", 2, 14, "", null),
-				new Among("\u00E9d", 2, 15, "", null),
-				new Among("\u00F6d", 2, 13, "", null),
-				new Among("e", -1, 18, "", null),
-				new Among("je", 9, 17, "", null),
-				new Among("nk", -1, 4, "", null),
-				new Among("unk", 11, 1, "", null),
-				new Among("\u00E1nk", 11, 2, "", null),
-				new Among("\u00E9nk", 11, 3, "", null),
-				new Among("\u00FCnk", 11, 1, "", null),
-				new Among("uk", -1, 8, "", null),
-				new Among("juk", 16, 7, "", null),
-				new Among("\u00E1juk", 17, 5, "", null),
-				new Among("\u00FCk", -1, 8, "", null),
-				new Among("j\u00FCk", 19, 7, "", null),
-				new Among("\u00E9j\u00FCk", 20, 6, "", null),
-				new Among("m", -1, 12, "", null),
-				new Among("am", 22, 9, "", null),
-				new Among("em", 22, 9, "", null),
-				new Among("om", 22, 9, "", null),
-				new Among("\u00E1m", 22, 10, "", null),
-				new Among("\u00E9m", 22, 11, "", null),
-				new Among("o", -1, 18, "", null),
-				new Among("\u00E1", -1, 19, "", null),
-				new Among("\u00E9", -1, 20, "", null)
-			};
+                new Among("a", -1, 18, "", null),
+                new Among("ja", 0, 17, "", null),
+                new Among("d", -1, 16, "", null),
+                new Among("ad", 2, 13, "", null),
+                new Among("ed", 2, 13, "", null),
+                new Among("od", 2, 13, "", null),
+                new Among("\u00E1d", 2, 14, "", null),
+                new Among("\u00E9d", 2, 15, "", null),
+                new Among("\u00F6d", 2, 13, "", null),
+                new Among("e", -1, 18, "", null),
+                new Among("je", 9, 17, "", null),
+                new Among("nk", -1, 4, "", null),
+                new Among("unk", 11, 1, "", null),
+                new Among("\u00E1nk", 11, 2, "", null),
+                new Among("\u00E9nk", 11, 3, "", null),
+                new Among("\u00FCnk", 11, 1, "", null),
+                new Among("uk", -1, 8, "", null),
+                new Among("juk", 16, 7, "", null),
+                new Among("\u00E1juk", 17, 5, "", null),
+                new Among("\u00FCk", -1, 8, "", null),
+                new Among("j\u00FCk", 19, 7, "", null),
+                new Among("\u00E9j\u00FCk", 20, 6, "", null),
+                new Among("m", -1, 12, "", null),
+                new Among("am", 22, 9, "", null),
+                new Among("em", 22, 9, "", null),
+                new Among("om", 22, 9, "", null),
+                new Among("\u00E1m", 22, 10, "", null),
+                new Among("\u00E9m", 22, 11, "", null),
+                new Among("o", -1, 18, "", null),
+                new Among("\u00E1", -1, 19, "", null),
+                new Among("\u00E9", -1, 20, "", null)
+            };
 
             a_11 = new Among[] {
-				new Among("id", -1, 10, "", null),
-				new Among("aid", 0, 9, "", null),
-				new Among("jaid", 1, 6, "", null),
-				new Among("eid", 0, 9, "", null),
-				new Among("jeid", 3, 6, "", null),
-				new Among("\u00E1id", 0, 7, "", null),
-				new Among("\u00E9id", 0, 8, "", null),
-				new Among("i", -1, 15, "", null),
-				new Among("ai", 7, 14, "", null),
-				new Among("jai", 8, 11, "", null),
-				new Among("ei", 7, 14, "", null),
-				new Among("jei", 10, 11, "", null),
-				new Among("\u00E1i", 7, 12, "", null),
-				new Among("\u00E9i", 7, 13, "", null),
-				new Among("itek", -1, 24, "", null),
-				new Among("eitek", 14, 21, "", null),
-				new Among("jeitek", 15, 20, "", null),
-				new Among("\u00E9itek", 14, 23, "", null),
-				new Among("ik", -1, 29, "", null),
-				new Among("aik", 18, 26, "", null),
-				new Among("jaik", 19, 25, "", null),
-				new Among("eik", 18, 26, "", null),
-				new Among("jeik", 21, 25, "", null),
-				new Among("\u00E1ik", 18, 27, "", null),
-				new Among("\u00E9ik", 18, 28, "", null),
-				new Among("ink", -1, 20, "", null),
-				new Among("aink", 25, 17, "", null),
-				new Among("jaink", 26, 16, "", null),
-				new Among("eink", 25, 17, "", null),
-				new Among("jeink", 28, 16, "", null),
-				new Among("\u00E1ink", 25, 18, "", null),
-				new Among("\u00E9ink", 25, 19, "", null),
-				new Among("aitok", -1, 21, "", null),
-				new Among("jaitok", 32, 20, "", null),
-				new Among("\u00E1itok", -1, 22, "", null),
-				new Among("im", -1, 5, "", null),
-				new Among("aim", 35, 4, "", null),
-				new Among("jaim", 36, 1, "", null),
-				new Among("eim", 35, 4, "", null),
-				new Among("jeim", 38, 1, "", null),
-				new Among("\u00E1im", 35, 2, "", null),
-				new Among("\u00E9im", 35, 3, "", null)
-			};
+                new Among("id", -1, 10, "", null),
+                new Among("aid", 0, 9, "", null),
+                new Among("jaid", 1, 6, "", null),
+                new Among("eid", 0, 9, "", null),
+                new Among("jeid", 3, 6, "", null),
+                new Among("\u00E1id", 0, 7, "", null),
+                new Among("\u00E9id", 0, 8, "", null),
+                new Among("i", -1, 15, "", null),
+                new Among("ai", 7, 14, "", null),
+                new Among("jai", 8, 11, "", null),
+                new Among("ei", 7, 14, "", null),
+                new Among("jei", 10, 11, "", null),
+                new Among("\u00E1i", 7, 12, "", null),
+                new Among("\u00E9i", 7, 13, "", null),
+                new Among("itek", -1, 24, "", null),
+                new Among("eitek", 14, 21, "", null),
+                new Among("jeitek", 15, 20, "", null),
+                new Among("\u00E9itek", 14, 23, "", null),
+                new Among("ik", -1, 29, "", null),
+                new Among("aik", 18, 26, "", null),
+                new Among("jaik", 19, 25, "", null),
+                new Among("eik", 18, 26, "", null),
+                new Among("jeik", 21, 25, "", null),
+                new Among("\u00E1ik", 18, 27, "", null),
+                new Among("\u00E9ik", 18, 28, "", null),
+                new Among("ink", -1, 20, "", null),
+                new Among("aink", 25, 17, "", null),
+                new Among("jaink", 26, 16, "", null),
+                new Among("eink", 25, 17, "", null),
+                new Among("jeink", 28, 16, "", null),
+                new Among("\u00E1ink", 25, 18, "", null),
+                new Among("\u00E9ink", 25, 19, "", null),
+                new Among("aitok", -1, 21, "", null),
+                new Among("jaitok", 32, 20, "", null),
+                new Among("\u00E1itok", -1, 22, "", null),
+                new Among("im", -1, 5, "", null),
+                new Among("aim", 35, 4, "", null),
+                new Among("jaim", 36, 1, "", null),
+                new Among("eim", 35, 4, "", null),
+                new Among("jeim", 38, 1, "", null),
+                new Among("\u00E1im", 35, 2, "", null),
+                new Among("\u00E9im", 35, 3, "", null)
+            };
 
         }
 
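A note for readers skimming the generated tables above: each Among entry packs one suffix of the stemmer's search structure. Judging from the constructor calls in this diff, the arguments are the string to match, the index of the shorter entry it extends (-1 when it extends none), and the rule number that the stemmer's switch statements dispatch on after a match. The sketch below illustrates that idea only; MiniAmong, FindRule, and MiniAmongDemo are hypothetical helpers, not part of the Snowball port, and the linear scan stands in for the port's real search over the sorted table.

    using System;

    // Hypothetical, simplified model of an Among table (illustration only).
    sealed class MiniAmong
    {
        public readonly string Suffix;  // text to match at the end of the word
        public readonly int Parent;     // index of the entry this one extends, -1 if none
        public readonly int Rule;       // rule id the stemmer dispatches on

        public MiniAmong(string suffix, int parent, int rule)
        {
            Suffix = suffix; Parent = parent; Rule = rule;
        }
    }

    static class MiniAmongDemo
    {
        // Returns the rule of the longest matching suffix, or 0 for no match.
        static int FindRule(MiniAmong[] table, string word)
        {
            int rule = 0, bestLen = -1;
            foreach (var a in table)
            {
                if (a.Suffix.Length > bestLen &&
                    word.EndsWith(a.Suffix, StringComparison.Ordinal))
                {
                    rule = a.Rule;
                    bestLen = a.Suffix.Length;
                }
            }
            return rule;
        }

        static void Main()
        {
            // Mirrors the shape of a_10 above, heavily truncated.
            var table = new[]
            {
                new MiniAmong("a", -1, 18),
                new MiniAmong("ja", 0, 17),
                new MiniAmong("\u00E1", -1, 19),
            };
            Console.WriteLine(FindRule(table, "fa\u00E1"));   // prints 19
        }
    }
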


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/German2Stemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/German2Stemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/German2Stemmer.cs
index d232004..7da55f8 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/German2Stemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/German2Stemmer.cs
@@ -24,864 +24,864 @@ namespace SF.Snowball.Ext
 #pragma warning disable 162,164
 
     /// <summary> Generated class implementing code defined by a snowball script.</summary>
-	public class German2Stemmer : SnowballProgram
-	{
-		public German2Stemmer()
-		{
-			InitBlock();
-		}
-		private void  InitBlock()
-		{
-			a_0 = new Among[]{new Among("", - 1, 6, "", this), new Among("ae", 0, 2, "", this), new Among("oe", 0, 3, "", this), new Among("qu", 0, 5, "", this), new Among("ue", 0, 4, "", this), new Among("\u00DF", 0, 1, "", this)};
-			a_1 = new Among[]{new Among("", - 1, 6, "", this), new Among("U", 0, 2, "", this), new Among("Y", 0, 1, "", this), new Among("\u00E4", 0, 3, "", this), new Among("\u00F6", 0, 4, "", this), new Among("\u00FC", 0, 5, "", this)};
-			a_2 = new Among[]{new Among("e", - 1, 1, "", this), new Among("em", - 1, 1, "", this), new Among("en", - 1, 1, "", this), new Among("ern", - 1, 1, "", this), new Among("er", - 1, 1, "", this), new Among("s", - 1, 2, "", this), new Among("es", 5, 1, "", this)};
-			a_3 = new Among[]{new Among("en", - 1, 1, "", this), new Among("er", - 1, 1, "", this), new Among("st", - 1, 2, "", this), new Among("est", 2, 1, "", this)};
-			a_4 = new Among[]{new Among("ig", - 1, 1, "", this), new Among("lich", - 1, 1, "", this)};
-			a_5 = new Among[]{new Among("end", - 1, 1, "", this), new Among("ig", - 1, 2, "", this), new Among("ung", - 1, 1, "", this), new Among("lich", - 1, 3, "", this), new Among("isch", - 1, 2, "", this), new Among("ik", - 1, 2, "", this), new Among("heit", - 1, 3, "", this), new Among("keit", - 1, 4, "", this)};
-		}
-		
-		private Among[] a_0;
-		private Among[] a_1;
-		private Among[] a_2;
-		private Among[] a_3;
-		private Among[] a_4;
-		private Among[] a_5;
-		private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (8), (char) (0), (char) (32), (char) (8)};
-		private static readonly char[] g_s_ending = new char[]{(char) (117), (char) (30), (char) (5)};
-		private static readonly char[] g_st_ending = new char[]{(char) (117), (char) (30), (char) (4)};
-		
-		private int I_p2;
-		private int I_p1;
+    public class German2Stemmer : SnowballProgram
+    {
+        public German2Stemmer()
+        {
+            InitBlock();
+        }
+        private void  InitBlock()
+        {
+            a_0 = new Among[]{new Among("", - 1, 6, "", this), new Among("ae", 0, 2, "", this), new Among("oe", 0, 3, "", this), new Among("qu", 0, 5, "", this), new Among("ue", 0, 4, "", this), new Among("\u00DF", 0, 1, "", this)};
+            a_1 = new Among[]{new Among("", - 1, 6, "", this), new Among("U", 0, 2, "", this), new Among("Y", 0, 1, "", this), new Among("\u00E4", 0, 3, "", this), new Among("\u00F6", 0, 4, "", this), new Among("\u00FC", 0, 5, "", this)};
+            a_2 = new Among[]{new Among("e", - 1, 1, "", this), new Among("em", - 1, 1, "", this), new Among("en", - 1, 1, "", this), new Among("ern", - 1, 1, "", this), new Among("er", - 1, 1, "", this), new Among("s", - 1, 2, "", this), new Among("es", 5, 1, "", this)};
+            a_3 = new Among[]{new Among("en", - 1, 1, "", this), new Among("er", - 1, 1, "", this), new Among("st", - 1, 2, "", this), new Among("est", 2, 1, "", this)};
+            a_4 = new Among[]{new Among("ig", - 1, 1, "", this), new Among("lich", - 1, 1, "", this)};
+            a_5 = new Among[]{new Among("end", - 1, 1, "", this), new Among("ig", - 1, 2, "", this), new Among("ung", - 1, 1, "", this), new Among("lich", - 1, 3, "", this), new Among("isch", - 1, 2, "", this), new Among("ik", - 1, 2, "", this), new Among("heit", - 1, 3, "", this), new Among("keit", - 1, 4, "", this)};
+        }
+        
+        private Among[] a_0;
+        private Among[] a_1;
+        private Among[] a_2;
+        private Among[] a_3;
+        private Among[] a_4;
+        private Among[] a_5;
+        private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (8), (char) (0), (char) (32), (char) (8)};
+        private static readonly char[] g_s_ending = new char[]{(char) (117), (char) (30), (char) (5)};
+        private static readonly char[] g_st_ending = new char[]{(char) (117), (char) (30), (char) (4)};
+        
+        private int I_p2;
+        private int I_p1;
 
         protected internal virtual void  copy_from(German2Stemmer other)
-		{
-			I_p2 = other.I_p2;
-			I_p1 = other.I_p1;
-			base.copy_from(other);
-		}
-		
-		private bool r_prelude()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			// (, line 28
-			// test, line 30
-			v_1 = cursor;
-			// repeat, line 30
-			while (true)
-			{
-				v_2 = cursor;
-				do 
-				{
-					// goto, line 30
-					while (true)
-					{
-						v_3 = cursor;
-						do 
-						{
-							// (, line 30
-							if (!(in_grouping(g_v, 97, 252)))
-							{
-								goto lab3_brk;
-							}
-							// [, line 31
-							bra = cursor;
-							// or, line 31
-							do 
-							{
-								v_4 = cursor;
-								do 
-								{
-									// (, line 31
-									// literal, line 31
-									if (!(eq_s(1, "u")))
-									{
-										goto lab5_brk;
-									}
-									// ], line 31
-									ket = cursor;
-									if (!(in_grouping(g_v, 97, 252)))
-									{
-										goto lab5_brk;
-									}
-									// <-, line 31
-									slice_from("U");
-									goto lab4_brk;
-								}
-								while (false);
+        {
+            I_p2 = other.I_p2;
+            I_p1 = other.I_p1;
+            base.copy_from(other);
+        }
+        
+        private bool r_prelude()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            // (, line 28
+            // test, line 30
+            v_1 = cursor;
+            // repeat, line 30
+            while (true)
+            {
+                v_2 = cursor;
+                do 
+                {
+                    // goto, line 30
+                    while (true)
+                    {
+                        v_3 = cursor;
+                        do 
+                        {
+                            // (, line 30
+                            if (!(in_grouping(g_v, 97, 252)))
+                            {
+                                goto lab3_brk;
+                            }
+                            // [, line 31
+                            bra = cursor;
+                            // or, line 31
+                            do 
+                            {
+                                v_4 = cursor;
+                                do 
+                                {
+                                    // (, line 31
+                                    // literal, line 31
+                                    if (!(eq_s(1, "u")))
+                                    {
+                                        goto lab5_brk;
+                                    }
+                                    // ], line 31
+                                    ket = cursor;
+                                    if (!(in_grouping(g_v, 97, 252)))
+                                    {
+                                        goto lab5_brk;
+                                    }
+                                    // <-, line 31
+                                    slice_from("U");
+                                    goto lab4_brk;
+                                }
+                                while (false);
 
 lab5_brk: ;
-								
-								cursor = v_4;
-								// (, line 32
-								// literal, line 32
-								if (!(eq_s(1, "y")))
-								{
-									goto lab3_brk;
-								}
-								// ], line 32
-								ket = cursor;
-								if (!(in_grouping(g_v, 97, 252)))
-								{
-									goto lab3_brk;
-								}
-								// <-, line 32
-								slice_from("Y");
-							}
-							while (false);
+                                
+                                cursor = v_4;
+                                // (, line 32
+                                // literal, line 32
+                                if (!(eq_s(1, "y")))
+                                {
+                                    goto lab3_brk;
+                                }
+                                // ], line 32
+                                ket = cursor;
+                                if (!(in_grouping(g_v, 97, 252)))
+                                {
+                                    goto lab3_brk;
+                                }
+                                // <-, line 32
+                                slice_from("Y");
+                            }
+                            while (false);
 
 lab4_brk: ;
-							
-							cursor = v_3;
-							goto golab2_brk;
-						}
-						while (false);
+                            
+                            cursor = v_3;
+                            goto golab2_brk;
+                        }
+                        while (false);
 
 lab3_brk: ;
-						
-						cursor = v_3;
-						if (cursor >= limit)
-						{
-							goto lab1_brk;
-						}
-						cursor++;
-					}
+                        
+                        cursor = v_3;
+                        if (cursor >= limit)
+                        {
+                            goto lab1_brk;
+                        }
+                        cursor++;
+                    }
 
 golab2_brk: ;
-					
-					goto replab0;
-				}
-				while (false);
+                    
+                    goto replab0;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = v_2;
-				goto replab0_brk;
+                
+                cursor = v_2;
+                goto replab0_brk;
 
 replab0: ;
-			}
+            }
 
 replab0_brk: ;
-			
-			cursor = v_1;
-			// repeat, line 35
-			while (true)
-			{
-				v_5 = cursor;
-				do 
-				{
-					// (, line 35
-					// [, line 36
-					bra = cursor;
-					// substring, line 36
-					among_var = find_among(a_0, 6);
-					if (among_var == 0)
-					{
-						goto lab7_brk;
-					}
-					// ], line 36
-					ket = cursor;
-					switch (among_var)
-					{
-						
-						case 0: 
-							goto lab7_brk;
-						
-						case 1: 
-							// (, line 37
-							// <-, line 37
-							slice_from("ss");
-							break;
-						
-						case 2: 
-							// (, line 38
-							// <-, line 38
-							slice_from("\u00E4");
-							break;
-						
-						case 3: 
-							// (, line 39
-							// <-, line 39
-							slice_from("\u00F6");
-							break;
-						
-						case 4: 
-							// (, line 40
-							// <-, line 40
-							slice_from("\u00FC");
-							break;
-						
-						case 5: 
-							// (, line 41
-							// hop, line 41
-							{
-								int c = cursor + 2;
-								if (0 > c || c > limit)
-								{
-									goto lab7_brk;
-								}
-								cursor = c;
-							}
-							break;
-						
-						case 6: 
-							// (, line 42
-							// next, line 42
-							if (cursor >= limit)
-							{
-								goto lab7_brk;
-							}
-							cursor++;
-							break;
-						}
-					goto replab6;
-				}
-				while (false);
+            
+            cursor = v_1;
+            // repeat, line 35
+            while (true)
+            {
+                v_5 = cursor;
+                do 
+                {
+                    // (, line 35
+                    // [, line 36
+                    bra = cursor;
+                    // substring, line 36
+                    among_var = find_among(a_0, 6);
+                    if (among_var == 0)
+                    {
+                        goto lab7_brk;
+                    }
+                    // ], line 36
+                    ket = cursor;
+                    switch (among_var)
+                    {
+                        
+                        case 0: 
+                            goto lab7_brk;
+                        
+                        case 1: 
+                            // (, line 37
+                            // <-, line 37
+                            slice_from("ss");
+                            break;
+                        
+                        case 2: 
+                            // (, line 38
+                            // <-, line 38
+                            slice_from("\u00E4");
+                            break;
+                        
+                        case 3: 
+                            // (, line 39
+                            // <-, line 39
+                            slice_from("\u00F6");
+                            break;
+                        
+                        case 4: 
+                            // (, line 40
+                            // <-, line 40
+                            slice_from("\u00FC");
+                            break;
+                        
+                        case 5: 
+                            // (, line 41
+                            // hop, line 41
+                            {
+                                int c = cursor + 2;
+                                if (0 > c || c > limit)
+                                {
+                                    goto lab7_brk;
+                                }
+                                cursor = c;
+                            }
+                            break;
+                        
+                        case 6: 
+                            // (, line 42
+                            // next, line 42
+                            if (cursor >= limit)
+                            {
+                                goto lab7_brk;
+                            }
+                            cursor++;
+                            break;
+                        }
+                    goto replab6;
+                }
+                while (false);
 
 lab7_brk: ;
-				
-				cursor = v_5;
-				goto replab6_brk;
+                
+                cursor = v_5;
+                goto replab6_brk;
 
 replab6: ;
-			}
+            }
 
 replab6_brk: ;
-			
-			return true;
-		}
-		
-		private bool r_mark_regions()
-		{
-			// (, line 48
-			I_p1 = limit;
-			I_p2 = limit;
-			// gopast, line 53
-			while (true)
-			{
-				do 
-				{
-					if (!(in_grouping(g_v, 97, 252)))
-					{
-						goto lab1_brk;
-					}
-					goto golab0_brk;
-				}
-				while (false);
+            
+            return true;
+        }
+        
+        private bool r_mark_regions()
+        {
+            // (, line 48
+            I_p1 = limit;
+            I_p2 = limit;
+            // gopast, line 53
+            while (true)
+            {
+                do 
+                {
+                    if (!(in_grouping(g_v, 97, 252)))
+                    {
+                        goto lab1_brk;
+                    }
+                    goto golab0_brk;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab0_brk: ;
-			
-			// gopast, line 53
-			while (true)
-			{
-				do 
-				{
-					if (!(out_grouping(g_v, 97, 252)))
-					{
-						goto lab3_brk;
-					}
-					goto golab2_brk;
-				}
-				while (false);
+            
+            // gopast, line 53
+            while (true)
+            {
+                do 
+                {
+                    if (!(out_grouping(g_v, 97, 252)))
+                    {
+                        goto lab3_brk;
+                    }
+                    goto golab2_brk;
+                }
+                while (false);
 
 lab3_brk: ;
-				
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab2_brk: ;
-			
-			// setmark p1, line 53
-			I_p1 = cursor;
-			// try, line 54
-			do 
-			{
-				// (, line 54
-				if (!(I_p1 < 3))
-				{
-					goto lab4_brk;
-				}
-				I_p1 = 3;
-			}
-			while (false);
+            
+            // setmark p1, line 53
+            I_p1 = cursor;
+            // try, line 54
+            do 
+            {
+                // (, line 54
+                if (!(I_p1 < 3))
+                {
+                    goto lab4_brk;
+                }
+                I_p1 = 3;
+            }
+            while (false);
 
 lab4_brk: ;
-			
-			// gopast, line 55
-			while (true)
-			{
-				do 
-				{
-					if (!(in_grouping(g_v, 97, 252)))
-					{
-						goto lab6_brk;
-					}
-					goto golab5_brk;
-				}
-				while (false);
+            
+            // gopast, line 55
+            while (true)
+            {
+                do 
+                {
+                    if (!(in_grouping(g_v, 97, 252)))
+                    {
+                        goto lab6_brk;
+                    }
+                    goto golab5_brk;
+                }
+                while (false);
 
 lab6_brk: ;
-				
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab5_brk: ;
-			
-			// gopast, line 55
-			while (true)
-			{
-				do 
-				{
-					if (!(out_grouping(g_v, 97, 252)))
-					{
-						goto lab8_brk;
-					}
-					goto golab7_brk;
-				}
-				while (false);
+            
+            // gopast, line 55
+            while (true)
+            {
+                do 
+                {
+                    if (!(out_grouping(g_v, 97, 252)))
+                    {
+                        goto lab8_brk;
+                    }
+                    goto golab7_brk;
+                }
+                while (false);
 
 lab8_brk: ;
-				
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab7_brk: ;
-			
-			// setmark p2, line 55
-			I_p2 = cursor;
-			return true;
-		}
-		
-		private bool r_postlude()
-		{
-			int among_var;
-			int v_1;
-			// repeat, line 59
-			while (true)
-			{
-				v_1 = cursor;
-				do 
-				{
-					// (, line 59
-					// [, line 61
-					bra = cursor;
-					// substring, line 61
-					among_var = find_among(a_1, 6);
-					if (among_var == 0)
-					{
-						goto lab2_brk;
-					}
-					// ], line 61
-					ket = cursor;
-					switch (among_var)
-					{
-						
-						case 0: 
-							goto lab2_brk;
-						
-						case 1: 
-							// (, line 62
-							// <-, line 62
-							slice_from("y");
-							break;
-						
-						case 2: 
-							// (, line 63
-							// <-, line 63
-							slice_from("u");
-							break;
-						
-						case 3: 
-							// (, line 64
-							// <-, line 64
-							slice_from("a");
-							break;
-						
-						case 4: 
-							// (, line 65
-							// <-, line 65
-							slice_from("o");
-							break;
-						
-						case 5: 
-							// (, line 66
-							// <-, line 66
-							slice_from("u");
-							break;
-						
-						case 6: 
-							// (, line 67
-							// next, line 67
-							if (cursor >= limit)
-							{
-								goto lab2_brk;
-							}
-							cursor++;
-							break;
-						}
-					goto replab1;
-				}
-				while (false);
+            
+            // setmark p2, line 55
+            I_p2 = cursor;
+            return true;
+        }
+        
+        private bool r_postlude()
+        {
+            int among_var;
+            int v_1;
+            // repeat, line 59
+            while (true)
+            {
+                v_1 = cursor;
+                do 
+                {
+                    // (, line 59
+                    // [, line 61
+                    bra = cursor;
+                    // substring, line 61
+                    among_var = find_among(a_1, 6);
+                    if (among_var == 0)
+                    {
+                        goto lab2_brk;
+                    }
+                    // ], line 61
+                    ket = cursor;
+                    switch (among_var)
+                    {
+                        
+                        case 0: 
+                            goto lab2_brk;
+                        
+                        case 1: 
+                            // (, line 62
+                            // <-, line 62
+                            slice_from("y");
+                            break;
+                        
+                        case 2: 
+                            // (, line 63
+                            // <-, line 63
+                            slice_from("u");
+                            break;
+                        
+                        case 3: 
+                            // (, line 64
+                            // <-, line 64
+                            slice_from("a");
+                            break;
+                        
+                        case 4: 
+                            // (, line 65
+                            // <-, line 65
+                            slice_from("o");
+                            break;
+                        
+                        case 5: 
+                            // (, line 66
+                            // <-, line 66
+                            slice_from("u");
+                            break;
+                        
+                        case 6: 
+                            // (, line 67
+                            // next, line 67
+                            if (cursor >= limit)
+                            {
+                                goto lab2_brk;
+                            }
+                            cursor++;
+                            break;
+                        }
+                    goto replab1;
+                }
+                while (false);
 
 lab2_brk: ;
-				
-				cursor = v_1;
-				goto replab1_brk;
+                
+                cursor = v_1;
+                goto replab1_brk;
 
 replab1: ;
-			}
+            }
 
 replab1_brk: ;
-			
-			return true;
-		}
-		
-		private bool r_R1()
-		{
-			if (!(I_p1 <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_R2()
-		{
-			if (!(I_p2 <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_standard_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			int v_6;
-			int v_7;
-			int v_8;
-			int v_9;
-			// (, line 77
-			// do, line 78
-			v_1 = limit - cursor;
-			do 
-			{
-				// (, line 78
-				// [, line 79
-				ket = cursor;
-				// substring, line 79
-				among_var = find_among_b(a_2, 7);
-				if (among_var == 0)
-				{
-					goto lab0_brk;
-				}
-				// ], line 79
-				bra = cursor;
-				// call R1, line 79
-				if (!r_R1())
-				{
-					goto lab0_brk;
-				}
-				switch (among_var)
-				{
-					
-					case 0: 
-						goto lab0_brk;
-					
-					case 1: 
-						// (, line 81
-						// delete, line 81
-						slice_del();
-						break;
-					
-					case 2: 
-						// (, line 84
-						if (!(in_grouping_b(g_s_ending, 98, 116)))
-						{
-							goto lab0_brk;
-						}
-						// delete, line 84
-						slice_del();
-						break;
-					}
-			}
-			while (false);
+            
+            return true;
+        }
+        
+        private bool r_R1()
+        {
+            if (!(I_p1 <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_R2()
+        {
+            if (!(I_p2 <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_standard_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            int v_6;
+            int v_7;
+            int v_8;
+            int v_9;
+            // (, line 77
+            // do, line 78
+            v_1 = limit - cursor;
+            do 
+            {
+                // (, line 78
+                // [, line 79
+                ket = cursor;
+                // substring, line 79
+                among_var = find_among_b(a_2, 7);
+                if (among_var == 0)
+                {
+                    goto lab0_brk;
+                }
+                // ], line 79
+                bra = cursor;
+                // call R1, line 79
+                if (!r_R1())
+                {
+                    goto lab0_brk;
+                }
+                switch (among_var)
+                {
+                    
+                    case 0: 
+                        goto lab0_brk;
+                    
+                    case 1: 
+                        // (, line 81
+                        // delete, line 81
+                        slice_del();
+                        break;
+                    
+                    case 2: 
+                        // (, line 84
+                        if (!(in_grouping_b(g_s_ending, 98, 116)))
+                        {
+                            goto lab0_brk;
+                        }
+                        // delete, line 84
+                        slice_del();
+                        break;
+                    }
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = limit - v_1;
-			// do, line 88
-			v_2 = limit - cursor;
-			do 
-			{
-				// (, line 88
-				// [, line 89
-				ket = cursor;
-				// substring, line 89
-				among_var = find_among_b(a_3, 4);
-				if (among_var == 0)
-				{
-					goto lab1_brk;
-				}
-				// ], line 89
-				bra = cursor;
-				// call R1, line 89
-				if (!r_R1())
-				{
-					goto lab1_brk;
-				}
-				switch (among_var)
-				{
-					
-					case 0: 
-						goto lab1_brk;
-					
-					case 1: 
-						// (, line 91
-						// delete, line 91
-						slice_del();
-						break;
-					
-					case 2: 
-						// (, line 94
-						if (!(in_grouping_b(g_st_ending, 98, 116)))
-						{
-							goto lab1_brk;
-						}
-						// hop, line 94
-						{
-							int c = cursor - 3;
-							if (limit_backward > c || c > limit)
-							{
-								goto lab1_brk;
-							}
-							cursor = c;
-						}
-						// delete, line 94
-						slice_del();
-						break;
-					}
-			}
-			while (false);
+            
+            cursor = limit - v_1;
+            // do, line 88
+            v_2 = limit - cursor;
+            do 
+            {
+                // (, line 88
+                // [, line 89
+                ket = cursor;
+                // substring, line 89
+                among_var = find_among_b(a_3, 4);
+                if (among_var == 0)
+                {
+                    goto lab1_brk;
+                }
+                // ], line 89
+                bra = cursor;
+                // call R1, line 89
+                if (!r_R1())
+                {
+                    goto lab1_brk;
+                }
+                switch (among_var)
+                {
+                    
+                    case 0: 
+                        goto lab1_brk;
+                    
+                    case 1: 
+                        // (, line 91
+                        // delete, line 91
+                        slice_del();
+                        break;
+                    
+                    case 2: 
+                        // (, line 94
+                        if (!(in_grouping_b(g_st_ending, 98, 116)))
+                        {
+                            goto lab1_brk;
+                        }
+                        // hop, line 94
+                        {
+                            int c = cursor - 3;
+                            if (limit_backward > c || c > limit)
+                            {
+                                goto lab1_brk;
+                            }
+                            cursor = c;
+                        }
+                        // delete, line 94
+                        slice_del();
+                        break;
+                    }
+            }
+            while (false);
 
 lab1_brk: ;
-			
-			cursor = limit - v_2;
-			// do, line 98
-			v_3 = limit - cursor;
-			do 
-			{
-				// (, line 98
-				// [, line 99
-				ket = cursor;
-				// substring, line 99
-				among_var = find_among_b(a_5, 8);
-				if (among_var == 0)
-				{
-					goto lab2_brk;
-				}
-				// ], line 99
-				bra = cursor;
-				// call R2, line 99
-				if (!r_R2())
-				{
-					goto lab2_brk;
-				}
-				switch (among_var)
-				{
-					
-					case 0: 
-						goto lab2_brk;
-					
-					case 1: 
-						// (, line 101
-						// delete, line 101
-						slice_del();
-						// try, line 102
-						v_4 = limit - cursor;
-						do 
-						{
-							// (, line 102
-							// [, line 102
-							ket = cursor;
-							// literal, line 102
-							if (!(eq_s_b(2, "ig")))
-							{
-								cursor = limit - v_4;
-								goto lab3_brk;
-							}
-							// ], line 102
-							bra = cursor;
-							// not, line 102
-							{
-								v_5 = limit - cursor;
-								do 
-								{
-									// literal, line 102
-									if (!(eq_s_b(1, "e")))
-									{
-										goto lab4_brk;
-									}
-									cursor = limit - v_4;
-									goto lab3_brk;
-								}
-								while (false);
+            
+            cursor = limit - v_2;
+            // do, line 98
+            v_3 = limit - cursor;
+            do 
+            {
+                // (, line 98
+                // [, line 99
+                ket = cursor;
+                // substring, line 99
+                among_var = find_among_b(a_5, 8);
+                if (among_var == 0)
+                {
+                    goto lab2_brk;
+                }
+                // ], line 99
+                bra = cursor;
+                // call R2, line 99
+                if (!r_R2())
+                {
+                    goto lab2_brk;
+                }
+                switch (among_var)
+                {
+                    
+                    case 0: 
+                        goto lab2_brk;
+                    
+                    case 1: 
+                        // (, line 101
+                        // delete, line 101
+                        slice_del();
+                        // try, line 102
+                        v_4 = limit - cursor;
+                        do 
+                        {
+                            // (, line 102
+                            // [, line 102
+                            ket = cursor;
+                            // literal, line 102
+                            if (!(eq_s_b(2, "ig")))
+                            {
+                                cursor = limit - v_4;
+                                goto lab3_brk;
+                            }
+                            // ], line 102
+                            bra = cursor;
+                            // not, line 102
+                            {
+                                v_5 = limit - cursor;
+                                do 
+                                {
+                                    // literal, line 102
+                                    if (!(eq_s_b(1, "e")))
+                                    {
+                                        goto lab4_brk;
+                                    }
+                                    cursor = limit - v_4;
+                                    goto lab3_brk;
+                                }
+                                while (false);
 
 lab4_brk: ;
-								
-								cursor = limit - v_5;
-							}
-							// call R2, line 102
-							if (!r_R2())
-							{
-								cursor = limit - v_4;
-								goto lab3_brk;
-							}
-							// delete, line 102
-							slice_del();
-						}
-						while (false);
+                                
+                                cursor = limit - v_5;
+                            }
+                            // call R2, line 102
+                            if (!r_R2())
+                            {
+                                cursor = limit - v_4;
+                                goto lab3_brk;
+                            }
+                            // delete, line 102
+                            slice_del();
+                        }
+                        while (false);
 
 lab3_brk: ;
-						
-						break;
-					
-					case 2: 
-						// (, line 105
-						// not, line 105
-						{
-							v_6 = limit - cursor;
-							do 
-							{
-								// literal, line 105
-								if (!(eq_s_b(1, "e")))
-								{
-									goto lab5_brk;
-								}
-								goto lab2_brk;
-							}
-							while (false);
+                        
+                        break;
+                    
+                    case 2: 
+                        // (, line 105
+                        // not, line 105
+                        {
+                            v_6 = limit - cursor;
+                            do 
+                            {
+                                // literal, line 105
+                                if (!(eq_s_b(1, "e")))
+                                {
+                                    goto lab5_brk;
+                                }
+                                goto lab2_brk;
+                            }
+                            while (false);
 
 lab5_brk: ;
-							
-							cursor = limit - v_6;
-						}
-						// delete, line 105
-						slice_del();
-						break;
-					
-					case 3: 
-						// (, line 108
-						// delete, line 108
-						slice_del();
-						// try, line 109
-						v_7 = limit - cursor;
-						do 
-						{
-							// (, line 109
-							// [, line 110
-							ket = cursor;
-							// or, line 110
-							do 
-							{
-								v_8 = limit - cursor;
-								do 
-								{
-									// literal, line 110
-									if (!(eq_s_b(2, "er")))
-									{
-										goto lab8_brk;
-									}
-									goto lab7_brk;
-								}
-								while (false);
+                            
+                            cursor = limit - v_6;
+                        }
+                        // delete, line 105
+                        slice_del();
+                        break;
+                    
+                    case 3: 
+                        // (, line 108
+                        // delete, line 108
+                        slice_del();
+                        // try, line 109
+                        v_7 = limit - cursor;
+                        do 
+                        {
+                            // (, line 109
+                            // [, line 110
+                            ket = cursor;
+                            // or, line 110
+                            do 
+                            {
+                                v_8 = limit - cursor;
+                                do 
+                                {
+                                    // literal, line 110
+                                    if (!(eq_s_b(2, "er")))
+                                    {
+                                        goto lab8_brk;
+                                    }
+                                    goto lab7_brk;
+                                }
+                                while (false);
 
 lab8_brk: ;
-								
-								cursor = limit - v_8;
-								// literal, line 110
-								if (!(eq_s_b(2, "en")))
-								{
-									cursor = limit - v_7;
-									goto lab6_brk;
-								}
-							}
-							while (false);
+                                
+                                cursor = limit - v_8;
+                                // literal, line 110
+                                if (!(eq_s_b(2, "en")))
+                                {
+                                    cursor = limit - v_7;
+                                    goto lab6_brk;
+                                }
+                            }
+                            while (false);
 
 lab7_brk: ;
-							
-							// ], line 110
-							bra = cursor;
-							// call R1, line 110
-							if (!r_R1())
-							{
-								cursor = limit - v_7;
-								goto lab6_brk;
-							}
-							// delete, line 110
-							slice_del();
-						}
-						while (false);
+                            
+                            // ], line 110
+                            bra = cursor;
+                            // call R1, line 110
+                            if (!r_R1())
+                            {
+                                cursor = limit - v_7;
+                                goto lab6_brk;
+                            }
+                            // delete, line 110
+                            slice_del();
+                        }
+                        while (false);
 
 lab6_brk: ;
-						
-						break;
-					
-					case 4: 
-						// (, line 114
-						// delete, line 114
-						slice_del();
-						// try, line 115
-						v_9 = limit - cursor;
-						do 
-						{
-							// (, line 115
-							// [, line 116
-							ket = cursor;
-							// substring, line 116
-							among_var = find_among_b(a_4, 2);
-							if (among_var == 0)
-							{
-								cursor = limit - v_9;
-								goto lab9_brk;
-							}
-							// ], line 116
-							bra = cursor;
-							// call R2, line 116
-							if (!r_R2())
-							{
-								cursor = limit - v_9;
-								goto lab9_brk;
-							}
-							switch (among_var)
-							{
-								
-								case 0: 
-									cursor = limit - v_9;
-									goto lab9_brk;
-								
-								case 1: 
-									// (, line 118
-									// delete, line 118
-									slice_del();
-									break;
-								}
-						}
-						while (false);
+                        
+                        break;
+                    
+                    case 4: 
+                        // (, line 114
+                        // delete, line 114
+                        slice_del();
+                        // try, line 115
+                        v_9 = limit - cursor;
+                        do 
+                        {
+                            // (, line 115
+                            // [, line 116
+                            ket = cursor;
+                            // substring, line 116
+                            among_var = find_among_b(a_4, 2);
+                            if (among_var == 0)
+                            {
+                                cursor = limit - v_9;
+                                goto lab9_brk;
+                            }
+                            // ], line 116
+                            bra = cursor;
+                            // call R2, line 116
+                            if (!r_R2())
+                            {
+                                cursor = limit - v_9;
+                                goto lab9_brk;
+                            }
+                            switch (among_var)
+                            {
+                                
+                                case 0: 
+                                    cursor = limit - v_9;
+                                    goto lab9_brk;
+                                
+                                case 1: 
+                                    // (, line 118
+                                    // delete, line 118
+                                    slice_del();
+                                    break;
+                                }
+                        }
+                        while (false);
 
 lab9_brk: ;
-						
-						break;
-					}
-			}
-			while (false);
+                        
+                        break;
+                    }
+            }
+            while (false);
 
 lab2_brk: ;
-			
-			cursor = limit - v_3;
-			return true;
-		}
-		
-		public override bool Stem()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			// (, line 128
-			// do, line 129
-			v_1 = cursor;
-			do 
-			{
-				// call prelude, line 129
-				if (!r_prelude())
-				{
-					goto lab0_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_3;
+            return true;
+        }
+        
+        public override bool Stem()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            // (, line 128
+            // do, line 129
+            v_1 = cursor;
+            do 
+            {
+                // call prelude, line 129
+                if (!r_prelude())
+                {
+                    goto lab0_brk;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = v_1;
-			// do, line 130
-			v_2 = cursor;
-			do 
-			{
-				// call mark_regions, line 130
-				if (!r_mark_regions())
-				{
-					goto lab1_brk;
-				}
-			}
-			while (false);
+            
+            cursor = v_1;
+            // do, line 130
+            v_2 = cursor;
+            do 
+            {
+                // call mark_regions, line 130
+                if (!r_mark_regions())
+                {
+                    goto lab1_brk;
+                }
+            }
+            while (false);
 
 lab1_brk: ;
-			
-			cursor = v_2;
-			// backwards, line 131
-			limit_backward = cursor; cursor = limit;
-			// do, line 132
-			v_3 = limit - cursor;
-			do 
-			{
-				// call standard_suffix, line 132
-				if (!r_standard_suffix())
-				{
-					goto lab2_brk;
-				}
-			}
-			while (false);
+            
+            cursor = v_2;
+            // backwards, line 131
+            limit_backward = cursor; cursor = limit;
+            // do, line 132
+            v_3 = limit - cursor;
+            do 
+            {
+                // call standard_suffix, line 132
+                if (!r_standard_suffix())
+                {
+                    goto lab2_brk;
+                }
+            }
+            while (false);
 
 lab2_brk: ;
-			
-			cursor = limit - v_3;
-			cursor = limit_backward; // do, line 133
-			v_4 = cursor;
-			do 
-			{
-				// call postlude, line 133
-				if (!r_postlude())
-				{
-					goto lab3_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_3;
+            cursor = limit_backward; // do, line 133
+            v_4 = cursor;
+            do 
+            {
+                // call postlude, line 133
+                if (!r_postlude())
+                {
+                    goto lab3_brk;
+                }
+            }
+            while (false);
 
 lab3_brk: ;
-			
-			cursor = v_4;
-			return true;
-		}
-	}
+            
+            cursor = v_4;
+            return true;
+        }
+    }
 }

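For anyone spot-checking that the whitespace conversion left the generated code intact: these stemmers are all driven the same way through the SnowballProgram base class, and the Stem() body above is the whole pipeline, prelude, mark_regions, then standard_suffix run backwards, then postlude. A minimal usage sketch follows; it assumes the port keeps the Java original's SetCurrent/Stem/GetCurrent members, so verify against the actual base class in this tree before relying on it.

    using SF.Snowball.Ext;

    class StemmerSmokeTest
    {
        static void Main()
        {
            // Assumed API, mirroring the Java Snowball runner:
            // SetCurrent loads the word, Stem() runs the pipeline,
            // GetCurrent returns the stemmed form.
            var stemmer = new German2Stemmer();
            stemmer.SetCurrent("h\u00E4user");
            if (stemmer.Stem())
            {
                System.Console.WriteLine(stemmer.GetCurrent());
            }
        }
    }
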
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/GermanStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/GermanStemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/GermanStemmer.cs
index 250bd6f..16f888a 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/GermanStemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/GermanStemmer.cs
@@ -24,836 +24,836 @@ namespace SF.Snowball.Ext
 #pragma warning disable 162
     
     /// <summary> Generated class implementing code defined by a snowball script.</summary>
-	public class GermanStemmer : SnowballProgram
-	{
-		public GermanStemmer()
-		{
-			InitBlock();
-		}
-		private void  InitBlock()
-		{
-			a_0 = new Among[]{new Among("", - 1, 6, "", this), new Among("U", 0, 2, "", this), new Among("Y", 0, 1, "", this), new Among("\u00E4", 0, 3, "", this), new Among("\u00F6", 0, 4, "", this), new Among("\u00FC", 0, 5, "", this)};
-			a_1 = new Among[]{new Among("e", - 1, 1, "", this), new Among("em", - 1, 1, "", this), new Among("en", - 1, 1, "", this), new Among("ern", - 1, 1, "", this), new Among("er", - 1, 1, "", this), new Among("s", - 1, 2, "", this), new Among("es", 5, 1, "", this)};
-			a_2 = new Among[]{new Among("en", - 1, 1, "", this), new Among("er", - 1, 1, "", this), new Among("st", - 1, 2, "", this), new Among("est", 2, 1, "", this)};
-			a_3 = new Among[]{new Among("ig", - 1, 1, "", this), new Among("lich", - 1, 1, "", this)};
-			a_4 = new Among[]{new Among("end", - 1, 1, "", this), new Among("ig", - 1, 2, "", this), new Among("ung", - 1, 1, "", this), new Among("lich", - 1, 3, "", this), new Among("isch", - 1, 2, "", this), new Among("ik", - 1, 2, "", this), new Among("heit", - 1, 3, "", this), new Among("keit", - 1, 4, "", this)};
-		}
-		
-		private Among[] a_0;
-		private Among[] a_1;
-		private Among[] a_2;
-		private Among[] a_3;
-		private Among[] a_4;
-		private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (8), (char) (0), (char) (32), (char) (8)};
-		private static readonly char[] g_s_ending = new char[]{(char) (117), (char) (30), (char) (5)};
-		private static readonly char[] g_st_ending = new char[]{(char) (117), (char) (30), (char) (4)};
-		
-		private int I_p2;
-		private int I_p1;
-		
-		protected internal virtual void  copy_from(GermanStemmer other)
-		{
-			I_p2 = other.I_p2;
-			I_p1 = other.I_p1;
-			base.copy_from(other);
-		}
-		
-		private bool r_prelude()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			int v_6;
-			// (, line 28
-			// test, line 30
-			v_1 = cursor;
-			// repeat, line 30
-			while (true)
-			{
-				v_2 = cursor;
-				do 
-				{
-					// (, line 30
-					// or, line 33
-					do 
-					{
-						v_3 = cursor;
-						do 
-						{
-							// (, line 31
-							// [, line 32
-							bra = cursor;
-							// literal, line 32
-							if (!(eq_s(1, "\u00DF")))
-							{
-								goto lab3_brk;
-							}
-							// ], line 32
-							ket = cursor;
-							// <-, line 32
-							slice_from("ss");
-							goto lab2_brk;
-						}
-						while (false);
+    public class GermanStemmer : SnowballProgram
+    {
+        public GermanStemmer()
+        {
+            InitBlock();
+        }
+        private void  InitBlock()
+        {
+            a_0 = new Among[]{new Among("", - 1, 6, "", this), new Among("U", 0, 2, "", this), new Among("Y", 0, 1, "", this), new Among("\u00E4", 0, 3, "", this), new Among("\u00F6", 0, 4, "", this), new Among("\u00FC", 0, 5, "", this)};
+            a_1 = new Among[]{new Among("e", - 1, 1, "", this), new Among("em", - 1, 1, "", this), new Among("en", - 1, 1, "", this), new Among("ern", - 1, 1, "", this), new Among("er", - 1, 1, "", this), new Among("s", - 1, 2, "", this), new Among("es", 5, 1, "", this)};
+            a_2 = new Among[]{new Among("en", - 1, 1, "", this), new Among("er", - 1, 1, "", this), new Among("st", - 1, 2, "", this), new Among("est", 2, 1, "", this)};
+            a_3 = new Among[]{new Among("ig", - 1, 1, "", this), new Among("lich", - 1, 1, "", this)};
+            a_4 = new Among[]{new Among("end", - 1, 1, "", this), new Among("ig", - 1, 2, "", this), new Among("ung", - 1, 1, "", this), new Among("lich", - 1, 3, "", this), new Among("isch", - 1, 2, "", this), new Among("ik", - 1, 2, "", this), new Among("heit", - 1, 3, "", this), new Among("keit", - 1, 4, "", this)};
+        }
+        
+        private Among[] a_0;
+        private Among[] a_1;
+        private Among[] a_2;
+        private Among[] a_3;
+        private Among[] a_4;
+        private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (8), (char) (0), (char) (32), (char) (8)};
+        private static readonly char[] g_s_ending = new char[]{(char) (117), (char) (30), (char) (5)};
+        private static readonly char[] g_st_ending = new char[]{(char) (117), (char) (30), (char) (4)};
+        
+        private int I_p2;
+        private int I_p1;
+        
+        protected internal virtual void  copy_from(GermanStemmer other)
+        {
+            I_p2 = other.I_p2;
+            I_p1 = other.I_p1;
+            base.copy_from(other);
+        }
+        
+        private bool r_prelude()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            int v_6;
+            // (, line 28
+            // test, line 30
+            v_1 = cursor;
+            // repeat, line 30
+            while (true)
+            {
+                v_2 = cursor;
+                do 
+                {
+                    // (, line 30
+                    // or, line 33
+                    do 
+                    {
+                        v_3 = cursor;
+                        do 
+                        {
+                            // (, line 31
+                            // [, line 32
+                            bra = cursor;
+                            // literal, line 32
+                            if (!(eq_s(1, "\u00DF")))
+                            {
+                                goto lab3_brk;
+                            }
+                            // ], line 32
+                            ket = cursor;
+                            // <-, line 32
+                            slice_from("ss");
+                            goto lab2_brk;
+                        }
+                        while (false);
 
 lab3_brk: ;
-						
-						cursor = v_3;
-						// next, line 33
-						if (cursor >= limit)
-						{
-							goto lab1_brk;
-						}
-						cursor++;
-					}
-					while (false);
+                        
+                        cursor = v_3;
+                        // next, line 33
+                        if (cursor >= limit)
+                        {
+                            goto lab1_brk;
+                        }
+                        cursor++;
+                    }
+                    while (false);
 
 lab2_brk: ;
-					
-					goto replab0;
-				}
-				while (false);
+                    
+                    goto replab0;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = v_2;
-				goto replab0_brk;
+                
+                cursor = v_2;
+                goto replab0_brk;
 
 replab0: ;
-			}
+            }
 
 replab0_brk: ;
-			
-			cursor = v_1;
-			// repeat, line 36
-			while (true)
-			{
-				v_4 = cursor;
-				do 
-				{
-					// goto, line 36
-					while (true)
-					{
-						v_5 = cursor;
-						do 
-						{
-							// (, line 36
-							if (!(in_grouping(g_v, 97, 252)))
-							{
-								goto lab7_brk;
-							}
-							// [, line 37
-							bra = cursor;
-							// or, line 37
-							do 
-							{
-								v_6 = cursor;
-								do 
-								{
-									// (, line 37
-									// literal, line 37
-									if (!(eq_s(1, "u")))
-									{
-										goto lab9_brk;
-									}
-									// ], line 37
-									ket = cursor;
-									if (!(in_grouping(g_v, 97, 252)))
-									{
-										goto lab9_brk;
-									}
-									// <-, line 37
-									slice_from("U");
-									goto lab8_brk;
-								}
-								while (false);
+            
+            cursor = v_1;
+            // repeat, line 36
+            while (true)
+            {
+                v_4 = cursor;
+                do 
+                {
+                    // goto, line 36
+                    while (true)
+                    {
+                        v_5 = cursor;
+                        do 
+                        {
+                            // (, line 36
+                            if (!(in_grouping(g_v, 97, 252)))
+                            {
+                                goto lab7_brk;
+                            }
+                            // [, line 37
+                            bra = cursor;
+                            // or, line 37
+                            do 
+                            {
+                                v_6 = cursor;
+                                do 
+                                {
+                                    // (, line 37
+                                    // literal, line 37
+                                    if (!(eq_s(1, "u")))
+                                    {
+                                        goto lab9_brk;
+                                    }
+                                    // ], line 37
+                                    ket = cursor;
+                                    if (!(in_grouping(g_v, 97, 252)))
+                                    {
+                                        goto lab9_brk;
+                                    }
+                                    // <-, line 37
+                                    slice_from("U");
+                                    goto lab8_brk;
+                                }
+                                while (false);
 
 lab9_brk: ;
-								
-								cursor = v_6;
-								// (, line 38
-								// literal, line 38
-								if (!(eq_s(1, "y")))
-								{
-									goto lab7_brk;
-								}
-								// ], line 38
-								ket = cursor;
-								if (!(in_grouping(g_v, 97, 252)))
-								{
-									goto lab7_brk;
-								}
-								// <-, line 38
-								slice_from("Y");
-							}
-							while (false);
+                                
+                                cursor = v_6;
+                                // (, line 38
+                                // literal, line 38
+                                if (!(eq_s(1, "y")))
+                                {
+                                    goto lab7_brk;
+                                }
+                                // ], line 38
+                                ket = cursor;
+                                if (!(in_grouping(g_v, 97, 252)))
+                                {
+                                    goto lab7_brk;
+                                }
+                                // <-, line 38
+                                slice_from("Y");
+                            }
+                            while (false);
 
 lab8_brk: ;
-							
-							cursor = v_5;
-							goto golab6_brk;
-						}
-						while (false);
+                            
+                            cursor = v_5;
+                            goto golab6_brk;
+                        }
+                        while (false);
 
 lab7_brk: ;
-						
-						cursor = v_5;
-						if (cursor >= limit)
-						{
-							goto lab5_brk;
-						}
-						cursor++;
-					}
+                        
+                        cursor = v_5;
+                        if (cursor >= limit)
+                        {
+                            goto lab5_brk;
+                        }
+                        cursor++;
+                    }
 
 golab6_brk: ;
-					
-					goto replab4;
-				}
-				while (false);
+                    
+                    goto replab4;
+                }
+                while (false);
 
 lab5_brk: ;
-				
-				cursor = v_4;
-				goto replab4_brk;
+                
+                cursor = v_4;
+                goto replab4_brk;
 
 replab4: ;
-			}
+            }
 
 replab4_brk: ;
-			
-			return true;
-		}
-		
-		private bool r_mark_regions()
-		{
-			// (, line 42
-			I_p1 = limit;
-			I_p2 = limit;
-			// gopast, line 47
-			while (true)
-			{
-				do 
-				{
-					if (!(in_grouping(g_v, 97, 252)))
-					{
-						goto lab1_brk;
-					}
-					goto golab0_brk;
-				}
-				while (false);
+            
+            return true;
+        }
+        
+        private bool r_mark_regions()
+        {
+            // (, line 42
+            I_p1 = limit;
+            I_p2 = limit;
+            // gopast, line 47
+            while (true)
+            {
+                do 
+                {
+                    if (!(in_grouping(g_v, 97, 252)))
+                    {
+                        goto lab1_brk;
+                    }
+                    goto golab0_brk;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab0_brk: ;
-			
-			// gopast, line 47
-			while (true)
-			{
-				do 
-				{
-					if (!(out_grouping(g_v, 97, 252)))
-					{
-						goto lab3_brk;
-					}
-					goto golab2_brk;
-				}
-				while (false);
+            
+            // gopast, line 47
+            while (true)
+            {
+                do 
+                {
+                    if (!(out_grouping(g_v, 97, 252)))
+                    {
+                        goto lab3_brk;
+                    }
+                    goto golab2_brk;
+                }
+                while (false);
 
 lab3_brk: ;
-				
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab2_brk: ;
-			
-			// setmark p1, line 47
-			I_p1 = cursor;
-			// try, line 48
-			do 
-			{
-				// (, line 48
-				if (!(I_p1 < 3))
-				{
-					goto lab4_brk;
-				}
-				I_p1 = 3;
-			}
-			while (false);
+            
+            // setmark p1, line 47
+            I_p1 = cursor;
+            // try, line 48
+            do 
+            {
+                // (, line 48
+                if (!(I_p1 < 3))
+                {
+                    goto lab4_brk;
+                }
+                I_p1 = 3;
+            }
+            while (false);
 
 lab4_brk: ;
-			
-			// gopast, line 49
-			while (true)
-			{
-				do 
-				{
-					if (!(in_grouping(g_v, 97, 252)))
-					{
-						goto lab6_brk;
-					}
-					goto golab5_brk;
-				}
-				while (false);
+            
+            // gopast, line 49
+            while (true)
+            {
+                do 
+                {
+                    if (!(in_grouping(g_v, 97, 252)))
+                    {
+                        goto lab6_brk;
+                    }
+                    goto golab5_brk;
+                }
+                while (false);
 
 lab6_brk: ;
-				
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab5_brk: ;
-			
-			// gopast, line 49
-			while (true)
-			{
-				do 
-				{
-					if (!(out_grouping(g_v, 97, 252)))
-					{
-						goto lab8_brk;
-					}
-					goto golab7_brk;
-				}
-				while (false);
+            
+            // gopast, line 49
+            while (true)
+            {
+                do 
+                {
+                    if (!(out_grouping(g_v, 97, 252)))
+                    {
+                        goto lab8_brk;
+                    }
+                    goto golab7_brk;
+                }
+                while (false);
 
 lab8_brk: ;
-				
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab7_brk: ;
-			
-			// setmark p2, line 49
-			I_p2 = cursor;
-			return true;
-		}
-		
-		private bool r_postlude()
-		{
-			int among_var;
-			int v_1;
-			// repeat, line 53
-			while (true)
-			{
-				v_1 = cursor;
-				do 
-				{
-					// (, line 53
-					// [, line 55
-					bra = cursor;
-					// substring, line 55
-					among_var = find_among(a_0, 6);
-					if (among_var == 0)
-					{
-						goto lab10_brk;
-					}
-					// ], line 55
-					ket = cursor;
-					switch (among_var)
-					{
-						
-						case 0: 
-							goto lab10_brk;
-						
-						case 1: 
-							// (, line 56
-							// <-, line 56
-							slice_from("y");
-							break;
-						
-						case 2: 
-							// (, line 57
-							// <-, line 57
-							slice_from("u");
-							break;
-						
-						case 3: 
-							// (, line 58
-							// <-, line 58
-							slice_from("a");
-							break;
-						
-						case 4: 
-							// (, line 59
-							// <-, line 59
-							slice_from("o");
-							break;
-						
-						case 5: 
-							// (, line 60
-							// <-, line 60
-							slice_from("u");
-							break;
-						
-						case 6: 
-							// (, line 61
-							// next, line 61
-							if (cursor >= limit)
-							{
-								goto lab10_brk;
-							}
-							cursor++;
-							break;
-						}
-					goto replab1;
-				}
-				while (false);
+            
+            // setmark p2, line 49
+            I_p2 = cursor;
+            return true;
+        }
+        
+        private bool r_postlude()
+        {
+            int among_var;
+            int v_1;
+            // repeat, line 53
+            while (true)
+            {
+                v_1 = cursor;
+                do 
+                {
+                    // (, line 53
+                    // [, line 55
+                    bra = cursor;
+                    // substring, line 55
+                    among_var = find_among(a_0, 6);
+                    if (among_var == 0)
+                    {
+                        goto lab10_brk;
+                    }
+                    // ], line 55
+                    ket = cursor;
+                    switch (among_var)
+                    {
+                        
+                        case 0: 
+                            goto lab10_brk;
+                        
+                        case 1: 
+                            // (, line 56
+                            // <-, line 56
+                            slice_from("y");
+                            break;
+                        
+                        case 2: 
+                            // (, line 57
+                            // <-, line 57
+                            slice_from("u");
+                            break;
+                        
+                        case 3: 
+                            // (, line 58
+                            // <-, line 58
+                            slice_from("a");
+                            break;
+                        
+                        case 4: 
+                            // (, line 59
+                            // <-, line 59
+                            slice_from("o");
+                            break;
+                        
+                        case 5: 
+                            // (, line 60
+                            // <-, line 60
+                            slice_from("u");
+                            break;
+                        
+                        case 6: 
+                            // (, line 61
+                            // next, line 61
+                            if (cursor >= limit)
+                            {
+                                goto lab10_brk;
+                            }
+                            cursor++;
+                            break;
+                        }
+                    goto replab1;
+                }
+                while (false);
 
 lab10_brk: ;
-				
-				cursor = v_1;
-				goto replab1_brk;
+                
+                cursor = v_1;
+                goto replab1_brk;
 
 replab1: ;
-			}
+            }
 
 replab1_brk: ;
-			
-			return true;
-		}
-		
-		private bool r_R1()
-		{
-			if (!(I_p1 <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_R2()
-		{
-			if (!(I_p2 <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_standard_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			int v_6;
-			int v_7;
-			int v_8;
-			int v_9;
-			// (, line 71
-			// do, line 72
-			v_1 = limit - cursor;
-			do 
-			{
-				// (, line 72
-				// [, line 73
-				ket = cursor;
-				// substring, line 73
-				among_var = find_among_b(a_1, 7);
-				if (among_var == 0)
-				{
-					goto lab0_brk;
-				}
-				// ], line 73
-				bra = cursor;
-				// call R1, line 73
-				if (!r_R1())
-				{
-					goto lab0_brk;
-				}
-				switch (among_var)
-				{
-					
-					case 0: 
-						goto lab0_brk;
-					
-					case 1: 
-						// (, line 75
-						// delete, line 75
-						slice_del();
-						break;
-					
-					case 2: 
-						// (, line 78
-						if (!(in_grouping_b(g_s_ending, 98, 116)))
-						{
-							goto lab0_brk;
-						}
-						// delete, line 78
-						slice_del();
-						break;
-					}
-			}
-			while (false);
+            
+            return true;
+        }
+        
+        private bool r_R1()
+        {
+            if (!(I_p1 <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_R2()
+        {
+            if (!(I_p2 <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_standard_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            int v_6;
+            int v_7;
+            int v_8;
+            int v_9;
+            // (, line 71
+            // do, line 72
+            v_1 = limit - cursor;
+            do 
+            {
+                // (, line 72
+                // [, line 73
+                ket = cursor;
+                // substring, line 73
+                among_var = find_among_b(a_1, 7);
+                if (among_var == 0)
+                {
+                    goto lab0_brk;
+                }
+                // ], line 73
+                bra = cursor;
+                // call R1, line 73
+                if (!r_R1())
+                {
+                    goto lab0_brk;
+                }
+                switch (among_var)
+                {
+                    
+                    case 0: 
+                        goto lab0_brk;
+                    
+                    case 1: 
+                        // (, line 75
+                        // delete, line 75
+                        slice_del();
+                        break;
+                    
+                    case 2: 
+                        // (, line 78
+                        if (!(in_grouping_b(g_s_ending, 98, 116)))
+                        {
+                            goto lab0_brk;
+                        }
+                        // delete, line 78
+                        slice_del();
+                        break;
+                    }
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = limit - v_1;
-			// do, line 82
-			v_2 = limit - cursor;
-			do 
-			{
-				// (, line 82
-				// [, line 83
-				ket = cursor;
-				// substring, line 83
-				among_var = find_among_b(a_2, 4);
-				if (among_var == 0)
-				{
-					goto lab1_brk;
-				}
-				// ], line 83
-				bra = cursor;
-				// call R1, line 83
-				if (!r_R1())
-				{
-					goto lab1_brk;
-				}
-				switch (among_var)
-				{
-					
-					case 0: 
-						goto lab1_brk;
-					
-					case 1: 
-						// (, line 85
-						// delete, line 85
-						slice_del();
-						break;
-					
-					case 2: 
-						// (, line 88
-						if (!(in_grouping_b(g_st_ending, 98, 116)))
-						{
-							goto lab1_brk;
-						}
-						// hop, line 88
-						{
-							int c = cursor - 3;
-							if (limit_backward > c || c > limit)
-							{
-								goto lab1_brk;
-							}
-							cursor = c;
-						}
-						// delete, line 88
-						slice_del();
-						break;
-					}
-			}
-			while (false);
+            
+            cursor = limit - v_1;
+            // do, line 82
+            v_2 = limit - cursor;
+            do 
+            {
+                // (, line 82
+                // [, line 83
+                ket = cursor;
+                // substring, line 83
+                among_var = find_among_b(a_2, 4);
+                if (among_var == 0)
+                {
+                    goto lab1_brk;
+                }
+                // ], line 83
+                bra = cursor;
+                // call R1, line 83
+                if (!r_R1())
+                {
+                    goto lab1_brk;
+                }
+                switch (among_var)
+                {
+                    
+                    case 0: 
+                        goto lab1_brk;
+                    
+                    case 1: 
+                        // (, line 85
+                        // delete, line 85
+                        slice_del();
+                        break;
+                    
+                    case 2: 
+                        // (, line 88
+                        if (!(in_grouping_b(g_st_ending, 98, 116)))
+                        {
+                            goto lab1_brk;
+                        }
+                        // hop, line 88
+                        {
+                            int c = cursor - 3;
+                            if (limit_backward > c || c > limit)
+                            {
+                                goto lab1_brk;
+                            }
+                            cursor = c;
+                        }
+                        // delete, line 88
+                        slice_del();
+                        break;
+                    }
+            }
+            while (false);
 
 lab1_brk: ;
-			
-			cursor = limit - v_2;
-			// do, line 92
-			v_3 = limit - cursor;
-			do 
-			{
-				// (, line 92
-				// [, line 93
-				ket = cursor;
-				// substring, line 93
-				among_var = find_among_b(a_4, 8);
-				if (among_var == 0)
-				{
-					goto lab2_brk;
-				}
-				// ], line 93
-				bra = cursor;
-				// call R2, line 93
-				if (!r_R2())
-				{
-					goto lab2_brk;
-				}
-				switch (among_var)
-				{
-					
-					case 0: 
+            
+            cursor = limit - v_2;
+            // do, line 92
+            v_3 = limit - cursor;
+            do 
+            {
+                // (, line 92
+                // [, line 93
+                ket = cursor;
+                // substring, line 93
+                among_var = find_among_b(a_4, 8);
+                if (among_var == 0)
+                {
+                    goto lab2_brk;
+                }
+                // ], line 93
+                bra = cursor;
+                // call R2, line 93
+                if (!r_R2())
+                {
+                    goto lab2_brk;
+                }
+                switch (among_var)
+                {
+                    
+                    case 0: 
 
                         goto lab2_brk;
-					
-					case 1: 
-						// (, line 95
-						// delete, line 95
-						slice_del();
-						// try, line 96
-						v_4 = limit - cursor;
-						do 
-						{
-							// (, line 96
-							// [, line 96
-							ket = cursor;
-							// literal, line 96
-							if (!(eq_s_b(2, "ig")))
-							{
-								cursor = limit - v_4;
-								goto lab3_brk;
-							}
-							// ], line 96
-							bra = cursor;
-							// not, line 96
-							{
-								v_5 = limit - cursor;
-								do 
-								{
-									// literal, line 96
-									if (!(eq_s_b(1, "e")))
-									{
-										goto lab4_brk;
-									}
-									cursor = limit - v_4;
-									goto lab3_brk;
-								}
-								while (false);
+                    
+                    case 1: 
+                        // (, line 95
+                        // delete, line 95
+                        slice_del();
+                        // try, line 96
+                        v_4 = limit - cursor;
+                        do 
+                        {
+                            // (, line 96
+                            // [, line 96
+                            ket = cursor;
+                            // literal, line 96
+                            if (!(eq_s_b(2, "ig")))
+                            {
+                                cursor = limit - v_4;
+                                goto lab3_brk;
+                            }
+                            // ], line 96
+                            bra = cursor;
+                            // not, line 96
+                            {
+                                v_5 = limit - cursor;
+                                do 
+                                {
+                                    // literal, line 96
+                                    if (!(eq_s_b(1, "e")))
+                                    {
+                                        goto lab4_brk;
+                                    }
+                                    cursor = limit - v_4;
+                                    goto lab3_brk;
+                                }
+                                while (false);
 
 lab4_brk: ;
-								
-								cursor = limit - v_5;
-							}
-							// call R2, line 96
-							if (!r_R2())
-							{
-								cursor = limit - v_4;
-								goto lab3_brk;
-							}
-							// delete, line 96
-							slice_del();
-						}
-						while (false);
+                                
+                                cursor = limit - v_5;
+                            }
+                            // call R2, line 96
+                            if (!r_R2())
+                            {
+                                cursor = limit - v_4;
+                                goto lab3_brk;
+                            }
+                            // delete, line 96
+                            slice_del();
+                        }
+                        while (false);
 
 lab3_brk: ;
-						
-						break;
-					
-					case 2: 
-						// (, line 99
-						// not, line 99
-						{
-							v_6 = limit - cursor;
-							do 
-							{
-								// literal, line 99
-								if (!(eq_s_b(1, "e")))
-								{
-									goto lab5_brk;
-								}
-								goto lab2_brk;
-							}
-							while (false);
+                        
+                        break;
+                    
+                    case 2: 
+                        // (, line 99
+                        // not, line 99
+                        {
+                            v_6 = limit - cursor;
+                            do 
+                            {
+                                // literal, line 99
+                                if (!(eq_s_b(1, "e")))
+                                {
+                                    goto lab5_brk;
+                                }
+                                goto lab2_brk;
+                            }
+                            while (false);
 
 lab5_brk: ;
-							
-							cursor = limit - v_6;
-						}
-						// delete, line 99
-						slice_del();
-						break;
-					
-					case 3: 
-						// (, line 102
-						// delete, line 102
-						slice_del();
-						// try, line 103
-						v_7 = limit - cursor;
-						do 
-						{
-							// (, line 103
-							// [, line 104
-							ket = cursor;
-							// or, line 104
-							do 
-							{
-								v_8 = limit - cursor;
-								do 
-								{
-									// literal, line 104
-									if (!(eq_s_b(2, "er")))
-									{
-										goto lab8_brk;
-									}
-									goto lab7_brk;
-								}
-								while (false);
+                            
+                            cursor = limit - v_6;
+                        }
+                        // delete, line 99
+                        slice_del();
+                        break;
+                    
+                    case 3: 
+                        // (, line 102
+                        // delete, line 102
+                        slice_del();
+                        // try, line 103
+                        v_7 = limit - cursor;
+                        do 
+                        {
+                            // (, line 103
+                            // [, line 104
+                            ket = cursor;
+                            // or, line 104
+                            do 
+                            {
+                                v_8 = limit - cursor;
+                                do 
+                                {
+                                    // literal, line 104
+                                    if (!(eq_s_b(2, "er")))
+                                    {
+                                        goto lab8_brk;
+                                    }
+                                    goto lab7_brk;
+                                }
+                                while (false);
 
 lab8_brk: ;
-								
-								cursor = limit - v_8;
-								// literal, line 104
-								if (!(eq_s_b(2, "en")))
-								{
-									cursor = limit - v_7;
-									goto lab6_brk;
-								}
-							}
-							while (false);
+                                
+                                cursor = limit - v_8;
+                                // literal, line 104
+                                if (!(eq_s_b(2, "en")))
+                                {
+                                    cursor = limit - v_7;
+                                    goto lab6_brk;
+                                }
+                            }
+                            while (false);
 
 lab7_brk: ;
-							
-							// ], line 104
-							bra = cursor;
-							// call R1, line 104
-							if (!r_R1())
-							{
-								cursor = limit - v_7;
-								goto lab6_brk;
-							}
-							// delete, line 104
-							slice_del();
-						}
-						while (false);
+                            
+                            // ], line 104
+                            bra = cursor;
+                            // call R1, line 104
+                            if (!r_R1())
+                            {
+                                cursor = limit - v_7;
+                                goto lab6_brk;
+                            }
+                            // delete, line 104
+                            slice_del();
+                        }
+                        while (false);
 
 lab6_brk: ;
-						
-						break;
-					
-					case 4: 
-						// (, line 108
-						// delete, line 108
-						slice_del();
-						// try, line 109
-						v_9 = limit - cursor;
-						do 
-						{
-							// (, line 109
-							// [, line 110
-							ket = cursor;
-							// substring, line 110
-							among_var = find_among_b(a_3, 2);
-							if (among_var == 0)
-							{
-								cursor = limit - v_9;
-								goto lab9_brk;
-							}
-							// ], line 110
-							bra = cursor;
-							// call R2, line 110
-							if (!r_R2())
-							{
-								cursor = limit - v_9;
-								goto lab9_brk;
-							}
-							switch (among_var)
-							{
-								
-								case 0: 
-									cursor = limit - v_9;
-									goto lab9_brk;
-								
-								case 1: 
-									// (, line 112
-									// delete, line 112
-									slice_del();
-									break;
-								}
-						}
-						while (false);
+                        
+                        break;
+                    
+                    case 4: 
+                        // (, line 108
+                        // delete, line 108
+                        slice_del();
+                        // try, line 109
+                        v_9 = limit - cursor;
+                        do 
+                        {
+                            // (, line 109
+                            // [, line 110
+                            ket = cursor;
+                            // substring, line 110
+                            among_var = find_among_b(a_3, 2);
+                            if (among_var == 0)
+                            {
+                                cursor = limit - v_9;
+                                goto lab9_brk;
+                            }
+                            // ], line 110
+                            bra = cursor;
+                            // call R2, line 110
+                            if (!r_R2())
+                            {
+                                cursor = limit - v_9;
+                                goto lab9_brk;
+                            }
+                            switch (among_var)
+                            {
+                                
+                                case 0: 
+                                    cursor = limit - v_9;
+                                    goto lab9_brk;
+                                
+                                case 1: 
+                                    // (, line 112
+                                    // delete, line 112
+                                    slice_del();
+                                    break;
+                                }
+                        }
+                        while (false);
 
 lab9_brk: ;
-						
-						break;
-					}
-			}
-			while (false);
+                        
+                        break;
+                    }
+            }
+            while (false);
 
 lab2_brk: ;
-			
-			cursor = limit - v_3;
-			return true;
-		}
-		
-		public override bool Stem()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			// (, line 122
-			// do, line 123
-			v_1 = cursor;
-			do 
-			{
-				// call prelude, line 123
-				if (!r_prelude())
-				{
-					goto lab0_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_3;
+            return true;
+        }
+        
+        public override bool Stem()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            // (, line 122
+            // do, line 123
+            v_1 = cursor;
+            do 
+            {
+                // call prelude, line 123
+                if (!r_prelude())
+                {
+                    goto lab0_brk;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = v_1;
-			// do, line 124
-			v_2 = cursor;
-			do 
-			{
-				// call mark_regions, line 124
-				if (!r_mark_regions())
-				{
-					goto lab1_brk;
-				}
-			}
-			while (false);
+            
+            cursor = v_1;
+            // do, line 124
+            v_2 = cursor;
+            do 
+            {
+                // call mark_regions, line 124
+                if (!r_mark_regions())
+                {
+                    goto lab1_brk;
+                }
+            }
+            while (false);
 
 lab1_brk: ;
-			
-			cursor = v_2;
-			// backwards, line 125
-			limit_backward = cursor; cursor = limit;
-			// do, line 126
-			v_3 = limit - cursor;
-			do 
-			{
-				// call standard_suffix, line 126
-				if (!r_standard_suffix())
-				{
-					goto lab2_brk;
-				}
-			}
-			while (false);
+            
+            cursor = v_2;
+            // backwards, line 125
+            limit_backward = cursor; cursor = limit;
+            // do, line 126
+            v_3 = limit - cursor;
+            do 
+            {
+                // call standard_suffix, line 126
+                if (!r_standard_suffix())
+                {
+                    goto lab2_brk;
+                }
+            }
+            while (false);
 
 lab2_brk: ;
-			
-			cursor = limit - v_3;
-			cursor = limit_backward; // do, line 127
-			v_4 = cursor;
-			do 
-			{
-				// call postlude, line 127
-				if (!r_postlude())
-				{
-					goto lab3_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_3;
+            cursor = limit_backward; // do, line 127
+            v_4 = cursor;
+            do 
+            {
+                // call postlude, line 127
+                if (!r_postlude())
+                {
+                    goto lab3_brk;
+                }
+            }
+            while (false);
 
 lab3_brk: ;
-			
-			cursor = v_4;
-			return true;
-		}
-	}
+            
+            cursor = v_4;
+            return true;
+        }
+    }
 }


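The class above is machine-generated from the Snowball German grammar, which is why it reads as goto-threaded control flow: r_prelude rewrites "\u00DF" to "ss" and temporarily uppercases "u"/"y" between vowels to "U"/"Y" so they are not treated as vowels later; r_mark_regions records the R1 and R2 offsets (R1 starts after the first non-vowel that follows a vowel, clamped to position 3 by the try at line 48, and R2 is the same construction continued from R1) that r_R1/r_R2 use to gate suffix removal; r_standard_suffix strips endings right-to-left; and r_postlude undoes the prelude markers and folds "ä"/"ö"/"ü" back to "a"/"o"/"u". A minimal driver sketch, assuming the SetCurrent/Stem/GetCurrent surface of the shared SnowballProgram base class; the namespace and the sample words are illustrative, not taken from this commit:

    using System;
    using SF.Snowball.Ext; // assumed location of the generated stemmers in this tree

    internal static class GermanStemmerDemo
    {
        private static void Main()
        {
            var stemmer = new GermanStemmer();
            foreach (var word in new[] { "häuser", "größe", "aufeinander" })
            {
                stemmer.SetCurrent(word); // load the word into the stemmer's buffer
                stemmer.Stem();           // prelude -> mark_regions -> standard_suffix -> postlude
                Console.WriteLine(word + " -> " + stemmer.GetCurrent());
            }
        }
    }
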
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Util/FixedBitSet.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Util/FixedBitSet.cs b/src/contrib/Spatial/Util/FixedBitSet.cs
index 8d58d7e..bfc5c45 100644
--- a/src/contrib/Spatial/Util/FixedBitSet.cs
+++ b/src/contrib/Spatial/Util/FixedBitSet.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -22,7 +22,7 @@ using Lucene.Net.Util;
 
 namespace Lucene.Net.Spatial.Util
 {
-	/* BitSet of fixed length (numBits), backed by accessible
+    /* BitSet of fixed length (numBits), backed by accessible
  *  ({@link #getBits}) long[], accessed with an int index,
  *  implementing Bits and DocIdSet.  Unlike {@link
  *  OpenBitSet} this bit set does not auto-expand, cannot
@@ -31,424 +31,424 @@ namespace Lucene.Net.Spatial.Util
  *
  * @lucene.internal
  **/
-	public class FixedBitSet : DocIdSet, IBits
-	{
-		private readonly BitArray bits;
-
-		/// <summary>
-		/// returns the number of 64 bit words it would take to hold numBits
-		/// </summary>
-		/// <param name="numBits"></param>
-		/// <returns></returns>
-		public static int bits2words(int numBits)
-		{
-			var numLong = (int)((uint)numBits >> 6);
-			if ((numBits & 63) != 0)
-			{
-				numLong++;
-			}
-			return numLong;
-		}
-
-		public FixedBitSet(int numBits)
-		{
-			bits = new BitArray(numBits);
-		}
-
-		/// <summary>
-		/// Makes full copy.
-		/// </summary>
-		/// <param name="other"></param>
-		public FixedBitSet(FixedBitSet other)
-		{
-			bits = new BitArray(other.bits);
-		}
-
-		public IBits Bits()
-		{
-			return this;
-		}
-
-		public int Length()
-		{
-			return bits.Length;
-		}
-
-		public override bool IsCacheable
-		{
-			get { return true; }
-		}
-
-		/// <summary>
-		/// Returns number of set bits.  NOTE: this visits every
-		/// long in the backing bits array, and the result is not
-		/// internally cached!
-		/// </summary>
-		/// <returns></returns>
-		public int Cardinality()
-		{
-			int ret = 0;
-			for (var i = 0; i < bits.Length; i++)
-			{
-				if (bits[i]) ret++;
-			}
-			return ret;
-		}
-
-		public bool Get(int index)
-		{
-			return bits[index];
-		}
-
-		public void Set(int index)
-		{
-			bits.Set(index, true);
-		}
-
-		public bool GetAndSet(int index)
-		{
-			var ret = bits[index];
-			bits.Set(index, true);
-			return ret;
-		}
-
-		public void Clear(int index)
-		{
-			bits.Set(index, false);
-		}
-
-		public bool GetAndClear(int index)
-		{
-			var ret = bits[index];
-			bits.Set(index, false);
-			return ret;
-		}
-
-		/// <summary>
-		/// Returns the index of the first set bit starting at the index specified.
-		/// -1 is returned if there are no more set bits.
-		/// </summary>
-		/// <param name="index"></param>
-		/// <returns></returns>
-		public int NextSetBit(int index)
-		{
-			if (index >= bits.Length || index < 0)
-				throw new ArgumentException("Invalid index", "index");
-
-			for (var i = index; i < bits.Length; i++)
-			{
-				if (bits[i]) return i;
-			}
-
-			return -1;
-		}
-
-		/* Returns the index of the last set bit before or on the index specified.
-		 *  -1 is returned if there are no more set bits.
-		 */
-		public int PrevSetBit(int index)
-		{
-			if (index >= bits.Length || index < 0)
-				throw new ArgumentException("Invalid index", "index");
-
-			for (var i = index; i >= 0; i--)
-			{
-				if (bits[i]) return i;
-			}
-
-			return -1;
-		}
-
-		/* Does in-place OR of the bits provided by the
-		 *  iterator. */
-		//public void Or(DocIdSetIterator iter)
-		//{
-		//    if (iter is OpenBitSetIterator && iter.DocID() == -1)
-		//    {
-		//        var obs = (OpenBitSetIterator)iter;
-		//        Or(obs.arr, obs.words);
-		//        // advance after last doc that would be accepted if standard
-		//        // iteration is used (to exhaust it):
-		//        obs.Advance(bits.Length);
-		//    }
-		//    else
-		//    {
-		//        int doc;
-		//        while ((doc = iter.NextDoc()) < bits.Length)
-		//        {
-		//            Set(doc);
-		//        }
-		//    }
-		//}
-
-		/* this = this OR other */
-		public void Or(FixedBitSet other)
-		{
-			Or(other.bits, other.bits.Length);
-		}
-
-		private void Or(BitArray otherArr, int otherLen)
-		{
-			var thisArr = this.bits;
-			int pos = Math.Min(thisArr.Length, otherLen);
-			while (--pos >= 0)
-			{
-				thisArr[pos] |= otherArr[pos];
-			}
-		}
-
-		/* Does in-place AND of the bits provided by the
-		 *  iterator. */
-		//public void And(DocIdSetIterator iter)
-		//{
-		//    if (iter is OpenBitSetIterator && iter.DocID() == -1)
-		//    {
-		//        var obs = (OpenBitSetIterator)iter;
-		//        And(obs.arr, obs.words);
-		//        // advance after last doc that would be accepted if standard
-		//        // iteration is used (to exhaust it):
-		//        obs.Advance(bits.Length);
-		//    }
-		//    else
-		//    {
-		//        if (bits.Length == 0) return;
-		//        int disiDoc, bitSetDoc = NextSetBit(0);
-		//        while (bitSetDoc != -1 && (disiDoc = iter.Advance(bitSetDoc)) < bits.Length)
-		//        {
-		//            Clear(bitSetDoc, disiDoc);
-		//            disiDoc++;
-		//            bitSetDoc = (disiDoc < bits.Length) ? NextSetBit(disiDoc) : -1;
-		//        }
-		//        if (bitSetDoc != -1)
-		//        {
-		//            Clear(bitSetDoc, bits.Length);
-		//        }
-		//    }
-		//}
-
-		/* this = this AND other */
-		public void And(FixedBitSet other)
-		{
-			And(other.bits, other.bits.Length);
-		}
-
-		private void And(BitArray otherArr, int otherLen)
-		{
-			var thisArr = this.bits;
-			int pos = Math.Min(thisArr.Length, otherLen);
-			while (--pos >= 0)
-			{
-				thisArr[pos] &= otherArr[pos];
-			}
-			if (thisArr.Length > otherLen)
-			{
-				for (var i = otherLen; i < thisArr.Length; i++)
-				{
-					thisArr[i] = false;
-				}
-			}
-		}
-
-		/* Does in-place AND NOT of the bits provided by the
-		 *  iterator. */
-		//public void AndNot(DocIdSetIterator iter)
-		//{
-		//    var obs = iter as OpenBitSetIterator;
-		//    if (obs != null && iter.DocID() == -1)
-		//    {
-		//        AndNot(obs.arr, obs.words);
-		//        // advance after last doc that would be accepted if standard
-		//        // iteration is used (to exhaust it):
-		//        obs.Advance(bits.Length);
-		//    }
-		//    else
-		//    {
-		//        int doc;
-		//        while ((doc = iter.NextDoc()) < bits.Length)
-		//        {
-		//            Clear(doc);
-		//        }
-		//    }
-		//}
-
-		/* this = this AND NOT other */
-		public void AndNot(FixedBitSet other)
-		{
-			AndNot(other.bits, other.bits.Length);
-		}
-
-		private void AndNot(BitArray otherArr, int otherLen)
-		{
-			var thisArr = this.bits;
-			int pos = Math.Min(thisArr.Length, otherLen);
-			while (--pos >= 0)
-			{
-				thisArr[pos] &= !otherArr[pos];
-			}
-		}
-
-		// NOTE: no .isEmpty() here because that's trappy (ie,
-		// typically isEmpty is low cost, but this one wouldn't
-		// be)
-
-		/* Flips a range of bits
-		 *
-		 * @param startIndex lower index
-		 * @param endIndex one-past the last bit to flip
-		 */
-		//      public void Flip(int startIndex, int endIndex) {
-		//  Debug.Assert(startIndex >= 0 && startIndex < numBits);
-		//  Debug.Assert(endIndex >= 0 && endIndex <= numBits);
-		//  if (endIndex <= startIndex) {
-		//    return;
-		//  }
-
-		//  int startWord = startIndex >> 6;
-		//  int endWord = (endIndex-1) >> 6;
-
-		//  /* Grrr, java shifting wraps around so -1L>>>64 == -1
-		//   * for that reason, make sure not to use endmask if the bits to flip will
-		//   * be zero in the last word (redefine endWord to be the last changed...)
-		//  long startmask = -1L << (startIndex & 0x3f);     // example: 11111...111000
-		//  long endmask = -1L >>> (64-(endIndex & 0x3f));   // example: 00111...111111
-		//  ***/
-
-		//  long startmask = -1L << startIndex;
-		//  long endmask =  -1L >>> -endIndex;  // 64-(endIndex&0x3f) is the same as -endIndex due to wrap
-
-		//  if (startWord == endWord) {
-		//    bits[startWord] ^= (startmask & endmask);
-		//    return;
-		//  }
-
-		//  bits[startWord] ^= startmask;
-
-		//  for (var i=startWord+1; i<endWord; i++) {
-		//    bits[i] = ~bits[i];
-		//  }
-
-		//  bits[endWord] ^= endmask;
-		//}
-
-		/* Sets a range of bits
-		 *
-		 * @param startIndex lower index
-		 * @param endIndex one-past the last bit to set
-		 */
-		public void Set(int startIndex, int endIndex)
-		{
-			// Naive implementation
-			for (int i = startIndex; i < endIndex; i++)
-			{
-				Set(i);
-			}
-		}
-
-		//      public void Set(int startIndex, int endIndex) {
-		//  Debug.Assert(startIndex >= 0 && startIndex < numBits);
-		//  Debug.Assert(endIndex >= 0 && endIndex <= numBits);
-		//  if (endIndex <= startIndex) {
-		//    return;
-		//  }
-
-		//  int startWord = startIndex >> 6;
-		//  int endWord = (endIndex-1) >> 6;
-
-		//  long startmask = -1L << startIndex;
-		//  long endmask = -1L >>> -endIndex;  // 64-(endIndex&0x3f) is the same as -endIndex due to wrap
-
-		//  if (startWord == endWord) {
-		//    bits[startWord] |= (startmask & endmask);
-		//    return;
-		//  }
-
-		//  bits[startWord] |= startmask;
-		//  Arrays.Fill(bits, startWord+1, endWord, -1L);
-		//  bits[endWord] |= endmask;
-		//}
-
-		/* Clears a range of bits.
-		 *
-		 * @param startIndex lower index
-		 * @param endIndex one-past the last bit to clear
-		 */
-		public void Clear(int startIndex, int endIndex)
-		{
-			for (int i = startIndex; i < endIndex; i++)
-			{
-				Clear(i);
-			}
-		}
-
-		//@Override
-		public FixedBitSet Clone()
-		{
-			return new FixedBitSet(this);
-		}
-
-		/* returns true if both sets have the same bits set */
-		public override bool Equals(Object o)
-		{
-			if (this == o)
-			{
-				return true;
-			}
-
-			var other = o as FixedBitSet;
-			if (other == null)
-			{
-				return false;
-			}
-
-			return bits.Equals(other.bits);
-		}
-
-		public override int GetHashCode()
-		{
-			return bits.GetHashCode();
-		}
-
-		public override DocIdSetIterator Iterator()
-		{
-			return new FixedBitSetIterator(this);
-		}
-
-		/// <summary>
-		/// A FixedBitSet Iterator implementation
-		/// </summary>
-		public class FixedBitSetIterator : DocIdSetIterator
-		{
-			private int curDocId = -1;
-			private readonly IEnumerator enumerator;
-
-			public FixedBitSetIterator(FixedBitSet bitset)
-			{
-				enumerator = bitset.bits.GetEnumerator();
-			}
-
-			public override int DocID()
-			{
-				return curDocId;
-			}
-
-			public override int NextDoc()
-			{
-				while (enumerator.MoveNext())
-				{
-					++curDocId;
-					if ((bool)enumerator.Current) return curDocId;
-				}
-				return curDocId = NO_MORE_DOCS;
-			}
-
-			public override int Advance(int target)
-			{
-				int doc;
-				while ((doc = NextDoc()) < target)
-				{
-				}
-				return doc;
-			}
-		}
-	}
+    public class FixedBitSet : DocIdSet, IBits
+    {
+        private readonly BitArray bits;
+
+        /// <summary>
+        /// returns the number of 64 bit words it would take to hold numBits
+        /// </summary>
+        /// <param name="numBits"></param>
+        /// <returns></returns>
+        public static int bits2words(int numBits)
+        {
+            var numLong = (int)((uint)numBits >> 6);
+            if ((numBits & 63) != 0)
+            {
+                numLong++;
+            }
+            return numLong;
+        }
+
+        public FixedBitSet(int numBits)
+        {
+            bits = new BitArray(numBits);
+        }
+
+        /// <summary>
+        /// Makes full copy.
+        /// </summary>
+        /// <param name="other"></param>
+        public FixedBitSet(FixedBitSet other)
+        {
+            bits = new BitArray(other.bits);
+        }
+
+        public IBits Bits()
+        {
+            return this;
+        }
+
+        public int Length()
+        {
+            return bits.Length;
+        }
+
+        public override bool IsCacheable
+        {
+            get { return true; }
+        }
+
+        /// <summary>
+        /// Returns number of set bits.  NOTE: this visits every
+        /// long in the backing bits array, and the result is not
+        /// internally cached!
+        /// </summary>
+        /// <returns></returns>
+        public int Cardinality()
+        {
+            int ret = 0;
+            for (var i = 0; i < bits.Length; i++)
+            {
+                if (bits[i]) ret++;
+            }
+            return ret;
+        }
+
+        public bool Get(int index)
+        {
+            return bits[index];
+        }
+
+        public void Set(int index)
+        {
+            bits.Set(index, true);
+        }
+
+        public bool GetAndSet(int index)
+        {
+            var ret = bits[index];
+            bits.Set(index, true);
+            return ret;
+        }
+
+        public void Clear(int index)
+        {
+            bits.Set(index, false);
+        }
+
+        public bool GetAndClear(int index)
+        {
+            var ret = bits[index];
+            bits.Set(index, false);
+            return ret;
+        }
+
+        /// <summary>
+        /// Returns the index of the first set bit starting at the index specified.
+        /// -1 is returned if there are no more set bits.
+        /// </summary>
+        /// <param name="index"></param>
+        /// <returns></returns>
+        public int NextSetBit(int index)
+        {
+            if (index >= bits.Length || index < 0)
+                throw new ArgumentException("Invalid index", "index");
+
+            for (var i = index; i < bits.Length; i++)
+            {
+                if (bits[i]) return i;
+            }
+
+            return -1;
+        }
+
+        /* Returns the index of the last set bit before or on the index specified.
+         *  -1 is returned if there are no more set bits.
+         */
+        public int PrevSetBit(int index)
+        {
+            if (index >= bits.Length || index < 0)
+                throw new ArgumentException("Invalid index", "index");
+
+            for (var i = index; i >= 0; i--)
+            {
+                if (bits[i]) return i;
+            }
+
+            return -1;
+        }
+
+        /* Does in-place OR of the bits provided by the
+         *  iterator. */
+        //public void Or(DocIdSetIterator iter)
+        //{
+        //    if (iter is OpenBitSetIterator && iter.DocID() == -1)
+        //    {
+        //        var obs = (OpenBitSetIterator)iter;
+        //        Or(obs.arr, obs.words);
+        //        // advance after last doc that would be accepted if standard
+        //        // iteration is used (to exhaust it):
+        //        obs.Advance(bits.Length);
+        //    }
+        //    else
+        //    {
+        //        int doc;
+        //        while ((doc = iter.NextDoc()) < bits.Length)
+        //        {
+        //            Set(doc);
+        //        }
+        //    }
+        //}
+
+        /* this = this OR other */
+        public void Or(FixedBitSet other)
+        {
+            Or(other.bits, other.bits.Length);
+        }
+
+        private void Or(BitArray otherArr, int otherLen)
+        {
+            var thisArr = this.bits;
+            int pos = Math.Min(thisArr.Length, otherLen);
+            while (--pos >= 0)
+            {
+                thisArr[pos] |= otherArr[pos];
+            }
+        }
+
+        /* Does in-place AND of the bits provided by the
+         *  iterator. */
+        //public void And(DocIdSetIterator iter)
+        //{
+        //    if (iter is OpenBitSetIterator && iter.DocID() == -1)
+        //    {
+        //        var obs = (OpenBitSetIterator)iter;
+        //        And(obs.arr, obs.words);
+        //        // advance after last doc that would be accepted if standard
+        //        // iteration is used (to exhaust it):
+        //        obs.Advance(bits.Length);
+        //    }
+        //    else
+        //    {
+        //        if (bits.Length == 0) return;
+        //        int disiDoc, bitSetDoc = NextSetBit(0);
+        //        while (bitSetDoc != -1 && (disiDoc = iter.Advance(bitSetDoc)) < bits.Length)
+        //        {
+        //            Clear(bitSetDoc, disiDoc);
+        //            disiDoc++;
+        //            bitSetDoc = (disiDoc < bits.Length) ? NextSetBit(disiDoc) : -1;
+        //        }
+        //        if (bitSetDoc != -1)
+        //        {
+        //            Clear(bitSetDoc, bits.Length);
+        //        }
+        //    }
+        //}
+
+        /* this = this AND other */
+        public void And(FixedBitSet other)
+        {
+            And(other.bits, other.bits.Length);
+        }
+
+        private void And(BitArray otherArr, int otherLen)
+        {
+            var thisArr = this.bits;
+            int pos = Math.Min(thisArr.Length, otherLen);
+            while (--pos >= 0)
+            {
+                thisArr[pos] &= otherArr[pos];
+            }
+            if (thisArr.Length > otherLen)
+            {
+                for (var i = otherLen; i < thisArr.Length; i++)
+                {
+                    thisArr[i] = false;
+                }
+            }
+        }
+
+        /* Does in-place AND NOT of the bits provided by the
+         *  iterator. */
+        //public void AndNot(DocIdSetIterator iter)
+        //{
+        //    var obs = iter as OpenBitSetIterator;
+        //    if (obs != null && iter.DocID() == -1)
+        //    {
+        //        AndNot(obs.arr, obs.words);
+        //        // advance after last doc that would be accepted if standard
+        //        // iteration is used (to exhaust it):
+        //        obs.Advance(bits.Length);
+        //    }
+        //    else
+        //    {
+        //        int doc;
+        //        while ((doc = iter.NextDoc()) < bits.Length)
+        //        {
+        //            Clear(doc);
+        //        }
+        //    }
+        //}
+
+        /* this = this AND NOT other */
+        public void AndNot(FixedBitSet other)
+        {
+            AndNot(other.bits, other.bits.Length);
+        }
+
+        private void AndNot(BitArray otherArr, int otherLen)
+        {
+            var thisArr = this.bits;
+            int pos = Math.Min(thisArr.Length, otherLen);
+            while (--pos >= 0)
+            {
+                thisArr[pos] &= !otherArr[pos];
+            }
+        }
+
+        // NOTE: no .isEmpty() here because that's trappy (ie,
+        // typically isEmpty is low cost, but this one wouldn't
+        // be)
+
+        /* Flips a range of bits
+         *
+         * @param startIndex lower index
+         * @param endIndex one-past the last bit to flip
+         */
+        //      public void Flip(int startIndex, int endIndex) {
+        //  Debug.Assert(startIndex >= 0 && startIndex < numBits);
+        //  Debug.Assert(endIndex >= 0 && endIndex <= numBits);
+        //  if (endIndex <= startIndex) {
+        //    return;
+        //  }
+
+        //  int startWord = startIndex >> 6;
+        //  int endWord = (endIndex-1) >> 6;
+
+        //  /* Grrr, java shifting wraps around so -1L>>>64 == -1
+        //   * for that reason, make sure not to use endmask if the bits to flip will
+        //   * be zero in the last word (redefine endWord to be the last changed...)
+        //  long startmask = -1L << (startIndex & 0x3f);     // example: 11111...111000
+        //  long endmask = -1L >>> (64-(endIndex & 0x3f));   // example: 00111...111111
+        //  ***/
+
+        //  long startmask = -1L << startIndex;
+        //  long endmask =  -1L >>> -endIndex;  // 64-(endIndex&0x3f) is the same as -endIndex due to wrap
+
+        //  if (startWord == endWord) {
+        //    bits[startWord] ^= (startmask & endmask);
+        //    return;
+        //  }
+
+        //  bits[startWord] ^= startmask;
+
+        //  for (var i=startWord+1; i<endWord; i++) {
+        //    bits[i] = ~bits[i];
+        //  }
+
+        //  bits[endWord] ^= endmask;
+        //}
+
+        /* Sets a range of bits
+         *
+         * @param startIndex lower index
+         * @param endIndex one-past the last bit to set
+         */
+        public void Set(int startIndex, int endIndex)
+        {
+            // Naive implementation
+            for (int i = startIndex; i < endIndex; i++)
+            {
+                Set(i);
+            }
+        }
+
+        //      public void Set(int startIndex, int endIndex) {
+        //  Debug.Assert(startIndex >= 0 && startIndex < numBits);
+        //  Debug.Assert(endIndex >= 0 && endIndex <= numBits);
+        //  if (endIndex <= startIndex) {
+        //    return;
+        //  }
+
+        //  int startWord = startIndex >> 6;
+        //  int endWord = (endIndex-1) >> 6;
+
+        //  long startmask = -1L << startIndex;
+        //  long endmask = -1L >>> -endIndex;  // 64-(endIndex&0x3f) is the same as -endIndex due to wrap
+
+        //  if (startWord == endWord) {
+        //    bits[startWord] |= (startmask & endmask);
+        //    return;
+        //  }
+
+        //  bits[startWord] |= startmask;
+        //  Arrays.Fill(bits, startWord+1, endWord, -1L);
+        //  bits[endWord] |= endmask;
+        //}
+
+        /* Clears a range of bits.
+         *
+         * @param startIndex lower index
+         * @param endIndex one-past the last bit to clear
+         */
+        public void Clear(int startIndex, int endIndex)
+        {
+            for (int i = startIndex; i < endIndex; i++)
+            {
+                Clear(i);
+            }
+        }
+
+        //@Override
+        public FixedBitSet Clone()
+        {
+            return new FixedBitSet(this);
+        }
+
+        /* returns true if both sets have the same bits set */
+        public override bool Equals(Object o)
+        {
+            if (this == o)
+            {
+                return true;
+            }
+
+            var other = o as FixedBitSet;
+            if (other == null)
+            {
+                return false;
+            }
+
+            return bits.Equals(other.bits);
+        }
+
+        public override int GetHashCode()
+        {
+            return bits.GetHashCode();
+        }
+
+        public override DocIdSetIterator Iterator()
+        {
+            return new FixedBitSetIterator(this);
+        }
+
+        /// <summary>
+        /// A FixedBitSet Iterator implementation
+        /// </summary>
+        public class FixedBitSetIterator : DocIdSetIterator
+        {
+            private int curDocId = -1;
+            private readonly IEnumerator enumerator;
+
+            public FixedBitSetIterator(FixedBitSet bitset)
+            {
+                enumerator = bitset.bits.GetEnumerator();
+            }
+
+            public override int DocID()
+            {
+                return curDocId;
+            }
+
+            public override int NextDoc()
+            {
+                while (enumerator.MoveNext())
+                {
+                    ++curDocId;
+                    if ((bool)enumerator.Current) return curDocId;
+                }
+                return curDocId = NO_MORE_DOCS;
+            }
+
+            public override int Advance(int target)
+            {
+                int doc;
+                while ((doc = NextDoc()) < target)
+                {
+                }
+                return doc;
+            }
+        }
+    }
 }

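Two things are worth noting about this C# port of FixedBitSet: it is backed by a System.Collections.BitArray rather than the long[] of the Java original (which is why bits2words survives but goes unused here, and why the word-at-a-time Flip/Set/Clear range operations remain commented out), and Cardinality(), NextSetBit(), and PrevSetBit() are straight linear scans with no internal caching. A short usage sketch, relying only on the members visible in the diff above:

    using System;
    using Lucene.Net.Search;       // DocIdSetIterator.NO_MORE_DOCS
    using Lucene.Net.Spatial.Util;

    internal static class FixedBitSetDemo
    {
        private static void Main()
        {
            var bits = new FixedBitSet(128);
            bits.Set(3);
            bits.Set(64, 67);                       // sets bits 64..66; endIndex is one past the last bit
            Console.WriteLine(bits.Cardinality());  // 4 -- computed by visiting every bit
            Console.WriteLine(bits.NextSetBit(4));  // 64
            Console.WriteLine(bits.PrevSetBit(63)); // 3

            var iter = bits.Iterator();             // enumerates set bits in increasing order
            int doc;
            while ((doc = iter.NextDoc()) != DocIdSetIterator.NO_MORE_DOCS)
            {
                Console.Write(doc + " ");           // prints: 3 64 65 66
            }
        }
    }
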
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Util/FunctionQuery.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Util/FunctionQuery.cs b/src/contrib/Spatial/Util/FunctionQuery.cs
index a382cbb..64eda65 100644
--- a/src/contrib/Spatial/Util/FunctionQuery.cs
+++ b/src/contrib/Spatial/Util/FunctionQuery.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -23,193 +23,193 @@ using Lucene.Net.Search.Function;
 
 namespace Lucene.Net.Spatial.Util
 {
-	/// <summary>
-	/// Port of Solr's FunctionQuery (v1.4)
-	/// 
-	/// Returns a score for each document based on a ValueSource,
-	/// often some function of the value of a field.
-	/// 
-	/// <b>Note: This API is experimental and may change in non backward-compatible ways in the future</b>
-	/// </summary>
-	public class FunctionQuery : Query
-	{
-		protected readonly ValueSource func;
-
-		public FunctionQuery(ValueSource func)
-		{
-			this.func = func;
-		}
-
-		/// <summary>
-		/// 
-		/// </summary>
-		/// <returns>The associated ValueSource</returns>
-		public ValueSource GetValueSource()
-		{
-			return func;
-		}
-
-		public override Query Rewrite(Index.IndexReader reader)
-		{
-			return this;
-		}
-
-		public override void ExtractTerms(System.Collections.Generic.ISet<Term> terms)
-		{
-			//base.ExtractTerms(terms);
-		}
-
-		protected class FunctionWeight : Weight
-		{
-			protected Searcher searcher;
-			protected float queryNorm;
-			protected float queryWeight;
-			protected readonly FunctionQuery enclosingInstance;
-
-			public FunctionWeight(Searcher searcher, FunctionQuery q)
-			{
-				enclosingInstance = q;
-				this.searcher = searcher;
-				//q.func.CreateWeight(searcher);
-			}
-
-			internal float GetQueryNorm()
-			{
-				return queryNorm;
-			}
-
-			public override Query Query
-			{
-				get { return enclosingInstance; }
-			}
-
-			public override float Value
-			{
-				get { return queryWeight; }
-			}
-
-			public override float GetSumOfSquaredWeights()
-			{
-				queryWeight = enclosingInstance.Boost;
-				return queryWeight * queryWeight;
-			}
-
-			public override void Normalize(float norm)
-			{
-				this.queryNorm = norm;
-				queryWeight *= this.queryNorm;
-			}
-
-			public override Scorer Scorer(IndexReader reader, bool scoreDocsInOrder, bool topScorer)
-			{
-				return new AllScorer(enclosingInstance.GetSimilarity(searcher), reader, this);
-			}
-
-			public override Explanation Explain(IndexReader reader, int doc)
-			{
-				return ((AllScorer)Scorer(reader, true, true)).Explain(doc);
-			}
-		}
-
-		protected class AllScorer : Scorer
-		{
-			readonly IndexReader reader;
-			readonly FunctionWeight weight;
-			readonly int maxDoc;
-			readonly float qWeight;
-			int doc = -1;
-			readonly DocValues vals;
-			readonly bool hasDeletions;
-
-			public AllScorer(Similarity similarity, IndexReader reader, FunctionWeight w)
-				: base(similarity)
-			{
-				this.weight = w;
-				this.qWeight = w.Value;
-				this.reader = reader;
-				this.maxDoc = reader.MaxDoc;
-				this.hasDeletions = reader.HasDeletions;
-				vals = ((FunctionQuery)w.Query).func.GetValues(reader);
-			}
-
-			public override int DocID()
-			{
-				return doc;
-			}
-
-			// instead of matching all docs, we could also embed a query.
-			// the score could either ignore the subscore, or boost it.
-			// Containment:  floatline(foo:myTerm, "myFloatField", 1.0, 0.0f)
-			// Boost:        foo:myTerm^floatline("myFloatField",1.0,0.0f)
-			public override int NextDoc()
-			{
-				for (; ; )
-				{
-					++doc;
-					if (doc >= maxDoc)
-					{
-						return doc = NO_MORE_DOCS;
-					}
-					if (hasDeletions && reader.IsDeleted(doc)) continue;
-					return doc;
-				}
-			}
-
-			public override int Advance(int target)
-			{
-				// this will work even if target==NO_MORE_DOCS
-				doc = target - 1;
-				return NextDoc();
-			}
-
-			public override float Score()
-			{
-				float score = qWeight * vals.FloatVal(doc);
-
-				// Current Lucene priority queues can't handle NaN and -Infinity, so
-				// map to -Float.MAX_VALUE. This conditional handles both -infinity
-				// and NaN since comparisons with NaN are always false.
-				return score > float.NegativeInfinity ? score : -float.MaxValue;
-			}
-
-			public /*override*/ Explanation Explain(int doc)
-			{
-				float sc = qWeight * vals.FloatVal(doc);
-
-				Explanation result = new ComplexExplanation
-				  (true, sc, "FunctionQuery(" + ((FunctionQuery)weight.Query).func + "), product of:");
-
-				result.AddDetail(vals.Explain(doc));
-				result.AddDetail(new Explanation(weight.Query.Boost, "boost"));
-				result.AddDetail(new Explanation(weight.GetQueryNorm(), "queryNorm"));
-				return result;
-			}
-		}
-
-		public override Weight CreateWeight(Searcher searcher)
-		{
-			return new FunctionQuery.FunctionWeight(searcher, this);
-		}
-
-		public override string ToString(string field)
-		{
-			float boost = Boost;
-			return (boost != 1.0 ? "(" : "") + func.ToString()
-					+ (boost == 1.0 ? "" : ")^" + boost);
-		}
-
-		public override bool Equals(object o)
-		{
-			var other = o as FunctionQuery;
-
-			if (other == null) return false;
-
-			return this.Boost == other.Boost && this.func.Equals(other.func);
-		}
-
-		public override int GetHashCode()
-		{
-			return (int) (func.GetHashCode() * 31 + BitConverter.DoubleToInt64Bits(Boost));
-		}
-	}
+    /// <summary>
+    /// Port of Solr's FunctionQuery (v1.4)
+    /// 
+    /// Returns a score for each document based on a ValueSource,
+    /// often some function of the value of a field.
+    /// 
+    /// <b>Note: This API is experimental and may change in non-backward-compatible ways in the future.</b>
+    /// </summary>
+    public class FunctionQuery : Query
+    {
+        protected readonly ValueSource func;
+
+        public FunctionQuery(ValueSource func)
+        {
+            this.func = func;
+        }
+
+        /// <summary>
+        /// Gets the ValueSource this query derives its scores from.
+        /// </summary>
+        /// <returns>The associated ValueSource</returns>
+        public ValueSource GetValueSource()
+        {
+            return func;
+        }
+
+        public override Query Rewrite(Index.IndexReader reader)
+        {
+            return this;
+        }
+
+        public override void ExtractTerms(System.Collections.Generic.ISet<Term> terms)
+        {
+            //base.ExtractTerms(terms);
+        }
+
+        protected class FunctionWeight : Weight
+        {
+            protected Searcher searcher;
+            protected float queryNorm;
+            protected float queryWeight;
+            protected readonly FunctionQuery enclosingInstance;
+
+            public FunctionWeight(Searcher searcher, FunctionQuery q)
+            {
+                enclosingInstance = q;
+                this.searcher = searcher;
+                //q.func.CreateWeight(searcher);
+            }
+
+            internal float GetQueryNorm()
+            {
+                return queryNorm;
+            }
+
+            public override Query Query
+            {
+                get { return enclosingInstance; }
+            }
+
+            public override float Value
+            {
+                get { return queryWeight; }
+            }
+
+            public override float GetSumOfSquaredWeights()
+            {
+                queryWeight = enclosingInstance.Boost;
+                return queryWeight * queryWeight;
+            }
+
+            public override void Normalize(float norm)
+            {
+                this.queryNorm = norm;
+                queryWeight *= this.queryNorm;
+            }
+
+            public override Scorer Scorer(IndexReader reader, bool scoreDocsInOrder, bool topScorer)
+            {
+                return new AllScorer(enclosingInstance.GetSimilarity(searcher), reader, this);
+            }
+
+            public override Explanation Explain(IndexReader reader, int doc)
+            {
+                return ((AllScorer)Scorer(reader, true, true)).Explain(doc);
+            }
+        }
+
+        protected class AllScorer : Scorer
+        {
+            readonly IndexReader reader;
+            readonly FunctionWeight weight;
+            readonly int maxDoc;
+            readonly float qWeight;
+            int doc = -1;
+            readonly DocValues vals;
+            readonly bool hasDeletions;
+
+            public AllScorer(Similarity similarity, IndexReader reader, FunctionWeight w)
+                : base(similarity)
+            {
+                this.weight = w;
+                this.qWeight = w.Value;
+                this.reader = reader;
+                this.maxDoc = reader.MaxDoc;
+                this.hasDeletions = reader.HasDeletions;
+                vals = ((FunctionQuery)w.Query).func.GetValues(reader);
+            }
+
+            public override int DocID()
+            {
+                return doc;
+            }
+
+            // instead of matching all docs, we could also embed a query.
+            // the score could either ignore the subscore, or boost it.
+            // Containment:  floatline(foo:myTerm, "myFloatField", 1.0, 0.0f)
+            // Boost:        foo:myTerm^floatline("myFloatField",1.0,0.0f)
+            public override int NextDoc()
+            {
+                for (; ; )
+                {
+                    ++doc;
+                    if (doc >= maxDoc)
+                    {
+                        return doc = NO_MORE_DOCS;
+                    }
+                    if (hasDeletions && reader.IsDeleted(doc)) continue;
+                    return doc;
+                }
+            }
+
+            public override int Advance(int target)
+            {
+                // this will work even if target==NO_MORE_DOCS
+                doc = target - 1;
+                return NextDoc();
+            }
+
+            public override float Score()
+            {
+                float score = qWeight * vals.FloatVal(doc);
+
+                // Current Lucene priority queues can't handle NaN and -Infinity, so
+                // map to -Float.MAX_VALUE. This conditional handles both -infinity
+                // and NaN since comparisons with NaN are always false.
+                return score > float.NegativeInfinity ? score : -float.MaxValue;
+            }
+
+            public /*override*/ Explanation Explain(int doc)
+            {
+                float sc = qWeight * vals.FloatVal(doc);
+
+                Explanation result = new ComplexExplanation
+                  (true, sc, "FunctionQuery(" + ((FunctionQuery)weight.Query).func + "), product of:");
+
+                result.AddDetail(vals.Explain(doc));
+                result.AddDetail(new Explanation(weight.Query.Boost, "boost"));
+                result.AddDetail(new Explanation(weight.GetQueryNorm(), "queryNorm"));
+                return result;
+            }
+        }
+
+        public override Weight CreateWeight(Searcher searcher)
+        {
+            return new FunctionQuery.FunctionWeight(searcher, this);
+        }
+
+        public override string ToString(string field)
+        {
+            float boost = Boost;
+            return (boost != 1.0 ? "(" : "") + func.ToString()
+                    + (boost == 1.0 ? "" : ")^" + boost);
+        }
+
+        public override bool Equals(object o)
+        {
+            var other = o as FunctionQuery;
+
+            if (other == null) return false;
+
+            return this.Boost == other.Boost && this.func.Equals(other.func);
+        }
+
+        public override int GetHashCode()
+        {
+            return (int) (func.GetHashCode() * 31 + BitConverter.DoubleToInt64Bits(Boost));
+        }
+    }
 }
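
[Editorial note] The only subtle line in AllScorer is the score clamp: per the comment above, Lucene's priority queues cannot order NaN or negative infinity, and the single comparison catches both because every comparison against NaN is false. A stand-alone check of that mapping, illustrative only:

    using System;

    class ScoreClampDemo
    {
        // Mirrors the conditional in AllScorer.Score().
        static float Clamp(float score)
        {
            return score > float.NegativeInfinity ? score : -float.MaxValue;
        }

        static void Main()
        {
            Console.WriteLine(Clamp(1.5f));                   // 1.5
            Console.WriteLine(Clamp(float.NegativeInfinity)); // -3.402823E+38
            Console.WriteLine(Clamp(float.NaN));              // -3.402823E+38, since NaN > x is false
        }
    }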

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Util/ReciprocalFloatFunction.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Util/ReciprocalFloatFunction.cs b/src/contrib/Spatial/Util/ReciprocalFloatFunction.cs
index faa71df..5789df4 100644
--- a/src/contrib/Spatial/Util/ReciprocalFloatFunction.cs
+++ b/src/contrib/Spatial/Util/ReciprocalFloatFunction.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Util/ShapeFieldCache.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Util/ShapeFieldCache.cs b/src/contrib/Spatial/Util/ShapeFieldCache.cs
index 89fa951..59db379 100644
--- a/src/contrib/Spatial/Util/ShapeFieldCache.cs
+++ b/src/contrib/Spatial/Util/ShapeFieldCache.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -25,31 +25,31 @@ namespace Lucene.Net.Spatial.Util
     /// associated with a given docId
     /// </summary>
     /// <typeparam name="T"></typeparam>
-	public class ShapeFieldCache<T> where T : Shape
-	{
-		private readonly IList<T>[] cache;
-		public int defaultLength;
+    public class ShapeFieldCache<T> where T : Shape
+    {
+        private readonly IList<T>[] cache;
+        public int defaultLength;
 
-		public ShapeFieldCache(int length, int defaultLength)
-		{
-			cache = new IList<T>[length];
-			this.defaultLength = defaultLength;
-		}
+        public ShapeFieldCache(int length, int defaultLength)
+        {
+            cache = new IList<T>[length];
+            this.defaultLength = defaultLength;
+        }
 
-		public void Add(int docid, T s)
-		{
-			IList<T> list = cache[docid];
-			if (list == null)
-			{
-				list = cache[docid] = new List<T>(defaultLength);
-			}
-			list.Add(s);
-		}
+        public void Add(int docid, T s)
+        {
+            IList<T> list = cache[docid];
+            if (list == null)
+            {
+                list = cache[docid] = new List<T>(defaultLength);
+            }
+            list.Add(s);
+        }
 
-		public IList<T> GetShapes(int docid)
-		{
-			return cache[docid];
-		}
+        public IList<T> GetShapes(int docid)
+        {
+            return cache[docid];
+        }
 
-	}
+    }
 }
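
[Editorial note] ShapeFieldCache is nothing more than an array of per-document lists, allocated lazily on first Add so documents without shapes cost only a null slot. The same pattern in isolation, with a generic element type instead of the Shape constraint; names are illustrative:

    using System;
    using System.Collections.Generic;

    class PerDocListCache<T>
    {
        private readonly IList<T>[] cache;
        private readonly int defaultLength;

        public PerDocListCache(int maxDoc, int defaultLength)
        {
            cache = new IList<T>[maxDoc];
            this.defaultLength = defaultLength;
        }

        public void Add(int docid, T item)
        {
            // Allocate the per-doc list on first use only.
            IList<T> list = cache[docid] ?? (cache[docid] = new List<T>(defaultLength));
            list.Add(item);
        }

        public IList<T> Get(int docid) { return cache[docid]; }
    }

    class PerDocListCacheDemo
    {
        static void Main()
        {
            var c = new PerDocListCache<string>(4, 2);
            c.Add(1, "point A");
            c.Add(1, "point B");
            Console.WriteLine(c.Get(1).Count);   // 2
            Console.WriteLine(c.Get(0) == null); // True
        }
    }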

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Util/ShapeFieldCacheDistanceValueSource.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Util/ShapeFieldCacheDistanceValueSource.cs b/src/contrib/Spatial/Util/ShapeFieldCacheDistanceValueSource.cs
index 940b59b..d806767 100644
--- a/src/contrib/Spatial/Util/ShapeFieldCacheDistanceValueSource.cs
+++ b/src/contrib/Spatial/Util/ShapeFieldCacheDistanceValueSource.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -24,44 +24,44 @@ using Spatial4n.Core.Shapes;
 
 namespace Lucene.Net.Spatial.Util
 {
-	/// <summary>
-	/// An implementation of the Lucene ValueSource model to support spatial relevance ranking.
-	/// </summary>
-	public class ShapeFieldCacheDistanceValueSource : ValueSource
-	{
-		private readonly ShapeFieldCacheProvider<Point> provider;
-		private readonly SpatialContext ctx;
-		private readonly Point from;
+    /// <summary>
+    /// An implementation of the Lucene ValueSource model to support spatial relevance ranking.
+    /// </summary>
+    public class ShapeFieldCacheDistanceValueSource : ValueSource
+    {
+        private readonly ShapeFieldCacheProvider<Point> provider;
+        private readonly SpatialContext ctx;
+        private readonly Point from;
 
-		public ShapeFieldCacheDistanceValueSource(SpatialContext ctx, ShapeFieldCacheProvider<Point> provider, Point from)
-		{
+        public ShapeFieldCacheDistanceValueSource(SpatialContext ctx, ShapeFieldCacheProvider<Point> provider, Point from)
+        {
             this.ctx = ctx;
-			this.from = from;
-			this.provider = provider;
-		}
+            this.from = from;
+            this.provider = provider;
+        }
 
-		public class CachedDistanceDocValues : DocValues
-		{
-			private readonly ShapeFieldCacheDistanceValueSource enclosingInstance;
-			private readonly ShapeFieldCache<Point> cache;
-		    private readonly Point from;
-		    private readonly DistanceCalculator calculator;
-		    private readonly double nullValue;
+        public class CachedDistanceDocValues : DocValues
+        {
+            private readonly ShapeFieldCacheDistanceValueSource enclosingInstance;
+            private readonly ShapeFieldCache<Point> cache;
+            private readonly Point from;
+            private readonly DistanceCalculator calculator;
+            private readonly double nullValue;
 
-			public CachedDistanceDocValues(IndexReader reader, ShapeFieldCacheDistanceValueSource enclosingInstance)
-			{
+            public CachedDistanceDocValues(IndexReader reader, ShapeFieldCacheDistanceValueSource enclosingInstance)
+            {
                 cache = enclosingInstance.provider.GetCache(reader);
-				this.enclosingInstance = enclosingInstance;
-				
+                this.enclosingInstance = enclosingInstance;
+                
                 from = enclosingInstance.from;
-			    calculator = enclosingInstance.ctx.GetDistCalc();
-			    nullValue = (enclosingInstance.ctx.IsGeo() ? 180 : double.MaxValue);
-			}
+                calculator = enclosingInstance.ctx.GetDistCalc();
+                nullValue = (enclosingInstance.ctx.IsGeo() ? 180 : double.MaxValue);
+            }
 
-			public override float FloatVal(int doc)
-			{
-				return (float)DoubleVal(doc);
-			}
+            public override float FloatVal(int doc)
+            {
+                return (float)DoubleVal(doc);
+            }
 
             public override double DoubleVal(int doc)
             {
@@ -78,39 +78,39 @@ namespace Lucene.Net.Spatial.Util
                 return nullValue;
             }
 
-		    public override string ToString(int doc)
-			{
-				return enclosingInstance.Description() + "=" + FloatVal(doc);
-			}
-		}
+            public override string ToString(int doc)
+            {
+                return enclosingInstance.Description() + "=" + FloatVal(doc);
+            }
+        }
 
-		public override DocValues GetValues(IndexReader reader)
-		{
-			return new CachedDistanceDocValues(reader, this);
-		}
+        public override DocValues GetValues(IndexReader reader)
+        {
+            return new CachedDistanceDocValues(reader, this);
+        }
 
-		public override string Description()
-		{
+        public override string Description()
+        {
             return GetType().Name + "(" + provider + ", " + from + ")";
-		}
+        }
 
-		public override bool Equals(object o)
-		{
-			if (this == o) return true;
+        public override bool Equals(object o)
+        {
+            if (this == o) return true;
 
-			var that = o as ShapeFieldCacheDistanceValueSource;
+            var that = o as ShapeFieldCacheDistanceValueSource;
 
-			if (that == null) return false;
+            if (that == null) return false;
             if (!ctx.Equals(that.ctx)) return false;
             if (!from.Equals(that.from)) return false;
             if (!provider.Equals(that.provider)) return false;
 
-			return true;
-		}
+            return true;
+        }
 
-		public override int GetHashCode()
-		{
-		    return from.GetHashCode();
-		}
-	}
+        public override int GetHashCode()
+        {
+            return from.GetHashCode();
+        }
+    }
 }
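
[Editorial note] The DoubleVal body is elided by the hunk above, but the fields set up in the constructor tell the story: the value is the distance from the query point to the nearest cached point for the document, falling back to nullValue (180 degrees in a geo context, the largest possible angular distance) when the document has no shapes. A hedged sketch of that fold, with a Euclidean stand-in for Spatial4n's DistanceCalculator:

    using System;
    using System.Collections.Generic;

    class MinDistanceDemo
    {
        // Stand-in for Spatial4n's DistanceCalculator; an assumption, not the real API.
        static double Distance((double X, double Y) a, (double X, double Y) b)
        {
            double dx = a.X - b.X, dy = a.Y - b.Y;
            return Math.Sqrt(dx * dx + dy * dy);
        }

        static double DoubleVal(IList<(double X, double Y)> shapes,
                                (double X, double Y) from, double nullValue)
        {
            if (shapes == null || shapes.Count == 0) return nullValue;
            double v = double.MaxValue;
            foreach (var p in shapes)
                v = Math.Min(v, Distance(from, p)); // keep the nearest shape
            return v;
        }

        static void Main()
        {
            var shapes = new List<(double, double)> { (0, 3), (4, 0) };
            Console.WriteLine(DoubleVal(shapes, (0, 0), 180)); // 3
            Console.WriteLine(DoubleVal(null, (0, 0), 180));   // 180
        }
    }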

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Util/ShapeFieldCacheProvider.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Util/ShapeFieldCacheProvider.cs b/src/contrib/Spatial/Util/ShapeFieldCacheProvider.cs
index 13f623a..782ebc9 100644
--- a/src/contrib/Spatial/Util/ShapeFieldCacheProvider.cs
+++ b/src/contrib/Spatial/Util/ShapeFieldCacheProvider.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -33,73 +33,73 @@ namespace Lucene.Net.Spatial.Util
     /// them to the Cache.
     /// </summary>
     /// <typeparam name="T"></typeparam>
-	public abstract class ShapeFieldCacheProvider<T> where T : Shape
-	{
-		//private Logger log = Logger.getLogger(getClass().getName());
+    public abstract class ShapeFieldCacheProvider<T> where T : Shape
+    {
+        //private Logger log = Logger.getLogger(getClass().getName());
 
-		// it may be a List<T> or T
+        // it may be a List<T> or T
 #if !NET35
-		private readonly ConditionalWeakTable<IndexReader, ShapeFieldCache<T>> sidx =
-			new ConditionalWeakTable<IndexReader, ShapeFieldCache<T>>(); // WeakHashMap
+        private readonly ConditionalWeakTable<IndexReader, ShapeFieldCache<T>> sidx =
+            new ConditionalWeakTable<IndexReader, ShapeFieldCache<T>>(); // WeakHashMap
 #else
-	    private readonly WeakDictionary<IndexReader, ShapeFieldCache<T>> sidx =
-	        new WeakDictionary<IndexReader, ShapeFieldCache<T>>();
+        private readonly WeakDictionary<IndexReader, ShapeFieldCache<T>> sidx =
+            new WeakDictionary<IndexReader, ShapeFieldCache<T>>();
 #endif
 
 
-		protected readonly int defaultSize;
-		protected readonly String shapeField;
+        protected readonly int defaultSize;
+        protected readonly String shapeField;
 
-		protected ShapeFieldCacheProvider(String shapeField, int defaultSize)
-		{
-			this.shapeField = shapeField;
-			this.defaultSize = defaultSize;
-		}
+        protected ShapeFieldCacheProvider(String shapeField, int defaultSize)
+        {
+            this.shapeField = shapeField;
+            this.defaultSize = defaultSize;
+        }
 
-		protected abstract T ReadShape(/*BytesRef*/ Term term);
+        protected abstract T ReadShape(/*BytesRef*/ Term term);
 
-		private readonly object locker = new object();
+        private readonly object locker = new object();
 
-		public ShapeFieldCache<T> GetCache(IndexReader reader)
-		{
-			lock (locker)
-			{
-				ShapeFieldCache<T> idx;
-				if (sidx.TryGetValue(reader, out idx) && idx != null)
-				{
-					return idx;
-				}
+        public ShapeFieldCache<T> GetCache(IndexReader reader)
+        {
+            lock (locker)
+            {
+                ShapeFieldCache<T> idx;
+                if (sidx.TryGetValue(reader, out idx) && idx != null)
+                {
+                    return idx;
+                }
 
-				//long startTime = System.CurrentTimeMillis();
-				//log.fine("Building Cache [" + reader.MaxDoc() + "]");
+                //long startTime = System.CurrentTimeMillis();
+                //log.fine("Building Cache [" + reader.MaxDoc() + "]");
 
-				idx = new ShapeFieldCache<T>(reader.MaxDoc, defaultSize);
-				var count = 0;
-				var tec = new TermsEnumCompatibility(reader, shapeField);
+                idx = new ShapeFieldCache<T>(reader.MaxDoc, defaultSize);
+                var count = 0;
+                var tec = new TermsEnumCompatibility(reader, shapeField);
 
-				var term = tec.Next();
-				while (term != null)
-				{
-					var shape = ReadShape(term);
-					if (shape != null)
-					{
-						var docs = reader.TermDocs(new Term(shapeField, tec.Term().Text));
-						while (docs.Next())
-						{
-							idx.Add(docs.Doc, shape);
-							count++;
-						}
-					}
-					term = tec.Next();
-				}
+                var term = tec.Next();
+                while (term != null)
+                {
+                    var shape = ReadShape(term);
+                    if (shape != null)
+                    {
+                        var docs = reader.TermDocs(new Term(shapeField, tec.Term().Text));
+                        while (docs.Next())
+                        {
+                            idx.Add(docs.Doc, shape);
+                            count++;
+                        }
+                    }
+                    term = tec.Next();
+                }
 
-				sidx.Add(reader, idx);
-				tec.Close();
+                sidx.Add(reader, idx);
+                tec.Close();
 
-				//long elapsed = System.CurrentTimeMillis() - startTime;
-				//log.fine("Cached: [" + count + " in " + elapsed + "ms] " + idx);
-				return idx;
-			}
-		}
-	}
+                //long elapsed = System.CurrentTimeMillis() - startTime;
+                //log.fine("Cached: [" + count + " in " + elapsed + "ms] " + idx);
+                return idx;
+            }
+        }
+    }
 }
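
[Editorial note] GetCache above combines three ingredients: a lock to keep concurrent builders out, a ConditionalWeakTable keyed on the IndexReader so the cache entry becomes collectible with its reader (the WeakHashMap role noted in the comment), and a build-on-miss step. The skeleton of that pattern, with the term-walking loop abstracted into a caller-supplied build delegate for brevity:

    using System;
    using System.Runtime.CompilerServices;

    class WeakKeyedCache<TKey, TValue>
        where TKey : class
        where TValue : class
    {
        private readonly ConditionalWeakTable<TKey, TValue> table =
            new ConditionalWeakTable<TKey, TValue>();
        private readonly object locker = new object();
        private readonly Func<TKey, TValue> build;

        public WeakKeyedCache(Func<TKey, TValue> build) { this.build = build; }

        public TValue Get(TKey key)
        {
            lock (locker)
            {
                TValue value;
                if (table.TryGetValue(key, out value))
                    return value;                 // cache hit
                value = build(key);               // e.g. walk the terms and fill the cache
                table.Add(key, value);            // entry dies with the key, like a WeakHashMap
                return value;
            }
        }
    }

    class WeakKeyedCacheDemo
    {
        static void Main()
        {
            int builds = 0;
            var cache = new WeakKeyedCache<object, string>(k => "built #" + (++builds));
            var reader = new object();            // stand-in for an IndexReader
            Console.WriteLine(cache.Get(reader)); // built #1
            Console.WriteLine(cache.Get(reader)); // built #1 (cached)
        }
    }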

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Util/TermsEnumCompatibility.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Util/TermsEnumCompatibility.cs b/src/contrib/Spatial/Util/TermsEnumCompatibility.cs
index 13fa483..7b6809e 100644
--- a/src/contrib/Spatial/Util/TermsEnumCompatibility.cs
+++ b/src/contrib/Spatial/Util/TermsEnumCompatibility.cs
@@ -26,115 +26,115 @@ using Lucene.Net.Util;
 
 namespace Lucene.Net.Spatial.Util
 {
-	/// <summary>
-	/// Wraps Lucene 3 TermEnum to make it look like a Lucene 4 TermsEnum
-	/// SOLR-2155
-	/// @author dsmiley
-	/// </summary>
-	public class TermsEnumCompatibility
-	{
-		private readonly IndexReader reader;
-		private readonly String fieldName;
-		private TermEnum termEnum;
-		private bool initialState = true;
+    /// <summary>
+    /// Wraps Lucene 3 TermEnum to make it look like a Lucene 4 TermsEnum
+    /// SOLR-2155
+    /// @author dsmiley
+    /// </summary>
+    public class TermsEnumCompatibility
+    {
+        private readonly IndexReader reader;
+        private readonly String fieldName;
+        private TermEnum termEnum;
+        private bool initialState = true;
 
-		public TermsEnumCompatibility(IndexReader reader, String fieldName)
-		{
-			this.reader = reader;
-			this.fieldName = string.Intern(fieldName);
-			this.termEnum = reader.Terms(new Term(this.fieldName));
-		}
+        public TermsEnumCompatibility(IndexReader reader, String fieldName)
+        {
+            this.reader = reader;
+            this.fieldName = string.Intern(fieldName);
+            this.termEnum = reader.Terms(new Term(this.fieldName));
+        }
 
-		public TermEnum GetTermEnum()
-		{
-			return termEnum;
-		}
+        public TermEnum GetTermEnum()
+        {
+            return termEnum;
+        }
 
-		public Term Term()
-		{
-			Term t = termEnum.Term;
-			return t != null && t.Field == fieldName ? t : null;
-		}
+        public Term Term()
+        {
+            Term t = termEnum.Term;
+            return t != null && t.Field == fieldName ? t : null;
+        }
 
-		public Term Next()
-		{
-			//in Lucene 3, a call to reader.terms(term) is already pre-positioned, you don't call next first
-			if (initialState)
-			{
-				initialState = false;
-				return Term();
-			}
-			else
-			{
-				return termEnum.Next() ? Term() : null;
-			}
-		}
+        public Term Next()
+        {
+            // In Lucene 3, reader.Terms(term) returns an enumerator that is already
+            // positioned on the first term, so the first call must return it rather than advance.
+            if (initialState)
+            {
+                initialState = false;
+                return Term();
+            }
+            else
+            {
+                return termEnum.Next() ? Term() : null;
+            }
+        }
 
-		public void Close()
-		{
-			termEnum.Close();
-		}
+        public void Close()
+        {
+            termEnum.Close();
+        }
 
-		public enum SeekStatus
-		{
-			END,
-			FOUND,
-			NOT_FOUND
-		}
+        public enum SeekStatus
+        {
+            END,
+            FOUND,
+            NOT_FOUND
+        }
 
-		public SeekStatus Seek(String value)
-		{
-			termEnum = reader.Terms(new Term(this.fieldName, value));
-			Term t = Term();
-			if (t == null)
-				return SeekStatus.END;
-			return (t.Text.Equals(value)) ? SeekStatus.FOUND : SeekStatus.NOT_FOUND;
-		}
+        public SeekStatus Seek(String value)
+        {
+            termEnum = reader.Terms(new Term(this.fieldName, value));
+            Term t = Term();
+            if (t == null)
+                return SeekStatus.END;
+            return (t.Text.Equals(value)) ? SeekStatus.FOUND : SeekStatus.NOT_FOUND;
+        }
 
-		/// <summary>
-		/// Seeks to the specified term, if it exists, or to the
-		/// next (ceiling) term.  Returns SeekStatus to
-		/// indicate whether exact term was found, a different
-		/// term was found, or EOF was hit.  The target term may
-		/// be before or after the current term.  If this returns
-		/// SeekStatus.END, the enum is unpositioned.
-		/// </summary>
-		/// <param name="value"></param>
-		/// <returns></returns>
-		public SeekStatus SeekCeil(String value)
-		{
-			return Seek(value);
-		}
+        /// <summary>
+        /// Seeks to the specified term, if it exists, or to the
+        /// next (ceiling) term.  Returns SeekStatus to
+        /// indicate whether exact term was found, a different
+        /// term was found, or EOF was hit.  The target term may
+        /// be before or after the current term.  If this returns
+        /// SeekStatus.END, the enum is unpositioned.
+        /// </summary>
+        /// <param name="value"></param>
+        /// <returns></returns>
+        public SeekStatus SeekCeil(String value)
+        {
+            return Seek(value);
+        }
 
-		/// <summary>
-		/// Returns the number of documents that have at least one
-		/// term for this field, or -1 if this measure isn't
-		/// stored by the codec.  Note that, just like other term
-		/// measures, this measure does not take deleted documents
-		/// into account.
-		/// </summary>
-		/// <returns></returns>
-		public int GetDocCount()
-		{
-			return -1; // TODO find a way to efficiently determine this
-		}
+        /// <summary>
+        /// Returns the number of documents that have at least one
+        /// term for this field, or -1 if this measure isn't
+        /// stored by the codec.  Note that, just like other term
+        /// measures, this measure does not take deleted documents
+        /// into account.
+        /// </summary>
+        /// <returns></returns>
+        public int GetDocCount()
+        {
+            return -1; // TODO find a way to efficiently determine this
+        }
 
-		public void Docs(OpenBitSet bits)
-		{
-			var termDocs = reader.TermDocs(new Term(fieldName, Term().Text));
-			while (termDocs.Next())
-			{
-				bits.FastSet(termDocs.Doc);
-			}
-		}
+        public void Docs(OpenBitSet bits)
+        {
+            var termDocs = reader.TermDocs(new Term(fieldName, Term().Text));
+            while (termDocs.Next())
+            {
+                bits.FastSet(termDocs.Doc);
+            }
+        }
 
-		public void Docs(FixedBitSet bits)
-		{
-			var termDocs = reader.TermDocs(new Term(fieldName, Term().Text));
-			while (termDocs.Next())
-			{
-				bits.Set(termDocs.Doc);
-			}
-		}
-	}
+        public void Docs(FixedBitSet bits)
+        {
+            var termDocs = reader.TermDocs(new Term(fieldName, Term().Text));
+            while (termDocs.Next())
+            {
+                bits.Set(termDocs.Doc);
+            }
+        }
+    }
 }
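
[Editorial note] The initialState dance in Next() exists because, as the comment above says, Lucene 3's reader.Terms(term) hands back an enumerator already positioned on its first term, while the Lucene 4 style this wrapper emulates expects the first Next() to deliver that term rather than skip it. The same adaptation over an ordinary pre-positioned enumerator, with illustrative names:

    using System;
    using System.Collections.Generic;

    class PrePositionedAdapter
    {
        private readonly IEnumerator<string> inner;
        private bool initialState = true;
        private bool exhausted;

        // inner must already be positioned on its first element (MoveNext() called once),
        // mirroring Lucene 3's pre-positioned TermEnum.
        public PrePositionedAdapter(IEnumerator<string> inner) { this.inner = inner; }

        public string Next()
        {
            if (exhausted) return null;
            if (initialState)
            {
                initialState = false;      // first call: hand back the current element
                return inner.Current;
            }
            if (inner.MoveNext()) return inner.Current;
            exhausted = true;
            return null;
        }

        static void Main()
        {
            var e = new List<string> { "a", "b" }.GetEnumerator();
            e.MoveNext();                              // pre-position, like reader.Terms(term)
            var adapter = new PrePositionedAdapter(e);
            Console.WriteLine(adapter.Next());         // a
            Console.WriteLine(adapter.Next());         // b
            Console.WriteLine(adapter.Next() == null); // True
        }
    }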

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Util/TermsFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Util/TermsFilter.cs b/src/contrib/Spatial/Util/TermsFilter.cs
index 4514277..4acfcef 100644
--- a/src/contrib/Spatial/Util/TermsFilter.cs
+++ b/src/contrib/Spatial/Util/TermsFilter.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -23,95 +23,95 @@ using Lucene.Net.Search;
 
 namespace Lucene.Net.Spatial.Util
 {
-	/// <summary>
-	/// Constructs a filter for docs matching any of the terms added to this class.
-	/// Unlike a RangeFilter this can be used for filtering on multiple terms that are not necessarily in
-	/// a sequence. An example might be a collection of primary keys from a database query result or perhaps
-	/// a choice of "category" labels picked by the end user. As a filter, this is much faster than the
-	/// equivalent query (a BooleanQuery with many "should" TermQueries)
-	/// </summary>
-	public class TermsFilter : Filter
-	{
-		private readonly SortedSet<Term> terms = new SortedSet<Term>();
+    /// <summary>
+    /// Constructs a filter for docs matching any of the terms added to this class.
+    /// Unlike a RangeFilter this can be used for filtering on multiple terms that are not necessarily in
+    /// a sequence. An example might be a collection of primary keys from a database query result or perhaps
+    /// a choice of "category" labels picked by the end user. As a filter, this is much faster than the
+    /// equivalent query (a BooleanQuery with many "should" TermQueries)
+    /// </summary>
+    public class TermsFilter : Filter
+    {
+        private readonly SortedSet<Term> terms = new SortedSet<Term>();
 
-		/// <summary>
-		/// Adds a term to the list of acceptable terms
-		/// </summary>
-		/// <param name="term"></param>
-		public void AddTerm(Term term)
-		{
-			terms.Add(term);
-		}
+        /// <summary>
+        /// Adds a term to the list of acceptable terms
+        /// </summary>
+        /// <param name="term"></param>
+        public void AddTerm(Term term)
+        {
+            terms.Add(term);
+        }
 
-		public override DocIdSet GetDocIdSet(IndexReader reader)
-		{
-			var result = new FixedBitSet(reader.MaxDoc);
-			var fields = reader.GetFieldNames(IndexReader.FieldOption.ALL);
+        public override DocIdSet GetDocIdSet(IndexReader reader)
+        {
+            var result = new FixedBitSet(reader.MaxDoc);
+            var fields = reader.GetFieldNames(IndexReader.FieldOption.ALL);
 
-			if (fields == null || fields.Count == 0)
-			{
-				return result;
-			}
+            if (fields == null || fields.Count == 0)
+            {
+                return result;
+            }
 
-			String lastField = null;
-			TermsEnumCompatibility termsEnum = null;
-			foreach (Term term in terms)
-			{
-				if (!term.Field.Equals(lastField))
-				{
-					var termsC = new TermsEnumCompatibility(reader, term.Field);
-					if (termsC.Term() == null)
-					{
-						return result;
-					}
-					termsEnum = termsC;
-					lastField = term.Field;
-				}
+            String lastField = null;
+            TermsEnumCompatibility termsEnum = null;
+            foreach (Term term in terms)
+            {
+                if (!term.Field.Equals(lastField))
+                {
+                    var termsC = new TermsEnumCompatibility(reader, term.Field);
+                    if (termsC.Term() == null)
+                    {
+                        return result;
+                    }
+                    termsEnum = termsC;
+                    lastField = term.Field;
+                }
 
-				if (terms != null)
-				{
-					// TODO this check doesn't make sense, decide which variable its supposed to be for
-					Debug.Assert(termsEnum != null);
-					if (termsEnum.SeekCeil(term.Text) == TermsEnumCompatibility.SeekStatus.FOUND)
-					{
-						termsEnum.Docs(result);
-					}
-				}
-			}
-			return result;
-		}
+                if (terms != null)
+                {
+                    // TODO this check doesn't make sense: terms is a readonly field and never
+                    // null here; decide which variable it's supposed to guard
+                    Debug.Assert(termsEnum != null);
+                    if (termsEnum.SeekCeil(term.Text) == TermsEnumCompatibility.SeekStatus.FOUND)
+                    {
+                        termsEnum.Docs(result);
+                    }
+                }
+            }
+            return result;
+        }
 
-		public override bool Equals(object obj)
-		{
-			if (this == obj)
-				return true;
+        public override bool Equals(object obj)
+        {
+            if (this == obj)
+                return true;
 
-			if ((obj == null) || (obj.GetType() != this.GetType()))
-				return false;
+            if ((obj == null) || (obj.GetType() != this.GetType()))
+                return false;
 
-			var test = (TermsFilter)obj;
-			if (terms == test.terms)
-				return true;
-			if (terms == null || terms.Count != test.terms.Count)
-				return false;
+            var test = (TermsFilter)obj;
+            if (terms == test.terms)
+                return true;
+            if (terms == null || terms.Count != test.terms.Count)
+                return false;
 
-			var e1 = terms.GetEnumerator();
-			var e2 = test.terms.GetEnumerator();
-			while (e1.MoveNext() && e2.MoveNext())
-			{
-				if (!e1.Current.Equals(e2.Current)) return false;
-			}
-			return true;
-		}
+            var e1 = terms.GetEnumerator();
+            var e2 = test.terms.GetEnumerator();
+            while (e1.MoveNext() && e2.MoveNext())
+            {
+                if (!e1.Current.Equals(e2.Current)) return false;
+            }
+            return true;
+        }
 
-		public override int GetHashCode()
-		{
-			int hash = 9;
-			foreach (Term term in terms)
-			{
-				hash = 31 * hash + term.GetHashCode();
-			}
-			return hash;
-		}
-	}
+        public override int GetHashCode()
+        {
+            int hash = 9;
+            foreach (Term term in terms)
+            {
+                hash = 31 * hash + term.GetHashCode();
+            }
+            return hash;
+        }
+    }
 }
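
[Editorial note] Usage of the filter is two steps: add the terms, then pass the filter alongside a query. A hedged sketch against the APIs shown in this diff; the field name and category values are invented, and an already-open IndexSearcher is assumed:

    using Lucene.Net.Index;
    using Lucene.Net.Search;
    using Lucene.Net.Spatial.Util;

    static class TermsFilterUsage
    {
        // Restrict any query to docs whose "category" is one of the given labels.
        public static TopDocs Search(IndexSearcher searcher, Query query, params string[] categories)
        {
            var filter = new TermsFilter();
            foreach (var c in categories)
                filter.AddTerm(new Term("category", c)); // terms are OR-ed together
            return searcher.Search(query, filter, 10);   // top 10 hits
        }
    }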

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Util/ValueSourceFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Util/ValueSourceFilter.cs b/src/contrib/Spatial/Util/ValueSourceFilter.cs
index f969c05..e92c120 100644
--- a/src/contrib/Spatial/Util/ValueSourceFilter.cs
+++ b/src/contrib/Spatial/Util/ValueSourceFilter.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -25,30 +25,30 @@ namespace Lucene.Net.Spatial.Util
     /// Filter that matches all documents where a valuesource is
     /// in between a range of <c>min</c> and <c>max</c> inclusive.
     /// </summary>
-	public class ValueSourceFilter : Filter
-	{
-		readonly Filter startingFilter;
-		readonly ValueSource source;
-		public readonly double min;
-		public readonly double max;
+    public class ValueSourceFilter : Filter
+    {
+        readonly Filter startingFilter;
+        readonly ValueSource source;
+        public readonly double min;
+        public readonly double max;
 
-		public ValueSourceFilter(Filter startingFilter, ValueSource source, double min, double max)
-		{
-			if (startingFilter == null)
-			{
-				throw new ArgumentException("please provide a non-null startingFilter; you can use QueryWrapperFilter(MatchAllDocsQuery) as a no-op filter", "startingFilter");
-			}
-			this.startingFilter = startingFilter;
-			this.source = source;
-			this.min = min;
-			this.max = max;
-		}
+        public ValueSourceFilter(Filter startingFilter, ValueSource source, double min, double max)
+        {
+            if (startingFilter == null)
+            {
+                throw new ArgumentException("please provide a non-null startingFilter; you can use QueryWrapperFilter(MatchAllDocsQuery) as a no-op filter", "startingFilter");
+            }
+            this.startingFilter = startingFilter;
+            this.source = source;
+            this.min = min;
+            this.max = max;
+        }
 
-		public override DocIdSet GetDocIdSet(Index.IndexReader reader)
-		{
-			var values = source.GetValues(reader);
-			return new ValueSourceFilteredDocIdSet(startingFilter.GetDocIdSet(reader), values, this);
-		}
+        public override DocIdSet GetDocIdSet(Index.IndexReader reader)
+        {
+            var values = source.GetValues(reader);
+            return new ValueSourceFilteredDocIdSet(startingFilter.GetDocIdSet(reader), values, this);
+        }
 
         public class ValueSourceFilteredDocIdSet : FilteredDocIdSet
         {
@@ -68,5 +68,5 @@ namespace Lucene.Net.Spatial.Util
                 return val >= enclosingFilter.min && val <= enclosingFilter.max;
             }
         }
-	}
+    }
 }
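
[Editorial note] The Match predicate visible in the context lines keeps a document exactly when its source value lies in the inclusive [min, max] range. The same inclusive check in miniature; the value function below is an invented stand-in for a ValueSource:

    using System;
    using System.Linq;

    class RangeMatchDemo
    {
        static void Main()
        {
            Func<int, double> values = doc => doc * 2.5; // doc id -> value (illustrative)
            double min = 3, max = 8;

            // Mirrors ValueSourceFilteredDocIdSet.Match over a stream of doc ids.
            var kept = Enumerable.Range(0, 5)
                                 .Where(d => values(d) >= min && values(d) <= max);
            Console.WriteLine(string.Join(",", kept)); // 2,3  (values 5 and 7.5)
        }
    }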

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Vector/DistanceValueSource.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Vector/DistanceValueSource.cs b/src/contrib/Spatial/Vector/DistanceValueSource.cs
index 29439e1..19e2ea0 100644
--- a/src/contrib/Spatial/Vector/DistanceValueSource.cs
+++ b/src/contrib/Spatial/Vector/DistanceValueSource.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -26,93 +26,93 @@ using Spatial4n.Core.Shapes.Impl;
 
 namespace Lucene.Net.Spatial.Vector
 {
-	/// <summary>
+    /// <summary>
     /// An implementation of the Lucene ValueSource model that returns the distance.
-	/// </summary>
-	public class DistanceValueSource : ValueSource
-	{
-		private readonly PointVectorStrategy strategy;
-		private readonly Point from;
+    /// </summary>
+    public class DistanceValueSource : ValueSource
+    {
+        private readonly PointVectorStrategy strategy;
+        private readonly Point from;
 
-		public DistanceValueSource(PointVectorStrategy strategy, Point from)
-		{
-			this.strategy = strategy;
-			this.from = from;
-		}
+        public DistanceValueSource(PointVectorStrategy strategy, Point from)
+        {
+            this.strategy = strategy;
+            this.from = from;
+        }
 
-		public class DistanceDocValues : DocValues
-		{
-			private readonly DistanceValueSource enclosingInstance;
+        public class DistanceDocValues : DocValues
+        {
+            private readonly DistanceValueSource enclosingInstance;
 
-			private readonly double[] ptX, ptY;
-			private readonly IBits validX, validY;
+            private readonly double[] ptX, ptY;
+            private readonly IBits validX, validY;
 
             private readonly Point from;
             private readonly DistanceCalculator calculator;
             private readonly double nullValue;
 
-			public DistanceDocValues(DistanceValueSource enclosingInstance, IndexReader reader)
-			{
-				this.enclosingInstance = enclosingInstance;
+            public DistanceDocValues(DistanceValueSource enclosingInstance, IndexReader reader)
+            {
+                this.enclosingInstance = enclosingInstance;
 
-				ptX = FieldCache_Fields.DEFAULT.GetDoubles(reader, enclosingInstance.strategy.GetFieldNameX()/*, true*/);
-				ptY = FieldCache_Fields.DEFAULT.GetDoubles(reader, enclosingInstance.strategy.GetFieldNameY()/*, true*/);
-				validX = FieldCache_Fields.DEFAULT.GetDocsWithField(reader, enclosingInstance.strategy.GetFieldNameX());
-				validY = FieldCache_Fields.DEFAULT.GetDocsWithField(reader, enclosingInstance.strategy.GetFieldNameY());
+                ptX = FieldCache_Fields.DEFAULT.GetDoubles(reader, enclosingInstance.strategy.GetFieldNameX()/*, true*/);
+                ptY = FieldCache_Fields.DEFAULT.GetDoubles(reader, enclosingInstance.strategy.GetFieldNameY()/*, true*/);
+                validX = FieldCache_Fields.DEFAULT.GetDocsWithField(reader, enclosingInstance.strategy.GetFieldNameX());
+                validY = FieldCache_Fields.DEFAULT.GetDocsWithField(reader, enclosingInstance.strategy.GetFieldNameY());
 
                 from = enclosingInstance.from;
                 calculator = enclosingInstance.strategy.GetSpatialContext().GetDistCalc();
                 nullValue = (enclosingInstance.strategy.GetSpatialContext().IsGeo() ? 180 : double.MaxValue);
-			}
-
-			public override float FloatVal(int doc)
-			{
-				return (float)DoubleVal(doc);
-			}
-
-			public override double DoubleVal(int doc)
-			{
-				// make sure it has minX and area
-				if (validX.Get(doc))
-				{
-				    Debug.Assert(validY.Get(doc));
-					return calculator.Distance(from, ptX[doc], ptY[doc]);
-				}
-				return nullValue;
-			}
-
-			public override string ToString(int doc)
-			{
-				return enclosingInstance.Description() + "=" + FloatVal(doc);
-			}
-		}
-
-		public override DocValues GetValues(IndexReader reader)
-		{
-			return new DistanceDocValues(this, reader);
-		}
-
-		public override string Description()
-		{
+            }
+
+            public override float FloatVal(int doc)
+            {
+                return (float)DoubleVal(doc);
+            }
+
+            public override double DoubleVal(int doc)
+            {
+                // make sure it has minX and area
+                if (validX.Get(doc))
+                {
+                    Debug.Assert(validY.Get(doc));
+                    return calculator.Distance(from, ptX[doc], ptY[doc]);
+                }
+                return nullValue;
+            }
+
+            public override string ToString(int doc)
+            {
+                return enclosingInstance.Description() + "=" + FloatVal(doc);
+            }
+        }
+
+        public override DocValues GetValues(IndexReader reader)
+        {
+            return new DistanceDocValues(this, reader);
+        }
+
+        public override string Description()
+        {
             return "DistanceValueSource(" + strategy + ", " + from + ")";
-		}
+        }
 
-		public override bool Equals(object o)
-		{
-			if (this == o) return true;
+        public override bool Equals(object o)
+        {
+            if (this == o) return true;
 
-			var that = o as DistanceValueSource;
-			if (that == null) return false;
+            var that = o as DistanceValueSource;
+            if (that == null) return false;
 
             if (!from.Equals(that.from)) return false;
             if (!strategy.Equals(that.strategy)) return false;
 
-			return true;
-		}
+            return true;
+        }
 
-		public override int GetHashCode()
-		{
-		    return from.GetHashCode();
-		}
-	}
+        public override int GetHashCode()
+        {
+            return from.GetHashCode();
+        }
+    }
 }
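
[Editorial note] DistanceDocValues reads two parallel FieldCache arrays (x and y) plus a validity bit set per coordinate, and only computes a distance when the document actually has an indexed point, falling back to nullValue otherwise. The shape of that lookup with invented data and a Euclidean stand-in for the real DistanceCalculator:

    using System;

    class ParallelArrayDistanceDemo
    {
        // Stand-ins for the FieldCache arrays and validity bits (invented data).
        static readonly double[] ptX = { 0, 3 };
        static readonly double[] ptY = { 0, 4 };
        static readonly bool[] valid = { false, true }; // doc 0 has no point indexed

        const double NullValue = 180; // geo context: max possible angular distance

        static double DoubleVal(int doc, double fromX, double fromY)
        {
            // Mirror DistanceDocValues.DoubleVal: only compute when the doc
            // actually has a value in the X (and hence Y) cache.
            if (valid[doc])
            {
                double dx = ptX[doc] - fromX, dy = ptY[doc] - fromY; // Euclidean stand-in
                return Math.Sqrt(dx * dx + dy * dy);
            }
            return NullValue;
        }

        static void Main()
        {
            Console.WriteLine(DoubleVal(0, 0, 0)); // 180 (no indexed point)
            Console.WriteLine(DoubleVal(1, 0, 0)); // 5
        }
    }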

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Vector/PointVectorStrategy.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Vector/PointVectorStrategy.cs b/src/contrib/Spatial/Vector/PointVectorStrategy.cs
index 5f3cd69..f3ef815 100644
--- a/src/contrib/Spatial/Vector/PointVectorStrategy.cs
+++ b/src/contrib/Spatial/Vector/PointVectorStrategy.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -36,70 +36,70 @@ namespace Lucene.Net.Spatial.Vector
     /// Due to the simple use of numeric fields, this Strategy provides support for sorting by
     /// distance through {@link DistanceValueSource}
     /// </summary>
-	public class PointVectorStrategy : SpatialStrategy
-	{
-		public static String SUFFIX_X = "__x";
-		public static String SUFFIX_Y = "__y";
-
-		private readonly String fieldNameX;
-		private readonly String fieldNameY;
-
-		public int precisionStep = 8; // same as solr default
-
-		public PointVectorStrategy(SpatialContext ctx, String fieldNamePrefix)
-			: base(ctx, fieldNamePrefix)
-		{
-			this.fieldNameX = fieldNamePrefix + SUFFIX_X;
-			this.fieldNameY = fieldNamePrefix + SUFFIX_Y;
-		}
-
-		public void SetPrecisionStep(int p)
-		{
-			precisionStep = p;
-			if (precisionStep <= 0 || precisionStep >= 64)
-				precisionStep = int.MaxValue;
-		}
-
-		public string GetFieldNameX()
-		{
-			return fieldNameX;
-		}
-
-		public string GetFieldNameY()
-		{
-			return fieldNameY;
-		}
-
-		public override AbstractField[] CreateIndexableFields(Shape shape)
-		{
-		    var point = shape as Point;
-		    if (point != null)
-		        return CreateIndexableFields(point);
-
-		    throw new InvalidOperationException("Can only index Point, not " + shape);
-		}
+    public class PointVectorStrategy : SpatialStrategy
+    {
+        public static String SUFFIX_X = "__x";
+        public static String SUFFIX_Y = "__y";
+
+        private readonly String fieldNameX;
+        private readonly String fieldNameY;
+
+        public int precisionStep = 8; // same as solr default
+
+        public PointVectorStrategy(SpatialContext ctx, String fieldNamePrefix)
+            : base(ctx, fieldNamePrefix)
+        {
+            this.fieldNameX = fieldNamePrefix + SUFFIX_X;
+            this.fieldNameY = fieldNamePrefix + SUFFIX_Y;
+        }
+
+        public void SetPrecisionStep(int p)
+        {
+            precisionStep = p;
+            if (precisionStep <= 0 || precisionStep >= 64)
+                precisionStep = int.MaxValue;
+        }
+
+        public string GetFieldNameX()
+        {
+            return fieldNameX;
+        }
+
+        public string GetFieldNameY()
+        {
+            return fieldNameY;
+        }
+
+        public override AbstractField[] CreateIndexableFields(Shape shape)
+        {
+            var point = shape as Point;
+            if (point != null)
+                return CreateIndexableFields(point);
+
+            throw new InvalidOperationException("Can only index Point, not " + shape);
+        }
 
         public AbstractField[] CreateIndexableFields(Point point)
         {
-				var f = new AbstractField[2];
+            var f = new AbstractField[2];
 
-				var f0 = new NumericField(fieldNameX, precisionStep, Field.Store.NO, true)
-				         	{OmitNorms = true, OmitTermFreqAndPositions = true};
-				f0.SetDoubleValue(point.GetX());
-				f[0] = f0;
+            var f0 = new NumericField(fieldNameX, precisionStep, Field.Store.NO, true)
+                         {OmitNorms = true, OmitTermFreqAndPositions = true};
+            f0.SetDoubleValue(point.GetX());
+            f[0] = f0;
 
-				var f1 = new NumericField(fieldNameY, precisionStep, Field.Store.NO, true)
-				         	{OmitNorms = true, OmitTermFreqAndPositions = true};
-				f1.SetDoubleValue(point.GetY());
-				f[1] = f1;
+            var f1 = new NumericField(fieldNameY, precisionStep, Field.Store.NO, true)
+                         {OmitNorms = true, OmitTermFreqAndPositions = true};
+            f1.SetDoubleValue(point.GetY());
+            f[1] = f1;
 
-				return f;
-		}
+            return f;
+        }
 
-		public override ValueSource MakeDistanceValueSource(Point queryPoint)
-		{
+        public override ValueSource MakeDistanceValueSource(Point queryPoint)
+        {
             return new DistanceValueSource(this, queryPoint);
-		}
+        }
 
         public override ConstantScoreQuery MakeQuery(SpatialArgs args)
         {
@@ -129,74 +129,74 @@ namespace Lucene.Net.Spatial.Vector
                                             "found [" + shape.GetType().Name + "]"); //TODO
         }
 
-	    //TODO this is basically old code that hasn't been verified well and should probably be removed
+        //TODO this is basically old code that hasn't been verified well and should probably be removed
         public Query MakeQueryDistanceScore(SpatialArgs args)
         {
-	        // For starters, just limit the bbox
-			var shape = args.Shape;
-			if (!(shape is Rectangle || shape is Circle))
-				throw new InvalidOperationException("Only Rectangles and Circles are currently supported, found ["
-					+ shape.GetType().Name + "]");//TODO
-
-			Rectangle bbox = shape.GetBoundingBox();
-			if (bbox.GetCrossesDateLine())
-			{
-				throw new InvalidOperationException("Crossing dateline not yet supported");
-			}
-
-			ValueSource valueSource = null;
-
-			Query spatial = null;
-			SpatialOperation op = args.Operation;
-
-			if (SpatialOperation.Is(op,
-				SpatialOperation.BBoxWithin,
-				SpatialOperation.BBoxIntersects))
-			{
-				spatial = MakeWithin(bbox);
-			}
-			else if (SpatialOperation.Is(op,
-			  SpatialOperation.Intersects,
-			  SpatialOperation.IsWithin))
-			{
-				spatial = MakeWithin(bbox);
-				var circle = args.Shape as Circle;
-				if (circle != null)
-				{
-					// Make the ValueSource
+            // For starters, just limit the bbox
+            var shape = args.Shape;
+            if (!(shape is Rectangle || shape is Circle))
+                throw new InvalidOperationException("Only Rectangles and Circles are currently supported, found ["
+                    + shape.GetType().Name + "]");//TODO
+
+            Rectangle bbox = shape.GetBoundingBox();
+            if (bbox.GetCrossesDateLine())
+            {
+                throw new InvalidOperationException("Crossing dateline not yet supported");
+            }
+
+            ValueSource valueSource = null;
+
+            Query spatial = null;
+            SpatialOperation op = args.Operation;
+
+            if (SpatialOperation.Is(op,
+                SpatialOperation.BBoxWithin,
+                SpatialOperation.BBoxIntersects))
+            {
+                spatial = MakeWithin(bbox);
+            }
+            else if (SpatialOperation.Is(op,
+              SpatialOperation.Intersects,
+              SpatialOperation.IsWithin))
+            {
+                spatial = MakeWithin(bbox);
+                var circle = args.Shape as Circle;
+                if (circle != null)
+                {
+                    // Make the ValueSource
                     valueSource = MakeDistanceValueSource(shape.GetCenter());
 
-					var vsf = new ValueSourceFilter(
-						new QueryWrapperFilter(spatial), valueSource, 0, circle.GetRadius());
-
-					spatial = new FilteredQuery(new MatchAllDocsQuery(), vsf);
-				}
-			}
-			else if (op == SpatialOperation.IsDisjointTo)
-			{
-				spatial = MakeDisjoint(bbox);
-			}
-
-			if (spatial == null)
-			{
-				throw new UnsupportedSpatialOperation(args.Operation);
-			}
-
-			if (valueSource != null)
-			{
-				valueSource = new CachingDoubleValueSource(valueSource);
-			}
-			else
-			{
+                    var vsf = new ValueSourceFilter(
+                        new QueryWrapperFilter(spatial), valueSource, 0, circle.GetRadius());
+
+                    spatial = new FilteredQuery(new MatchAllDocsQuery(), vsf);
+                }
+            }
+            else if (op == SpatialOperation.IsDisjointTo)
+            {
+                spatial = MakeDisjoint(bbox);
+            }
+
+            if (spatial == null)
+            {
+                throw new UnsupportedSpatialOperation(args.Operation);
+            }
+
+            if (valueSource != null)
+            {
+                valueSource = new CachingDoubleValueSource(valueSource);
+            }
+            else
+            {
                 valueSource = MakeDistanceValueSource(shape.GetCenter());
-			}
-			Query spatialRankingQuery = new FunctionQuery(valueSource);
-			var bq = new BooleanQuery();
-			bq.Add(spatial, Occur.MUST);
-			bq.Add(spatialRankingQuery, Occur.MUST);
-			return bq;
+            }
+            Query spatialRankingQuery = new FunctionQuery(valueSource);
+            var bq = new BooleanQuery();
+            bq.Add(spatial, Occur.MUST);
+            bq.Add(spatialRankingQuery, Occur.MUST);
+            return bq;
 
-		}
+        }
 
         public override Filter MakeFilter(SpatialArgs args)
         {
@@ -209,30 +209,30 @@ namespace Lucene.Net.Spatial.Vector
                 return new QueryWrapperFilter(csq);
         }
 
-	    /// <summary>
-		/// Constructs a query to retrieve documents that fully contain the input envelope.
-		/// </summary>
-		/// <param name="bbox"></param>
+        /// <summary>
+        /// Constructs a query to retrieve documents whose indexed point falls within the input envelope.
+        /// </summary>
+        /// <param name="bbox">the bounding box to match against</param>
         private Query MakeWithin(Rectangle bbox)
-	    {
-	        var bq = new BooleanQuery();
-	        const Occur MUST = Occur.MUST;
-	        if (bbox.GetCrossesDateLine())
-	        {
-	            //use null as performance trick since no data will be beyond the world bounds
-	            bq.Add(RangeQuery(fieldNameX, null /*-180*/, bbox.GetMaxX()), Occur.SHOULD);
-	            bq.Add(RangeQuery(fieldNameX, bbox.GetMinX(), null /*+180*/), Occur.SHOULD);
-	            bq.MinimumNumberShouldMatch = 1; //must match at least one of the SHOULD
-	        }
-	        else
-	        {
-	            bq.Add(RangeQuery(fieldNameX, bbox.GetMinX(), bbox.GetMaxX()), MUST);
-	        }
-	        bq.Add(RangeQuery(fieldNameY, bbox.GetMinY(), bbox.GetMaxY()), MUST);
-	        return bq;
-	    }
-
-	    private NumericRangeQuery<Double> RangeQuery(String fieldName, double? min, double? max)
+        {
+            var bq = new BooleanQuery();
+            const Occur MUST = Occur.MUST;
+            if (bbox.GetCrossesDateLine())
+            {
+                // use null as a performance trick, since no data will be beyond the world bounds
+                bq.Add(RangeQuery(fieldNameX, null /*-180*/, bbox.GetMaxX()), Occur.SHOULD);
+                bq.Add(RangeQuery(fieldNameX, bbox.GetMinX(), null /*+180*/), Occur.SHOULD);
+                bq.MinimumNumberShouldMatch = 1; // must match at least one of the SHOULD clauses
+            }
+            else
+            {
+                bq.Add(RangeQuery(fieldNameX, bbox.GetMinX(), bbox.GetMaxX()), MUST);
+            }
+            bq.Add(RangeQuery(fieldNameY, bbox.GetMinY(), bbox.GetMaxY()), MUST);
+            return bq;
+        }
+
+        private NumericRangeQuery<Double> RangeQuery(String fieldName, double? min, double? max)
         {
             return NumericRangeQuery.NewDoubleRange(
                 fieldName,
@@ -243,18 +243,18 @@ namespace Lucene.Net.Spatial.Vector
                 true); //inclusive
         }
 
-	    /// <summary>
-		/// Constructs a query to retrieve documents that fully contain the input envelope.
-		/// </summary>
-		/// <param name="bbox"></param>
+        /// <summary>
+        /// Constructs a query to retrieve documents whose indexed point lies outside of (is disjoint from) the input envelope.
+        /// </summary>
+        /// <param name="bbox">The input envelope (bounding box)</param>
         private Query MakeDisjoint(Rectangle bbox)
-	    {
-	        if (bbox.GetCrossesDateLine())
-	            throw new InvalidOperationException("MakeDisjoint doesn't handle dateline cross");
-	        Query qX = RangeQuery(fieldNameX, bbox.GetMinX(), bbox.GetMaxX());
-	        Query qY = RangeQuery(fieldNameY, bbox.GetMinY(), bbox.GetMaxY());
-	        var bq = new BooleanQuery {{qX, Occur.MUST_NOT}, {qY, Occur.MUST_NOT}};
-	        return bq;
-	    }
-	}
+        {
+            if (bbox.GetCrossesDateLine())
+                throw new InvalidOperationException("MakeDisjoint doesn't handle dateline cross");
+            Query qX = RangeQuery(fieldNameX, bbox.GetMinX(), bbox.GetMaxX());
+            Query qY = RangeQuery(fieldNameY, bbox.GetMinY(), bbox.GetMaxY());
+            var bq = new BooleanQuery {{qX, Occur.MUST_NOT}, {qY, Occur.MUST_NOT}};
+            return bq;
+        }
+    }
 }
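
A minimal sketch of the query shape the MakeQuery method above produces for a rectangle, under stated assumptions: the strategy stores each point as two double fields named prefix + "__x"/"__y", and the "loc" prefix, suffixes, and coordinate values here are illustrative, not taken from this diff. The grounded part is the composition: an X numeric range MUST clause, a Y numeric range MUST clause, and a distance-ranking FunctionQuery MUST clause.

    // Hypothetical field names; the two MUST range clauses mirror MakeWithin above.
    var bboxQuery = new BooleanQuery();
    bboxQuery.Add(NumericRangeQuery.NewDoubleRange("loc__x", -74.1, -73.7, true, true), Occur.MUST);
    bboxQuery.Add(NumericRangeQuery.NewDoubleRange("loc__y", 40.5, 40.9, true, true), Occur.MUST);
    // MakeQuery then MUST-combines a query like this with new FunctionQuery(valueSource),
    // where valueSource ranks each matching document by distance from the shape's center.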

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/SpellChecker/Spell/IDictionary.cs
----------------------------------------------------------------------
diff --git a/src/contrib/SpellChecker/Spell/IDictionary.cs b/src/contrib/SpellChecker/Spell/IDictionary.cs
index e6a4de3..53b916f 100644
--- a/src/contrib/SpellChecker/Spell/IDictionary.cs
+++ b/src/contrib/SpellChecker/Spell/IDictionary.cs
@@ -18,7 +18,7 @@
 
 namespace SpellChecker.Net.Search.Spell
 {
-	
+    
     /// <summary> A simple interface representing a Dictionary</summary>
     public interface IDictionary
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/SpellChecker/Spell/LuceneDictionary.cs
----------------------------------------------------------------------
diff --git a/src/contrib/SpellChecker/Spell/LuceneDictionary.cs b/src/contrib/SpellChecker/Spell/LuceneDictionary.cs
index b5539be..3771f24 100644
--- a/src/contrib/SpellChecker/Spell/LuceneDictionary.cs
+++ b/src/contrib/SpellChecker/Spell/LuceneDictionary.cs
@@ -32,7 +32,7 @@ namespace SpellChecker.Net.Search.Spell
     {
         internal IndexReader reader;
         internal System.String field;
-		
+        
         public LuceneDictionary(IndexReader reader, System.String field)
         {
             this.reader = reader;
@@ -53,7 +53,7 @@ namespace SpellChecker.Net.Search.Spell
         {
             return GetEnumerator();
         }
-		
+        
         internal sealed class LuceneIterator : System.Collections.Generic.IEnumerator<string>
         {
             private readonly TermEnum termEnum;
@@ -61,7 +61,7 @@ namespace SpellChecker.Net.Search.Spell
             private bool hasNextCalled;
 
             private readonly LuceneDictionary enclosingInstance;
-			
+            
             public LuceneIterator(LuceneDictionary enclosingInstance)
             {
                 this.enclosingInstance = enclosingInstance;
@@ -93,7 +93,7 @@ namespace SpellChecker.Net.Search.Spell
             {
                 get { return Current; }
             }
-			
+            
             public bool MoveNext()
             {
                 hasNextCalled = true;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/SpellChecker/Spell/PlainTextDictionary.cs
----------------------------------------------------------------------
diff --git a/src/contrib/SpellChecker/Spell/PlainTextDictionary.cs b/src/contrib/SpellChecker/Spell/PlainTextDictionary.cs
index 3d38b50..586450a 100644
--- a/src/contrib/SpellChecker/Spell/PlainTextDictionary.cs
+++ b/src/contrib/SpellChecker/Spell/PlainTextDictionary.cs
@@ -19,8 +19,8 @@ using System;
 
 namespace SpellChecker.Net.Search.Spell
 {
-	
-	
+    
+    
     /// <summary> Dictionary represented by a file text.
     /// <p/>Format allowed: 1 word per line:<br/>
     /// word1<br/>
@@ -46,16 +46,16 @@ namespace SpellChecker.Net.Search.Spell
         {
             return GetEnumerator();
         }
-		
+        
         private System.IO.StreamReader in_Renamed;
         private System.String line;
         private bool has_next_called;
-		
+        
         public PlainTextDictionary(System.IO.FileInfo file)
         {
             in_Renamed = new System.IO.StreamReader(new System.IO.StreamReader(file.FullName, System.Text.Encoding.Default).BaseStream, new System.IO.StreamReader(file.FullName, System.Text.Encoding.Default).CurrentEncoding);
         }
-		
+        
         public PlainTextDictionary(System.IO.Stream dictFile)
         {
             in_Renamed = new System.IO.StreamReader(new System.IO.StreamReader(dictFile, System.Text.Encoding.Default).BaseStream, new System.IO.StreamReader(dictFile, System.Text.Encoding.Default).CurrentEncoding);
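
A minimal sketch of consuming such a one-word-per-line file, assuming the dictionary is enumerable as its GetEnumerator methods above suggest (the file path is illustrative):

    using (var fs = System.IO.File.OpenRead("words.txt"))  // path is an assumption
    {
        var dict = new PlainTextDictionary(fs);
        foreach (string word in dict)   // one word per line, as documented above
            System.Console.WriteLine(word);
    }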

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/SpellChecker/Spell/SpellChecker.cs
----------------------------------------------------------------------
diff --git a/src/contrib/SpellChecker/Spell/SpellChecker.cs b/src/contrib/SpellChecker/Spell/SpellChecker.cs
index 6b43ee5..efa390e 100644
--- a/src/contrib/SpellChecker/Spell/SpellChecker.cs
+++ b/src/contrib/SpellChecker/Spell/SpellChecker.cs
@@ -223,7 +223,7 @@ namespace SpellChecker.Net.Search.Spell
                 String[] grams;
                 String key;
 
-				var alreadySeen = new HashSet<string>();
+                var alreadySeen = new HashSet<string>();
                 for (var ng = GetMin(lengthWord); ng <= GetMax(lengthWord); ng++)
                 {
                     key = "gram" + ng; // form key
@@ -288,8 +288,8 @@ namespace SpellChecker.Net.Search.Spell
                         }
                     }
 
-					if (alreadySeen.Add(sugWord.termString) == false) // we already seen this word, no point returning it twice
-						continue;
+                    if (alreadySeen.Add(sugWord.termString) == false) // we've already seen this word; no point returning it twice
+                        continue;
 
                     sugQueue.InsertWithOverflow(sugWord);
                     if (sugQueue.Size() == numSug)
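
The hunk above shows the suggestion loop deduplicating candidate words with a HashSet before queueing them. A minimal end-to-end sketch, assuming the port keeps the Java IndexDictionary/SuggestSimilar signatures (the directory path, indexReader, and field name are illustrative):

    var spellDir = FSDirectory.Open(new System.IO.DirectoryInfo("spellindex"));
    var spell = new SpellChecker.Net.Search.Spell.SpellChecker(spellDir);
    // Build the n-gram index from the terms of an existing field
    spell.IndexDictionary(new LuceneDictionary(indexReader, "title"));
    // Ask for the five closest candidates for a misspelled word
    string[] suggestions = spell.SuggestSimilar("lucnee", 5);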


[37/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/NorwegianStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/NorwegianStemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/NorwegianStemmer.cs
index 35d97e3..3300237 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/NorwegianStemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/NorwegianStemmer.cs
@@ -23,332 +23,332 @@ namespace SF.Snowball.Ext
 {
 #pragma warning disable 162,164
 
-	/// <summary> Generated class implementing code defined by a snowball script.</summary>
-	public class NorwegianStemmer : SnowballProgram
-	{
-		public NorwegianStemmer()
-		{
-			InitBlock();
-		}
-		private void  InitBlock()
-		{
-			a_0 = new Among[]{new Among("a", - 1, 1, "", this), new Among("e", - 1, 1, "", this), new Among("ede", 1, 1, "", this), new Among("ande", 1, 1, "", this), new Among("ende", 1, 1, "", this), new Among("ane", 1, 1, "", this), new Among("ene", 1, 1, "", this), new Among("hetene", 6, 1, "", this), new Among("erte", 1, 3, "", this), new Among("en", - 1, 1, "", this), new Among("heten", 9, 1, "", this), new Among("ar", - 1, 1, "", this), new Among("er", - 1, 1, "", this), new Among("heter", 12, 1, "", this), new Among("s", - 1, 2, "", this), new Among("as", 14, 1, "", this), new Among("es", 14, 1, "", this), new Among("edes", 16, 1, "", this), new Among("endes", 16, 1, "", this), new Among("enes", 16, 1, "", this), new Among("hetenes", 19, 1, "", this), new Among("ens", 14, 1, "", this), new Among("hetens", 21, 1, "", this), new Among("ers", 14, 1, "", this), new Among("ets", 14, 1, "", this), new Among("et", - 1, 1, "", this), new Among("het", 25, 1, "", this), new Among("ert", - 1, 3, "", this), new Among("ast", - 1, 1, "", this)};
-			a_1 = new Among[]{new Among("dt", - 1, - 1, "", this), new Among("vt", - 1, - 1, "", this)};
-			a_2 = new Among[]{new Among("leg", - 1, 1, "", this), new Among("eleg", 0, 1, "", this), new Among("ig", - 1, 1, "", this), new Among("eig", 2, 1, "", this), new Among("lig", 2, 1, "", this), new Among("elig", 4, 1, "", this), new Among("els", - 1, 1, "", this), new Among("lov", - 1, 1, "", this), new Among("elov", 7, 1, "", this), new Among("slov", 7, 1, "", this), new Among("hetslov", 9, 1, "", this)};
-		}
-		
-		private Among[] a_0;
-		private Among[] a_1;
-		private Among[] a_2;
-		private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (48), (char) (0), (char) (128)};
-		private static readonly char[] g_s_ending = new char[]{(char) (119), (char) (127), (char) (149), (char) (1)};
-		
-		private int I_p1;
-		
-		protected internal virtual void  copy_from(NorwegianStemmer other)
-		{
-			I_p1 = other.I_p1;
-			base.copy_from(other);
-		}
-		
-		private bool r_mark_regions()
-		{
-			int v_1;
-			// (, line 26
-			I_p1 = limit;
-			// goto, line 30
-			while (true)
-			{
-				v_1 = cursor;
-				do 
-				{
-					if (!(in_grouping(g_v, 97, 248)))
-					{
-						goto lab1_brk;
-					}
-					cursor = v_1;
-					goto golab0_brk;
-				}
-				while (false);
+    /// <summary> Generated class implementing code defined by a snowball script.</summary>
+    public class NorwegianStemmer : SnowballProgram
+    {
+        public NorwegianStemmer()
+        {
+            InitBlock();
+        }
+        private void  InitBlock()
+        {
+            a_0 = new Among[]{new Among("a", - 1, 1, "", this), new Among("e", - 1, 1, "", this), new Among("ede", 1, 1, "", this), new Among("ande", 1, 1, "", this), new Among("ende", 1, 1, "", this), new Among("ane", 1, 1, "", this), new Among("ene", 1, 1, "", this), new Among("hetene", 6, 1, "", this), new Among("erte", 1, 3, "", this), new Among("en", - 1, 1, "", this), new Among("heten", 9, 1, "", this), new Among("ar", - 1, 1, "", this), new Among("er", - 1, 1, "", this), new Among("heter", 12, 1, "", this), new Among("s", - 1, 2, "", this), new Among("as", 14, 1, "", this), new Among("es", 14, 1, "", this), new Among("edes", 16, 1, "", this), new Among("endes", 16, 1, "", this), new Among("enes", 16, 1, "", this), new Among("hetenes", 19, 1, "", this), new Among("ens", 14, 1, "", this), new Among("hetens", 21, 1, "", this), new Among("ers", 14, 1, "", this), new Among("ets", 14, 1, "", this), new Among("et", - 1, 1, "", this), new Among("het", 25, 1, "", this), new Among("ert", - 1, 3, "", this), new Among("ast", - 1, 1, "", this)};
+            a_1 = new Among[]{new Among("dt", - 1, - 1, "", this), new Among("vt", - 1, - 1, "", this)};
+            a_2 = new Among[]{new Among("leg", - 1, 1, "", this), new Among("eleg", 0, 1, "", this), new Among("ig", - 1, 1, "", this), new Among("eig", 2, 1, "", this), new Among("lig", 2, 1, "", this), new Among("elig", 4, 1, "", this), new Among("els", - 1, 1, "", this), new Among("lov", - 1, 1, "", this), new Among("elov", 7, 1, "", this), new Among("slov", 7, 1, "", this), new Among("hetslov", 9, 1, "", this)};
+        }
+        
+        private Among[] a_0;
+        private Among[] a_1;
+        private Among[] a_2;
+        private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (48), (char) (0), (char) (128)};
+        private static readonly char[] g_s_ending = new char[]{(char) (119), (char) (127), (char) (149), (char) (1)};
+        
+        private int I_p1;
+        
+        protected internal virtual void  copy_from(NorwegianStemmer other)
+        {
+            I_p1 = other.I_p1;
+            base.copy_from(other);
+        }
+        
+        private bool r_mark_regions()
+        {
+            int v_1;
+            // (, line 26
+            I_p1 = limit;
+            // goto, line 30
+            while (true)
+            {
+                v_1 = cursor;
+                do 
+                {
+                    if (!(in_grouping(g_v, 97, 248)))
+                    {
+                        goto lab1_brk;
+                    }
+                    cursor = v_1;
+                    goto golab0_brk;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = v_1;
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                cursor = v_1;
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab0_brk: ;
-			
-			// gopast, line 30
-			while (true)
-			{
-				do 
-				{
-					if (!(out_grouping(g_v, 97, 248)))
-					{
-						goto lab3_brk;
-					}
-					goto golab2_brk;
-				}
-				while (false);
+            
+            // gopast, line 30
+            while (true)
+            {
+                do 
+                {
+                    if (!(out_grouping(g_v, 97, 248)))
+                    {
+                        goto lab3_brk;
+                    }
+                    goto golab2_brk;
+                }
+                while (false);
 
 lab3_brk: ;
-				
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab2_brk: ;
-			
-			// setmark p1, line 30
-			I_p1 = cursor;
-			// try, line 31
-			do 
-			{
-				// (, line 31
-				if (!(I_p1 < 3))
-				{
-					goto lab4_brk;
-				}
-				I_p1 = 3;
-			}
-			while (false);
+            
+            // setmark p1, line 30
+            I_p1 = cursor;
+            // try, line 31
+            do 
+            {
+                // (, line 31
+                if (!(I_p1 < 3))
+                {
+                    goto lab4_brk;
+                }
+                I_p1 = 3;
+            }
+            while (false);
 
 lab4_brk: ;
-			
-			return true;
-		}
-		
-		private bool r_main_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			// (, line 36
-			// setlimit, line 37
-			v_1 = limit - cursor;
-			// tomark, line 37
-			if (cursor < I_p1)
-			{
-				return false;
-			}
-			cursor = I_p1;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 37
-			// [, line 37
-			ket = cursor;
-			// substring, line 37
-			among_var = find_among_b(a_0, 29);
-			if (among_var == 0)
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			// ], line 37
-			bra = cursor;
-			limit_backward = v_2;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 43
-					// delete, line 43
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 45
-					if (!(in_grouping_b(g_s_ending, 98, 122)))
-					{
-						return false;
-					}
-					// delete, line 45
-					slice_del();
-					break;
-				
-				case 3: 
-					// (, line 47
-					// <-, line 47
-					slice_from("er");
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_consonant_pair()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			// (, line 51
-			// test, line 52
-			v_1 = limit - cursor;
-			// (, line 52
-			// setlimit, line 53
-			v_2 = limit - cursor;
-			// tomark, line 53
-			if (cursor < I_p1)
-			{
-				return false;
-			}
-			cursor = I_p1;
-			v_3 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_2;
-			// (, line 53
-			// [, line 53
-			ket = cursor;
-			// substring, line 53
-			if (find_among_b(a_1, 2) == 0)
-			{
-				limit_backward = v_3;
-				return false;
-			}
-			// ], line 53
-			bra = cursor;
-			limit_backward = v_3;
-			cursor = limit - v_1;
-			// next, line 58
-			if (cursor <= limit_backward)
-			{
-				return false;
-			}
-			cursor--;
-			// ], line 58
-			bra = cursor;
-			// delete, line 58
-			slice_del();
-			return true;
-		}
-		
-		private bool r_other_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			// (, line 61
-			// setlimit, line 62
-			v_1 = limit - cursor;
-			// tomark, line 62
-			if (cursor < I_p1)
-			{
-				return false;
-			}
-			cursor = I_p1;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 62
-			// [, line 62
-			ket = cursor;
-			// substring, line 62
-			among_var = find_among_b(a_2, 11);
-			if (among_var == 0)
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			// ], line 62
-			bra = cursor;
-			limit_backward = v_2;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 66
-					// delete, line 66
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		public override bool Stem()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			// (, line 71
-			// do, line 73
-			v_1 = cursor;
-			do 
-			{
-				// call mark_regions, line 73
-				if (!r_mark_regions())
-				{
-					goto lab0_brk;
-				}
-			}
-			while (false);
+            
+            return true;
+        }
+        
+        private bool r_main_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            // (, line 36
+            // setlimit, line 37
+            v_1 = limit - cursor;
+            // tomark, line 37
+            if (cursor < I_p1)
+            {
+                return false;
+            }
+            cursor = I_p1;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 37
+            // [, line 37
+            ket = cursor;
+            // substring, line 37
+            among_var = find_among_b(a_0, 29);
+            if (among_var == 0)
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            // ], line 37
+            bra = cursor;
+            limit_backward = v_2;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 43
+                    // delete, line 43
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 45
+                    if (!(in_grouping_b(g_s_ending, 98, 122)))
+                    {
+                        return false;
+                    }
+                    // delete, line 45
+                    slice_del();
+                    break;
+                
+                case 3: 
+                    // (, line 47
+                    // <-, line 47
+                    slice_from("er");
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_consonant_pair()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            // (, line 51
+            // test, line 52
+            v_1 = limit - cursor;
+            // (, line 52
+            // setlimit, line 53
+            v_2 = limit - cursor;
+            // tomark, line 53
+            if (cursor < I_p1)
+            {
+                return false;
+            }
+            cursor = I_p1;
+            v_3 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_2;
+            // (, line 53
+            // [, line 53
+            ket = cursor;
+            // substring, line 53
+            if (find_among_b(a_1, 2) == 0)
+            {
+                limit_backward = v_3;
+                return false;
+            }
+            // ], line 53
+            bra = cursor;
+            limit_backward = v_3;
+            cursor = limit - v_1;
+            // next, line 58
+            if (cursor <= limit_backward)
+            {
+                return false;
+            }
+            cursor--;
+            // ], line 58
+            bra = cursor;
+            // delete, line 58
+            slice_del();
+            return true;
+        }
+        
+        private bool r_other_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            // (, line 61
+            // setlimit, line 62
+            v_1 = limit - cursor;
+            // tomark, line 62
+            if (cursor < I_p1)
+            {
+                return false;
+            }
+            cursor = I_p1;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 62
+            // [, line 62
+            ket = cursor;
+            // substring, line 62
+            among_var = find_among_b(a_2, 11);
+            if (among_var == 0)
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            // ], line 62
+            bra = cursor;
+            limit_backward = v_2;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 66
+                    // delete, line 66
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        public override bool Stem()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            // (, line 71
+            // do, line 73
+            v_1 = cursor;
+            do 
+            {
+                // call mark_regions, line 73
+                if (!r_mark_regions())
+                {
+                    goto lab0_brk;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
-		    	
-			cursor = v_1;
-			// backwards, line 74
-			limit_backward = cursor; cursor = limit;
-			// (, line 74
-			// do, line 75
-			v_2 = limit - cursor;
-			do 
-			{
-				// call main_suffix, line 75
-				if (!r_main_suffix())
-				{
-					goto lab1_brk;
-				}
-			}
-			while (false);
+                
+            cursor = v_1;
+            // backwards, line 74
+            limit_backward = cursor; cursor = limit;
+            // (, line 74
+            // do, line 75
+            v_2 = limit - cursor;
+            do 
+            {
+                // call main_suffix, line 75
+                if (!r_main_suffix())
+                {
+                    goto lab1_brk;
+                }
+            }
+            while (false);
 
 lab1_brk: ;
-			
-			cursor = limit - v_2;
-			// do, line 76
-			v_3 = limit - cursor;
-			do 
-			{
-				// call consonant_pair, line 76
-				if (!r_consonant_pair())
-				{
-					goto lab2_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_2;
+            // do, line 76
+            v_3 = limit - cursor;
+            do 
+            {
+                // call consonant_pair, line 76
+                if (!r_consonant_pair())
+                {
+                    goto lab2_brk;
+                }
+            }
+            while (false);
 
 lab2_brk: ;
-			
-			cursor = limit - v_3;
-			// do, line 77
-			v_4 = limit - cursor;
-			do 
-			{
-				// call other_suffix, line 77
-				if (!r_other_suffix())
-				{
-					goto lab3_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_3;
+            // do, line 77
+            v_4 = limit - cursor;
+            do 
+            {
+                // call other_suffix, line 77
+                if (!r_other_suffix())
+                {
+                    goto lab3_brk;
+                }
+            }
+            while (false);
 
 lab3_brk: ;
-			
-			cursor = limit - v_4;
-			cursor = limit_backward; return true;
-		}
-	}
+            
+            cursor = limit - v_4;
+            cursor = limit_backward; return true;
+        }
+    }
 }
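
A minimal sketch of driving one of these generated stemmers directly, assuming SnowballProgram exposes the SetCurrent/GetCurrent buffer accessors of the Java original (the sample word is illustrative):

    var stemmer = new NorwegianStemmer();
    stemmer.SetCurrent("bilene");   // load the word into the stemmer's buffer
    if (stemmer.Stem())             // runs mark_regions and the suffix rules above
        System.Console.WriteLine(stemmer.GetCurrent());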

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/PorterStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/PorterStemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/PorterStemmer.cs
index ca04689..5cdcb11 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/PorterStemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/PorterStemmer.cs
@@ -24,1063 +24,1063 @@ namespace SF.Snowball.Ext
 #pragma warning disable 162,164
 
     /// <summary> Generated class implementing code defined by a snowball script.</summary>
-	public class PorterStemmer : SnowballProgram
-	{
-		public PorterStemmer()
-		{
-			InitBlock();
-		}
-		private void  InitBlock()
-		{
-			a_0 = new Among[]{new Among("s", - 1, 3, "", this), new Among("ies", 0, 2, "", this), new Among("sses", 0, 1, "", this), new Among("ss", 0, - 1, "", this)};
-			a_1 = new Among[]{new Among("", - 1, 3, "", this), new Among("bb", 0, 2, "", this), new Among("dd", 0, 2, "", this), new Among("ff", 0, 2, "", this), new Among("gg", 0, 2, "", this), new Among("bl", 0, 1, "", this), new Among("mm", 0, 2, "", this), new Among("nn", 0, 2, "", this), new Among("pp", 0, 2, "", this), new Among("rr", 0, 2, "", this), new Among("at", 0, 1, "", this), new Among("tt", 0, 2, "", this), new Among("iz", 0, 1, "", this)};
-			a_2 = new Among[]{new Among("ed", - 1, 2, "", this), new Among("eed", 0, 1, "", this), new Among("ing", - 1, 2, "", this)};
-			a_3 = new Among[]{new Among("anci", - 1, 3, "", this), new Among("enci", - 1, 2, "", this), new Among("abli", - 1, 4, "", this), new Among("eli", - 1, 6, "", this), new Among("alli", - 1, 9, "", this), new Among("ousli", - 1, 12, "", this), new Among("entli", - 1, 5, "", this), new Among("aliti", - 1, 10, "", this), new Among("biliti", - 1, 14, "", this), new Among("iviti", - 1, 13, "", this), new Among("tional", - 1, 1, "", this), new Among("ational", 10, 8, "", this), new Among("alism", - 1, 10, "", this), new Among("ation", - 1, 8, "", this), new Among("ization", 13, 7, "", this), new Among("izer", - 1, 7, "", this), new Among("ator", - 1, 8, "", this), new Among("iveness", - 1, 13, "", this), new Among("fulness", - 1, 11, "", this), new Among("ousness", - 1, 12, "", this)};
-			a_4 = new Among[]{new Among("icate", - 1, 2, "", this), new Among("ative", - 1, 3, "", this), new Among("alize", - 1, 1, "", this), new Among("iciti", - 1, 2, "", this), new Among("ical", - 1, 2, "", this), new Among("ful", - 1, 3, "", this), new Among("ness", - 1, 3, "", this)};
-			a_5 = new Among[]{new Among("ic", - 1, 1, "", this), new Among("ance", - 1, 1, "", this), new Among("ence", - 1, 1, "", this), new Among("able", - 1, 1, "", this), new Among("ible", - 1, 1, "", this), new Among("ate", - 1, 1, "", this), new Among("ive", - 1, 1, "", this), new Among("ize", - 1, 1, "", this), new Among("iti", - 1, 1, "", this), new Among("al", - 1, 1, "", this), new Among("ism", - 1, 1, "", this), new Among("ion", - 1, 2, "", this), new Among("er", - 1, 1, "", this), new Among("ous", - 1, 1, "", this), new Among("ant", - 1, 1, "", this), new Among("ent", - 1, 1, "", this), new Among("ment", 15, 1, "", this), new Among("ement", 16, 1, "", this), new Among("ou", - 1, 1, "", this)};
-		}
-		
-		private Among[] a_0;
-		private Among[] a_1;
-		private Among[] a_2;
-		private Among[] a_3;
-		private Among[] a_4;
-		private Among[] a_5;
-		private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1)};
-		private static readonly char[] g_v_WXY = new char[]{(char) (1), (char) (17), (char) (65), (char) (208), (char) (1)};
-		
-		private bool B_Y_found;
-		private int I_p2;
-		private int I_p1;
-		
-		protected internal virtual void  copy_from(PorterStemmer other)
-		{
-			B_Y_found = other.B_Y_found;
-			I_p2 = other.I_p2;
-			I_p1 = other.I_p1;
-			base.copy_from(other);
-		}
-		
-		private bool r_shortv()
-		{
-			// (, line 19
-			if (!(out_grouping_b(g_v_WXY, 89, 121)))
-			{
-				return false;
-			}
-			if (!(in_grouping_b(g_v, 97, 121)))
-			{
-				return false;
-			}
-			if (!(out_grouping_b(g_v, 97, 121)))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_R1()
-		{
-			if (!(I_p1 <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_R2()
-		{
-			if (!(I_p2 <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_Step_1a()
-		{
-			int among_var;
-			// (, line 24
-			// [, line 25
-			ket = cursor;
-			// substring, line 25
-			among_var = find_among_b(a_0, 4);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 25
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 26
-					// <-, line 26
-					slice_from("ss");
-					break;
-				
-				case 2: 
-					// (, line 27
-					// <-, line 27
-					slice_from("i");
-					break;
-				
-				case 3: 
-					// (, line 29
-					// delete, line 29
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_Step_1b()
-		{
-			int among_var;
-			int v_1;
-			int v_3;
-			int v_4;
-			// (, line 33
-			// [, line 34
-			ket = cursor;
-			// substring, line 34
-			among_var = find_among_b(a_2, 3);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 34
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 35
-					// call R1, line 35
-					if (!r_R1())
-					{
-						return false;
-					}
-					// <-, line 35
-					slice_from("ee");
-					break;
-				
-				case 2: 
-					// (, line 37
-					// test, line 38
-					v_1 = limit - cursor;
-					// gopast, line 38
-					while (true)
-					{
-						do 
-						{
-							if (!(in_grouping_b(g_v, 97, 121)))
-							{
-								goto lab1_brk;
-							}
-							goto golab0_brk;
-						}
-						while (false);
+    public class PorterStemmer : SnowballProgram
+    {
+        public PorterStemmer()
+        {
+            InitBlock();
+        }
+        private void  InitBlock()
+        {
+            a_0 = new Among[]{new Among("s", - 1, 3, "", this), new Among("ies", 0, 2, "", this), new Among("sses", 0, 1, "", this), new Among("ss", 0, - 1, "", this)};
+            a_1 = new Among[]{new Among("", - 1, 3, "", this), new Among("bb", 0, 2, "", this), new Among("dd", 0, 2, "", this), new Among("ff", 0, 2, "", this), new Among("gg", 0, 2, "", this), new Among("bl", 0, 1, "", this), new Among("mm", 0, 2, "", this), new Among("nn", 0, 2, "", this), new Among("pp", 0, 2, "", this), new Among("rr", 0, 2, "", this), new Among("at", 0, 1, "", this), new Among("tt", 0, 2, "", this), new Among("iz", 0, 1, "", this)};
+            a_2 = new Among[]{new Among("ed", - 1, 2, "", this), new Among("eed", 0, 1, "", this), new Among("ing", - 1, 2, "", this)};
+            a_3 = new Among[]{new Among("anci", - 1, 3, "", this), new Among("enci", - 1, 2, "", this), new Among("abli", - 1, 4, "", this), new Among("eli", - 1, 6, "", this), new Among("alli", - 1, 9, "", this), new Among("ousli", - 1, 12, "", this), new Among("entli", - 1, 5, "", this), new Among("aliti", - 1, 10, "", this), new Among("biliti", - 1, 14, "", this), new Among("iviti", - 1, 13, "", this), new Among("tional", - 1, 1, "", this), new Among("ational", 10, 8, "", this), new Among("alism", - 1, 10, "", this), new Among("ation", - 1, 8, "", this), new Among("ization", 13, 7, "", this), new Among("izer", - 1, 7, "", this), new Among("ator", - 1, 8, "", this), new Among("iveness", - 1, 13, "", this), new Among("fulness", - 1, 11, "", this), new Among("ousness", - 1, 12, "", this)};
+            a_4 = new Among[]{new Among("icate", - 1, 2, "", this), new Among("ative", - 1, 3, "", this), new Among("alize", - 1, 1, "", this), new Among("iciti", - 1, 2, "", this), new Among("ical", - 1, 2, "", this), new Among("ful", - 1, 3, "", this), new Among("ness", - 1, 3, "", this)};
+            a_5 = new Among[]{new Among("ic", - 1, 1, "", this), new Among("ance", - 1, 1, "", this), new Among("ence", - 1, 1, "", this), new Among("able", - 1, 1, "", this), new Among("ible", - 1, 1, "", this), new Among("ate", - 1, 1, "", this), new Among("ive", - 1, 1, "", this), new Among("ize", - 1, 1, "", this), new Among("iti", - 1, 1, "", this), new Among("al", - 1, 1, "", this), new Among("ism", - 1, 1, "", this), new Among("ion", - 1, 2, "", this), new Among("er", - 1, 1, "", this), new Among("ous", - 1, 1, "", this), new Among("ant", - 1, 1, "", this), new Among("ent", - 1, 1, "", this), new Among("ment", 15, 1, "", this), new Among("ement", 16, 1, "", this), new Among("ou", - 1, 1, "", this)};
+        }
+        
+        private Among[] a_0;
+        private Among[] a_1;
+        private Among[] a_2;
+        private Among[] a_3;
+        private Among[] a_4;
+        private Among[] a_5;
+        private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1)};
+        private static readonly char[] g_v_WXY = new char[]{(char) (1), (char) (17), (char) (65), (char) (208), (char) (1)};
+        
+        private bool B_Y_found;
+        private int I_p2;
+        private int I_p1;
+        
+        protected internal virtual void  copy_from(PorterStemmer other)
+        {
+            B_Y_found = other.B_Y_found;
+            I_p2 = other.I_p2;
+            I_p1 = other.I_p1;
+            base.copy_from(other);
+        }
+        
+        private bool r_shortv()
+        {
+            // (, line 19
+            if (!(out_grouping_b(g_v_WXY, 89, 121)))
+            {
+                return false;
+            }
+            if (!(in_grouping_b(g_v, 97, 121)))
+            {
+                return false;
+            }
+            if (!(out_grouping_b(g_v, 97, 121)))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_R1()
+        {
+            if (!(I_p1 <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_R2()
+        {
+            if (!(I_p2 <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_Step_1a()
+        {
+            int among_var;
+            // (, line 24
+            // [, line 25
+            ket = cursor;
+            // substring, line 25
+            among_var = find_among_b(a_0, 4);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 25
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 26
+                    // <-, line 26
+                    slice_from("ss");
+                    break;
+                
+                case 2: 
+                    // (, line 27
+                    // <-, line 27
+                    slice_from("i");
+                    break;
+                
+                case 3: 
+                    // (, line 29
+                    // delete, line 29
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_Step_1b()
+        {
+            int among_var;
+            int v_1;
+            int v_3;
+            int v_4;
+            // (, line 33
+            // [, line 34
+            ket = cursor;
+            // substring, line 34
+            among_var = find_among_b(a_2, 3);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 34
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 35
+                    // call R1, line 35
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // <-, line 35
+                    slice_from("ee");
+                    break;
+                
+                case 2: 
+                    // (, line 37
+                    // test, line 38
+                    v_1 = limit - cursor;
+                    // gopast, line 38
+                    while (true)
+                    {
+                        do 
+                        {
+                            if (!(in_grouping_b(g_v, 97, 121)))
+                            {
+                                goto lab1_brk;
+                            }
+                            goto golab0_brk;
+                        }
+                        while (false);
 
 lab1_brk: ;
-						
-						if (cursor <= limit_backward)
-						{
-							return false;
-						}
-						cursor--;
-					}
+                        
+                        if (cursor <= limit_backward)
+                        {
+                            return false;
+                        }
+                        cursor--;
+                    }
 
 golab0_brk: ;
-					
-					cursor = limit - v_1;
-					// delete, line 38
-					slice_del();
-					// test, line 39
-					v_3 = limit - cursor;
-					// substring, line 39
-					among_var = find_among_b(a_1, 13);
-					if (among_var == 0)
-					{
-						return false;
-					}
-					cursor = limit - v_3;
-					switch (among_var)
-					{
-						
-						case 0: 
-							return false;
-						
-						case 1: 
-							// (, line 41
-							// <+, line 41
-							{
-								int c = cursor;
-								insert(cursor, cursor, "e");
-								cursor = c;
-							}
-							break;
-						
-						case 2: 
-							// (, line 44
-							// [, line 44
-							ket = cursor;
-							// next, line 44
-							if (cursor <= limit_backward)
-							{
-								return false;
-							}
-							cursor--;
-							// ], line 44
-							bra = cursor;
-							// delete, line 44
-							slice_del();
-							break;
-						
-						case 3: 
-							// (, line 45
-							// atmark, line 45
-							if (cursor != I_p1)
-							{
-								return false;
-							}
-							// test, line 45
-							v_4 = limit - cursor;
-							// call shortv, line 45
-							if (!r_shortv())
-							{
-								return false;
-							}
-							cursor = limit - v_4;
-							// <+, line 45
-							{
-								int c = cursor;
-								insert(cursor, cursor, "e");
-								cursor = c;
-							}
-							break;
-						}
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_Step_1c()
-		{
-			int v_1;
-			// (, line 51
-			// [, line 52
-			ket = cursor;
-			// or, line 52
-			do 
-			{
-				v_1 = limit - cursor;
-				do 
-				{
-					// literal, line 52
-					if (!(eq_s_b(1, "y")))
-					{
-						goto lab2_brk;
-					}
-					goto lab0_brk;
-				}
-				while (false);
+                    
+                    cursor = limit - v_1;
+                    // delete, line 38
+                    slice_del();
+                    // test, line 39
+                    v_3 = limit - cursor;
+                    // substring, line 39
+                    among_var = find_among_b(a_1, 13);
+                    if (among_var == 0)
+                    {
+                        return false;
+                    }
+                    cursor = limit - v_3;
+                    switch (among_var)
+                    {
+                        
+                        case 0: 
+                            return false;
+                        
+                        case 1: 
+                            // (, line 41
+                            // <+, line 41
+                            {
+                                int c = cursor;
+                                insert(cursor, cursor, "e");
+                                cursor = c;
+                            }
+                            break;
+                        
+                        case 2: 
+                            // (, line 44
+                            // [, line 44
+                            ket = cursor;
+                            // next, line 44
+                            if (cursor <= limit_backward)
+                            {
+                                return false;
+                            }
+                            cursor--;
+                            // ], line 44
+                            bra = cursor;
+                            // delete, line 44
+                            slice_del();
+                            break;
+                        
+                        case 3: 
+                            // (, line 45
+                            // atmark, line 45
+                            if (cursor != I_p1)
+                            {
+                                return false;
+                            }
+                            // test, line 45
+                            v_4 = limit - cursor;
+                            // call shortv, line 45
+                            if (!r_shortv())
+                            {
+                                return false;
+                            }
+                            cursor = limit - v_4;
+                            // <+, line 45
+                            {
+                                int c = cursor;
+                                insert(cursor, cursor, "e");
+                                cursor = c;
+                            }
+                            break;
+                        }
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_Step_1c()
+        {
+            int v_1;
+            // (, line 51
+            // [, line 52
+            ket = cursor;
+            // or, line 52
+            do 
+            {
+                v_1 = limit - cursor;
+                do 
+                {
+                    // literal, line 52
+                    if (!(eq_s_b(1, "y")))
+                    {
+                        goto lab2_brk;
+                    }
+                    goto lab0_brk;
+                }
+                while (false);
 
 lab2_brk: ;
-				
-				cursor = limit - v_1;
-				// literal, line 52
-				if (!(eq_s_b(1, "Y")))
-				{
-					return false;
-				}
-			}
-			while (false);
+                
+                cursor = limit - v_1;
+                // literal, line 52
+                if (!(eq_s_b(1, "Y")))
+                {
+                    return false;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			// ], line 52
-			bra = cursor;
-			// gopast, line 53
-			while (true)
-			{
-				do 
-				{
-					if (!(in_grouping_b(g_v, 97, 121)))
-					{
-						goto lab3_brk;
-					}
-					goto golab2_brk;
-				}
-				while (false);
+            
+            // ], line 52
+            bra = cursor;
+            // gopast, line 53
+            while (true)
+            {
+                do 
+                {
+                    if (!(in_grouping_b(g_v, 97, 121)))
+                    {
+                        goto lab3_brk;
+                    }
+                    goto golab2_brk;
+                }
+                while (false);
 
 lab3_brk: ;
-				
-				if (cursor <= limit_backward)
-				{
-					return false;
-				}
-				cursor--;
-			}
+                
+                if (cursor <= limit_backward)
+                {
+                    return false;
+                }
+                cursor--;
+            }
 
 golab2_brk: ;
-			
-			// <-, line 54
-			slice_from("i");
-			return true;
-		}
-		
-		private bool r_Step_2()
-		{
-			int among_var;
-			// (, line 57
-			// [, line 58
-			ket = cursor;
-			// substring, line 58
-			among_var = find_among_b(a_3, 20);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 58
-			bra = cursor;
-			// call R1, line 58
-			if (!r_R1())
-			{
-				return false;
-			}
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 59
-					// <-, line 59
-					slice_from("tion");
-					break;
-				
-				case 2: 
-					// (, line 60
-					// <-, line 60
-					slice_from("ence");
-					break;
-				
-				case 3: 
-					// (, line 61
-					// <-, line 61
-					slice_from("ance");
-					break;
-				
-				case 4: 
-					// (, line 62
-					// <-, line 62
-					slice_from("able");
-					break;
-				
-				case 5: 
-					// (, line 63
-					// <-, line 63
-					slice_from("ent");
-					break;
-				
-				case 6: 
-					// (, line 64
-					// <-, line 64
-					slice_from("e");
-					break;
-				
-				case 7: 
-					// (, line 66
-					// <-, line 66
-					slice_from("ize");
-					break;
-				
-				case 8: 
-					// (, line 68
-					// <-, line 68
-					slice_from("ate");
-					break;
-				
-				case 9: 
-					// (, line 69
-					// <-, line 69
-					slice_from("al");
-					break;
-				
-				case 10: 
-					// (, line 71
-					// <-, line 71
-					slice_from("al");
-					break;
-				
-				case 11: 
-					// (, line 72
-					// <-, line 72
-					slice_from("ful");
-					break;
-				
-				case 12: 
-					// (, line 74
-					// <-, line 74
-					slice_from("ous");
-					break;
-				
-				case 13: 
-					// (, line 76
-					// <-, line 76
-					slice_from("ive");
-					break;
-				
-				case 14: 
-					// (, line 77
-					// <-, line 77
-					slice_from("ble");
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_Step_3()
-		{
-			int among_var;
-			// (, line 81
-			// [, line 82
-			ket = cursor;
-			// substring, line 82
-			among_var = find_among_b(a_4, 7);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 82
-			bra = cursor;
-			// call R1, line 82
-			if (!r_R1())
-			{
-				return false;
-			}
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 83
-					// <-, line 83
-					slice_from("al");
-					break;
-				
-				case 2: 
-					// (, line 85
-					// <-, line 85
-					slice_from("ic");
-					break;
-				
-				case 3: 
-					// (, line 87
-					// delete, line 87
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_Step_4()
-		{
-			int among_var;
-			int v_1;
-			// (, line 91
-			// [, line 92
-			ket = cursor;
-			// substring, line 92
-			among_var = find_among_b(a_5, 19);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 92
-			bra = cursor;
-			// call R2, line 92
-			if (!r_R2())
-			{
-				return false;
-			}
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 95
-					// delete, line 95
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 96
-					// or, line 96
+            
+            // <-, line 54
+            slice_from("i");
+            return true;
+        }
+        
+        private bool r_Step_2()
+        {
+            int among_var;
+            // (, line 57
+            // [, line 58
+            ket = cursor;
+            // substring, line 58
+            among_var = find_among_b(a_3, 20);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 58
+            bra = cursor;
+            // call R1, line 58
+            if (!r_R1())
+            {
+                return false;
+            }
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 59
+                    // <-, line 59
+                    slice_from("tion");
+                    break;
+                
+                case 2: 
+                    // (, line 60
+                    // <-, line 60
+                    slice_from("ence");
+                    break;
+                
+                case 3: 
+                    // (, line 61
+                    // <-, line 61
+                    slice_from("ance");
+                    break;
+                
+                case 4: 
+                    // (, line 62
+                    // <-, line 62
+                    slice_from("able");
+                    break;
+                
+                case 5: 
+                    // (, line 63
+                    // <-, line 63
+                    slice_from("ent");
+                    break;
+                
+                case 6: 
+                    // (, line 64
+                    // <-, line 64
+                    slice_from("e");
+                    break;
+                
+                case 7: 
+                    // (, line 66
+                    // <-, line 66
+                    slice_from("ize");
+                    break;
+                
+                case 8: 
+                    // (, line 68
+                    // <-, line 68
+                    slice_from("ate");
+                    break;
+                
+                case 9: 
+                    // (, line 69
+                    // <-, line 69
+                    slice_from("al");
+                    break;
+                
+                case 10: 
+                    // (, line 71
+                    // <-, line 71
+                    slice_from("al");
+                    break;
+                
+                case 11: 
+                    // (, line 72
+                    // <-, line 72
+                    slice_from("ful");
+                    break;
+                
+                case 12: 
+                    // (, line 74
+                    // <-, line 74
+                    slice_from("ous");
+                    break;
+                
+                case 13: 
+                    // (, line 76
+                    // <-, line 76
+                    slice_from("ive");
+                    break;
+                
+                case 14: 
+                    // (, line 77
+                    // <-, line 77
+                    slice_from("ble");
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_Step_3()
+        {
+            int among_var;
+            // (, line 81
+            // [, line 82
+            ket = cursor;
+            // substring, line 82
+            among_var = find_among_b(a_4, 7);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 82
+            bra = cursor;
+            // call R1, line 82
+            if (!r_R1())
+            {
+                return false;
+            }
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 83
+                    // <-, line 83
+                    slice_from("al");
+                    break;
+                
+                case 2: 
+                    // (, line 85
+                    // <-, line 85
+                    slice_from("ic");
+                    break;
+                
+                case 3: 
+                    // (, line 87
+                    // delete, line 87
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_Step_4()
+        {
+            int among_var;
+            int v_1;
+            // (, line 91
+            // [, line 92
+            ket = cursor;
+            // substring, line 92
+            among_var = find_among_b(a_5, 19);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 92
+            bra = cursor;
+            // call R2, line 92
+            if (!r_R2())
+            {
+                return false;
+            }
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 95
+                    // delete, line 95
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 96
+                    // or, line 96
 lab2: 
-					do 
-					{
-						v_1 = limit - cursor;
-						do 
-						{
-							// literal, line 96
-							if (!(eq_s_b(1, "s")))
-							{
-								goto lab2_brk;
-							}
-							goto lab2_brk;
-						}
-						while (false);
+                    do 
+                    {
+                        v_1 = limit - cursor;
+                        do 
+                        {
+                            // literal, line 96
+                            if (!(eq_s_b(1, "s")))
+                            {
+                                goto lab2_brk;
+                            }
+                            goto lab2_brk;
+                        }
+                        while (false);
 
 lab2_brk: ;
-						
-						cursor = limit - v_1;
-						// literal, line 96
-						if (!(eq_s_b(1, "t")))
-						{
-							return false;
-						}
-					}
-					while (false);
-					// delete, line 96
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_Step_5a()
-		{
-			int v_1;
-			int v_2;
-			// (, line 100
-			// [, line 101
-			ket = cursor;
-			// literal, line 101
-			if (!(eq_s_b(1, "e")))
-			{
-				return false;
-			}
-			// ], line 101
-			bra = cursor;
-			// or, line 102
-			do 
-			{
-				v_1 = limit - cursor;
-				do 
-				{
-					// call R2, line 102
-					if (!r_R2())
-					{
-						goto lab1_brk;
-					}
-					goto lab0_brk;
-				}
-				while (false);
+                        
+                        cursor = limit - v_1;
+                        // literal, line 96
+                        if (!(eq_s_b(1, "t")))
+                        {
+                            return false;
+                        }
+                    }
+                    while (false);
+                    // delete, line 96
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_Step_5a()
+        {
+            int v_1;
+            int v_2;
+            // (, line 100
+            // [, line 101
+            ket = cursor;
+            // literal, line 101
+            if (!(eq_s_b(1, "e")))
+            {
+                return false;
+            }
+            // ], line 101
+            bra = cursor;
+            // or, line 102
+            do 
+            {
+                v_1 = limit - cursor;
+                do 
+                {
+                    // call R2, line 102
+                    if (!r_R2())
+                    {
+                        goto lab1_brk;
+                    }
+                    goto lab0_brk;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = limit - v_1;
-				// (, line 102
-				// call R1, line 102
-				if (!r_R1())
-				{
-					return false;
-				}
-				// not, line 102
-				{
-					v_2 = limit - cursor;
-					do 
-					{
-						// call shortv, line 102
-						if (!r_shortv())
-						{
-							goto lab2_brk;
-						}
-						return false;
-					}
-					while (false);
+                
+                cursor = limit - v_1;
+                // (, line 102
+                // call R1, line 102
+                if (!r_R1())
+                {
+                    return false;
+                }
+                // not, line 102
+                {
+                    v_2 = limit - cursor;
+                    do 
+                    {
+                        // call shortv, line 102
+                        if (!r_shortv())
+                        {
+                            goto lab2_brk;
+                        }
+                        return false;
+                    }
+                    while (false);
 
 lab2_brk: ;
-					
-					cursor = limit - v_2;
-				}
-			}
-			while (false);
+                    
+                    cursor = limit - v_2;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
 
-			// delete, line 103
-			slice_del();
-			return true;
-		}
-		
-		private bool r_Step_5b()
-		{
-			// (, line 106
-			// [, line 107
-			ket = cursor;
-			// literal, line 107
-			if (!(eq_s_b(1, "l")))
-			{
-				return false;
-			}
-			// ], line 107
-			bra = cursor;
-			// call R2, line 108
-			if (!r_R2())
-			{
-				return false;
-			}
-			// literal, line 108
-			if (!(eq_s_b(1, "l")))
-			{
-				return false;
-			}
-			// delete, line 109
-			slice_del();
-			return true;
-		}
-		
-		public override bool Stem()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			int v_10;
-			int v_11;
-			int v_12;
-			int v_13;
-			int v_14;
-			int v_15;
-			int v_16;
-			int v_17;
-			int v_18;
-			int v_19;
-			int v_20;
-			// (, line 113
-			// unset Y_found, line 115
-			B_Y_found = false;
-			// do, line 116
-			v_1 = cursor;
-			do 
-			{
-				// (, line 116
-				// [, line 116
-				bra = cursor;
-				// literal, line 116
-				if (!(eq_s(1, "y")))
-				{
-					goto lab0_brk;
-				}
-				// ], line 116
-				ket = cursor;
-				// <-, line 116
-				slice_from("Y");
-				// set Y_found, line 116
-				B_Y_found = true;
-			}
-			while (false);
+            // delete, line 103
+            slice_del();
+            return true;
+        }
+        
+        private bool r_Step_5b()
+        {
+            // (, line 106
+            // [, line 107
+            ket = cursor;
+            // literal, line 107
+            if (!(eq_s_b(1, "l")))
+            {
+                return false;
+            }
+            // ], line 107
+            bra = cursor;
+            // call R2, line 108
+            if (!r_R2())
+            {
+                return false;
+            }
+            // literal, line 108
+            if (!(eq_s_b(1, "l")))
+            {
+                return false;
+            }
+            // delete, line 109
+            slice_del();
+            return true;
+        }
+        
+        public override bool Stem()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            int v_10;
+            int v_11;
+            int v_12;
+            int v_13;
+            int v_14;
+            int v_15;
+            int v_16;
+            int v_17;
+            int v_18;
+            int v_19;
+            int v_20;
+            // (, line 113
+            // unset Y_found, line 115
+            B_Y_found = false;
+            // do, line 116
+            v_1 = cursor;
+            do 
+            {
+                // (, line 116
+                // [, line 116
+                bra = cursor;
+                // literal, line 116
+                if (!(eq_s(1, "y")))
+                {
+                    goto lab0_brk;
+                }
+                // ], line 116
+                ket = cursor;
+                // <-, line 116
+                slice_from("Y");
+                // set Y_found, line 116
+                B_Y_found = true;
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = v_1;
-			// do, line 117
-			v_2 = cursor;
-			do 
-			{
-				// repeat, line 117
-				while (true)
-				{
-					v_3 = cursor;
-					do 
-					{
-						// (, line 117
-						// goto, line 117
-						while (true)
-						{
-							v_4 = cursor;
-							do 
-							{
-								// (, line 117
-								if (!(in_grouping(g_v, 97, 121)))
-								{
-									goto lab5_brk;
-								}
-								// [, line 117
-								bra = cursor;
-								// literal, line 117
-								if (!(eq_s(1, "y")))
-								{
-									goto lab5_brk;
-								}
-								// ], line 117
-								ket = cursor;
-								cursor = v_4;
-								goto golab4_brk;
-							}
-							while (false);
+            
+            cursor = v_1;
+            // do, line 117
+            v_2 = cursor;
+            do 
+            {
+                // repeat, line 117
+                while (true)
+                {
+                    v_3 = cursor;
+                    do 
+                    {
+                        // (, line 117
+                        // goto, line 117
+                        while (true)
+                        {
+                            v_4 = cursor;
+                            do 
+                            {
+                                // (, line 117
+                                if (!(in_grouping(g_v, 97, 121)))
+                                {
+                                    goto lab5_brk;
+                                }
+                                // [, line 117
+                                bra = cursor;
+                                // literal, line 117
+                                if (!(eq_s(1, "y")))
+                                {
+                                    goto lab5_brk;
+                                }
+                                // ], line 117
+                                ket = cursor;
+                                cursor = v_4;
+                                goto golab4_brk;
+                            }
+                            while (false);
 
 lab5_brk: ;
-							
-							cursor = v_4;
-							if (cursor >= limit)
-							{
-								goto lab3_brk;
-							}
-							cursor++;
-						}
+                            
+                            cursor = v_4;
+                            if (cursor >= limit)
+                            {
+                                goto lab3_brk;
+                            }
+                            cursor++;
+                        }
 
 golab4_brk: ;
-						
-						// <-, line 117
-						slice_from("Y");
-						// set Y_found, line 117
-						B_Y_found = true;
-						goto replab2;
-					}
-					while (false);
+                        
+                        // <-, line 117
+                        slice_from("Y");
+                        // set Y_found, line 117
+                        B_Y_found = true;
+                        goto replab2;
+                    }
+                    while (false);
 
 lab3_brk: ;
-					
-					cursor = v_3;
-					goto replab2_brk;
+                    
+                    cursor = v_3;
+                    goto replab2_brk;
 
 replab2: ;
-				}
+                }
 
 replab2_brk: ;
-				
-			}
-			while (false);
+                
+            }
+            while (false);
 
 lab1_brk: ;
 
-			cursor = v_2;
-			I_p1 = limit;
-			I_p2 = limit;
-			// do, line 121
-			v_5 = cursor;
-			do 
-			{
-				// (, line 121
-				// gopast, line 122
-				while (true)
-				{
-					do 
-					{
-						if (!(in_grouping(g_v, 97, 121)))
-						{
-							goto lab8_brk;
-						}
-						goto golab7_brk;
-					}
-					while (false);
+            cursor = v_2;
+            I_p1 = limit;
+            I_p2 = limit;
+            // do, line 121
+            v_5 = cursor;
+            do 
+            {
+                // (, line 121
+                // gopast, line 122
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(in_grouping(g_v, 97, 121)))
+                        {
+                            goto lab8_brk;
+                        }
+                        goto golab7_brk;
+                    }
+                    while (false);
 
 lab8_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab6_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab6_brk;
+                    }
+                    cursor++;
+                }
 
 golab7_brk: ;
-				
-				// gopast, line 122
-				while (true)
-				{
-					do 
-					{
-						if (!(out_grouping(g_v, 97, 121)))
-						{
-							goto lab10_brk;
-						}
-						goto golab9_brk;
-					}
-					while (false);
+                
+                // gopast, line 122
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(out_grouping(g_v, 97, 121)))
+                        {
+                            goto lab10_brk;
+                        }
+                        goto golab9_brk;
+                    }
+                    while (false);
 
 lab10_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab6_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab6_brk;
+                    }
+                    cursor++;
+                }
 
 golab9_brk: ;
-				
-				// setmark p1, line 122
-				I_p1 = cursor;
-				// gopast, line 123
-				while (true)
-				{
-					do 
-					{
-						if (!(in_grouping(g_v, 97, 121)))
-						{
-							goto lab12_brk;
-						}
-						goto golab11_brk;
-					}
-					while (false);
+                
+                // setmark p1, line 122
+                I_p1 = cursor;
+                // gopast, line 123
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(in_grouping(g_v, 97, 121)))
+                        {
+                            goto lab12_brk;
+                        }
+                        goto golab11_brk;
+                    }
+                    while (false);
 
 lab12_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab6_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab6_brk;
+                    }
+                    cursor++;
+                }
 
 golab11_brk: ;
-				
-				// gopast, line 123
-				while (true)
-				{
-					do 
-					{
-						if (!(out_grouping(g_v, 97, 121)))
-						{
-							goto lab14_brk;
-						}
-						goto golab13_brk;
-					}
-					while (false);
+                
+                // gopast, line 123
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(out_grouping(g_v, 97, 121)))
+                        {
+                            goto lab14_brk;
+                        }
+                        goto golab13_brk;
+                    }
+                    while (false);
 
 lab14_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab6_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab6_brk;
+                    }
+                    cursor++;
+                }
 
 golab13_brk: ;
-				
-				// setmark p2, line 123
-				I_p2 = cursor;
-			}
-			while (false);
+                
+                // setmark p2, line 123
+                I_p2 = cursor;
+            }
+            while (false);
 
 lab6_brk: ;
-			
-			cursor = v_5;
-			// backwards, line 126
-			limit_backward = cursor; cursor = limit;
-			// (, line 126
-			// do, line 127
-			v_10 = limit - cursor;
-			do 
-			{
-				// call Step_1a, line 127
-				if (!r_Step_1a())
-				{
-					goto lab15_brk;
-				}
-			}
-			while (false);
+            
+            cursor = v_5;
+            // backwards, line 126
+            limit_backward = cursor; cursor = limit;
+            // (, line 126
+            // do, line 127
+            v_10 = limit - cursor;
+            do 
+            {
+                // call Step_1a, line 127
+                if (!r_Step_1a())
+                {
+                    goto lab15_brk;
+                }
+            }
+            while (false);
 
 lab15_brk: ;
-			
-			cursor = limit - v_10;
-			// do, line 128
-			v_11 = limit - cursor;
-			do 
-			{
-				// call Step_1b, line 128
-				if (!r_Step_1b())
-				{
-					goto lab16_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_10;
+            // do, line 128
+            v_11 = limit - cursor;
+            do 
+            {
+                // call Step_1b, line 128
+                if (!r_Step_1b())
+                {
+                    goto lab16_brk;
+                }
+            }
+            while (false);
 
 lab16_brk: ;
-			
-			cursor = limit - v_11;
-			// do, line 129
-			v_12 = limit - cursor;
-			do 
-			{
-				// call Step_1c, line 129
-				if (!r_Step_1c())
-				{
-					goto lab17_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_11;
+            // do, line 129
+            v_12 = limit - cursor;
+            do 
+            {
+                // call Step_1c, line 129
+                if (!r_Step_1c())
+                {
+                    goto lab17_brk;
+                }
+            }
+            while (false);
 
 lab17_brk: ;
-			
-			cursor = limit - v_12;
-			// do, line 130
-			v_13 = limit - cursor;
-			do 
-			{
-				// call Step_2, line 130
-				if (!r_Step_2())
-				{
-					goto lab18_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_12;
+            // do, line 130
+            v_13 = limit - cursor;
+            do 
+            {
+                // call Step_2, line 130
+                if (!r_Step_2())
+                {
+                    goto lab18_brk;
+                }
+            }
+            while (false);
 
 lab18_brk: ;
-			
-			cursor = limit - v_13;
-			// do, line 131
-			v_14 = limit - cursor;
-			do 
-			{
-				// call Step_3, line 131
-				if (!r_Step_3())
-				{
-					goto lab19_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_13;
+            // do, line 131
+            v_14 = limit - cursor;
+            do 
+            {
+                // call Step_3, line 131
+                if (!r_Step_3())
+                {
+                    goto lab19_brk;
+                }
+            }
+            while (false);
 
 lab19_brk: ;
-			
-			cursor = limit - v_14;
-			// do, line 132
-			v_15 = limit - cursor;
-			do 
-			{
-				// call Step_4, line 132
-				if (!r_Step_4())
-				{
-					goto lab20_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_14;
+            // do, line 132
+            v_15 = limit - cursor;
+            do 
+            {
+                // call Step_4, line 132
+                if (!r_Step_4())
+                {
+                    goto lab20_brk;
+                }
+            }
+            while (false);
 
 lab20_brk: ;
-			
-			cursor = limit - v_15;
-			// do, line 133
-			v_16 = limit - cursor;
-			do 
-			{
-				// call Step_5a, line 133
-				if (!r_Step_5a())
-				{
-					goto lab21_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_15;
+            // do, line 133
+            v_16 = limit - cursor;
+            do 
+            {
+                // call Step_5a, line 133
+                if (!r_Step_5a())
+                {
+                    goto lab21_brk;
+                }
+            }
+            while (false);
 
 lab21_brk: ;
-			
-			cursor = limit - v_16;
-			// do, line 134
-			v_17 = limit - cursor;
-			do 
-			{
-				// call Step_5b, line 134
-				if (!r_Step_5b())
-				{
-					goto lab22_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_16;
+            // do, line 134
+            v_17 = limit - cursor;
+            do 
+            {
+                // call Step_5b, line 134
+                if (!r_Step_5b())
+                {
+                    goto lab22_brk;
+                }
+            }
+            while (false);
 
 lab22_brk: ;
-			
-			cursor = limit - v_17;
-			cursor = limit_backward; // do, line 137
-			v_18 = cursor;
-			do 
-			{
-				// (, line 137
-				// Boolean test Y_found, line 137
-				if (!(B_Y_found))
-				{
-					goto lab23_brk;
-				}
-				// repeat, line 137
-				while (true)
-				{
-					v_19 = cursor;
-					do 
-					{
-						// (, line 137
-						// goto, line 137
-						while (true)
-						{
-							v_20 = cursor;
-							do 
-							{
-								// (, line 137
-								// [, line 137
-								bra = cursor;
-								// literal, line 137
-								if (!(eq_s(1, "Y")))
-								{
-									goto lab27_brk;
-								}
-								// ], line 137
-								ket = cursor;
-								cursor = v_20;
-								goto golab26_brk;
-							}
-							while (false);
+            
+            cursor = limit - v_17;
+            cursor = limit_backward; // do, line 137
+            v_18 = cursor;
+            do 
+            {
+                // (, line 137
+                // Boolean test Y_found, line 137
+                if (!(B_Y_found))
+                {
+                    goto lab23_brk;
+                }
+                // repeat, line 137
+                while (true)
+                {
+                    v_19 = cursor;
+                    do 
+                    {
+                        // (, line 137
+                        // goto, line 137
+                        while (true)
+                        {
+                            v_20 = cursor;
+                            do 
+                            {
+                                // (, line 137
+                                // [, line 137
+                                bra = cursor;
+                                // literal, line 137
+                                if (!(eq_s(1, "Y")))
+                                {
+                                    goto lab27_brk;
+                                }
+                                // ], line 137
+                                ket = cursor;
+                                cursor = v_20;
+                                goto golab26_brk;
+                            }
+                            while (false);
 
 lab27_brk: ;
-							
-							cursor = v_20;
-							if (cursor >= limit)
-							{
-								goto lab25_brk;
-							}
-							cursor++;
-						}
+                            
+                            cursor = v_20;
+                            if (cursor >= limit)
+                            {
+                                goto lab25_brk;
+                            }
+                            cursor++;
+                        }
 
 golab26_brk: ;
-						
-						// <-, line 137
-						slice_from("y");
-						goto replab24;
-					}
-					while (false);
+                        
+                        // <-, line 137
+                        slice_from("y");
+                        goto replab24;
+                    }
+                    while (false);
 
 lab25_brk: ;
-					
-					cursor = v_19;
-					goto replab24_brk;
+                    
+                    cursor = v_19;
+                    goto replab24_brk;
 
 replab24: ;
-				}
+                }
 
 replab24_brk: ;
-				
-			}
-			while (false);
+                
+            }
+            while (false);
 
 lab23_brk: ;
-			
-			cursor = v_18;
-			return true;
-		}
-	}
+            
+            cursor = v_18;
+            return true;
+        }
+    }
 }
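
Side note on the generated code above: Stem() is the driver the Snowball
compiler emits. It first rewrites certain "y" characters (word-initial, or
following a vowel) to "Y" and sets Y_found, computes the R1/R2 region marks
(I_p1, I_p2) by scanning vowel/non-vowel transitions, then switches to
backward mode and applies steps 1a through 5b from the end of the word,
restoring "Y" to "y" at the close. A minimal usage sketch, assuming the
upstream SnowballProgram API (SetCurrent/GetCurrent, as in the Java
runtime) and that this file is PorterStemmer.cs (both inferred from
context, not confirmed by the diff):

    // Sketch only: SetCurrent/GetCurrent are assumed from the upstream
    // Snowball runtime; Stem() is the generated method shown above.
    var stemmer = new SF.Snowball.Ext.PorterStemmer();
    stemmer.SetCurrent("stemming");         // load the word
    if (stemmer.Stem())                     // rules mutate the buffer in place
    {
        string stem = stemmer.GetCurrent(); // read back the stemmed form
    }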

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/PortugueseStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/PortugueseStemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/PortugueseStemmer.cs
index ac9302f..6ba552b 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/PortugueseStemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/PortugueseStemmer.cs
@@ -58,8 +58,8 @@ namespace SF.Snowball.Ext
 {
 
     /*
-	 * Generated class implementing code defined by a snowball script.
-	 */
+     * Generated class implementing code defined by a snowball script.
+     */
     public class PortugueseStemmer : SnowballProgram
     {
         public PortugueseStemmer()
@@ -70,223 +70,223 @@ namespace SF.Snowball.Ext
         void Init()
         {
             a_0 = new Among[] {
-				new Among("", -1, 3, "",null),
-				new Among("\u00E3", 0, 1, "",null),
-				new Among("\u00F5", 0, 2, "",null)
-			};
+                new Among("", -1, 3, "",null),
+                new Among("\u00E3", 0, 1, "",null),
+                new Among("\u00F5", 0, 2, "",null)
+            };
 
             a_1 = new Among[] {
-				new Among("", -1, 3, "", null),
-				new Among("a~", 0, 1, "", null),
-				new Among("o~", 0, 2, "", null)
-			};
+                new Among("", -1, 3, "", null),
+                new Among("a~", 0, 1, "", null),
+                new Among("o~", 0, 2, "", null)
+            };
 
             a_2 = new Among[] {
-				new Among("ic", -1, -1, "", null),
-				new Among("ad", -1, -1, "", null),
-				new Among("os", -1, -1, "", null),
-				new Among("iv", -1, 1, "", null)
-			};
+                new Among("ic", -1, -1, "", null),
+                new Among("ad", -1, -1, "", null),
+                new Among("os", -1, -1, "", null),
+                new Among("iv", -1, 1, "", null)
+            };
 
             a_3 = new Among[] {
-				new Among("ante", -1, 1, "", null),
-				new Among("avel", -1, 1, "", null),
-				new Among("\u00EDvel", -1, 1, "", null)
-			};
+                new Among("ante", -1, 1, "", null),
+                new Among("avel", -1, 1, "", null),
+                new Among("\u00EDvel", -1, 1, "", null)
+            };
 
             a_4 = new Among[] {
-				new Among("ic", -1, 1, "", null),
-				new Among("abil", -1, 1, "", null),
-				new Among("iv", -1, 1, "", null)
-			};
+                new Among("ic", -1, 1, "", null),
+                new Among("abil", -1, 1, "", null),
+                new Among("iv", -1, 1, "", null)
+            };
 
             a_5 = new Among[] {
-				new Among("ica", -1, 1, "", null),
-				new Among("\u00E2ncia", -1, 1, "", null),
-				new Among("\u00EAncia", -1, 4, "", null),
-				new Among("ira", -1, 9, "", null),
-				new Among("adora", -1, 1, "", null),
-				new Among("osa", -1, 1, "", null),
-				new Among("ista", -1, 1, "", null),
-				new Among("iva", -1, 8, "", null),
-				new Among("eza", -1, 1, "", null),
-				new Among("log\u00EDa", -1, 2, "", null),
-				new Among("idade", -1, 7, "", null),
-				new Among("ante", -1, 1, "", null),
-				new Among("mente", -1, 6, "", null),
-				new Among("amente", 12, 5, "", null),
-				new Among("\u00E1vel", -1, 1, "", null),
-				new Among("\u00EDvel", -1, 1, "", null),
-				new Among("uci\u00F3n", -1, 3, "", null),
-				new Among("ico", -1, 1, "", null),
-				new Among("ismo", -1, 1, "", null),
-				new Among("oso", -1, 1, "", null),
-				new Among("amento", -1, 1, "", null),
-				new Among("imento", -1, 1, "", null),
-				new Among("ivo", -1, 8, "", null),
-				new Among("a\u00E7a~o", -1, 1, "", null),
-				new Among("ador", -1, 1, "", null),
-				new Among("icas", -1, 1, "", null),
-				new Among("\u00EAncias", -1, 4, "", null),
-				new Among("iras", -1, 9, "", null),
-				new Among("adoras", -1, 1, "", null),
-				new Among("osas", -1, 1, "", null),
-				new Among("istas", -1, 1, "", null),
-				new Among("ivas", -1, 8, "", null),
-				new Among("ezas", -1, 1, "", null),
-				new Among("log\u00EDas", -1, 2, "", null),
-				new Among("idades", -1, 7, "", null),
-				new Among("uciones", -1, 3, "", null),
-				new Among("adores", -1, 1, "", null),
-				new Among("antes", -1, 1, "", null),
-				new Among("a\u00E7o~es", -1, 1, "", null),
-				new Among("icos", -1, 1, "", null),
-				new Among("ismos", -1, 1, "", null),
-				new Among("osos", -1, 1, "", null),
-				new Among("amentos", -1, 1, "", null),
-				new Among("imentos", -1, 1, "", null),
-				new Among("ivos", -1, 8, "", null)
-			};
+                new Among("ica", -1, 1, "", null),
+                new Among("\u00E2ncia", -1, 1, "", null),
+                new Among("\u00EAncia", -1, 4, "", null),
+                new Among("ira", -1, 9, "", null),
+                new Among("adora", -1, 1, "", null),
+                new Among("osa", -1, 1, "", null),
+                new Among("ista", -1, 1, "", null),
+                new Among("iva", -1, 8, "", null),
+                new Among("eza", -1, 1, "", null),
+                new Among("log\u00EDa", -1, 2, "", null),
+                new Among("idade", -1, 7, "", null),
+                new Among("ante", -1, 1, "", null),
+                new Among("mente", -1, 6, "", null),
+                new Among("amente", 12, 5, "", null),
+                new Among("\u00E1vel", -1, 1, "", null),
+                new Among("\u00EDvel", -1, 1, "", null),
+                new Among("uci\u00F3n", -1, 3, "", null),
+                new Among("ico", -1, 1, "", null),
+                new Among("ismo", -1, 1, "", null),
+                new Among("oso", -1, 1, "", null),
+                new Among("amento", -1, 1, "", null),
+                new Among("imento", -1, 1, "", null),
+                new Among("ivo", -1, 8, "", null),
+                new Among("a\u00E7a~o", -1, 1, "", null),
+                new Among("ador", -1, 1, "", null),
+                new Among("icas", -1, 1, "", null),
+                new Among("\u00EAncias", -1, 4, "", null),
+                new Among("iras", -1, 9, "", null),
+                new Among("adoras", -1, 1, "", null),
+                new Among("osas", -1, 1, "", null),
+                new Among("istas", -1, 1, "", null),
+                new Among("ivas", -1, 8, "", null),
+                new Among("ezas", -1, 1, "", null),
+                new Among("log\u00EDas", -1, 2, "", null),
+                new Among("idades", -1, 7, "", null),
+                new Among("uciones", -1, 3, "", null),
+                new Among("adores", -1, 1, "", null),
+                new Among("antes", -1, 1, "", null),
+                new Among("a\u00E7o~es", -1, 1, "", null),
+                new Among("icos", -1, 1, "", null),
+                new Among("ismos", -1, 1, "", null),
+                new Among("osos", -1, 1, "", null),
+                new Among("amentos", -1, 1, "", null),
+                new Among("imentos", -1, 1, "", null),
+                new Among("ivos", -1, 8, "", null)
+            };
 
             a_6 = new Among[] {
-				new Among("ada", -1, 1, "", null),
-				new Among("ida", -1, 1, "", null),
-				new Among("ia", -1, 1, "", null),
-				new Among("aria", 2, 1, "", null),
-				new Among("eria", 2, 1, "", null),
-				new Among("iria", 2, 1, "", null),
-				new Among("ara", -1, 1, "", null),
-				new Among("era", -1, 1, "", null),
-				new Among("ira", -1, 1, "", null),
-				new Among("ava", -1, 1, "", null),
-				new Among("asse", -1, 1, "", null),
-				new Among("esse", -1, 1, "", null),
-				new Among("isse", -1, 1, "", null),
-				new Among("aste", -1, 1, "", null),
-				new Among("este", -1, 1, "", null),
-				new Among("iste", -1, 1, "", null),
-				new Among("ei", -1, 1, "", null),
-				new Among("arei", 16, 1, "", null),
-				new Among("erei", 16, 1, "", null),
-				new Among("irei", 16, 1, "", null),
-				new Among("am", -1, 1, "", null),
-				new Among("iam", 20, 1, "", null),
-				new Among("ariam", 21, 1, "", null),
-				new Among("eriam", 21, 1, "", null),
-				new Among("iriam", 21, 1, "", null),
-				new Among("aram", 20, 1, "", null),
-				new Among("eram", 20, 1, "", null),
-				new Among("iram", 20, 1, "", null),
-				new Among("avam", 20, 1, "", null),
-				new Among("em", -1, 1, "", null),
-				new Among("arem", 29, 1, "", null),
-				new Among("erem", 29, 1, "", null),
-				new Among("irem", 29, 1, "", null),
-				new Among("assem", 29, 1, "", null),
-				new Among("essem", 29, 1, "", null),
-				new Among("issem", 29, 1, "", null),
-				new Among("ado", -1, 1, "", null),
-				new Among("ido", -1, 1, "", null),
-				new Among("ando", -1, 1, "", null),
-				new Among("endo", -1, 1, "", null),
-				new Among("indo", -1, 1, "", null),
-				new Among("ara~o", -1, 1, "", null),
-				new Among("era~o", -1, 1, "", null),
-				new Among("ira~o", -1, 1, "", null),
-				new Among("ar", -1, 1, "", null),
-				new Among("er", -1, 1, "", null),
-				new Among("ir", -1, 1, "", null),
-				new Among("as", -1, 1, "", null),
-				new Among("adas", 47, 1, "", null),
-				new Among("idas", 47, 1, "", null),
-				new Among("ias", 47, 1, "", null),
-				new Among("arias", 50, 1, "", null),
-				new Among("erias", 50, 1, "", null),
-				new Among("irias", 50, 1, "", null),
-				new Among("aras", 47, 1, "", null),
-				new Among("eras", 47, 1, "", null),
-				new Among("iras", 47, 1, "", null),
-				new Among("avas", 47, 1, "", null),
-				new Among("es", -1, 1, "", null),
-				new Among("ardes", 58, 1, "", null),
-				new Among("erdes", 58, 1, "", null),
-				new Among("irdes", 58, 1, "", null),
-				new Among("ares", 58, 1, "", null),
-				new Among("eres", 58, 1, "", null),
-				new Among("ires", 58, 1, "", null),
-				new Among("asses", 58, 1, "", null),
-				new Among("esses", 58, 1, "", null),
-				new Among("isses", 58, 1, "", null),
-				new Among("astes", 58, 1, "", null),
-				new Among("estes", 58, 1, "", null),
-				new Among("istes", 58, 1, "", null),
-				new Among("is", -1, 1, "", null),
-				new Among("ais", 71, 1, "", null),
-				new Among("eis", 71, 1, "", null),
-				new Among("areis", 73, 1, "", null),
-				new Among("ereis", 73, 1, "", null),
-				new Among("ireis", 73, 1, "", null),
-				new Among("\u00E1reis", 73, 1, "", null),
-				new Among("\u00E9reis", 73, 1, "", null),
-				new Among("\u00EDreis", 73, 1, "", null),
-				new Among("\u00E1sseis", 73, 1, "", null),
-				new Among("\u00E9sseis", 73, 1, "", null),
-				new Among("\u00EDsseis", 73, 1, "", null),
-				new Among("\u00E1veis", 73, 1, "", null),
-				new Among("\u00EDeis", 73, 1, "", null),
-				new Among("ar\u00EDeis", 84, 1, "", null),
-				new Among("er\u00EDeis", 84, 1, "", null),
-				new Among("ir\u00EDeis", 84, 1, "", null),
-				new Among("ados", -1, 1, "", null),
-				new Among("idos", -1, 1, "", null),
-				new Among("amos", -1, 1, "", null),
-				new Among("\u00E1ramos", 90, 1, "", null),
-				new Among("\u00E9ramos", 90, 1, "", null),
-				new Among("\u00EDramos", 90, 1, "", null),
-				new Among("\u00E1vamos", 90, 1, "", null),
-				new Among("\u00EDamos", 90, 1, "", null),
-				new Among("ar\u00EDamos", 95, 1, "", null),
-				new Among("er\u00EDamos", 95, 1, "", null),
-				new Among("ir\u00EDamos", 95, 1, "", null),
-				new Among("emos", -1, 1, "", null),
-				new Among("aremos", 99, 1, "", null),
-				new Among("eremos", 99, 1, "", null),
-				new Among("iremos", 99, 1, "", null),
-				new Among("\u00E1ssemos", 99, 1, "", null),
-				new Among("\u00EAssemos", 99, 1, "", null),
-				new Among("\u00EDssemos", 99, 1, "", null),
-				new Among("imos", -1, 1, "", null),
-				new Among("armos", -1, 1, "", null),
-				new Among("ermos", -1, 1, "", null),
-				new Among("irmos", -1, 1, "", null),
-				new Among("\u00E1mos", -1, 1, "", null),
-				new Among("ar\u00E1s", -1, 1, "", null),
-				new Among("er\u00E1s", -1, 1, "", null),
-				new Among("ir\u00E1s", -1, 1, "", null),
-				new Among("eu", -1, 1, "", null),
-				new Among("iu", -1, 1, "", null),
-				new Among("ou", -1, 1, "", null),
-				new Among("ar\u00E1", -1, 1, "", null),
-				new Among("er\u00E1", -1, 1, "", null),
-				new Among("ir\u00E1", -1, 1, "", null)
-			};
+                new Among("ada", -1, 1, "", null),
+                new Among("ida", -1, 1, "", null),
+                new Among("ia", -1, 1, "", null),
+                new Among("aria", 2, 1, "", null),
+                new Among("eria", 2, 1, "", null),
+                new Among("iria", 2, 1, "", null),
+                new Among("ara", -1, 1, "", null),
+                new Among("era", -1, 1, "", null),
+                new Among("ira", -1, 1, "", null),
+                new Among("ava", -1, 1, "", null),
+                new Among("asse", -1, 1, "", null),
+                new Among("esse", -1, 1, "", null),
+                new Among("isse", -1, 1, "", null),
+                new Among("aste", -1, 1, "", null),
+                new Among("este", -1, 1, "", null),
+                new Among("iste", -1, 1, "", null),
+                new Among("ei", -1, 1, "", null),
+                new Among("arei", 16, 1, "", null),
+                new Among("erei", 16, 1, "", null),
+                new Among("irei", 16, 1, "", null),
+                new Among("am", -1, 1, "", null),
+                new Among("iam", 20, 1, "", null),
+                new Among("ariam", 21, 1, "", null),
+                new Among("eriam", 21, 1, "", null),
+                new Among("iriam", 21, 1, "", null),
+                new Among("aram", 20, 1, "", null),
+                new Among("eram", 20, 1, "", null),
+                new Among("iram", 20, 1, "", null),
+                new Among("avam", 20, 1, "", null),
+                new Among("em", -1, 1, "", null),
+                new Among("arem", 29, 1, "", null),
+                new Among("erem", 29, 1, "", null),
+                new Among("irem", 29, 1, "", null),
+                new Among("assem", 29, 1, "", null),
+                new Among("essem", 29, 1, "", null),
+                new Among("issem", 29, 1, "", null),
+                new Among("ado", -1, 1, "", null),
+                new Among("ido", -1, 1, "", null),
+                new Among("ando", -1, 1, "", null),
+                new Among("endo", -1, 1, "", null),
+                new Among("indo", -1, 1, "", null),
+                new Among("ara~o", -1, 1, "", null),
+                new Among("era~o", -1, 1, "", null),
+                new Among("ira~o", -1, 1, "", null),
+                new Among("ar", -1, 1, "", null),
+                new Among("er", -1, 1, "", null),
+                new Among("ir", -1, 1, "", null),
+                new Among("as", -1, 1, "", null),
+                new Among("adas", 47, 1, "", null),
+                new Among("idas", 47, 1, "", null),
+                new Among("ias", 47, 1, "", null),
+                new Among("arias", 50, 1, "", null),
+                new Among("erias", 50, 1, "", null),
+                new Among("irias", 50, 1, "", null),
+                new Among("aras", 47, 1, "", null),
+                new Among("eras", 47, 1, "", null),
+                new Among("iras", 47, 1, "", null),
+                new Among("avas", 47, 1, "", null),
+                new Among("es", -1, 1, "", null),
+                new Among("ardes", 58, 1, "", null),
+                new Among("erdes", 58, 1, "", null),
+                new Among("irdes", 58, 1, "", null),
+                new Among("ares", 58, 1, "", null),
+                new Among("eres", 58, 1, "", null),
+                new Among("ires", 58, 1, "", null),
+                new Among("asses", 58, 1, "", null),
+                new Among("esses", 58, 1, "", null),
+                new Among("isses", 58, 1, "", null),
+                new Among("astes", 58, 1, "", null),
+                new Among("estes", 58, 1, "", null),
+                new Among("istes", 58, 1, "", null),
+                new Among("is", -1, 1, "", null),
+                new Among("ais", 71, 1, "", null),
+                new Among("eis", 71, 1, "", null),
+                new Among("areis", 73, 1, "", null),
+                new Among("ereis", 73, 1, "", null),
+                new Among("ireis", 73, 1, "", null),
+                new Among("\u00E1reis", 73, 1, "", null),
+                new Among("\u00E9reis", 73, 1, "", null),
+                new Among("\u00EDreis", 73, 1, "", null),
+                new Among("\u00E1sseis", 73, 1, "", null),
+                new Among("\u00E9sseis", 73, 1, "", null),
+                new Among("\u00EDsseis", 73, 1, "", null),
+                new Among("\u00E1veis", 73, 1, "", null),
+                new Among("\u00EDeis", 73, 1, "", null),
+                new Among("ar\u00EDeis", 84, 1, "", null),
+                new Among("er\u00EDeis", 84, 1, "", null),
+                new Among("ir\u00EDeis", 84, 1, "", null),
+                new Among("ados", -1, 1, "", null),
+                new Among("idos", -1, 1, "", null),
+                new Among("amos", -1, 1, "", null),
+                new Among("\u00E1ramos", 90, 1, "", null),
+                new Among("\u00E9ramos", 90, 1, "", null),
+                new Among("\u00EDramos", 90, 1, "", null),
+                new Among("\u00E1vamos", 90, 1, "", null),
+                new Among("\u00EDamos", 90, 1, "", null),
+                new Among("ar\u00EDamos", 95, 1, "", null),
+                new Among("er\u00EDamos", 95, 1, "", null),
+                new Among("ir\u00EDamos", 95, 1, "", null),
+                new Among("emos", -1, 1, "", null),
+                new Among("aremos", 99, 1, "", null),
+                new Among("eremos", 99, 1, "", null),
+                new Among("iremos", 99, 1, "", null),
+                new Among("\u00E1ssemos", 99, 1, "", null),
+                new Among("\u00EAssemos", 99, 1, "", null),
+                new Among("\u00EDssemos", 99, 1, "", null),
+                new Among("imos", -1, 1, "", null),
+                new Among("armos", -1, 1, "", null),
+                new Among("ermos", -1, 1, "", null),
+                new Among("irmos", -1, 1, "", null),
+                new Among("\u00E1mos", -1, 1, "", null),
+                new Among("ar\u00E1s", -1, 1, "", null),
+                new Among("er\u00E1s", -1, 1, "", null),
+                new Among("ir\u00E1s", -1, 1, "", null),
+                new Among("eu", -1, 1, "", null),
+                new Among("iu", -1, 1, "", null),
+                new Among("ou", -1, 1, "", null),
+                new Among("ar\u00E1", -1, 1, "", null),
+                new Among("er\u00E1", -1, 1, "", null),
+                new Among("ir\u00E1", -1, 1, "", null)
+            };
 
             a_7 = new Among[] {
-				new Among("a", -1, 1, "", null),
-				new Among("i", -1, 1, "", null),
-				new Among("o", -1, 1, "", null),
-				new Among("os", -1, 1, "", null),
-				new Among("\u00E1", -1, 1, "", null),
-				new Among("\u00ED", -1, 1, "", null),
-				new Among("\u00F3", -1, 1, "", null)
-			};
+                new Among("a", -1, 1, "", null),
+                new Among("i", -1, 1, "", null),
+                new Among("o", -1, 1, "", null),
+                new Among("os", -1, 1, "", null),
+                new Among("\u00E1", -1, 1, "", null),
+                new Among("\u00ED", -1, 1, "", null),
+                new Among("\u00F3", -1, 1, "", null)
+            };
 
             a_8 = new Among[] {
-				new Among("e", -1, 1, "", null),
-				new Among("\u00E7", -1, 2, "", null),
-				new Among("\u00E9", -1, 1, "", null),
-				new Among("\u00EA", -1, 1, "", null)
-			};
+                new Among("e", -1, 1, "", null),
+                new Among("\u00E7", -1, 2, "", null),
+                new Among("\u00E9", -1, 1, "", null),
+                new Among("\u00EA", -1, 1, "", null)
+            };
 
         }
 


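The Among tables built in Init() above feed find_among / find_among_b:
each row is one suffix in a shared-prefix table, and the integer arguments
link rows together and pick the case in the calling rule's switch. A rough
reading of one row, with the constructor layout inferred from the calls
above (an assumption for this port, matching the upstream Snowball
runtime):

    // Among(suffix, parentIndex, resultCode, routineName, routineObject)
    Among row = new Among("amente", 12, 5, "", null);   // from a_5 above
    // "amente" extends entry 12 ("mente"); when find_among_b matches it,
    // among_var becomes 5 and selects that case in the caller's switch.
    // The trailing "" and null mean no routine hook is attached.
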
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Document/AbstractField.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/AbstractField.cs b/src/core/Document/AbstractField.cs
index a526f1d..0cfbf5d 100644
--- a/src/core/Document/AbstractField.cs
+++ b/src/core/Document/AbstractField.cs
@@ -24,289 +24,289 @@ using SpanQuery = Lucene.Net.Search.Spans.SpanQuery;
 
 namespace Lucene.Net.Documents
 {
-	/// <summary> 
-	/// 
-	/// 
-	/// </summary>
-	[Serializable]
-	public abstract class AbstractField : IFieldable
-	{
-		
-		protected internal System.String internalName = "body";
-		protected internal bool storeTermVector = false;
-		protected internal bool storeOffsetWithTermVector = false;
-		protected internal bool storePositionWithTermVector = false;
-		protected internal bool internalOmitNorms = false;
-		protected internal bool internalIsStored = false;
-		protected internal bool internalIsIndexed = true;
-		protected internal bool internalIsTokenized = true;
-		protected internal bool internalIsBinary = false;
-		protected internal bool lazy = false;
-		protected internal bool internalOmitTermFreqAndPositions = false;
-		protected internal float internalBoost = 1.0f;
-		// the data object for all different kinds of field values
-		protected internal System.Object fieldsData = null;
-		// pre-analyzed tokenStream for indexed fields
-		protected internal TokenStream tokenStream;
-		// length/offset for all primitive types
-		protected internal int internalBinaryLength;
-		protected internal int internalbinaryOffset;
-		
-		protected internal AbstractField()
-		{
-		}
-		
-		protected internal AbstractField(System.String name, Field.Store store, Field.Index index, Field.TermVector termVector)
-		{
-			if (name == null)
-				throw new System.NullReferenceException("name cannot be null");
-			this.internalName = StringHelper.Intern(name); // field names are interned
+    /// <summary> 
+    /// 
+    /// 
+    /// </summary>
+    [Serializable]
+    public abstract class AbstractField : IFieldable
+    {
+        
+        protected internal System.String internalName = "body";
+        protected internal bool storeTermVector = false;
+        protected internal bool storeOffsetWithTermVector = false;
+        protected internal bool storePositionWithTermVector = false;
+        protected internal bool internalOmitNorms = false;
+        protected internal bool internalIsStored = false;
+        protected internal bool internalIsIndexed = true;
+        protected internal bool internalIsTokenized = true;
+        protected internal bool internalIsBinary = false;
+        protected internal bool lazy = false;
+        protected internal bool internalOmitTermFreqAndPositions = false;
+        protected internal float internalBoost = 1.0f;
+        // the data object for all different kinds of field values
+        protected internal System.Object fieldsData = null;
+        // pre-analyzed tokenStream for indexed fields
+        protected internal TokenStream tokenStream;
+        // length/offset for all primitive types
+        protected internal int internalBinaryLength;
+        protected internal int internalbinaryOffset;
+        
+        protected internal AbstractField()
+        {
+        }
+        
+        protected internal AbstractField(System.String name, Field.Store store, Field.Index index, Field.TermVector termVector)
+        {
+            if (name == null)
+                throw new System.NullReferenceException("name cannot be null");
+            this.internalName = StringHelper.Intern(name); // field names are interned
 
-		    this.internalIsStored = store.IsStored();
-		    this.internalIsIndexed = index.IsIndexed();
-		    this.internalIsTokenized = index.IsAnalyzed();
-		    this.internalOmitNorms = index.OmitNorms();
-			
-			this.internalIsBinary = false;
-			
-			SetStoreTermVector(termVector);
-		}
+            this.internalIsStored = store.IsStored();
+            this.internalIsIndexed = index.IsIndexed();
+            this.internalIsTokenized = index.IsAnalyzed();
+            this.internalOmitNorms = index.OmitNorms();
+            
+            this.internalIsBinary = false;
+            
+            SetStoreTermVector(termVector);
+        }
 
-	    /// <summary>Gets or sets the boost factor for hits for this field.
-	    /// 
-	    /// <p/>The default value is 1.0.
-	    /// 
-	    /// <p/>Note: this value is not stored directly with the document in the index.
-	    /// Documents returned from <see cref="Lucene.Net.Index.IndexReader.Document(int)" /> and
-	    /// <see cref="Lucene.Net.Search.Searcher.Doc(int)" /> may thus not have the same value present as when
-	    /// this field was indexed.
-	    /// </summary>
-	    public virtual float Boost
-	    {
-	        get { return internalBoost; }
-	        set { this.internalBoost = value; }
-	    }
+        /// <summary>Gets or sets the boost factor for hits for this field.
+        /// 
+        /// <p/>The default value is 1.0.
+        /// 
+        /// <p/>Note: this value is not stored directly with the document in the index.
+        /// Documents returned from <see cref="Lucene.Net.Index.IndexReader.Document(int)" /> and
+        /// <see cref="Lucene.Net.Search.Searcher.Doc(int)" /> may thus not have the same value present as when
+        /// this field was indexed.
+        /// </summary>
+        public virtual float Boost
+        {
+            get { return internalBoost; }
+            set { this.internalBoost = value; }
+        }
 
-	    /// <summary>Returns the name of the field as an interned string.
-	    /// For example "date", "title", "body", ...
-	    /// </summary>
-	    public virtual string Name
-	    {
-	        get { return internalName; }
-	    }
+        /// <summary>Returns the name of the field as an interned string.
+        /// For example "date", "title", "body", ...
+        /// </summary>
+        public virtual string Name
+        {
+            get { return internalName; }
+        }
 
-	    protected internal virtual void  SetStoreTermVector(Field.TermVector termVector)
-		{
-		    this.storeTermVector = termVector.IsStored();
-		    this.storePositionWithTermVector = termVector.WithPositions();
-		    this.storeOffsetWithTermVector = termVector.WithOffsets();
-		}
+        protected internal virtual void  SetStoreTermVector(Field.TermVector termVector)
+        {
+            this.storeTermVector = termVector.IsStored();
+            this.storePositionWithTermVector = termVector.WithPositions();
+            this.storeOffsetWithTermVector = termVector.WithOffsets();
+        }
 
-	    /// <summary>True iff the value of the field is to be stored in the index for return
-	    /// with search hits.  It is an error for this to be true if a field is
-	    /// Reader-valued. 
-	    /// </summary>
-	    public bool IsStored
-	    {
-	        get { return internalIsStored; }
-	    }
+        /// <summary>True iff the value of the field is to be stored in the index for return
+        /// with search hits.  It is an error for this to be true if a field is
+        /// Reader-valued. 
+        /// </summary>
+        public bool IsStored
+        {
+            get { return internalIsStored; }
+        }
 
-	    /// <summary>True iff the value of the field is to be indexed, so that it may be
-	    /// searched on. 
-	    /// </summary>
-	    public bool IsIndexed
-	    {
-	        get { return internalIsIndexed; }
-	    }
+        /// <summary>True iff the value of the field is to be indexed, so that it may be
+        /// searched on. 
+        /// </summary>
+        public bool IsIndexed
+        {
+            get { return internalIsIndexed; }
+        }
 
-	    /// <summary>True iff the value of the field should be tokenized as text prior to
-	    /// indexing.  Un-tokenized fields are indexed as a single word and may not be
-	    /// Reader-valued. 
-	    /// </summary>
-	    public bool IsTokenized
-	    {
-	        get { return internalIsTokenized; }
-	    }
+        /// <summary>True iff the value of the field should be tokenized as text prior to
+        /// indexing.  Un-tokenized fields are indexed as a single word and may not be
+        /// Reader-valued. 
+        /// </summary>
+        public bool IsTokenized
+        {
+            get { return internalIsTokenized; }
+        }
 
-	    /// <summary>True iff the term or terms used to index this field are stored as a term
-	    /// vector, available from <see cref="Lucene.Net.Index.IndexReader.GetTermFreqVector(int,String)" />.
-	    /// These methods do not provide access to the original content of the field,
-	    /// only to terms used to index it. If the original content must be
-	    /// preserved, use the <c>stored</c> attribute instead.
-	    /// 
-	    /// </summary>
-	    /// <seealso cref="Lucene.Net.Index.IndexReader.GetTermFreqVector(int, String)">
-	    /// </seealso>
-	    public bool IsTermVectorStored
-	    {
-	        get { return storeTermVector; }
-	    }
+        /// <summary>True iff the term or terms used to index this field are stored as a term
+        /// vector, available from <see cref="Lucene.Net.Index.IndexReader.GetTermFreqVector(int,String)" />.
+        /// These methods do not provide access to the original content of the field,
+        /// only to terms used to index it. If the original content must be
+        /// preserved, use the <c>stored</c> attribute instead.
+        /// 
+        /// </summary>
+        /// <seealso cref="Lucene.Net.Index.IndexReader.GetTermFreqVector(int, String)">
+        /// </seealso>
+        public bool IsTermVectorStored
+        {
+            get { return storeTermVector; }
+        }
 
-	    /// <summary> True iff terms are stored as term vector together with their offsets 
-	    /// (start and end position in source text).
-	    /// </summary>
-	    public virtual bool IsStoreOffsetWithTermVector
-	    {
-	        get { return storeOffsetWithTermVector; }
-	    }
+        /// <summary> True iff terms are stored as term vector together with their offsets 
+        /// (start and end position in source text).
+        /// </summary>
+        public virtual bool IsStoreOffsetWithTermVector
+        {
+            get { return storeOffsetWithTermVector; }
+        }
 
-	    /// <summary> True iff terms are stored as term vector together with their token positions.</summary>
-	    public virtual bool IsStorePositionWithTermVector
-	    {
-	        get { return storePositionWithTermVector; }
-	    }
+        /// <summary> True iff terms are stored as term vector together with their token positions.</summary>
+        public virtual bool IsStorePositionWithTermVector
+        {
+            get { return storePositionWithTermVector; }
+        }
 
-	    /// <summary>True iff the value of the field is stored as binary </summary>
-	    public bool IsBinary
-	    {
-	        get { return internalIsBinary; }
-	    }
+        /// <summary>True iff the value of the field is stored as binary </summary>
+        public bool IsBinary
+        {
+            get { return internalIsBinary; }
+        }
 
 
-	    /// <summary> Return the raw byte[] for the binary field.  Note that
-	    /// you must also call <see cref="BinaryLength" /> and <see cref="BinaryOffset" />
-	    /// to know which range of bytes in this
-	    /// returned array belong to the field.
-	    /// </summary>
-	    /// <returns> reference to the Field value as byte[]. </returns>
-	    public virtual byte[] GetBinaryValue()
-	    {
-	        return GetBinaryValue(null);
-	    }
+        /// <summary> Return the raw byte[] for the binary field.  Note that
+        /// you must also call <see cref="BinaryLength" /> and <see cref="BinaryOffset" />
+        /// to know which range of bytes in this
+        /// returned array belong to the field.
+        /// </summary>
+        /// <returns> reference to the Field value as byte[]. </returns>
+        public virtual byte[] GetBinaryValue()
+        {
+            return GetBinaryValue(null);
+        }
 
-	    public virtual byte[] GetBinaryValue(byte[] result)
-		{
-			if (internalIsBinary || fieldsData is byte[])
-				return (byte[]) fieldsData;
-			else
-				return null;
-		}
+        public virtual byte[] GetBinaryValue(byte[] result)
+        {
+            if (internalIsBinary || fieldsData is byte[])
+                return (byte[]) fieldsData;
+            else
+                return null;
+        }
 
-	    /// <summary> Returns length of byte[] segment that is used as value, if Field is not binary
-	    /// returned value is undefined
-	    /// </summary>
-	    /// <value> length of byte[] segment that represents this Field value </value>
-	    public virtual int BinaryLength
-	    {
-	        get
-	        {
-	            if (internalIsBinary)
-	            {
-	                return internalBinaryLength;
-	            }
-	            return fieldsData is byte[] ? ((byte[]) fieldsData).Length : 0;
-	        }
-	    }
+        /// <summary> Returns length of byte[] segment that is used as value, if Field is not binary
+        /// returned value is undefined
+        /// </summary>
+        /// <value> length of byte[] segment that represents this Field value </value>
+        public virtual int BinaryLength
+        {
+            get
+            {
+                if (internalIsBinary)
+                {
+                    return internalBinaryLength;
+                }
+                return fieldsData is byte[] ? ((byte[]) fieldsData).Length : 0;
+            }
+        }
 
-	    /// <summary> Returns offset into byte[] segment that is used as value, if Field is not binary
-	    /// returned value is undefined
-	    /// </summary>
-	    /// <value> index of the first character in byte[] segment that represents this Field value </value>
-	    public virtual int BinaryOffset
-	    {
-	        get { return internalbinaryOffset; }
-	    }
+        /// <summary> Returns offset into byte[] segment that is used as value, if Field is not binary
+        /// returned value is undefined
+        /// </summary>
+        /// <value> index of the first character in byte[] segment that represents this Field value </value>
+        public virtual int BinaryOffset
+        {
+            get { return internalbinaryOffset; }
+        }
 
-	    /// <summary>True if norms are omitted for this indexed field </summary>
-	    public virtual bool OmitNorms
-	    {
-	        get { return internalOmitNorms; }
-	        set { this.internalOmitNorms = value; }
-	    }
+        /// <summary>True if norms are omitted for this indexed field </summary>
+        public virtual bool OmitNorms
+        {
+            get { return internalOmitNorms; }
+            set { this.internalOmitNorms = value; }
+        }
 
-	    /// <summary>Expert:
-	    /// 
-	    /// If set, omit term freq, positions and payloads from
-	    /// postings for this field.
-	    /// 
-	    /// <p/><b>NOTE</b>: While this option reduces storage space
-	    /// required in the index, it also means any query
-	    /// requiring positional information, such as <see cref="PhraseQuery" />
-	    /// or <see cref="SpanQuery" /> subclasses will
-	    /// silently fail to find results.
-	    /// </summary>
-	    public virtual bool OmitTermFreqAndPositions
-	    {
-	        set { this.internalOmitTermFreqAndPositions = value; }
-	        get { return internalOmitTermFreqAndPositions; }
-	    }
+        /// <summary>Expert:
+        /// 
+        /// If set, omit term freq, positions and payloads from
+        /// postings for this field.
+        /// 
+        /// <p/><b>NOTE</b>: While this option reduces storage space
+        /// required in the index, it also means any query
+        /// requiring positional information, such as <see cref="PhraseQuery" />
+        /// or <see cref="SpanQuery" /> subclasses will
+        /// silently fail to find results.
+        /// </summary>
+        public virtual bool OmitTermFreqAndPositions
+        {
+            set { this.internalOmitTermFreqAndPositions = value; }
+            get { return internalOmitTermFreqAndPositions; }
+        }
 
-	    public virtual bool IsLazy
-	    {
-	        get { return lazy; }
-	    }
+        public virtual bool IsLazy
+        {
+            get { return lazy; }
+        }
 
-	    /// <summary>Prints a Field for human consumption. </summary>
-		public override System.String ToString()
-		{
-			System.Text.StringBuilder result = new System.Text.StringBuilder();
-			if (internalIsStored)
-			{
-				result.Append("stored");
-			}
-			if (internalIsIndexed)
-			{
-				if (result.Length > 0)
-					result.Append(",");
-				result.Append("indexed");
-			}
-			if (internalIsTokenized)
-			{
-				if (result.Length > 0)
-					result.Append(",");
-				result.Append("tokenized");
-			}
-			if (storeTermVector)
-			{
-				if (result.Length > 0)
-					result.Append(",");
-				result.Append("termVector");
-			}
-			if (storeOffsetWithTermVector)
-			{
-				if (result.Length > 0)
-					result.Append(",");
-				result.Append("termVectorOffsets");
-			}
-			if (storePositionWithTermVector)
-			{
-				if (result.Length > 0)
-					result.Append(",");
-				result.Append("termVectorPosition");
-			}
-			if (internalIsBinary)
-			{
-				if (result.Length > 0)
-					result.Append(",");
-				result.Append("binary");
-			}
-			if (internalOmitNorms)
-			{
-				result.Append(",omitNorms");
-			}
-			if (internalOmitTermFreqAndPositions)
-			{
-				result.Append(",omitTermFreqAndPositions");
-			}
-			if (lazy)
-			{
-				result.Append(",lazy");
-			}
-			result.Append('<');
-			result.Append(internalName);
-			result.Append(':');
-			
-			if (fieldsData != null && lazy == false)
-			{
-				result.Append(fieldsData);
-			}
-			
-			result.Append('>');
-			return result.ToString();
-		}
+        /// <summary>Prints a Field for human consumption. </summary>
+        public override System.String ToString()
+        {
+            System.Text.StringBuilder result = new System.Text.StringBuilder();
+            if (internalIsStored)
+            {
+                result.Append("stored");
+            }
+            if (internalIsIndexed)
+            {
+                if (result.Length > 0)
+                    result.Append(",");
+                result.Append("indexed");
+            }
+            if (internalIsTokenized)
+            {
+                if (result.Length > 0)
+                    result.Append(",");
+                result.Append("tokenized");
+            }
+            if (storeTermVector)
+            {
+                if (result.Length > 0)
+                    result.Append(",");
+                result.Append("termVector");
+            }
+            if (storeOffsetWithTermVector)
+            {
+                if (result.Length > 0)
+                    result.Append(",");
+                result.Append("termVectorOffsets");
+            }
+            if (storePositionWithTermVector)
+            {
+                if (result.Length > 0)
+                    result.Append(",");
+                result.Append("termVectorPosition");
+            }
+            if (internalIsBinary)
+            {
+                if (result.Length > 0)
+                    result.Append(",");
+                result.Append("binary");
+            }
+            if (internalOmitNorms)
+            {
+                result.Append(",omitNorms");
+            }
+            if (internalOmitTermFreqAndPositions)
+            {
+                result.Append(",omitTermFreqAndPositions");
+            }
+            if (lazy)
+            {
+                result.Append(",lazy");
+            }
+            result.Append('<');
+            result.Append(internalName);
+            result.Append(':');
+            
+            if (fieldsData != null && lazy == false)
+            {
+                result.Append(fieldsData);
+            }
+            
+            result.Append('>');
+            return result.ToString();
+        }
 
-	    public abstract TokenStream TokenStreamValue { get; }
-	    public abstract TextReader ReaderValue { get; }
-	    public abstract string StringValue { get; }
-	}
+        public abstract TokenStream TokenStreamValue { get; }
+        public abstract TextReader ReaderValue { get; }
+        public abstract string StringValue { get; }
+    }
 }
\ No newline at end of file
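
A quick usage sketch of the flag properties and ToString() above (illustration
only; it assumes the concrete Field subclass and its (name, value, Store,
Index) constructor from this codebase, which are not part of this diff):

    using System;
    using Lucene.Net.Documents;

    class FieldFlagsDemo
    {
        static void Main()
        {
            var field = new Field("title", "lucene in action",
                                  Field.Store.YES, Field.Index.ANALYZED);
            field.OmitNorms = true;                // skip norm bytes for this field
            field.OmitTermFreqAndPositions = true; // postings keep doc ids only

            // ToString() appends each active flag in the order tested above:
            //   stored,indexed,tokenized,omitNorms,omitTermFreqAndPositions<title:lucene in action>
            Console.WriteLine(field);
        }
    }

Per the NOTE in the OmitTermFreqAndPositions remarks, a PhraseQuery against a
field indexed this way silently matches nothing, because positions are absent
from its postings.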

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Document/CompressionTools.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/CompressionTools.cs b/src/core/Document/CompressionTools.cs
index 400633f..aaa3aae 100644
--- a/src/core/Document/CompressionTools.cs
+++ b/src/core/Document/CompressionTools.cs
@@ -27,124 +27,124 @@ using UnicodeUtil = Lucene.Net.Util.UnicodeUtil;
 
 namespace Lucene.Net.Documents
 {
-	
-	/// <summary>Simple utility class providing static methods to
-	/// compress and decompress binary data for stored fields.
-	/// This class uses java.util.zip.Deflater and Inflater
-	/// classes to compress and decompress.
-	/// </summary>
-	
-	public class CompressionTools
-	{
-		
-		// Export only static methods
-		private CompressionTools()
-		{
-		}
-		
-		/// <summary>Compresses the specified byte range using the
-		/// specified compressionLevel (constants are defined in
-		/// java.util.zip.Deflater). 
-		/// </summary>
-		public static byte[] Compress(byte[] value_Renamed, int offset, int length, int compressionLevel)
-		{
-			/* Create an expandable byte array to hold the compressed data.
-			* You cannot use an array that's the same size as the original because
-			* there is no guarantee that the compressed data will be smaller than
-			* the uncompressed data. */
-			System.IO.MemoryStream bos = new System.IO.MemoryStream(length);
+    
+    /// <summary>Simple utility class providing static methods to
+    /// compress and decompress binary data for stored fields.
+    /// This class uses the SharpZipLib Deflater and Inflater
+    /// classes to compress and decompress.
+    /// </summary>
+    
+    public class CompressionTools
+    {
+        
+        // Export only static methods
+        private CompressionTools()
+        {
+        }
+        
+        /// <summary>Compresses the specified byte range using the
+        /// specified compressionLevel (constants are defined in
+        /// the SharpZipLib Deflater class). 
+        /// </summary>
+        public static byte[] Compress(byte[] value_Renamed, int offset, int length, int compressionLevel)
+        {
+            /* Create an expandable byte array to hold the compressed data.
+            * You cannot use an array that's the same size as the original because
+            * there is no guarantee that the compressed data will be smaller than
+            * the uncompressed data. */
+            System.IO.MemoryStream bos = new System.IO.MemoryStream(length);
 
             Deflater compressor = SharpZipLib.CreateDeflater();
-			
-			try
-			{
-				compressor.SetLevel(compressionLevel);
-				compressor.SetInput(value_Renamed, offset, length);
-				compressor.Finish();
-				
-				// Compress the data
-				byte[] buf = new byte[1024];
-				while (!compressor.IsFinished)
-				{
-					int count = compressor.Deflate(buf);
-					bos.Write(buf, 0, count);
-				}
-			}
-			finally
-			{
-			}
-			
-			return bos.ToArray();
-		}
-		
-		/// <summary>Compresses the specified byte range, with default BEST_COMPRESSION level </summary>
-		public static byte[] Compress(byte[] value_Renamed, int offset, int length)
+            
+            try
+            {
+                compressor.SetLevel(compressionLevel);
+                compressor.SetInput(value_Renamed, offset, length);
+                compressor.Finish();
+                
+                // Compress the data
+                byte[] buf = new byte[1024];
+                while (!compressor.IsFinished)
+                {
+                    int count = compressor.Deflate(buf);
+                    bos.Write(buf, 0, count);
+                }
+            }
+            finally
+            {
+            }
+            
+            return bos.ToArray();
+        }
+        
+        /// <summary>Compresses the specified byte range, with default BEST_COMPRESSION level </summary>
+        public static byte[] Compress(byte[] value_Renamed, int offset, int length)
+        {
+            return Compress(value_Renamed, offset, length, Deflater.BEST_COMPRESSION);
+        }
+        
+        /// <summary>Compresses all bytes in the array, with default BEST_COMPRESSION level </summary>
+        public static byte[] Compress(byte[] value_Renamed)
         {
-			return Compress(value_Renamed, offset, length, Deflater.BEST_COMPRESSION);
-		}
-		
-		/// <summary>Compresses all bytes in the array, with default BEST_COMPRESSION level </summary>
-		public static byte[] Compress(byte[] value_Renamed)
-		{
             return Compress(value_Renamed, 0, value_Renamed.Length, Deflater.BEST_COMPRESSION);
-		}
-		
-		/// <summary>Compresses the String value, with default BEST_COMPRESSION level </summary>
-		public static byte[] CompressString(System.String value_Renamed)
-		{
+        }
+        
+        /// <summary>Compresses the String value, with default BEST_COMPRESSION level </summary>
+        public static byte[] CompressString(System.String value_Renamed)
+        {
             return CompressString(value_Renamed, Deflater.BEST_COMPRESSION);
-		}
-		
-		/// <summary>Compresses the String value using the specified
-		/// compressionLevel (constants are defined in
-		/// java.util.zip.Deflater). 
-		/// </summary>
-		public static byte[] CompressString(System.String value_Renamed, int compressionLevel)
-		{
-			UnicodeUtil.UTF8Result result = new UnicodeUtil.UTF8Result();
-			UnicodeUtil.UTF16toUTF8(value_Renamed, 0, value_Renamed.Length, result);
-			return Compress(result.result, 0, result.length, compressionLevel);
-		}
-		
-		/// <summary>Decompress the byte array previously returned by
-		/// compress 
-		/// </summary>
-		public static byte[] Decompress(byte[] value_Renamed)
-		{
-			// Create an expandable byte array to hold the decompressed data
-			System.IO.MemoryStream bos = new System.IO.MemoryStream(value_Renamed.Length);
-			
-			Inflater decompressor = SharpZipLib.CreateInflater();
-			
-			try
-			{
-				decompressor.SetInput(value_Renamed);
-				
-				// Decompress the data
-				byte[] buf = new byte[1024];
-				while (!decompressor.IsFinished)
-				{
-					int count = decompressor.Inflate(buf);
-					bos.Write(buf, 0, count);
-				}
-			}
-			finally
-			{
-			}
-			
-			return bos.ToArray();
-		}
-		
-		/// <summary>Decompress the byte array previously returned by
-		/// compressString back into a String 
-		/// </summary>
-		public static System.String DecompressString(byte[] value_Renamed)
-		{
-			UnicodeUtil.UTF16Result result = new UnicodeUtil.UTF16Result();
-			byte[] bytes = Decompress(value_Renamed);
-			UnicodeUtil.UTF8toUTF16(bytes, 0, bytes.Length, result);
-			return new System.String(result.result, 0, result.length);
-		}
-	}
+        }
+        
+        /// <summary>Compresses the String value using the specified
+        /// compressionLevel (constants are defined in
+        /// the SharpZipLib Deflater class). 
+        /// </summary>
+        public static byte[] CompressString(System.String value_Renamed, int compressionLevel)
+        {
+            UnicodeUtil.UTF8Result result = new UnicodeUtil.UTF8Result();
+            UnicodeUtil.UTF16toUTF8(value_Renamed, 0, value_Renamed.Length, result);
+            return Compress(result.result, 0, result.length, compressionLevel);
+        }
+        
+        /// <summary>Decompress the byte array previously returned by
+        /// compress 
+        /// </summary>
+        public static byte[] Decompress(byte[] value_Renamed)
+        {
+            // Create an expandable byte array to hold the decompressed data
+            System.IO.MemoryStream bos = new System.IO.MemoryStream(value_Renamed.Length);
+            
+            Inflater decompressor = SharpZipLib.CreateInflater();
+            
+            try
+            {
+                decompressor.SetInput(value_Renamed);
+                
+                // Decompress the data
+                byte[] buf = new byte[1024];
+                while (!decompressor.IsFinished)
+                {
+                    int count = decompressor.Inflate(buf);
+                    bos.Write(buf, 0, count);
+                }
+            }
+            finally
+            {
+            }
+            
+            return bos.ToArray();
+        }
+        
+        /// <summary>Decompress the byte array previously returned by
+        /// compressString back into a String 
+        /// </summary>
+        public static System.String DecompressString(byte[] value_Renamed)
+        {
+            UnicodeUtil.UTF16Result result = new UnicodeUtil.UTF16Result();
+            byte[] bytes = Decompress(value_Renamed);
+            UnicodeUtil.UTF8toUTF16(bytes, 0, bytes.Length, result);
+            return new System.String(result.result, 0, result.length);
+        }
+    }
 }
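
The Compress*/Decompress* helpers above pair up symmetrically; a minimal
round-trip sketch using only the public methods shown in this file:

    using System;
    using System.Text;
    using Lucene.Net.Documents;

    class CompressionToolsDemo
    {
        static void Main()
        {
            // String round trip; CompressString defaults to BEST_COMPRESSION.
            byte[] packed = CompressionTools.CompressString("repeat repeat repeat");
            Console.WriteLine(CompressionTools.DecompressString(packed));

            // Byte round trip over an arbitrary payload.
            byte[] data = Encoding.UTF8.GetBytes("raw payload");
            byte[] restored = CompressionTools.Decompress(CompressionTools.Compress(data));
            Console.WriteLine(Encoding.UTF8.GetString(restored));
        }
    }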
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Document/DateField.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/DateField.cs b/src/core/Document/DateField.cs
index 6179f4c..219c7d9 100644
--- a/src/core/Document/DateField.cs
+++ b/src/core/Document/DateField.cs
@@ -25,107 +25,107 @@ using TermRangeQuery = Lucene.Net.Search.TermRangeQuery;
 
 namespace Lucene.Net.Documents
 {
-	// for javadoc
-	
-	// do not remove in 3.0, needed for reading old indexes!
-	
-	/// <summary> Provides support for converting dates to strings and vice-versa.
-	/// The strings are structured so that lexicographic sorting orders by date,
-	/// which makes them suitable for use as field values and search terms.
-	/// 
-	/// <p/>Note that this class saves dates with millisecond granularity,
-	/// which is bad for <see cref="TermRangeQuery" /> and <see cref="PrefixQuery" />, as those
-	/// queries are expanded to a BooleanQuery with a potentially large number
-	/// of terms when searching. Thus you might want to use
-	/// <see cref="DateTools" /> instead.
-	/// 
-	/// <p/>
-	/// Note: dates before 1970 cannot be used, and therefore cannot be
-	/// indexed when using this class. See <see cref="DateTools" /> for an
-	/// alternative without such a limitation.
-	/// 
-	/// <p/>
-	/// Another approach is <see cref="NumericUtils" />, which provides
-	/// a sortable binary representation (prefix encoded) of numeric values, which
-	/// dates and times are.
-	/// For indexing a <see cref="DateTime" />, convert it to unix timestamp as
-	/// <c>long</c> and
-	/// index this as a numeric value with <see cref="NumericField" />
-	/// and use <see cref="NumericRangeQuery{T}" /> to query it.
-	/// 
-	/// </summary>
-	/// <deprecated> If you build a new index, use <see cref="DateTools" /> or 
-	/// <see cref="NumericField" /> instead.
-	/// This class is included for use with existing
-	/// indices and will be removed in a future release (possibly Lucene 4.0)
-	/// </deprecated>
+    // for javadoc
+    
+    // do not remove in 3.0, needed for reading old indexes!
+    
+    /// <summary> Provides support for converting dates to strings and vice-versa.
+    /// The strings are structured so that lexicographic sorting orders by date,
+    /// which makes them suitable for use as field values and search terms.
+    /// 
+    /// <p/>Note that this class saves dates with millisecond granularity,
+    /// which is bad for <see cref="TermRangeQuery" /> and <see cref="PrefixQuery" />, as those
+    /// queries are expanded to a BooleanQuery with a potentially large number
+    /// of terms when searching. Thus you might want to use
+    /// <see cref="DateTools" /> instead.
+    /// 
+    /// <p/>
+    /// Note: dates before 1970 cannot be used, and therefore cannot be
+    /// indexed when using this class. See <see cref="DateTools" /> for an
+    /// alternative without such a limitation.
+    /// 
+    /// <p/>
+    /// Another approach is <see cref="NumericUtils" />, which provides
+    /// a sortable binary representation (prefix encoded) of numeric values, which
+    /// dates and times are.
+    /// For indexing a <see cref="DateTime" />, convert it to unix timestamp as
+    /// <c>long</c> and
+    /// index this as a numeric value with <see cref="NumericField" />
+    /// and use <see cref="NumericRangeQuery{T}" /> to query it.
+    /// 
+    /// </summary>
+    /// <deprecated> If you build a new index, use <see cref="DateTools" /> or 
+    /// <see cref="NumericField" /> instead.
+    /// This class is included for use with existing
+    /// indices and will be removed in a future release (possibly Lucene 4.0)
+    /// </deprecated>
     [Obsolete("If you build a new index, use DateTools or NumericField instead. This class is included for use with existing indices and will be removed in a future release (possibly Lucene 4.0).")]
-	public class DateField
-	{
-		
-		private DateField()
-		{
-		}
-		
-		// make date strings long enough to last a millennium
+    public class DateField
+    {
+        
+        private DateField()
+        {
+        }
+        
+        // make date strings long enough to last a millennium
         private static int DATE_LEN = Number.ToString(1000L * 365 * 24 * 60 * 60 * 1000, Number.MAX_RADIX).Length;
 
-		public static System.String MIN_DATE_STRING()
-		{
-			return TimeToString(0);
-		}
-		
-		public static System.String MAX_DATE_STRING()
-		{
-			char[] buffer = new char[DATE_LEN];
+        public static System.String MIN_DATE_STRING()
+        {
+            return TimeToString(0);
+        }
+        
+        public static System.String MAX_DATE_STRING()
+        {
+            char[] buffer = new char[DATE_LEN];
             char c = Character.ForDigit(Character.MAX_RADIX - 1, Character.MAX_RADIX);
-			for (int i = 0; i < DATE_LEN; i++)
-				buffer[i] = c;
-			return new System.String(buffer);
-		}
-		
-		/// <summary> Converts a Date to a string suitable for indexing.</summary>
-		/// <throws>  RuntimeException if the date specified in the </throws>
-		/// <summary> method argument is before 1970
-		/// </summary>
+            for (int i = 0; i < DATE_LEN; i++)
+                buffer[i] = c;
+            return new System.String(buffer);
+        }
+        
+        /// <summary> Converts a Date to a string suitable for indexing.</summary>
+        /// <throws>  RuntimeException if the date specified in the
+        /// method argument is before 1970
+        /// </throws>
         public static System.String DateToString(System.DateTime date)
         {
             TimeSpan ts = date.Subtract(new DateTime(1970, 1, 1));
             ts = ts.Subtract(TimeZone.CurrentTimeZone.GetUtcOffset(date));
             return TimeToString(ts.Ticks / TimeSpan.TicksPerMillisecond);
         }
-		/// <summary> Converts a millisecond time to a string suitable for indexing.</summary>
-		/// <throws>  RuntimeException if the time specified in the </throws>
-		/// <summary> method argument is negative, that is, before 1970
-		/// </summary>
-		public static System.String TimeToString(long time)
-		{
-			if (time < 0)
-				throw new System.SystemException("time '" + time + "' is too early, must be >= 0");
+        /// <summary> Converts a millisecond time to a string suitable for indexing.</summary>
+        /// <throws>  RuntimeException if the time specified in the
+        /// method argument is negative, that is, before 1970
+        /// </throws>
+        public static System.String TimeToString(long time)
+        {
+            if (time < 0)
+                throw new System.SystemException("time '" + time + "' is too early, must be >= 0");
 
             System.String s = Number.ToString(time, Character.MAX_RADIX);
-			
-			if (s.Length > DATE_LEN)
-				throw new System.SystemException("time '" + time + "' is too late, length of string " + "representation must be <= " + DATE_LEN);
-			
-			// Pad with leading zeros
-			if (s.Length < DATE_LEN)
-			{
-				System.Text.StringBuilder sb = new System.Text.StringBuilder(s);
-				while (sb.Length < DATE_LEN)
-					sb.Insert(0, 0);
-				s = sb.ToString();
-			}
-			
-			return s;
-		}
-		
-		/// <summary>Converts a string-encoded date into a millisecond time. </summary>
-		public static long StringToTime(System.String s)
-		{
+            
+            if (s.Length > DATE_LEN)
+                throw new System.SystemException("time '" + time + "' is too late, length of string " + "representation must be <= " + DATE_LEN);
+            
+            // Pad with leading zeros
+            if (s.Length < DATE_LEN)
+            {
+                System.Text.StringBuilder sb = new System.Text.StringBuilder(s);
+                while (sb.Length < DATE_LEN)
+                    sb.Insert(0, 0);
+                s = sb.ToString();
+            }
+            
+            return s;
+        }
+        
+        /// <summary>Converts a string-encoded date into a millisecond time. </summary>
+        public static long StringToTime(System.String s)
+        {
             return Number.Parse(s, Number.MAX_RADIX);
-		}
-		/// <summary>Converts a string-encoded date into a Date object. </summary>
+        }
+        /// <summary>Converts a string-encoded date into a Date object. </summary>
         public static System.DateTime StringToDate(System.String s)
         {
             long ticks = StringToTime(s) * TimeSpan.TicksPerMillisecond;
@@ -134,5 +134,5 @@ namespace Lucene.Net.Documents
             date = date.Add(TimeZone.CurrentTimeZone.GetUtcOffset(date));
             return date;
         }
-	}
+    }
 }
\ No newline at end of file
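
DateField is obsolete but still needed for reading old indexes; a round-trip
sketch of the methods above (DateToString/StringToDate adjust by the local UTC
offset, so decoding returns the original local time; compiling against the
class emits an obsolete warning):

    using System;
    using Lucene.Net.Documents;

    class DateFieldDemo
    {
        static void Main()
        {
            // Post-1970 dates only; the encoded string sorts lexicographically by time.
            DateTime stamp = new DateTime(2004, 9, 21, 13, 50, 11);
            string encoded = DateField.DateToString(stamp);   // zero-padded base-36 milliseconds
            DateTime decoded = DateField.StringToDate(encoded);
            Console.WriteLine("{0} -> {1}", encoded, decoded);

            // DateField.TimeToString(-1) would throw: negative times are rejected.
        }
    }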

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Document/DateTools.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/DateTools.cs b/src/core/Document/DateTools.cs
index 8263df1..6e2af27 100644
--- a/src/core/Document/DateTools.cs
+++ b/src/core/Document/DateTools.cs
@@ -21,32 +21,32 @@ using NumericUtils = Lucene.Net.Util.NumericUtils;
 
 namespace Lucene.Net.Documents
 {
-	
-	/// <summary> Provides support for converting dates to strings and vice-versa.
-	/// The strings are structured so that lexicographic sorting orders 
-	/// them by date, which makes them suitable for use as field values 
-	/// and search terms.
-	/// 
-	/// <p/>This class also helps you to limit the resolution of your dates. Do not
-	/// save dates with a finer resolution than you really need, as then
-	/// RangeQuery and PrefixQuery will require more memory and become slower.
-	/// 
-	/// <p/>Compared to <see cref="DateField" /> the strings generated by the methods
-	/// in this class take slightly more space, unless your selected resolution
-	/// is set to <c>Resolution.DAY</c> or lower.
-	/// 
-	/// <p/>
-	/// Another approach is <see cref="NumericUtils" />, which provides
-	/// a sortable binary representation (prefix encoded) of numeric values, which
-	/// dates and times are.
+    
+    /// <summary> Provides support for converting dates to strings and vice-versa.
+    /// The strings are structured so that lexicographic sorting orders 
+    /// them by date, which makes them suitable for use as field values 
+    /// and search terms.
+    /// 
+    /// <p/>This class also helps you to limit the resolution of your dates. Do not
+    /// save dates with a finer resolution than you really need, as then
+    /// RangeQuery and PrefixQuery will require more memory and become slower.
+    /// 
+    /// <p/>Compared to <see cref="DateField" /> the strings generated by the methods
+    /// in this class take slightly more space, unless your selected resolution
+    /// is set to <c>Resolution.DAY</c> or lower.
+    /// 
+    /// <p/>
+    /// Another approach is <see cref="NumericUtils" />, which provides
+    /// a sortable binary representation (prefix encoded) of numeric values, which
+    /// dates and times are.
     /// For indexing a <see cref="DateTime" />, convert it to unix timestamp as
-	/// <c>long</c> and
-	/// index this as a numeric value with <see cref="NumericField" />
-	/// and use <see cref="NumericRangeQuery{T}" /> to query it.
-	/// </summary>
-	public class DateTools
-	{
-		
+    /// <c>long</c> and
+    /// index this as a numeric value with <see cref="NumericField" />
+    /// and use <see cref="NumericRangeQuery{T}" /> to query it.
+    /// </summary>
+    public class DateTools
+    {
+        
         private static readonly System.String YEAR_FORMAT = "yyyy";
         private static readonly System.String MONTH_FORMAT = "yyyyMM";
         private static readonly System.String DAY_FORMAT = "yyyyMMdd";
@@ -54,108 +54,108 @@ namespace Lucene.Net.Documents
         private static readonly System.String MINUTE_FORMAT = "yyyyMMddHHmm";
         private static readonly System.String SECOND_FORMAT = "yyyyMMddHHmmss";
         private static readonly System.String MILLISECOND_FORMAT = "yyyyMMddHHmmssfff";
-		
-		private static readonly System.Globalization.Calendar calInstance = new System.Globalization.GregorianCalendar();
-		
-		// cannot create, the class has static methods only
-		private DateTools()
-		{
-		}
-		
-		/// <summary> Converts a Date to a string suitable for indexing.
-		/// 
-		/// </summary>
-		/// <param name="date">the date to be converted
-		/// </param>
-		/// <param name="resolution">the desired resolution, see
-		/// <see cref="Round(DateTime, DateTools.Resolution)" />
-		/// </param>
-		/// <returns> a string in format <c>yyyyMMddHHmmssSSS</c> or shorter,
-		/// depending on <c>resolution</c>; using GMT as timezone 
-		/// </returns>
-		public static System.String DateToString(System.DateTime date, Resolution resolution)
-		{
-			return TimeToString(date.Ticks / TimeSpan.TicksPerMillisecond, resolution);
-		}
-		
-		/// <summary> Converts a millisecond time to a string suitable for indexing.
-		/// 
-		/// </summary>
-		/// <param name="time">the date expressed as milliseconds since January 1, 1970, 00:00:00 GMT
-		/// </param>
-		/// <param name="resolution">the desired resolution, see
-		/// <see cref="Round(long, DateTools.Resolution)" />
-		/// </param>
-		/// <returns> a string in format <c>yyyyMMddHHmmssSSS</c> or shorter,
-		/// depending on <c>resolution</c>; using GMT as timezone
-		/// </returns>
-		public static System.String TimeToString(long time, Resolution resolution)
-		{
+        
+        private static readonly System.Globalization.Calendar calInstance = new System.Globalization.GregorianCalendar();
+        
+        // cannot create, the class has static methods only
+        private DateTools()
+        {
+        }
+        
+        /// <summary> Converts a Date to a string suitable for indexing.
+        /// 
+        /// </summary>
+        /// <param name="date">the date to be converted
+        /// </param>
+        /// <param name="resolution">the desired resolution, see
+        /// <see cref="Round(DateTime, DateTools.Resolution)" />
+        /// </param>
+        /// <returns> a string in format <c>yyyyMMddHHmmssSSS</c> or shorter,
+        /// depending on <c>resolution</c>; using GMT as timezone 
+        /// </returns>
+        public static System.String DateToString(System.DateTime date, Resolution resolution)
+        {
+            return TimeToString(date.Ticks / TimeSpan.TicksPerMillisecond, resolution);
+        }
+        
+        /// <summary> Converts a millisecond time to a string suitable for indexing.
+        /// 
+        /// </summary>
+        /// <param name="time">the date expressed as milliseconds since January 1, 1970, 00:00:00 GMT
+        /// </param>
+        /// <param name="resolution">the desired resolution, see
+        /// <see cref="Round(long, DateTools.Resolution)" />
+        /// </param>
+        /// <returns> a string in format <c>yyyyMMddHHmmssSSS</c> or shorter,
+        /// depending on <c>resolution</c>; using GMT as timezone
+        /// </returns>
+        public static System.String TimeToString(long time, Resolution resolution)
+        {
             System.DateTime date = new System.DateTime(Round(time, resolution));
-			
-			if (resolution == Resolution.YEAR)
-			{
+            
+            if (resolution == Resolution.YEAR)
+            {
                 return date.ToString(YEAR_FORMAT, System.Globalization.CultureInfo.InvariantCulture);
-			}
-			else if (resolution == Resolution.MONTH)
-			{
+            }
+            else if (resolution == Resolution.MONTH)
+            {
                 return date.ToString(MONTH_FORMAT, System.Globalization.CultureInfo.InvariantCulture);
-			}
-			else if (resolution == Resolution.DAY)
-			{
+            }
+            else if (resolution == Resolution.DAY)
+            {
                 return date.ToString(DAY_FORMAT, System.Globalization.CultureInfo.InvariantCulture);
-			}
-			else if (resolution == Resolution.HOUR)
-			{
+            }
+            else if (resolution == Resolution.HOUR)
+            {
                 return date.ToString(HOUR_FORMAT, System.Globalization.CultureInfo.InvariantCulture);
-			}
-			else if (resolution == Resolution.MINUTE)
-			{
+            }
+            else if (resolution == Resolution.MINUTE)
+            {
                 return date.ToString(MINUTE_FORMAT, System.Globalization.CultureInfo.InvariantCulture);
-			}
-			else if (resolution == Resolution.SECOND)
-			{
+            }
+            else if (resolution == Resolution.SECOND)
+            {
                 return date.ToString(SECOND_FORMAT, System.Globalization.CultureInfo.InvariantCulture);
-			}
-			else if (resolution == Resolution.MILLISECOND)
-			{
+            }
+            else if (resolution == Resolution.MILLISECOND)
+            {
                 return date.ToString(MILLISECOND_FORMAT, System.Globalization.CultureInfo.InvariantCulture);
-			}
-			
-			throw new System.ArgumentException("unknown resolution " + resolution);
-		}
-		
-		/// <summary> Converts a string produced by <c>timeToString</c> or
-		/// <c>DateToString</c> back to a time, represented as the
-		/// number of milliseconds since January 1, 1970, 00:00:00 GMT.
-		/// 
-		/// </summary>
-		/// <param name="dateString">the date string to be converted
-		/// </param>
-		/// <returns> the number of milliseconds since January 1, 1970, 00:00:00 GMT
-		/// </returns>
-		/// <throws>  ParseException if <c>dateString</c> is not in the  </throws>
-		/// <summary>  expected format 
-		/// </summary>
-		public static long StringToTime(System.String dateString)
-		{
-			return StringToDate(dateString).Ticks;
-		}
-		
-		/// <summary> Converts a string produced by <c>timeToString</c> or
-		/// <c>DateToString</c> back to a time, represented as a
-		/// Date object.
-		/// 
-		/// </summary>
-		/// <param name="dateString">the date string to be converted
-		/// </param>
-		/// <returns> the parsed time as a Date object 
-		/// </returns>
-		/// <throws>  ParseException if <c>dateString</c> is not in the  </throws>
-		/// <summary>  expected format 
-		/// </summary>
-		public static System.DateTime StringToDate(System.String dateString)
-		{
+            }
+            
+            throw new System.ArgumentException("unknown resolution " + resolution);
+        }
+        
+        /// <summary> Converts a string produced by <c>TimeToString</c> or
+        /// <c>DateToString</c> back to a time, represented as the
+        /// number of milliseconds since January 1, 1970, 00:00:00 GMT.
+        /// 
+        /// </summary>
+        /// <param name="dateString">the date string to be converted
+        /// </param>
+        /// <returns> the number of milliseconds since January 1, 1970, 00:00:00 GMT
+        /// </returns>
+        /// <throws>  ParseException if <c>dateString</c> is not in the
+        /// expected format
+        /// </throws>
+        public static long StringToTime(System.String dateString)
+        {
+            return StringToDate(dateString).Ticks;
+        }
+        
+        /// <summary> Converts a string produced by <c>TimeToString</c> or
+        /// <c>DateToString</c> back to a time, represented as a
+        /// Date object.
+        /// 
+        /// </summary>
+        /// <param name="dateString">the date string to be converted
+        /// </param>
+        /// <returns> the parsed time as a Date object 
+        /// </returns>
+        /// <throws>  ParseException if <c>dateString</c> is not in the
+        /// expected format
+        /// </throws>
+        public static System.DateTime StringToDate(System.String dateString)
+        {
             System.DateTime date;
             if (dateString.Length == 4)
             {
@@ -217,42 +217,42 @@ namespace Lucene.Net.Documents
                 throw new System.FormatException("Input is not a valid date string: " + dateString);
             }
             return date;
-		}
+        }
 
-	    /// <summary> Limit a date's resolution. For example, the date <c>2004-09-21 13:50:11</c>
-	    /// will be changed to <c>2004-09-01 00:00:00</c> when using
-	    /// <c>Resolution.MONTH</c>. 
-	    /// 
-	    /// </summary>
-	    /// <param name="date"></param>
-	    /// <param name="resolution">The desired resolution of the date to be returned
-	    /// </param>
-	    /// <returns> the date with all values more precise than <c>resolution</c>
-	    /// set to 0 or 1
-	    /// </returns>
-	    public static System.DateTime Round(System.DateTime date, Resolution resolution)
-		{
-			return new System.DateTime(Round(date.Ticks / TimeSpan.TicksPerMillisecond, resolution));
-		}
-		
-		/// <summary> Limit a date's resolution. For example, the date <c>1095767411000</c>
-		/// (which represents 2004-09-21 13:50:11) will be changed to 
-		/// <c>1093989600000</c> (2004-09-01 00:00:00) when using
-		/// <c>Resolution.MONTH</c>.
-		/// 
-		/// </summary>
-		/// <param name="time">The time in milliseconds (not ticks).</param>
-		/// <param name="resolution">The desired resolution of the date to be returned
-		/// </param>
-		/// <returns> the date with all values more precise than <c>resolution</c>
-		/// set to 0 or 1, expressed as milliseconds since January 1, 1970, 00:00:00 GMT
-		/// </returns>
-		public static long Round(long time, Resolution resolution)
-		{
-			System.DateTime dt = new System.DateTime(time * TimeSpan.TicksPerMillisecond);
-			
-			if (resolution == Resolution.YEAR)
-			{
+        /// <summary> Limit a date's resolution. For example, the date <c>2004-09-21 13:50:11</c>
+        /// will be changed to <c>2004-09-01 00:00:00</c> when using
+        /// <c>Resolution.MONTH</c>. 
+        /// 
+        /// </summary>
+        /// <param name="date"></param>
+        /// <param name="resolution">The desired resolution of the date to be returned
+        /// </param>
+        /// <returns> the date with all values more precise than <c>resolution</c>
+        /// set to 0 or 1
+        /// </returns>
+        public static System.DateTime Round(System.DateTime date, Resolution resolution)
+        {
+            return new System.DateTime(Round(date.Ticks / TimeSpan.TicksPerMillisecond, resolution));
+        }
+        
+        /// <summary> Limit a date's resolution. For example, the date <c>1095767411000</c>
+        /// (which represents 2004-09-21 13:50:11) will be changed to 
+        /// <c>1093989600000</c> (2004-09-01 00:00:00) when using
+        /// <c>Resolution.MONTH</c>.
+        /// 
+        /// </summary>
+        /// <param name="time">The time in milliseconds (not ticks).</param>
+        /// <param name="resolution">The desired resolution of the date to be returned
+        /// </param>
+        /// <returns> the date with all values more precise than <c>resolution</c>
+        /// set to 0 or 1, expressed as milliseconds since January 1, 1970, 00:00:00 GMT
+        /// </returns>
+        public static long Round(long time, Resolution resolution)
+        {
+            System.DateTime dt = new System.DateTime(time * TimeSpan.TicksPerMillisecond);
+            
+            if (resolution == Resolution.YEAR)
+            {
                 dt = dt.AddMonths(1 - dt.Month);
                 dt = dt.AddDays(1 - dt.Day);
                 dt = dt.AddHours(0 - dt.Hour);
@@ -260,91 +260,91 @@ namespace Lucene.Net.Documents
                 dt = dt.AddSeconds(0 - dt.Second);
                 dt = dt.AddMilliseconds(0 - dt.Millisecond);
             }
-			else if (resolution == Resolution.MONTH)
-			{
+            else if (resolution == Resolution.MONTH)
+            {
                 dt = dt.AddDays(1 - dt.Day);
                 dt = dt.AddHours(0 - dt.Hour);
                 dt = dt.AddMinutes(0 - dt.Minute);
                 dt = dt.AddSeconds(0 - dt.Second);
                 dt = dt.AddMilliseconds(0 - dt.Millisecond);
             }
-			else if (resolution == Resolution.DAY)
-			{
+            else if (resolution == Resolution.DAY)
+            {
                 dt = dt.AddHours(0 - dt.Hour);
                 dt = dt.AddMinutes(0 - dt.Minute);
                 dt = dt.AddSeconds(0 - dt.Second);
                 dt = dt.AddMilliseconds(0 - dt.Millisecond);
             }
-			else if (resolution == Resolution.HOUR)
-			{
+            else if (resolution == Resolution.HOUR)
+            {
                 dt = dt.AddMinutes(0 - dt.Minute);
                 dt = dt.AddSeconds(0 - dt.Second);
                 dt = dt.AddMilliseconds(0 - dt.Millisecond);
             }
-			else if (resolution == Resolution.MINUTE)
-			{
+            else if (resolution == Resolution.MINUTE)
+            {
                 dt = dt.AddSeconds(0 - dt.Second);
                 dt = dt.AddMilliseconds(0 - dt.Millisecond);
             }
-			else if (resolution == Resolution.SECOND)
-			{
+            else if (resolution == Resolution.SECOND)
+            {
                 dt = dt.AddMilliseconds(0 - dt.Millisecond);
             }
-			else if (resolution == Resolution.MILLISECOND)
-			{
-				// don't cut off anything
-			}
-			else
-			{
-				throw new System.ArgumentException("unknown resolution " + resolution);
-			}
-			return dt.Ticks;
-		}
-		
-		/// <summary>Specifies the time granularity. </summary>
-		public class Resolution
-		{
-			
-			public static readonly Resolution YEAR = new Resolution("year");
-			public static readonly Resolution MONTH = new Resolution("month");
-			public static readonly Resolution DAY = new Resolution("day");
-			public static readonly Resolution HOUR = new Resolution("hour");
-			public static readonly Resolution MINUTE = new Resolution("minute");
-			public static readonly Resolution SECOND = new Resolution("second");
-			public static readonly Resolution MILLISECOND = new Resolution("millisecond");
-			
-			private System.String resolution;
-			
-			internal Resolution()
-			{
-			}
-			
-			internal Resolution(System.String resolution)
-			{
-				this.resolution = resolution;
-			}
-			
-			public override System.String ToString()
-			{
-				return resolution;
-			}
-		}
-		static DateTools()
-		{
-			{
-				// times need to be normalized so the value doesn't depend on the 
-				// location the index is created/used:
+            else if (resolution == Resolution.MILLISECOND)
+            {
+                // don't cut off anything
+            }
+            else
+            {
+                throw new System.ArgumentException("unknown resolution " + resolution);
+            }
+            return dt.Ticks;
+        }
+        
+        /// <summary>Specifies the time granularity. </summary>
+        public class Resolution
+        {
+            
+            public static readonly Resolution YEAR = new Resolution("year");
+            public static readonly Resolution MONTH = new Resolution("month");
+            public static readonly Resolution DAY = new Resolution("day");
+            public static readonly Resolution HOUR = new Resolution("hour");
+            public static readonly Resolution MINUTE = new Resolution("minute");
+            public static readonly Resolution SECOND = new Resolution("second");
+            public static readonly Resolution MILLISECOND = new Resolution("millisecond");
+            
+            private System.String resolution;
+            
+            internal Resolution()
+            {
+            }
+            
+            internal Resolution(System.String resolution)
+            {
+                this.resolution = resolution;
+            }
+            
+            public override System.String ToString()
+            {
+                return resolution;
+            }
+        }
+        static DateTools()
+        {
+            {
+                // times need to be normalized so the value doesn't depend on the 
+                // location the index is created/used:
                 // {{Aroush-2.1}}
                 /*
-				YEAR_FORMAT.setTimeZone(GMT);
-				MONTH_FORMAT.setTimeZone(GMT);
-				DAY_FORMAT.setTimeZone(GMT);
-				HOUR_FORMAT.setTimeZone(GMT);
-				MINUTE_FORMAT.setTimeZone(GMT);
-				SECOND_FORMAT.setTimeZone(GMT);
-				MILLISECOND_FORMAT.setTimeZone(GMT);
+                YEAR_FORMAT.setTimeZone(GMT);
+                MONTH_FORMAT.setTimeZone(GMT);
+                DAY_FORMAT.setTimeZone(GMT);
+                HOUR_FORMAT.setTimeZone(GMT);
+                MINUTE_FORMAT.setTimeZone(GMT);
+                SECOND_FORMAT.setTimeZone(GMT);
+                MILLISECOND_FORMAT.setTimeZone(GMT);
                 */
-			}
-		}
-	}
+            }
+        }
+    }
 }
\ No newline at end of file
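
DateTools.Resolution drives both the string format and Round(); a short sketch
of the truncation behavior documented above, using only methods from this file:

    using System;
    using Lucene.Net.Documents;

    class DateToolsDemo
    {
        static void Main()
        {
            DateTime stamp = new DateTime(2004, 9, 21, 13, 50, 11);

            // DAY resolution emits yyyyMMdd; hours and finer components are dropped.
            Console.WriteLine(DateTools.DateToString(stamp, DateTools.Resolution.DAY));
            // -> 20040921

            // Round() performs the same truncation without the formatting step.
            DateTime monthStart = DateTools.Round(stamp, DateTools.Resolution.MONTH);
            Console.WriteLine(monthStart);  // 2004-09-01 00:00:00
        }
    }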

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Document/Document.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/Document.cs b/src/core/Document/Document.cs
index f24a46a..e002485 100644
--- a/src/core/Document/Document.cs
+++ b/src/core/Document/Document.cs
@@ -24,149 +24,149 @@ using Searcher = Lucene.Net.Search.Searcher;
 
 namespace Lucene.Net.Documents
 {
-	
-	/// <summary>Documents are the unit of indexing and search.
-	/// 
-	/// A Document is a set of fields.  Each field has a name and a textual value.
-	/// A field may be <see cref="IFieldable.IsStored()">stored</see> with the document, in which
-	/// case it is returned with search hits on the document.  Thus each document
-	/// should typically contain one or more stored fields which uniquely identify
-	/// it.
-	/// 
-	/// <p/>Note that fields which are <i>not</i> <see cref="IFieldable.IsStored()">stored</see> are
-	/// <i>not</i> available in documents retrieved from the index, e.g. with <see cref="ScoreDoc.Doc" />,
-	/// <see cref="Searcher.Doc(int)" /> or <see cref="IndexReader.Document(int)" />.
-	/// </summary>
-	
-	[Serializable]
-	public sealed class Document
-	{
-		private class AnonymousClassEnumeration : System.Collections.IEnumerator
-		{
-			public AnonymousClassEnumeration(Document enclosingInstance)
-			{
-				InitBlock(enclosingInstance);
-			}
-			private void  InitBlock(Document enclosingInstance)
-			{
-				this.enclosingInstance = enclosingInstance;
-				iter = Enclosing_Instance.fields.GetEnumerator();
-			}
-			private System.Object tempAuxObj;
-			public bool MoveNext()
-			{
-				bool result = HasMoreElements();
-				if (result)
-				{
-					tempAuxObj = NextElement();
-				}
-				return result;
-			}
-			public void  Reset()
-			{
-				tempAuxObj = null;
-			}
-			public System.Object Current
-			{
-				get
-				{
-					return tempAuxObj;
-				}
-				
-			}
-			private Document enclosingInstance;
-			public Document Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			internal System.Collections.IEnumerator iter;
-			public bool HasMoreElements()
-			{
-				return iter.MoveNext();
-			}
-			public System.Object NextElement()
-			{
-				return iter.Current;
-			}
-		}
-		internal System.Collections.Generic.IList<IFieldable> fields = new System.Collections.Generic.List<IFieldable>();
-		private float boost = 1.0f;
-		
-		/// <summary>Constructs a new document with no fields. </summary>
-		public Document()
-		{
-		}
+    
+    /// <summary>Documents are the unit of indexing and search.
+    /// 
+    /// A Document is a set of fields.  Each field has a name and a textual value.
+    /// A field may be <see cref="IFieldable.IsStored()">stored</see> with the document, in which
+    /// case it is returned with search hits on the document.  Thus each document
+    /// should typically contain one or more stored fields which uniquely identify
+    /// it.
+    /// 
+    /// <p/>Note that fields which are <i>not</i> <see cref="IFieldable.IsStored()">stored</see> are
+    /// <i>not</i> available in documents retrieved from the index, e.g. with <see cref="ScoreDoc.Doc" />,
+    /// <see cref="Searcher.Doc(int)" /> or <see cref="IndexReader.Document(int)" />.
+    /// </summary>
+    
+    [Serializable]
+    public sealed class Document
+    {
+        private class AnonymousClassEnumeration : System.Collections.IEnumerator
+        {
+            public AnonymousClassEnumeration(Document enclosingInstance)
+            {
+                InitBlock(enclosingInstance);
+            }
+            private void  InitBlock(Document enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+                iter = Enclosing_Instance.fields.GetEnumerator();
+            }
+            private System.Object tempAuxObj;
+            public bool MoveNext()
+            {
+                bool result = HasMoreElements();
+                if (result)
+                {
+                    tempAuxObj = NextElement();
+                }
+                return result;
+            }
+            public void  Reset()
+            {
+                tempAuxObj = null;
+            }
+            public System.Object Current
+            {
+                get
+                {
+                    return tempAuxObj;
+                }
+                
+            }
+            private Document enclosingInstance;
+            public Document Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+                
+            }
+            internal System.Collections.IEnumerator iter;
+            public bool HasMoreElements()
+            {
+                return iter.MoveNext();
+            }
+            public System.Object NextElement()
+            {
+                return iter.Current;
+            }
+        }
+        internal System.Collections.Generic.IList<IFieldable> fields = new System.Collections.Generic.List<IFieldable>();
+        private float boost = 1.0f;
+        
+        /// <summary>Constructs a new document with no fields. </summary>
+        public Document()
+        {
+        }
 
 
-	    /// <summary>Gets or sets, at indexing time, the boost factor. 
-	    /// <para>
-	    /// The default is 1.0
-	    /// </para>
-	    /// <p/>Note that once a document is indexed this value is no longer available
-	    /// from the index.  At search time, for retrieved documents, this property always 
-	    /// returns 1. This, however, does not mean that the boost value set at indexing 
-	    /// time was ignored - it was just combined with other indexing time factors and 
-	    /// stored elsewhere, for better indexing and search performance. (For more 
-	    /// information see the "norm(t,d)" part of the scoring formula in 
-	    /// <see cref="Lucene.Net.Search.Similarity">Similarity</see>.)
-	    /// </summary>
-	    public float Boost
-	    {
-	        get { return boost; }
-	        set { this.boost = value; }
-	    }
+        /// <summary>Gets or sets, at indexing time, the boost factor. 
+        /// <para>
+        /// The default is 1.0
+        /// </para>
+        /// <p/>Note that once a document is indexed this value is no longer available
+        /// from the index.  At search time, for retrieved documents, this property always 
+        /// returns 1. This, however, does not mean that the boost value set at indexing 
+        /// time was ignored - it was just combined with other indexing time factors and 
+        /// stored elsewhere, for better indexing and search performance. (For more 
+        /// information see the "norm(t,d)" part of the scoring formula in 
+        /// <see cref="Lucene.Net.Search.Similarity">Similarity</see>.)
+        /// </summary>
+        public float Boost
+        {
+            get { return boost; }
+            set { this.boost = value; }
+        }
 
-	    /// <summary> <p/>Adds a field to a document.  Several fields may be added with
-		/// the same name.  In this case, if the fields are indexed, their text is
-		/// treated as though appended for the purposes of search.<p/>
-		/// <p/> Note that add like the removeField(s) methods only makes sense 
-		/// prior to adding a document to an index. These methods cannot
-		/// be used to change the content of an existing index! In order to achieve this,
-		/// a document has to be deleted from an index and a new changed version of that
-		/// document has to be added.<p/>
-		/// </summary>
-		public void  Add(IFieldable field)
-		{
-			fields.Add(field);
-		}
-		
-		/// <summary> <p/>Removes field with the specified name from the document.
-		/// If multiple fields exist with this name, this method removes the first field that has been added.
-		/// If there is no field with the specified name, the document remains unchanged.<p/>
-		/// <p/> Note that the removeField(s) methods like the add method only make sense 
-		/// prior to adding a document to an index. These methods cannot
-		/// be used to change the content of an existing index! In order to achieve this,
-		/// a document has to be deleted from an index and a new changed version of that
-		/// document has to be added.<p/>
-		/// </summary>
-		public void  RemoveField(System.String name)
-		{
-			System.Collections.Generic.IEnumerator<IFieldable> it = fields.GetEnumerator();
-			while (it.MoveNext())
-			{
-				IFieldable field = it.Current;
-				if (field.Name.Equals(name))
-				{
+        /// <summary> <p/>Adds a field to a document.  Several fields may be added with
+        /// the same name.  In this case, if the fields are indexed, their text is
+        /// <p/> Note that add, like the removeField(s) methods, only makes sense 
+        /// <p/> Note that add like the removeField(s) methods only makes sense 
+        /// prior to adding a document to an index. These methods cannot
+        /// be used to change the content of an existing index! In order to achieve this,
+        /// a document has to be deleted from an index and a new changed version of that
+        /// document has to be added.<p/>
+        /// </summary>
+        public void  Add(IFieldable field)
+        {
+            fields.Add(field);
+        }
+        
+        /// <summary> <p/>Removes field with the specified name from the document.
+        /// If multiple fields exist with this name, this method removes the first field that has been added.
+        /// If there is no field with the specified name, the document remains unchanged.<p/>
+        /// <p/> Note that the removeField(s) methods, like the add method, only make sense 
+        /// prior to adding a document to an index. These methods cannot
+        /// be used to change the content of an existing index! In order to achieve this,
+        /// a document has to be deleted from an index and a new changed version of that
+        /// document has to be added.<p/>
+        /// </summary>
+        public void  RemoveField(System.String name)
+        {
+            System.Collections.Generic.IEnumerator<IFieldable> it = fields.GetEnumerator();
+            while (it.MoveNext())
+            {
+                IFieldable field = it.Current;
+                if (field.Name.Equals(name))
+                {
                     fields.Remove(field);
-					return ;
-				}
-			}
-		}
-		
-		/// <summary> <p/>Removes all fields with the given name from the document.
-		/// If there is no field with the specified name, the document remains unchanged.<p/>
-		/// <p/> Note that the removeField(s) methods like the add method only make sense 
-		/// prior to adding a document to an index. These methods cannot
-		/// be used to change the content of an existing index! In order to achieve this,
-		/// a document has to be deleted from an index and a new changed version of that
-		/// document has to be added.<p/>
-		/// </summary>
-		public void  RemoveFields(System.String name)
-		{
+                    return ;
+                }
+            }
+        }
+        
+        /// <summary> <p/>Removes all fields with the given name from the document.
+        /// If there is no field with the specified name, the document remains unchanged.<p/>
+        /// <p/> Note that the removeField(s) methods, like the add method, only make sense 
+        /// prior to adding a document to an index. These methods cannot
+        /// be used to change the content of an existing index! In order to achieve this,
+        /// a document has to be deleted from an index and a new changed version of that
+        /// document has to be added.<p/>
+        /// </summary>
+        public void  RemoveFields(System.String name)
+        {
             for (int i = fields.Count - 1; i >= 0; i--)
             {
                 IFieldable field = fields[i];
@@ -175,208 +175,208 @@ namespace Lucene.Net.Documents
                     fields.RemoveAt(i);
                 }
             }
-		}
-		
-		/// <summary>Returns a field with the given name if any exist in this document, or
-		/// null.  If multiple fields exist with this name, this method returns the
-		/// first value added.
-		/// Do not use this method with lazy loaded fields.
-		/// </summary>
-		public Field GetField(System.String name)
-		{
-		    return (Field) GetFieldable(name);
-		}
-		
-		
-		/// <summary>Returns a field with the given name if any exist in this document, or
-		/// null.  If multiple fields exist with this name, this method returns the
-		/// first value added.
-		/// </summary>
-		public IFieldable GetFieldable(System.String name)
-		{
-			foreach(IFieldable field in fields)
+        }
+        
+        /// <summary>Returns a field with the given name if any exist in this document, or
+        /// null.  If multiple fields exist with this name, this method returns the
+        /// first value added.
+        /// Do not use this method with lazy loaded fields.
+        /// </summary>
+        public Field GetField(System.String name)
+        {
+            return (Field) GetFieldable(name);
+        }
+        
+        
+        /// <summary>Returns a field with the given name if any exist in this document, or
+        /// null.  If multiple fields exist with this name, this method returns the
+        /// first value added.
+        /// </summary>
+        public IFieldable GetFieldable(System.String name)
+        {
+            foreach(IFieldable field in fields)
+            {
+                if (field.Name.Equals(name))
+                    return field;
+            }
+            return null;
+        }
+        
+        /// <summary>Returns the string value of the field with the given name if any exist in
+        /// this document, or null.  If multiple fields exist with this name, this
+        /// method returns the first value added. If only binary fields with this name
+        /// exist, returns null.
+        /// </summary>
+        public System.String Get(System.String name)
+        {
+            foreach(IFieldable field in fields)
+            {
+                if (field.Name.Equals(name) && (!field.IsBinary))
+                    return field.StringValue;
+            }
+            return null;
+        }
+        
+        /// <summary>Returns a List of all the fields in a document.
+        /// <p/>Note that fields which are <i>not</i> <see cref="IFieldable.IsStored()">stored</see> are
+        /// <i>not</i> available in documents retrieved from the
+        /// index, e.g. <see cref="Searcher.Doc(int)" /> or <see cref="IndexReader.Document(int)" />.
+        /// </summary>
+        public System.Collections.Generic.IList<IFieldable> GetFields()
+        {
+            return fields;
+        }
+        
+        private static readonly Field[] NO_FIELDS = new Field[0];
+        
+        /// <summary> Returns an array of <see cref="Field" />s with the given name.
+        /// Do not use with lazy-loaded fields.
+        /// This method returns an empty array when there are no
+        /// matching fields.  It never returns null.
+        /// 
+        /// </summary>
+        /// <param name="name">the name of the field
+        /// </param>
+        /// <returns> a <c>Field[]</c> array
+        /// </returns>
+        public Field[] GetFields(System.String name)
+        {
+            var result = new System.Collections.Generic.List<Field>();
+            foreach(IFieldable field in fields)
+            {
+                if (field.Name.Equals(name))
+                {
+                    result.Add((Field)field);
+                }
+            }
+            
+            if (result.Count == 0)
+                return NO_FIELDS;
+            
+            return result.ToArray();
+        }
+        
+        
+        private static readonly IFieldable[] NO_FIELDABLES = new IFieldable[0];
+        
+        /// <summary> Returns an array of <see cref="IFieldable" />s with the given name.
+        /// This method returns an empty array when there are no
+        /// matching fields.  It never returns null.
+        /// 
+        /// </summary>
+        /// <param name="name">the name of the field
+        /// </param>
+        /// <returns> a <c>Fieldable[]</c> array
+        /// </returns>
+        public IFieldable[] GetFieldables(System.String name)
+        {
+            var result = new System.Collections.Generic.List<IFieldable>();
+            foreach(IFieldable field in fields)
+            {
+                if (field.Name.Equals(name))
+                {
+                    result.Add(field);
+                }
+            }
+            
+            if (result.Count == 0)
+                return NO_FIELDABLES;
+            
+            return result.ToArray();
+        }
+        
+        
+        private static readonly System.String[] NO_STRINGS = new System.String[0];
+        
+        /// <summary> Returns an array of values of the field specified as the method parameter.
+        /// This method returns an empty array when there are no
+        /// matching fields.  It never returns null.
+        /// </summary>
+        /// <param name="name">the name of the field
+        /// </param>
+        /// <returns> a <c>String[]</c> of field values
+        /// </returns>
+        public System.String[] GetValues(System.String name)
+        {
+            var result = new System.Collections.Generic.List<string>();
+            foreach(IFieldable field in fields)
+            {
+                if (field.Name.Equals(name) && (!field.IsBinary))
+                    result.Add(field.StringValue);
+            }
+            
+            if (result.Count == 0)
+                return NO_STRINGS;
+            
+            return result.ToArray();
+        }
+        
+        private static readonly byte[][] NO_BYTES = new byte[0][];
+        
+        /// <summary> Returns an array of byte arrays for all of the fields that have the name specified
+        /// as the method parameter.  This method returns an empty
+        /// array when there are no matching fields.  It never
+        /// returns null.
+        /// 
+        /// </summary>
+        /// <param name="name">the name of the field
+        /// </param>
+        /// <returns> a <c>byte[][]</c> of binary field values
+        /// </returns>
+        public byte[][] GetBinaryValues(System.String name)
+        {
+            var result = new System.Collections.Generic.List<byte[]>();
+            foreach(IFieldable field in fields)
             {
-				if (field.Name.Equals(name))
-					return field;
-			}
-			return null;
-		}
-		
-		/// <summary>Returns the string value of the field with the given name if any exist in
-		/// this document, or null.  If multiple fields exist with this name, this
-		/// method returns the first value added. If only binary fields with this name
-		/// exist, returns null.
-		/// </summary>
-		public System.String Get(System.String name)
-		{
-			foreach(IFieldable field in fields)
-			{
-				if (field.Name.Equals(name) && (!field.IsBinary))
-					return field.StringValue;
-			}
-			return null;
-		}
-		
-		/// <summary>Returns a List of all the fields in a document.
-		/// <p/>Note that fields which are <i>not</i> <see cref="IFieldable.IsStored()">stored</see> are
-		/// <i>not</i> available in documents retrieved from the
-		/// index, e.g. <see cref="Searcher.Doc(int)" /> or <see cref="IndexReader.Document(int)" />.
-		/// </summary>
-		public System.Collections.Generic.IList<IFieldable> GetFields()
-		{
-			return fields;
-		}
-		
-		private static readonly Field[] NO_FIELDS = new Field[0];
-		
-		/// <summary> Returns an array of <see cref="Field" />s with the given name.
-		/// Do not use with lazy loaded fields.
-		/// This method returns an empty array when there are no
-		/// matching fields.  It never returns null.
-		/// 
-		/// </summary>
-		/// <param name="name">the name of the field
-		/// </param>
-		/// <returns> a <c>Field[]</c> array
-		/// </returns>
-		public Field[] GetFields(System.String name)
-		{
-			var result = new System.Collections.Generic.List<Field>();
-			foreach(IFieldable field in fields)
-			{
-				if (field.Name.Equals(name))
-				{
-					result.Add((Field)field);
-				}
-			}
-			
-			if (result.Count == 0)
-				return NO_FIELDS;
-			
-			return result.ToArray();
-		}
-		
-		
-		private static readonly IFieldable[] NO_FIELDABLES = new IFieldable[0];
-		
-		/// <summary> Returns an array of <see cref="IFieldable" />s with the given name.
-		/// This method returns an empty array when there are no
-		/// matching fields.  It never returns null.
-		/// 
-		/// </summary>
-		/// <param name="name">the name of the field
-		/// </param>
-		/// <returns> a <c>Fieldable[]</c> array
-		/// </returns>
-		public IFieldable[] GetFieldables(System.String name)
-		{
-			var result = new System.Collections.Generic.List<IFieldable>();
-			foreach(IFieldable field in fields)
-			{
-				if (field.Name.Equals(name))
-				{
-					result.Add(field);
-				}
-			}
-			
-			if (result.Count == 0)
-				return NO_FIELDABLES;
-			
-			return result.ToArray();
-		}
-		
-		
-		private static readonly System.String[] NO_STRINGS = new System.String[0];
-		
-		/// <summary> Returns an array of values of the field specified as the method parameter.
-		/// This method returns an empty array when there are no
-		/// matching fields.  It never returns null.
-		/// </summary>
-		/// <param name="name">the name of the field
-		/// </param>
-		/// <returns> a <c>String[]</c> of field values
-		/// </returns>
-		public System.String[] GetValues(System.String name)
-		{
-			var result = new System.Collections.Generic.List<string>();
-			foreach(IFieldable field in fields)
-			{
-				if (field.Name.Equals(name) && (!field.IsBinary))
-					result.Add(field.StringValue);
-			}
-			
-			if (result.Count == 0)
-				return NO_STRINGS;
-			
-			return result.ToArray();
-		}
-		
-		private static readonly byte[][] NO_BYTES = new byte[0][];
-		
-		/// <summary> Returns an array of byte arrays for of the fields that have the name specified
-		/// as the method parameter.  This method returns an empty
-		/// array when there are no matching fields.  It never
-		/// returns null.
-		/// 
-		/// </summary>
-		/// <param name="name">the name of the field
-		/// </param>
-		/// <returns> a <c>byte[][]</c> of binary field values
-		/// </returns>
-		public byte[][] GetBinaryValues(System.String name)
-		{
-			var result = new System.Collections.Generic.List<byte[]>();
-			foreach(IFieldable field in fields)
-			{
-				if (field.Name.Equals(name) && (field.IsBinary))
-					result.Add(field.GetBinaryValue());
-			}
-			
-			if (result.Count == 0)
-				return NO_BYTES;
+                if (field.Name.Equals(name) && (field.IsBinary))
+                    result.Add(field.GetBinaryValue());
+            }
+            
+            if (result.Count == 0)
+                return NO_BYTES;
 
             return result.ToArray();
         }
-		
-		/// <summary> Returns an array of bytes for the first (or only) field that has the name
-		/// specified as the method parameter. This method will return <c>null</c>
-		/// if no binary fields with the specified name are available.
-		/// There may be non-binary fields with the same name.
-		/// 
-		/// </summary>
-		/// <param name="name">the name of the field.
-		/// </param>
-		/// <returns> a <c>byte[]</c> containing the binary field value or <c>null</c>
-		/// </returns>
-		public byte[] GetBinaryValue(System.String name)
-		{
-			foreach(IFieldable field in fields)
-			{
-				if (field.Name.Equals(name) && (field.IsBinary))
-					return field.GetBinaryValue();
-			}
-			return null;
-		}
-		
-		/// <summary>Prints the fields of a document for human consumption. </summary>
-		public override System.String ToString()
-		{
-			System.Text.StringBuilder buffer = new System.Text.StringBuilder();
-			buffer.Append("Document<");
-			for (int i = 0; i < fields.Count; i++)
-			{
-				IFieldable field = fields[i];
-				buffer.Append(field.ToString());
-				if (i != fields.Count - 1)
-					buffer.Append(" ");
-			}
-			buffer.Append(">");
-			return buffer.ToString();
-		}
+        
+        /// <summary> Returns an array of bytes for the first (or only) field that has the name
+        /// specified as the method parameter. This method will return <c>null</c>
+        /// if no binary fields with the specified name are available.
+        /// There may be non-binary fields with the same name.
+        /// 
+        /// </summary>
+        /// <param name="name">the name of the field.
+        /// </param>
+        /// <returns> a <c>byte[]</c> containing the binary field value or <c>null</c>
+        /// </returns>
+        public byte[] GetBinaryValue(System.String name)
+        {
+            foreach(IFieldable field in fields)
+            {
+                if (field.Name.Equals(name) && (field.IsBinary))
+                    return field.GetBinaryValue();
+            }
+            return null;
+        }
+        
+        /// <summary>Prints the fields of a document for human consumption. </summary>
+        public override System.String ToString()
+        {
+            System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+            buffer.Append("Document<");
+            for (int i = 0; i < fields.Count; i++)
+            {
+                IFieldable field = fields[i];
+                buffer.Append(field.ToString());
+                if (i != fields.Count - 1)
+                    buffer.Append(" ");
+            }
+            buffer.Append(">");
+            return buffer.ToString();
+        }
 
         public System.Collections.Generic.IList<IFieldable> fields_ForNUnit
         {
             get { return fields; }
         }
-	}
+    }
 }
\ No newline at end of file
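
The accessor methods converted above are easiest to follow next to a short usage sketch. A minimal example in C#, with hypothetical field names, assuming the Lucene.Net 3.x Document/Field API shown in this diff:

using Lucene.Net.Documents;

var doc = new Document();
// Two fields may share a name: Get/GetField return the first value added,
// while GetValues/GetFields return all of them (never null).
doc.Add(new Field("title", "Lucene in Action", Field.Store.YES, Field.Index.ANALYZED));
doc.Add(new Field("author", "Erik Hatcher", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.Add(new Field("author", "Otis Gospodnetic", Field.Store.YES, Field.Index.NOT_ANALYZED));

string title = doc.Get("title");             // "Lucene in Action"
string first = doc.Get("author");            // first value added: "Erik Hatcher"
string[] all = doc.GetValues("author");      // both values
byte[] bytes = doc.GetBinaryValue("author"); // null: no binary field by that name

Note the doc comments above: GetField/GetFields must not be used with lazily loaded fields; those should be read through GetFieldable/GetFieldables.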

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Document/FieldSelector.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/FieldSelector.cs b/src/core/Document/FieldSelector.cs
index f940f08..18d2e1c 100644
--- a/src/core/Document/FieldSelector.cs
+++ b/src/core/Document/FieldSelector.cs
@@ -21,17 +21,17 @@ namespace Lucene.Net.Documents
 {
     /// <summary> Similar to a <a href="http://download.oracle.com/javase/1.5.0/docs/api/java/io/FileFilter.html">
     /// java.io.FileFilter</a>, the FieldSelector allows one to make decisions about
-	/// what Fields get loaded on a <see cref="Document" /> by <see cref="Lucene.Net.Index.IndexReader.Document(int,Lucene.Net.Documents.FieldSelector)" />
-	/// </summary>
-	public interface FieldSelector
-	{
-		
-		/// <summary> </summary>
-		/// <param name="fieldName">the field to accept or reject
-		/// </param>
-		/// <returns> an instance of <see cref="FieldSelectorResult" />
-		/// if the <see cref="Field" /> named <c>fieldName</c> should be loaded.
-		/// </returns>
-		FieldSelectorResult Accept(System.String fieldName);
-	}
+    /// what Fields get loaded on a <see cref="Document" /> by <see cref="Lucene.Net.Index.IndexReader.Document(int,Lucene.Net.Documents.FieldSelector)" />
+    /// </summary>
+    public interface FieldSelector
+    {
+        
+        /// <summary>Decides whether and how the field with the given name should be loaded.</summary>
+        /// <param name="fieldName">the field to accept or reject
+        /// </param>
+        /// <returns> an instance of <see cref="FieldSelectorResult" />
+        /// if the <see cref="Field" /> named <c>fieldName</c> should be loaded.
+        /// </returns>
+        FieldSelectorResult Accept(System.String fieldName);
+    }
 }
\ No newline at end of file
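
A FieldSelector is just a per-field-name decision function. A minimal sketch of an implementation (hypothetical class and field names; assumes the LOAD, LAZY_LOAD and NO_LOAD members of the FieldSelectorResult enum converted below):

using Lucene.Net.Documents;

// Loads "id" eagerly, defers "body" until first access, skips everything else.
class IdAndLazyBodySelector : FieldSelector
{
    public FieldSelectorResult Accept(string fieldName)
    {
        if (fieldName == "id") return FieldSelectorResult.LOAD;
        if (fieldName == "body") return FieldSelectorResult.LAZY_LOAD;
        return FieldSelectorResult.NO_LOAD;
    }
}

Passed to IndexReader.Document(int, FieldSelector), this avoids materializing large stored fields up front; a lazily loaded field must then be read via Document.GetFieldable, as the Document comments above point out.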

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Document/FieldSelectorResult.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/FieldSelectorResult.cs b/src/core/Document/FieldSelectorResult.cs
index 7d3a889..15c8606 100644
--- a/src/core/Document/FieldSelectorResult.cs
+++ b/src/core/Document/FieldSelectorResult.cs
@@ -21,7 +21,7 @@ using System.Runtime.InteropServices;
 
 namespace Lucene.Net.Documents
 {
-	/// <summary>Provides information about what should be done with this Field</summary>
+    /// <summary>Provides information about what should be done with this Field</summary>
     public enum FieldSelectorResult
     {
         /// <summary>


[43/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/FrenchStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/FrenchStemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/FrenchStemmer.cs
index 323f84d..55c90bf 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/FrenchStemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/FrenchStemmer.cs
@@ -24,1701 +24,1701 @@ namespace SF.Snowball.Ext
 #pragma warning disable 162,164
 
     /// <summary> Generated class implementing code defined by a snowball script.</summary>
-	public class FrenchStemmer : SnowballProgram
-	{
-		public FrenchStemmer()
-		{
-			InitBlock();
-		}
-		private void  InitBlock()
-		{
-			a_0 = new Among[]{new Among("", - 1, 4, "", this), new Among("I", 0, 1, "", this), new Among("U", 0, 2, "", this), new Among("Y", 0, 3, "", this)};
-			a_1 = new Among[]{new Among("iqU", - 1, 3, "", this), new Among("abl", - 1, 3, "", this), new Among("I\u00E8r", - 1, 4, "", this), new Among("i\u00E8r", - 1, 4, "", this), new Among("eus", - 1, 2, "", this), new Among("iv", - 1, 1, "", this)};
-			a_2 = new Among[]{new Among("ic", - 1, 2, "", this), new Among("abil", - 1, 1, "", this), new Among("iv", - 1, 3, "", this)};
-			a_3 = new Among[]{new Among("iqUe", - 1, 1, "", this), new Among("atrice", - 1, 2, "", this), new Among("ance", - 1, 1, "", this), new Among("ence", - 1, 5, "", this), new Among("logie", - 1, 3, "", this), new Among("able", - 1, 1, "", this), new Among("isme", - 1, 1, "", this), new Among("euse", - 1, 11, "", this), new Among("iste", - 1, 1, "", this), new Among("ive", - 1, 8, "", this), new Among("if", - 1, 8, "", this), new Among("usion", - 1, 4, "", this), new Among("ation", - 1, 2, "", this), new Among("ution", - 1, 4, "", this), new Among("ateur", - 1, 2, "", this), new Among("iqUes", - 1, 1, "", this), new Among("atrices", - 1, 2, "", this), new Among("ances", - 1, 1, "", this), new Among("ences", - 1, 5, "", this), new Among("logies", - 1, 3, "", this), new Among("ables", - 1, 1, "", this), new Among("ismes", - 1, 1, "", this), new Among("euses", - 1, 11, "", this), new Among("istes", - 1, 1, "", this), new Among("ives", - 1, 8, "", this), new Among("ifs", - 1, 8, "", this), new Among("usions", - 1, 4, "", this), new Among("ations", - 1, 2, "", this), new Among("utions", - 1, 4, "", this), new Among("ateurs", - 1, 2, "", this), new Among("ments", - 1, 15, "", this), new Among("ements", 30, 6, "", this), new Among("issements", 31, 12, "", this), new Among("it\u00E9s", - 1, 7, "", this), new Among("ment", - 1, 15, "", this), new Among("ement", 34, 6, "", this), new Among("issement", 35, 12, "", this), new Among("amment", 34, 13, "", this), new Among("emment", 34, 14, "", this), new Among("aux", - 1, 10, "", this), new Among("eaux", 39, 9, "", this), new Among("eux", - 1, 1, "", this), new Among("it\u00E9", - 1, 7, "", this)};
-			a_4 = new Among[]{new Among("ira", - 1, 1, "", this), new Among("ie", - 1, 1, "", this), new Among("isse", - 1, 1, "", this), new Among("issante", - 1, 1, "", this), new Among("i", - 1, 1, "", this), new Among("irai", 4, 1, "", this), new Among("ir", - 1, 1, "", this), new Among("iras", - 1, 1, "", this), new Among("ies", - 1, 1, "", this), new Among("\u00EEmes", - 1, 1, "", this), new Among("isses", - 1, 1, "", this), new Among("issantes", - 1, 1, "", this), new Among("\u00EEtes", - 1, 1, "", this), new Among("is", - 1, 1, "", this), new Among("irais", 13, 1, "", this), new Among("issais", 13, 1, "", this), new Among("irions", - 1, 1, "", this), new Among("issions", - 1, 1, "", this), new Among("irons", - 1, 1, "", this), new Among("issons", - 1, 1, "", this), new Among("issants", - 1, 1, "", this), new Among("it", - 1, 1, "", this), new Among("irait", 21, 1, "", this), new Among("issait", 21, 1, "", this), new Among("issant", - 1, 1, "", this), new Among("iraIent", - 1, 1, "", this), new Among("issaIent", - 1, 1, "", this), new Among("irent", - 1, 1, "", this), new Among("issent", - 1, 1, "", this), new Among("iront", - 1, 1, "", this), new Among("\u00EEt", - 1, 1, "", this), new Among("iriez", - 1, 1, "", this), new Among("issiez", - 1, 1, "", this), new Among("irez", - 1, 1, "", this), new Among("issez", - 1, 1, "", this)};
-			a_5 = new Among[]{new Among("a", - 1, 3, "", this), new Among("era", 0, 2, "", this), new Among("asse", - 1, 3, "", this), new Among("ante", - 1, 3, "", this), new Among("\u00E9e", - 1, 2, "", this), new Among("ai", - 1, 3, "", this), new Among("erai", 5, 2, "", this), new Among("er", - 1, 2, "", this), new Among("as", - 1, 3, "", this), new Among("eras", 8, 2, "", this), new Among("\u00E2mes", - 1, 3, "", this), new Among("asses", - 1, 3, "", this), new Among("antes", - 1, 3, "", this), new Among("\u00E2tes", - 1, 3, "", this), new Among("\u00E9es", - 1, 2, "", this), new Among("ais", - 1, 3, "", this), new Among("erais", 15, 2, "", this), new Among("ions", - 1, 1, "", this), new Among("erions", 17, 2, "", this), new Among("assions", 17, 3, "", this), new Among("erons", - 1, 2, "", this), new Among("ants", - 1, 3, "", this), new Among("\u00E9s", - 1, 2, "", this), new Among("ait", - 1, 3, "", this), new Among("erait", 23, 2, "", this), new Among("ant", - 1, 3, "", this), new Among("aIent", - 1, 3, "", this), new Among("eraIent", 26, 2, "", this), new Among("\u00E8rent", - 1, 2, "", this), new Among("assent", - 1, 3, "", this), new Among("eront", - 1, 2, "", this), new Among("\u00E2t", - 1, 3, "", this), new Among("ez", - 1, 2, "", this), new Among("iez", 32, 2, "", this), new Among("eriez", 33, 2, "", this), new Among("assiez", 33, 3, "", this), new Among("erez", 32, 2, "", this), new Among("\u00E9", - 1, 2, "", this)};
-			a_6 = new Among[]{new Among("e", - 1, 3, "", this), new Among("I\u00E8re", 0, 2, "", this), new Among("i\u00E8re", 0, 2, "", this), new Among("ion", - 1, 1, "", this), new Among("Ier", - 1, 2, "", this), new Among("ier", - 1, 2, "", this), new Among("\u00EB", - 1, 4, "", this)};
-			a_7 = new Among[]{new Among("ell", - 1, - 1, "", this), new Among("eill", - 1, - 1, "", this), new Among("enn", - 1, - 1, "", this), new Among("onn", - 1, - 1, "", this), new Among("ett", - 1, - 1, "", this)};
-		}
-		
-		private Among[] a_0;
-		private Among[] a_1;
-		private Among[] a_2;
-		private Among[] a_3;
-		private Among[] a_4;
-		private Among[] a_5;
-		private Among[] a_6;
-		private Among[] a_7;
-		private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (128), (char) (130), (char) (103), (char) (8), (char) (5)};
-		private static readonly char[] g_keep_with_s = new char[]{(char) (1), (char) (65), (char) (20), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (128)};
-		
-		private int I_p2;
-		private int I_p1;
-		private int I_pV;
-		protected internal virtual void  copy_from(FrenchStemmer other)
-		{
-			I_p2 = other.I_p2;
-			I_p1 = other.I_p1;
-			I_pV = other.I_pV;
-			base.copy_from(other);
-		}
-		
-		private bool r_prelude()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			// repeat, line 38
-			while (true)
-			{
-				v_1 = cursor;
-				do 
-				{
-					// goto, line 38
-					while (true)
-					{
-						v_2 = cursor;
-						do 
-						{
-							// (, line 38
-							// or, line 44
-							do 
-							{
-								v_3 = cursor;
-								do 
-								{
-									// (, line 40
-									if (!(in_grouping(g_v, 97, 251)))
-									{
-										goto lab5_brk;
-									}
-									// [, line 40
-									bra = cursor;
-									// or, line 40
-									do 
-									{
-										v_4 = cursor;
-										do 
-										{
-											// (, line 40
-											// literal, line 40
-											if (!(eq_s(1, "u")))
-											{
-												goto lab7_brk;
-											}
-											// ], line 40
-											ket = cursor;
-											if (!(in_grouping(g_v, 97, 251)))
-											{
-												goto lab7_brk;
-											}
-											// <-, line 40
-											slice_from("U");
-											goto lab6_brk;
-										}
-										while (false);
+    public class FrenchStemmer : SnowballProgram
+    {
+        public FrenchStemmer()
+        {
+            InitBlock();
+        }
+        private void  InitBlock()
+        {
+            a_0 = new Among[]{new Among("", - 1, 4, "", this), new Among("I", 0, 1, "", this), new Among("U", 0, 2, "", this), new Among("Y", 0, 3, "", this)};
+            a_1 = new Among[]{new Among("iqU", - 1, 3, "", this), new Among("abl", - 1, 3, "", this), new Among("I\u00E8r", - 1, 4, "", this), new Among("i\u00E8r", - 1, 4, "", this), new Among("eus", - 1, 2, "", this), new Among("iv", - 1, 1, "", this)};
+            a_2 = new Among[]{new Among("ic", - 1, 2, "", this), new Among("abil", - 1, 1, "", this), new Among("iv", - 1, 3, "", this)};
+            a_3 = new Among[]{new Among("iqUe", - 1, 1, "", this), new Among("atrice", - 1, 2, "", this), new Among("ance", - 1, 1, "", this), new Among("ence", - 1, 5, "", this), new Among("logie", - 1, 3, "", this), new Among("able", - 1, 1, "", this), new Among("isme", - 1, 1, "", this), new Among("euse", - 1, 11, "", this), new Among("iste", - 1, 1, "", this), new Among("ive", - 1, 8, "", this), new Among("if", - 1, 8, "", this), new Among("usion", - 1, 4, "", this), new Among("ation", - 1, 2, "", this), new Among("ution", - 1, 4, "", this), new Among("ateur", - 1, 2, "", this), new Among("iqUes", - 1, 1, "", this), new Among("atrices", - 1, 2, "", this), new Among("ances", - 1, 1, "", this), new Among("ences", - 1, 5, "", this), new Among("logies", - 1, 3, "", this), new Among("ables", - 1, 1, "", this), new Among("ismes", - 1, 1, "", this), new Among("euses", - 1, 11, "", this), new Among("istes", - 1, 1, "", this), new Among("ives", - 1, 8, "", this), new Among("ifs", - 1, 8, "", this), new Among("usions", - 1, 4, "", this), new Among("ations", - 1, 2, "", this), new Among("utions", - 1, 4, "", this), new Among("ateurs", - 1, 2, "", this), new Among("ments", - 1, 15, "", this), new Among("ements", 30, 6, "", this), new Among("issements", 31, 12, "", this), new Among("it\u00E9s", - 1, 7, "", this), new Among("ment", - 1, 15, "", this), new Among("ement", 34, 6, "", this), new Among("issement", 35, 12, "", this), new Among("amment", 34, 13, "", this), new Among("emment", 34, 14, "", this), new Among("aux", - 1, 10, "", this), new Among("eaux", 39, 9, "", this), new Among("eux", - 1, 1, "", this), new Among("it\u00E9", - 1, 7, "", this)};
+            a_4 = new Among[]{new Among("ira", - 1, 1, "", this), new Among("ie", - 1, 1, "", this), new Among("isse", - 1, 1, "", this), new Among("issante", - 1, 1, "", this), new Among("i", - 1, 1, "", this), new Among("irai", 4, 1, "", this), new Among("ir", - 1, 1, "", this), new Among("iras", - 1, 1, "", this), new Among("ies", - 1, 1, "", this), new Among("\u00EEmes", - 1, 1, "", this), new Among("isses", - 1, 1, "", this), new Among("issantes", - 1, 1, "", this), new Among("\u00EEtes", - 1, 1, "", this), new Among("is", - 1, 1, "", this), new Among("irais", 13, 1, "", this), new Among("issais", 13, 1, "", this), new Among("irions", - 1, 1, "", this), new Among("issions", - 1, 1, "", this), new Among("irons", - 1, 1, "", this), new Among("issons", - 1, 1, "", this), new Among("issants", - 1, 1, "", this), new Among("it", - 1, 1, "", this), new Among("irait", 21, 1, "", this), new Among("issait", 21, 1, "", this), new Among("issant", - 1, 1, "", this), new Among("iraIent", - 1, 1, "", this), new Among("issaIent", - 1, 1, "", this), new Among("irent", - 1, 1, "", this), new Among("issent", - 1, 1, "", this), new Among("iront", - 1, 1, "", this), new Among("\u00EEt", - 1, 1, "", this), new Among("iriez", - 1, 1, "", this), new Among("issiez", - 1, 1, "", this), new Among("irez", - 1, 1, "", this), new Among("issez", - 1, 1, "", this)};
+            a_5 = new Among[]{new Among("a", - 1, 3, "", this), new Among("era", 0, 2, "", this), new Among("asse", - 1, 3, "", this), new Among("ante", - 1, 3, "", this), new Among("\u00E9e", - 1, 2, "", this), new Among("ai", - 1, 3, "", this), new Among("erai", 5, 2, "", this), new Among("er", - 1, 2, "", this), new Among("as", - 1, 3, "", this), new Among("eras", 8, 2, "", this), new Among("\u00E2mes", - 1, 3, "", this), new Among("asses", - 1, 3, "", this), new Among("antes", - 1, 3, "", this), new Among("\u00E2tes", - 1, 3, "", this), new Among("\u00E9es", - 1, 2, "", this), new Among("ais", - 1, 3, "", this), new Among("erais", 15, 2, "", this), new Among("ions", - 1, 1, "", this), new Among("erions", 17, 2, "", this), new Among("assions", 17, 3, "", this), new Among("erons", - 1, 2, "", this), new Among("ants", - 1, 3, "", this), new Among("\u00E9s", - 1, 2, "", this), new Among("ait", - 1, 3, "", this), new Among("erait", 23, 2, "", this), new Among("ant", - 1, 3, "", this), new Among("aIent", - 1, 3, "", this), new Among("eraIent", 26, 2, "", this), new Among("\u00E8rent", - 1, 2, "", this), new Among("assent", - 1, 3, "", this), new Among("eront", - 1, 2, "", this), new Among("\u00E2t", - 1, 3, "", this), new Among("ez", - 1, 2, "", this), new Among("iez", 32, 2, "", this), new Among("eriez", 33, 2, "", this), new Among("assiez", 33, 3, "", this), new Among("erez", 32, 2, "", this), new Among("\u00E9", - 1, 2, "", this)};
+            a_6 = new Among[]{new Among("e", - 1, 3, "", this), new Among("I\u00E8re", 0, 2, "", this), new Among("i\u00E8re", 0, 2, "", this), new Among("ion", - 1, 1, "", this), new Among("Ier", - 1, 2, "", this), new Among("ier", - 1, 2, "", this), new Among("\u00EB", - 1, 4, "", this)};
+            a_7 = new Among[]{new Among("ell", - 1, - 1, "", this), new Among("eill", - 1, - 1, "", this), new Among("enn", - 1, - 1, "", this), new Among("onn", - 1, - 1, "", this), new Among("ett", - 1, - 1, "", this)};
+        }
+        
+        private Among[] a_0;
+        private Among[] a_1;
+        private Among[] a_2;
+        private Among[] a_3;
+        private Among[] a_4;
+        private Among[] a_5;
+        private Among[] a_6;
+        private Among[] a_7;
+        private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (128), (char) (130), (char) (103), (char) (8), (char) (5)};
+        private static readonly char[] g_keep_with_s = new char[]{(char) (1), (char) (65), (char) (20), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (128)};
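
Each Among entry in the tables above encodes (suffix, index of the shorter entry this suffix extends, result code): for example ("ements", 30, 6) extends entry 30 ("ments") and dispatches case 6 in the switch statements further down, with -1 meaning no such parent entry. g_v is a bit set over character codes 97-251 marking the French vowels, which in_grouping/out_grouping test. Conceptually, find_among_b picks the longest table entry matching a suffix of the current word; a naive sketch of that lookup (illustrative only, ignoring Snowball's shared-prefix layout and cursor mechanics):

using System;

static int LongestSuffixResult(string word, string[] suffixes, int[] results)
{
    int best = 0, bestLen = -1;          // 0 means "no match", as in the case 0 branches
    for (int i = 0; i < suffixes.Length; i++)
        if (word.EndsWith(suffixes[i], StringComparison.Ordinal) && suffixes[i].Length > bestLen)
        {
            best = results[i];
            bestLen = suffixes[i].Length;
        }
    return best;
}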
+        
+        private int I_p2;
+        private int I_p1;
+        private int I_pV;
+        protected internal virtual void  copy_from(FrenchStemmer other)
+        {
+            I_p2 = other.I_p2;
+            I_p1 = other.I_p1;
+            I_pV = other.I_pV;
+            base.copy_from(other);
+        }
+        
+        private bool r_prelude()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            // repeat, line 38
+            while (true)
+            {
+                v_1 = cursor;
+                do 
+                {
+                    // goto, line 38
+                    while (true)
+                    {
+                        v_2 = cursor;
+                        do 
+                        {
+                            // (, line 38
+                            // or, line 44
+                            do 
+                            {
+                                v_3 = cursor;
+                                do 
+                                {
+                                    // (, line 40
+                                    if (!(in_grouping(g_v, 97, 251)))
+                                    {
+                                        goto lab5_brk;
+                                    }
+                                    // [, line 40
+                                    bra = cursor;
+                                    // or, line 40
+                                    do 
+                                    {
+                                        v_4 = cursor;
+                                        do 
+                                        {
+                                            // (, line 40
+                                            // literal, line 40
+                                            if (!(eq_s(1, "u")))
+                                            {
+                                                goto lab7_brk;
+                                            }
+                                            // ], line 40
+                                            ket = cursor;
+                                            if (!(in_grouping(g_v, 97, 251)))
+                                            {
+                                                goto lab7_brk;
+                                            }
+                                            // <-, line 40
+                                            slice_from("U");
+                                            goto lab6_brk;
+                                        }
+                                        while (false);
 
 lab7_brk: ;
-										
-										cursor = v_4;
-										do 
-										{
-											// (, line 41
-											// literal, line 41
-											if (!(eq_s(1, "i")))
-											{
-												goto lab8_brk;
-											}
-											// ], line 41
-											ket = cursor;
-											if (!(in_grouping(g_v, 97, 251)))
-											{
-												goto lab8_brk;
-											}
-											// <-, line 41
-											slice_from("I");
-											goto lab6_brk;
-										}
-										while (false);
+                                        
+                                        cursor = v_4;
+                                        do 
+                                        {
+                                            // (, line 41
+                                            // literal, line 41
+                                            if (!(eq_s(1, "i")))
+                                            {
+                                                goto lab8_brk;
+                                            }
+                                            // ], line 41
+                                            ket = cursor;
+                                            if (!(in_grouping(g_v, 97, 251)))
+                                            {
+                                                goto lab8_brk;
+                                            }
+                                            // <-, line 41
+                                            slice_from("I");
+                                            goto lab6_brk;
+                                        }
+                                        while (false);
 
 lab8_brk: ;
-										
-										cursor = v_4;
-										// (, line 42
-										// literal, line 42
-										if (!(eq_s(1, "y")))
-										{
-											goto lab5_brk;
-										}
-										// ], line 42
-										ket = cursor;
-										// <-, line 42
-										slice_from("Y");
-									}
-									while (false);
+                                        
+                                        cursor = v_4;
+                                        // (, line 42
+                                        // literal, line 42
+                                        if (!(eq_s(1, "y")))
+                                        {
+                                            goto lab5_brk;
+                                        }
+                                        // ], line 42
+                                        ket = cursor;
+                                        // <-, line 42
+                                        slice_from("Y");
+                                    }
+                                    while (false);
 
 lab6_brk: ;
-									
-									goto lab4_brk;
-								}
-								while (false);
+                                    
+                                    goto lab4_brk;
+                                }
+                                while (false);
 
 lab5_brk: ;
-								
-								cursor = v_3;
-								do 
-								{
-									// (, line 45
-									// [, line 45
-									bra = cursor;
-									// literal, line 45
-									if (!(eq_s(1, "y")))
-									{
-										goto lab9_brk;
-									}
-									// ], line 45
-									ket = cursor;
-									if (!(in_grouping(g_v, 97, 251)))
-									{
-										goto lab9_brk;
-									}
-									// <-, line 45
-									slice_from("Y");
-									goto lab4_brk;
-								}
-								while (false);
+                                
+                                cursor = v_3;
+                                do 
+                                {
+                                    // (, line 45
+                                    // [, line 45
+                                    bra = cursor;
+                                    // literal, line 45
+                                    if (!(eq_s(1, "y")))
+                                    {
+                                        goto lab9_brk;
+                                    }
+                                    // ], line 45
+                                    ket = cursor;
+                                    if (!(in_grouping(g_v, 97, 251)))
+                                    {
+                                        goto lab9_brk;
+                                    }
+                                    // <-, line 45
+                                    slice_from("Y");
+                                    goto lab4_brk;
+                                }
+                                while (false);
 
 lab9_brk: ;
-								
-								cursor = v_3;
-								// (, line 47
-								// literal, line 47
-								if (!(eq_s(1, "q")))
-								{
-									goto lab3_brk;
-								}
-								// [, line 47
-								bra = cursor;
-								// literal, line 47
-								if (!(eq_s(1, "u")))
-								{
-									goto lab3_brk;
-								}
-								// ], line 47
-								ket = cursor;
-								// <-, line 47
-								slice_from("U");
-							}
-							while (false);
+                                
+                                cursor = v_3;
+                                // (, line 47
+                                // literal, line 47
+                                if (!(eq_s(1, "q")))
+                                {
+                                    goto lab3_brk;
+                                }
+                                // [, line 47
+                                bra = cursor;
+                                // literal, line 47
+                                if (!(eq_s(1, "u")))
+                                {
+                                    goto lab3_brk;
+                                }
+                                // ], line 47
+                                ket = cursor;
+                                // <-, line 47
+                                slice_from("U");
+                            }
+                            while (false);
 
 lab4_brk: ;
-							
-							cursor = v_2;
-							goto golab2_brk;
-						}
-						while (false);
+                            
+                            cursor = v_2;
+                            goto golab2_brk;
+                        }
+                        while (false);
 
 lab3_brk: ;
-						
-						cursor = v_2;
-						if (cursor >= limit)
-						{
-							goto lab1_brk;
-						}
-						cursor++;
-					}
+                        
+                        cursor = v_2;
+                        if (cursor >= limit)
+                        {
+                            goto lab1_brk;
+                        }
+                        cursor++;
+                    }
 
 golab2_brk: ;
-					
-					goto replab0;
-				}
-				while (false);
+                    
+                    goto replab0;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = v_1;
-				goto replab0_brk;
+                
+                cursor = v_1;
+                goto replab0_brk;
 
 replab0: ;
-			}
+            }
 
 replab0_brk: ;
-			
-			return true;
-		}
-		
-		private bool r_mark_regions()
-		{
-			int v_1;
-			int v_2;
-			int v_4;
-			// (, line 50
-			I_pV = limit;
-			I_p1 = limit;
-			I_p2 = limit;
-			// do, line 56
-			v_1 = cursor;
-			do 
-			{
-				// (, line 56
-				// or, line 57
-				do 
-				{
-					v_2 = cursor;
-					do 
-					{
-						// (, line 57
-						if (!(in_grouping(g_v, 97, 251)))
-						{
-							goto lab2_brk;
-						}
-						if (!(in_grouping(g_v, 97, 251)))
-						{
-							goto lab2_brk;
-						}
-						// next, line 57
-						if (cursor >= limit)
-						{
-							goto lab2_brk;
-						}
-						cursor++;
-						goto lab1_brk;
-					}
-					while (false);
+            
+            return true;
+        }
+        
+        private bool r_mark_regions()
+        {
+            int v_1;
+            int v_2;
+            int v_4;
+            // (, line 50
+            I_pV = limit;
+            I_p1 = limit;
+            I_p2 = limit;
+            // do, line 56
+            v_1 = cursor;
+            do 
+            {
+                // (, line 56
+                // or, line 57
+                do 
+                {
+                    v_2 = cursor;
+                    do 
+                    {
+                        // (, line 57
+                        if (!(in_grouping(g_v, 97, 251)))
+                        {
+                            goto lab2_brk;
+                        }
+                        if (!(in_grouping(g_v, 97, 251)))
+                        {
+                            goto lab2_brk;
+                        }
+                        // next, line 57
+                        if (cursor >= limit)
+                        {
+                            goto lab2_brk;
+                        }
+                        cursor++;
+                        goto lab1_brk;
+                    }
+                    while (false);
 
 lab2_brk: ;
-					
-					cursor = v_2;
-					// (, line 57
-					// next, line 57
-					if (cursor >= limit)
-					{
-						goto lab0_brk;
-					}
-					cursor++;
-					// gopast, line 57
-					while (true)
-					{
-						do 
-						{
-							if (!(in_grouping(g_v, 97, 251)))
-							{
-								goto lab4_brk;
-							}
-							goto golab3_brk;
-						}
-						while (false);
+                    
+                    cursor = v_2;
+                    // (, line 57
+                    // next, line 57
+                    if (cursor >= limit)
+                    {
+                        goto lab0_brk;
+                    }
+                    cursor++;
+                    // gopast, line 57
+                    while (true)
+                    {
+                        do 
+                        {
+                            if (!(in_grouping(g_v, 97, 251)))
+                            {
+                                goto lab4_brk;
+                            }
+                            goto golab3_brk;
+                        }
+                        while (false);
 
 lab4_brk: ;
-						
-						if (cursor >= limit)
-						{
-							goto lab0_brk;
-						}
-						cursor++;
-					}
+                        
+                        if (cursor >= limit)
+                        {
+                            goto lab0_brk;
+                        }
+                        cursor++;
+                    }
 
 golab3_brk: ;
-					
-				}
-				while (false);
+                    
+                }
+                while (false);
 
 lab1_brk: ;
-				// setmark pV, line 58
-				I_pV = cursor;
-			}
-			while (false);
+                // setmark pV, line 58
+                I_pV = cursor;
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = v_1;
-			// do, line 60
-			v_4 = cursor;
-			do 
-			{
-				// (, line 60
-				// gopast, line 61
-				while (true)
-				{
-					do 
-					{
-						if (!(in_grouping(g_v, 97, 251)))
-						{
-							goto lab7_brk;
-						}
-						goto golab6_brk;
-					}
-					while (false);
+            
+            cursor = v_1;
+            // do, line 60
+            v_4 = cursor;
+            do 
+            {
+                // (, line 60
+                // gopast, line 61
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(in_grouping(g_v, 97, 251)))
+                        {
+                            goto lab7_brk;
+                        }
+                        goto golab6_brk;
+                    }
+                    while (false);
 
 lab7_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab5_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab5_brk;
+                    }
+                    cursor++;
+                }
 
 golab6_brk: ;
-				
-				// gopast, line 61
-				while (true)
-				{
-					do 
-					{
-						if (!(out_grouping(g_v, 97, 251)))
-						{
-							goto lab9_brk;
-						}
-						goto golab8_brk;
-					}
-					while (false);
+                
+                // gopast, line 61
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(out_grouping(g_v, 97, 251)))
+                        {
+                            goto lab9_brk;
+                        }
+                        goto golab8_brk;
+                    }
+                    while (false);
 
 lab9_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab5_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab5_brk;
+                    }
+                    cursor++;
+                }
 
 golab8_brk: ;
-				
-				// setmark p1, line 61
-				I_p1 = cursor;
-				// gopast, line 62
-				while (true)
-				{
-					do 
-					{
-						if (!(in_grouping(g_v, 97, 251)))
-						{
-							goto lab11_brk;
-						}
-						goto golab10_brk;
-					}
-					while (false);
+                
+                // setmark p1, line 61
+                I_p1 = cursor;
+                // gopast, line 62
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(in_grouping(g_v, 97, 251)))
+                        {
+                            goto lab11_brk;
+                        }
+                        goto golab10_brk;
+                    }
+                    while (false);
 
 lab11_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab5_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab5_brk;
+                    }
+                    cursor++;
+                }
 
 golab10_brk: ;
-				
-				// gopast, line 62
-				while (true)
-				{
-					do 
-					{
-						if (!(out_grouping(g_v, 97, 251)))
-						{
-							goto lab13_brk;
-						}
-						goto golab12_brk;
-					}
-					while (false);
+                
+                // gopast, line 62
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(out_grouping(g_v, 97, 251)))
+                        {
+                            goto lab13_brk;
+                        }
+                        goto golab12_brk;
+                    }
+                    while (false);
 
 lab13_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab5_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab5_brk;
+                    }
+                    cursor++;
+                }
 
 golab12_brk: ;
-				
-				// setmark p2, line 62
-				I_p2 = cursor;
-			}
-			while (false);
+                
+                // setmark p2, line 62
+                I_p2 = cursor;
+            }
+            while (false);
 
 lab5_brk: ;
-			
-			cursor = v_4;
-			return true;
-		}
-		
-		private bool r_postlude()
-		{
-			int among_var;
-			int v_1;
-			// repeat, line 66
-			while (true)
-			{
-				v_1 = cursor;
-				do 
-				{
-					// (, line 66
-					// [, line 68
-					bra = cursor;
-					// substring, line 68
-					among_var = find_among(a_0, 4);
-					if (among_var == 0)
-					{
-						goto lab10_brk;
-					}
-					// ], line 68
-					ket = cursor;
-					switch (among_var)
-					{
-						
-						case 0: 
-							goto lab10_brk;
-						
-						case 1: 
-							// (, line 69
-							// <-, line 69
-							slice_from("i");
-							break;
-						
-						case 2: 
-							// (, line 70
-							// <-, line 70
-							slice_from("u");
-							break;
-						
-						case 3: 
-							// (, line 71
-							// <-, line 71
-							slice_from("y");
-							break;
-						
-						case 4: 
-							// (, line 72
-							// next, line 72
-							if (cursor >= limit)
-							{
-								goto lab10_brk;
-							}
-							cursor++;
-							break;
-						}
-					goto replab1;
-				}
-				while (false);
+            
+            cursor = v_4;
+            return true;
+        }
+        
+        private bool r_postlude()
+        {
+            int among_var;
+            int v_1;
+            // repeat, line 66
+            while (true)
+            {
+                v_1 = cursor;
+                do 
+                {
+                    // (, line 66
+                    // [, line 68
+                    bra = cursor;
+                    // substring, line 68
+                    among_var = find_among(a_0, 4);
+                    if (among_var == 0)
+                    {
+                        goto lab10_brk;
+                    }
+                    // ], line 68
+                    ket = cursor;
+                    switch (among_var)
+                    {
+                        
+                        case 0: 
+                            goto lab10_brk;
+                        
+                        case 1: 
+                            // (, line 69
+                            // <-, line 69
+                            slice_from("i");
+                            break;
+                        
+                        case 2: 
+                            // (, line 70
+                            // <-, line 70
+                            slice_from("u");
+                            break;
+                        
+                        case 3: 
+                            // (, line 71
+                            // <-, line 71
+                            slice_from("y");
+                            break;
+                        
+                        case 4: 
+                            // (, line 72
+                            // next, line 72
+                            if (cursor >= limit)
+                            {
+                                goto lab10_brk;
+                            }
+                            cursor++;
+                            break;
+                        }
+                    goto replab1;
+                }
+                while (false);
 
 lab10_brk: ;
-				
-				cursor = v_1;
-				goto replab1_brk;
+                
+                cursor = v_1;
+                goto replab1_brk;
 
 replab1: ;
-			}
+            }
 
 replab1_brk: ;
-			
-			return true;
-		}
-		
-		private bool r_RV()
-		{
-			if (!(I_pV <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_R1()
-		{
-			if (!(I_p1 <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_R2()
-		{
-			if (!(I_p2 <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_standard_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			int v_6;
-			int v_7;
-			int v_8;
-			int v_9;
-			int v_10;
-			int v_11;
-			// (, line 82
-			// [, line 83
-			ket = cursor;
-			// substring, line 83
-			among_var = find_among_b(a_3, 43);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 83
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 87
-					// call R2, line 87
-					if (!r_R2())
-					{
-						return false;
-					}
-					// delete, line 87
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 90
-					// call R2, line 90
-					if (!r_R2())
-					{
-						return false;
-					}
-					// delete, line 90
-					slice_del();
-					// try, line 91
-					v_1 = limit - cursor;
-					do 
-					{
-						// (, line 91
-						// [, line 91
-						ket = cursor;
-						// literal, line 91
-						if (!(eq_s_b(2, "ic")))
-						{
-							cursor = limit - v_1;
-							goto lab0_brk;
-						}
-						// ], line 91
-						bra = cursor;
-						// or, line 91
-						do 
-						{
-							v_2 = limit - cursor;
-							do 
-							{
-								// (, line 91
-								// call R2, line 91
-								if (!r_R2())
-								{
-									goto lab2_brk;
-								}
-								// delete, line 91
-								slice_del();
-								goto lab1_brk;
-							}
-							while (false);
+            
+            return true;
+        }
+        
+        private bool r_RV()
+        {
+            if (!(I_pV <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_R1()
+        {
+            if (!(I_p1 <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_R2()
+        {
+            if (!(I_p2 <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
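
I_pV, I_p1 and I_p2, set in r_mark_regions above, are the standard Snowball region boundaries; r_RV/r_R1/r_R2 simply test whether the cursor has moved past them. A sketch of the textbook R1/R2 definition that the cursor arithmetic implements (illustrative helper, not the generated code):

using System;

// R1 = everything after the first non-vowel that follows a vowel;
// R2 = the same rule applied again, starting inside R1.
static int RegionStart(string word, int from, Func<char, bool> isVowel)
{
    for (int i = from; i + 1 < word.Length; i++)
        if (isVowel(word[i]) && !isVowel(word[i + 1]))
            return i + 2;                // region starts after that non-vowel
    return word.Length;                  // empty region, like I_p1 = limit
}

// For "fameusement" (vowels aeiouy plus the accented forms):
//   p1 = RegionStart(word, 0, v)   -> 3, so R1 = "eusement"
//   p2 = RegionStart(word, p1, v)  -> 6, so R2 = "ement"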
+        private bool r_standard_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            int v_6;
+            int v_7;
+            int v_8;
+            int v_9;
+            int v_10;
+            int v_11;
+            // (, line 82
+            // [, line 83
+            ket = cursor;
+            // substring, line 83
+            among_var = find_among_b(a_3, 43);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 83
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 87
+                    // call R2, line 87
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // delete, line 87
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 90
+                    // call R2, line 90
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // delete, line 90
+                    slice_del();
+                    // try, line 91
+                    v_1 = limit - cursor;
+                    do 
+                    {
+                        // (, line 91
+                        // [, line 91
+                        ket = cursor;
+                        // literal, line 91
+                        if (!(eq_s_b(2, "ic")))
+                        {
+                            cursor = limit - v_1;
+                            goto lab0_brk;
+                        }
+                        // ], line 91
+                        bra = cursor;
+                        // or, line 91
+                        do 
+                        {
+                            v_2 = limit - cursor;
+                            do 
+                            {
+                                // (, line 91
+                                // call R2, line 91
+                                if (!r_R2())
+                                {
+                                    goto lab2_brk;
+                                }
+                                // delete, line 91
+                                slice_del();
+                                goto lab1_brk;
+                            }
+                            while (false);
 
 lab2_brk: ;
-							
-							cursor = limit - v_2;
-							// <-, line 91
-							slice_from("iqU");
-						}
-						while (false);
+                            
+                            cursor = limit - v_2;
+                            // <-, line 91
+                            slice_from("iqU");
+                        }
+                        while (false);
 
 lab1_brk: ;
-					}
-					while (false);
+                    }
+                    while (false);
 
 lab0_brk: ;
-					
-					break;
-				
-				case 3: 
-					// (, line 95
-					// call R2, line 95
-					if (!r_R2())
-					{
-						return false;
-					}
-					// <-, line 95
-					slice_from("log");
-					break;
-				
-				case 4: 
-					// (, line 98
-					// call R2, line 98
-					if (!r_R2())
-					{
-						return false;
-					}
-					// <-, line 98
-					slice_from("u");
-					break;
-				
-				case 5: 
-					// (, line 101
-					// call R2, line 101
-					if (!r_R2())
-					{
-						return false;
-					}
-					// <-, line 101
-					slice_from("ent");
-					break;
-				
-				case 6: 
-					// (, line 104
-					// call RV, line 105
-					if (!r_RV())
-					{
-						return false;
-					}
-					// delete, line 105
-					slice_del();
-					// try, line 106
-					v_3 = limit - cursor;
-					do 
-					{
-						// (, line 106
-						// [, line 107
-						ket = cursor;
-						// substring, line 107
-						among_var = find_among_b(a_1, 6);
-						if (among_var == 0)
-						{
-							cursor = limit - v_3;
-							goto lab3_brk;
-						}
-						// ], line 107
-						bra = cursor;
-						switch (among_var)
-						{
-							
-							case 0: 
-								cursor = limit - v_3;
-								goto lab3_brk;
-							
-							case 1: 
-								// (, line 108
-								// call R2, line 108
-								if (!r_R2())
-								{
-									cursor = limit - v_3;
-									goto lab3_brk;
-								}
-								// delete, line 108
-								slice_del();
-								// [, line 108
-								ket = cursor;
-								// literal, line 108
-								if (!(eq_s_b(2, "at")))
-								{
-									cursor = limit - v_3;
-									goto lab3_brk;
-								}
-								// ], line 108
-								bra = cursor;
-								// call R2, line 108
-								if (!r_R2())
-								{
-									cursor = limit - v_3;
-									goto lab3_brk;
-								}
-								// delete, line 108
-								slice_del();
-								break;
-							
-							case 2: 
-								// (, line 109
-								// or, line 109
-								do 
-								{
-									v_4 = limit - cursor;
-									do 
-									{
-										// (, line 109
-										// call R2, line 109
-										if (!r_R2())
-										{
-											goto lab5_brk;
-										}
-										// delete, line 109
-										slice_del();
-										goto lab4_brk;
-									}
-									while (false);
+                    
+                    break;
+                
+                case 3: 
+                    // (, line 95
+                    // call R2, line 95
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // <-, line 95
+                    slice_from("log");
+                    break;
+                
+                case 4: 
+                    // (, line 98
+                    // call R2, line 98
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // <-, line 98
+                    slice_from("u");
+                    break;
+                
+                case 5: 
+                    // (, line 101
+                    // call R2, line 101
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // <-, line 101
+                    slice_from("ent");
+                    break;
+                
+                case 6: 
+                    // (, line 104
+                    // call RV, line 105
+                    if (!r_RV())
+                    {
+                        return false;
+                    }
+                    // delete, line 105
+                    slice_del();
+                    // try, line 106
+                    v_3 = limit - cursor;
+                    do 
+                    {
+                        // (, line 106
+                        // [, line 107
+                        ket = cursor;
+                        // substring, line 107
+                        among_var = find_among_b(a_1, 6);
+                        if (among_var == 0)
+                        {
+                            cursor = limit - v_3;
+                            goto lab3_brk;
+                        }
+                        // ], line 107
+                        bra = cursor;
+                        switch (among_var)
+                        {
+                            
+                            case 0: 
+                                cursor = limit - v_3;
+                                goto lab3_brk;
+                            
+                            case 1: 
+                                // (, line 108
+                                // call R2, line 108
+                                if (!r_R2())
+                                {
+                                    cursor = limit - v_3;
+                                    goto lab3_brk;
+                                }
+                                // delete, line 108
+                                slice_del();
+                                // [, line 108
+                                ket = cursor;
+                                // literal, line 108
+                                if (!(eq_s_b(2, "at")))
+                                {
+                                    cursor = limit - v_3;
+                                    goto lab3_brk;
+                                }
+                                // ], line 108
+                                bra = cursor;
+                                // call R2, line 108
+                                if (!r_R2())
+                                {
+                                    cursor = limit - v_3;
+                                    goto lab3_brk;
+                                }
+                                // delete, line 108
+                                slice_del();
+                                break;
+                            
+                            case 2: 
+                                // (, line 109
+                                // or, line 109
+                                do 
+                                {
+                                    v_4 = limit - cursor;
+                                    do 
+                                    {
+                                        // (, line 109
+                                        // call R2, line 109
+                                        if (!r_R2())
+                                        {
+                                            goto lab5_brk;
+                                        }
+                                        // delete, line 109
+                                        slice_del();
+                                        goto lab4_brk;
+                                    }
+                                    while (false);
 
 lab5_brk: ;
-									
-									cursor = limit - v_4;
-									// (, line 109
-									// call R1, line 109
-									if (!r_R1())
-									{
-										cursor = limit - v_3;
-										goto lab3_brk;
-									}
-									// <-, line 109
-									slice_from("eux");
-								}
-								while (false);
+                                    
+                                    cursor = limit - v_4;
+                                    // (, line 109
+                                    // call R1, line 109
+                                    if (!r_R1())
+                                    {
+                                        cursor = limit - v_3;
+                                        goto lab3_brk;
+                                    }
+                                    // <-, line 109
+                                    slice_from("eux");
+                                }
+                                while (false);
 
 lab4_brk: ;
-								
-								break;
-							
-							case 3: 
-								// (, line 111
-								// call R2, line 111
-								if (!r_R2())
-								{
-									cursor = limit - v_3;
-									goto lab3_brk;
-								}
-								// delete, line 111
-								slice_del();
-								break;
-							
-							case 4: 
-								// (, line 113
-								// call RV, line 113
-								if (!r_RV())
-								{
-									cursor = limit - v_3;
-									goto lab3_brk;
-								}
-								// <-, line 113
-								slice_from("i");
-								break;
-							}
-					}
-					while (false);
+                                
+                                break;
+                            
+                            case 3: 
+                                // (, line 111
+                                // call R2, line 111
+                                if (!r_R2())
+                                {
+                                    cursor = limit - v_3;
+                                    goto lab3_brk;
+                                }
+                                // delete, line 111
+                                slice_del();
+                                break;
+                            
+                            case 4: 
+                                // (, line 113
+                                // call RV, line 113
+                                if (!r_RV())
+                                {
+                                    cursor = limit - v_3;
+                                    goto lab3_brk;
+                                }
+                                // <-, line 113
+                                slice_from("i");
+                                break;
+                            }
+                    }
+                    while (false);
 
 lab3_brk: ;
-					
-					break;
-				
-				case 7: 
-					// (, line 119
-					// call R2, line 120
-					if (!r_R2())
-					{
-						return false;
-					}
-					// delete, line 120
-					slice_del();
-					// try, line 121
-					v_5 = limit - cursor;
-					do 
-					{
-						// (, line 121
-						// [, line 122
-						ket = cursor;
-						// substring, line 122
-						among_var = find_among_b(a_2, 3);
-						if (among_var == 0)
-						{
-							cursor = limit - v_5;
-							goto lab6_brk;
-						}
-						// ], line 122
-						bra = cursor;
-						switch (among_var)
-						{
-							
-							case 0: 
-								cursor = limit - v_5;
-								goto lab6_brk;
-							
-							case 1: 
-								// (, line 123
-								// or, line 123
-								do 
-								{
-									v_6 = limit - cursor;
-									do 
-									{
-										// (, line 123
-										// call R2, line 123
-										if (!r_R2())
-										{
-											goto lab8_brk;
-										}
-										// delete, line 123
-										slice_del();
-										goto lab7_brk;
-									}
-									while (false);
+                    
+                    break;
+                
+                case 7: 
+                    // (, line 119
+                    // call R2, line 120
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // delete, line 120
+                    slice_del();
+                    // try, line 121
+                    v_5 = limit - cursor;
+                    do 
+                    {
+                        // (, line 121
+                        // [, line 122
+                        ket = cursor;
+                        // substring, line 122
+                        among_var = find_among_b(a_2, 3);
+                        if (among_var == 0)
+                        {
+                            cursor = limit - v_5;
+                            goto lab6_brk;
+                        }
+                        // ], line 122
+                        bra = cursor;
+                        switch (among_var)
+                        {
+                            
+                            case 0: 
+                                cursor = limit - v_5;
+                                goto lab6_brk;
+                            
+                            case 1: 
+                                // (, line 123
+                                // or, line 123
+                                do 
+                                {
+                                    v_6 = limit - cursor;
+                                    do 
+                                    {
+                                        // (, line 123
+                                        // call R2, line 123
+                                        if (!r_R2())
+                                        {
+                                            goto lab8_brk;
+                                        }
+                                        // delete, line 123
+                                        slice_del();
+                                        goto lab7_brk;
+                                    }
+                                    while (false);
 
 lab8_brk: ;
-									
-									cursor = limit - v_6;
-									// <-, line 123
-									slice_from("abl");
-								}
-								while (false);
+                                    
+                                    cursor = limit - v_6;
+                                    // <-, line 123
+                                    slice_from("abl");
+                                }
+                                while (false);
 
 lab7_brk: ;
-								break;
-							
-							case 2: 
-								// (, line 124
-								// or, line 124
-								do 
-								{
-									v_7 = limit - cursor;
-									do 
-									{
-										// (, line 124
-										// call R2, line 124
-										if (!r_R2())
-										{
-											goto lab10_brk;
-										}
-										// delete, line 124
-										slice_del();
-										goto lab9_brk;
-									}
-									while (false);
+                                break;
+                            
+                            case 2: 
+                                // (, line 124
+                                // or, line 124
+                                do 
+                                {
+                                    v_7 = limit - cursor;
+                                    do 
+                                    {
+                                        // (, line 124
+                                        // call R2, line 124
+                                        if (!r_R2())
+                                        {
+                                            goto lab10_brk;
+                                        }
+                                        // delete, line 124
+                                        slice_del();
+                                        goto lab9_brk;
+                                    }
+                                    while (false);
 
 lab10_brk: ;
-									
-									cursor = limit - v_7;
-									// <-, line 124
-									slice_from("iqU");
-								}
-								while (false);
+                                    
+                                    cursor = limit - v_7;
+                                    // <-, line 124
+                                    slice_from("iqU");
+                                }
+                                while (false);
 
 lab9_brk: ;
 
-								break;
-							
-							case 3: 
-								// (, line 125
-								// call R2, line 125
-								if (!r_R2())
-								{
-									cursor = limit - v_5;
-									goto lab6_brk;
-								}
-								// delete, line 125
-								slice_del();
-								break;
-							}
-					}
-					while (false);
+                                break;
+                            
+                            case 3: 
+                                // (, line 125
+                                // call R2, line 125
+                                if (!r_R2())
+                                {
+                                    cursor = limit - v_5;
+                                    goto lab6_brk;
+                                }
+                                // delete, line 125
+                                slice_del();
+                                break;
+                            }
+                    }
+                    while (false);
 
 lab6_brk: ;
-					
-					break;
-				
-				case 8: 
-					// (, line 131
-					// call R2, line 132
-					if (!r_R2())
-					{
-						return false;
-					}
-					// delete, line 132
-					slice_del();
-					// try, line 133
-					v_8 = limit - cursor;
-					do 
-					{
-						// (, line 133
-						// [, line 133
-						ket = cursor;
-						// literal, line 133
-						if (!(eq_s_b(2, "at")))
-						{
-							cursor = limit - v_8;
-							goto lab11_brk;
-						}
-						// ], line 133
-						bra = cursor;
-						// call R2, line 133
-						if (!r_R2())
-						{
-							cursor = limit - v_8;
-							goto lab11_brk;
-						}
-						// delete, line 133
-						slice_del();
-						// [, line 133
-						ket = cursor;
-						// literal, line 133
-						if (!(eq_s_b(2, "ic")))
-						{
-							cursor = limit - v_8;
-							goto lab11_brk;
-						}
-						// ], line 133
-						bra = cursor;
-						// or, line 133
-						do 
-						{
-							v_9 = limit - cursor;
-							do 
-							{
-								// (, line 133
-								// call R2, line 133
-								if (!r_R2())
-								{
-									goto lab13_brk;
-								}
-								// delete, line 133
-								slice_del();
-								goto lab12_brk;
-							}
-							while (false);
+                    
+                    break;
+                
+                case 8: 
+                    // (, line 131
+                    // call R2, line 132
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // delete, line 132
+                    slice_del();
+                    // try, line 133
+                    v_8 = limit - cursor;
+                    do 
+                    {
+                        // (, line 133
+                        // [, line 133
+                        ket = cursor;
+                        // literal, line 133
+                        if (!(eq_s_b(2, "at")))
+                        {
+                            cursor = limit - v_8;
+                            goto lab11_brk;
+                        }
+                        // ], line 133
+                        bra = cursor;
+                        // call R2, line 133
+                        if (!r_R2())
+                        {
+                            cursor = limit - v_8;
+                            goto lab11_brk;
+                        }
+                        // delete, line 133
+                        slice_del();
+                        // [, line 133
+                        ket = cursor;
+                        // literal, line 133
+                        if (!(eq_s_b(2, "ic")))
+                        {
+                            cursor = limit - v_8;
+                            goto lab11_brk;
+                        }
+                        // ], line 133
+                        bra = cursor;
+                        // or, line 133
+                        do 
+                        {
+                            v_9 = limit - cursor;
+                            do 
+                            {
+                                // (, line 133
+                                // call R2, line 133
+                                if (!r_R2())
+                                {
+                                    goto lab13_brk;
+                                }
+                                // delete, line 133
+                                slice_del();
+                                goto lab12_brk;
+                            }
+                            while (false);
 
 lab13_brk: ;
-							
-							cursor = limit - v_9;
-							// <-, line 133
-							slice_from("iqU");
-						}
-						while (false);
+                            
+                            cursor = limit - v_9;
+                            // <-, line 133
+                            slice_from("iqU");
+                        }
+                        while (false);
 
 lab12_brk: ;
-						
-					}
-					while (false);
+                        
+                    }
+                    while (false);
 
 lab11_brk: ;
-					
-					break;
-				
-				case 9: 
-					// (, line 135
-					// <-, line 135
-					slice_from("eau");
-					break;
-				
-				case 10: 
-					// (, line 136
-					// call R1, line 136
-					if (!r_R1())
-					{
-						return false;
-					}
-					// <-, line 136
-					slice_from("al");
-					break;
-				
-				case 11: 
-					// (, line 138
-					// or, line 138
-					do 
-					{
-						v_10 = limit - cursor;
-						do 
-						{
-							// (, line 138
-							// call R2, line 138
-							if (!r_R2())
-							{
-								goto lab15_brk;
-							}
-							// delete, line 138
-							slice_del();
-							goto lab14_brk;
-						}
-						while (false);
+                    
+                    break;
+                
+                case 9: 
+                    // (, line 135
+                    // <-, line 135
+                    slice_from("eau");
+                    break;
+                
+                case 10: 
+                    // (, line 136
+                    // call R1, line 136
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // <-, line 136
+                    slice_from("al");
+                    break;
+                
+                case 11: 
+                    // (, line 138
+                    // or, line 138
+                    do 
+                    {
+                        v_10 = limit - cursor;
+                        do 
+                        {
+                            // (, line 138
+                            // call R2, line 138
+                            if (!r_R2())
+                            {
+                                goto lab15_brk;
+                            }
+                            // delete, line 138
+                            slice_del();
+                            goto lab14_brk;
+                        }
+                        while (false);
 
 lab15_brk: ;
-						
-						cursor = limit - v_10;
-						// (, line 138
-						// call R1, line 138
-						if (!r_R1())
-						{
-							return false;
-						}
-						// <-, line 138
-						slice_from("eux");
-					}
-					while (false);
+                        
+                        cursor = limit - v_10;
+                        // (, line 138
+                        // call R1, line 138
+                        if (!r_R1())
+                        {
+                            return false;
+                        }
+                        // <-, line 138
+                        slice_from("eux");
+                    }
+                    while (false);
 
 lab14_brk: ;
-					
-					break;
-				
-				case 12: 
-					// (, line 141
-					// call R1, line 141
-					if (!r_R1())
-					{
-						return false;
-					}
-					if (!(out_grouping_b(g_v, 97, 251)))
-					{
-						return false;
-					}
-					// delete, line 141
-					slice_del();
-					break;
-				
-				case 13: 
-					// (, line 146
-					// call RV, line 146
-					if (!r_RV())
-					{
-						return false;
-					}
-					// fail, line 146
-					// (, line 146
-					// <-, line 146
-					slice_from("ant");
-					return false;
-				
-				case 14: 
-					// (, line 147
-					// call RV, line 147
-					if (!r_RV())
-					{
-						return false;
-					}
-					// fail, line 147
-					// (, line 147
-					// <-, line 147
-					slice_from("ent");
-					return false;
-				
-				case 15: 
-					// (, line 149
-					// test, line 149
-					v_11 = limit - cursor;
-					// (, line 149
-					if (!(in_grouping_b(g_v, 97, 251)))
-					{
-						return false;
-					}
-					// call RV, line 149
-					if (!r_RV())
-					{
-						return false;
-					}
-					cursor = limit - v_11;
-					// fail, line 149
-					// (, line 149
-					// delete, line 149
-					slice_del();
-					return false;
-				}
-			return true;
-		}
-		
-		private bool r_i_verb_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			// setlimit, line 154
-			v_1 = limit - cursor;
-			// tomark, line 154
-			if (cursor < I_pV)
-			{
-				return false;
-			}
-			cursor = I_pV;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 154
-			// [, line 155
-			ket = cursor;
-			// substring, line 155
-			among_var = find_among_b(a_4, 35);
-			if (among_var == 0)
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			// ], line 155
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					limit_backward = v_2;
-					return false;
-				
-				case 1: 
-					// (, line 161
-					if (!(out_grouping_b(g_v, 97, 251)))
-					{
-						limit_backward = v_2;
-						return false;
-					}
-					// delete, line 161
-					slice_del();
-					break;
-				}
-			limit_backward = v_2;
-			return true;
-		}
-		
-		private bool r_verb_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			// setlimit, line 165
-			v_1 = limit - cursor;
-			// tomark, line 165
-			if (cursor < I_pV)
-			{
-				return false;
-			}
-			cursor = I_pV;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 165
-			// [, line 166
-			ket = cursor;
-			// substring, line 166
-			among_var = find_among_b(a_5, 38);
-			if (among_var == 0)
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			// ], line 166
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					limit_backward = v_2;
-					return false;
-				
-				case 1: 
-					// (, line 168
-					// call R2, line 168
-					if (!r_R2())
-					{
-						limit_backward = v_2;
-						return false;
-					}
-					// delete, line 168
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 176
-					// delete, line 176
-					slice_del();
-					break;
-				
-				case 3: 
-					// (, line 181
-					// delete, line 181
-					slice_del();
-					// try, line 182
-					v_3 = limit - cursor;
-					do 
-					{
-						// (, line 182
-						// [, line 182
-						ket = cursor;
-						// literal, line 182
-						if (!(eq_s_b(1, "e")))
-						{
-							cursor = limit - v_3;
-							goto lab16_brk;
-						}
-						// ], line 182
-						bra = cursor;
-						// delete, line 182
-						slice_del();
-					}
-					while (false);
+                    
+                    break;
+                
+                case 12: 
+                    // (, line 141
+                    // call R1, line 141
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    if (!(out_grouping_b(g_v, 97, 251)))
+                    {
+                        return false;
+                    }
+                    // delete, line 141
+                    slice_del();
+                    break;
+                
+                case 13: 
+                    // (, line 146
+                    // call RV, line 146
+                    if (!r_RV())
+                    {
+                        return false;
+                    }
+                    // fail, line 146
+                    // (, line 146
+                    // <-, line 146
+                    slice_from("ant");
+                    return false;
+                
+                case 14: 
+                    // (, line 147
+                    // call RV, line 147
+                    if (!r_RV())
+                    {
+                        return false;
+                    }
+                    // fail, line 147
+                    // (, line 147
+                    // <-, line 147
+                    slice_from("ent");
+                    return false;
+                
+                case 15: 
+                    // (, line 149
+                    // test, line 149
+                    v_11 = limit - cursor;
+                    // (, line 149
+                    if (!(in_grouping_b(g_v, 97, 251)))
+                    {
+                        return false;
+                    }
+                    // call RV, line 149
+                    if (!r_RV())
+                    {
+                        return false;
+                    }
+                    cursor = limit - v_11;
+                    // fail, line 149
+                    // (, line 149
+                    // delete, line 149
+                    slice_del();
+                    return false;
+                }
+            return true;
+        }
+        
+        private bool r_i_verb_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            // setlimit, line 154
+            v_1 = limit - cursor;
+            // tomark, line 154
+            if (cursor < I_pV)
+            {
+                return false;
+            }
+            cursor = I_pV;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 154
+            // [, line 155
+            ket = cursor;
+            // substring, line 155
+            among_var = find_among_b(a_4, 35);
+            if (among_var == 0)
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            // ], line 155
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    limit_backward = v_2;
+                    return false;
+                
+                case 1: 
+                    // (, line 161
+                    if (!(out_grouping_b(g_v, 97, 251)))
+                    {
+                        limit_backward = v_2;
+                        return false;
+                    }
+                    // delete, line 161
+                    slice_del();
+                    break;
+                }
+            limit_backward = v_2;
+            return true;
+        }
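
A note on the prologue that r_i_verb_suffix shares with r_verb_suffix and r_residual_suffix below: it is Snowball's setlimit/tomark, which clamps the backward search window to the RV region before any suffix matching and restores it on every exit. Annotated, the steps already visible above are:

    v_1 = limit - cursor;        // remember the offset from the string's end
    if (cursor < I_pV)           // fail if the cursor sits left of the RV mark
        return false;
    cursor = I_pV;
    v_2 = limit_backward;        // save the old backward limit...
    limit_backward = cursor;     // ...and narrow it to the RV region
    cursor = limit - v_1;        // resume matching at the original position
    // ... find_among_b / slice_del operate here, confined to RV ...
    limit_backward = v_2;        // restored on every return path

so no backward match can escape the verb-suffix region.
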
+        
+        private bool r_verb_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            int v_3;
+            // setlimit, line 165
+            v_1 = limit - cursor;
+            // tomark, line 165
+            if (cursor < I_pV)
+            {
+                return false;
+            }
+            cursor = I_pV;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 165
+            // [, line 166
+            ket = cursor;
+            // substring, line 166
+            among_var = find_among_b(a_5, 38);
+            if (among_var == 0)
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            // ], line 166
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    limit_backward = v_2;
+                    return false;
+                
+                case 1: 
+                    // (, line 168
+                    // call R2, line 168
+                    if (!r_R2())
+                    {
+                        limit_backward = v_2;
+                        return false;
+                    }
+                    // delete, line 168
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 176
+                    // delete, line 176
+                    slice_del();
+                    break;
+                
+                case 3: 
+                    // (, line 181
+                    // delete, line 181
+                    slice_del();
+                    // try, line 182
+                    v_3 = limit - cursor;
+                    do 
+                    {
+                        // (, line 182
+                        // [, line 182
+                        ket = cursor;
+                        // literal, line 182
+                        if (!(eq_s_b(1, "e")))
+                        {
+                            cursor = limit - v_3;
+                            goto lab16_brk;
+                        }
+                        // ], line 182
+                        bra = cursor;
+                        // delete, line 182
+                        slice_del();
+                    }
+                    while (false);
 
 lab16_brk: ;
-					
-					break;
-				}
-			limit_backward = v_2;
-			return true;
-		}
-		
-		private bool r_residual_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			// (, line 189
-			// try, line 190
-			v_1 = limit - cursor;
-			do 
-			{
-				// (, line 190
-				// [, line 190
-				ket = cursor;
-				// literal, line 190
-				if (!(eq_s_b(1, "s")))
-				{
-					cursor = limit - v_1;
-					goto lab0_brk;
-				}
-				// ], line 190
-				bra = cursor;
-				// test, line 190
-				v_2 = limit - cursor;
-				if (!(out_grouping_b(g_keep_with_s, 97, 232)))
-				{
-					cursor = limit - v_1;
-					goto lab0_brk;
-				}
-				cursor = limit - v_2;
-				// delete, line 190
-				slice_del();
-			}
-			while (false);
+                    
+                    break;
+                }
+            limit_backward = v_2;
+            return true;
+        }
+        
+        private bool r_residual_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            // (, line 189
+            // try, line 190
+            v_1 = limit - cursor;
+            do 
+            {
+                // (, line 190
+                // [, line 190
+                ket = cursor;
+                // literal, line 190
+                if (!(eq_s_b(1, "s")))
+                {
+                    cursor = limit - v_1;
+                    goto lab0_brk;
+                }
+                // ], line 190
+                bra = cursor;
+                // test, line 190
+                v_2 = limit - cursor;
+                if (!(out_grouping_b(g_keep_with_s, 97, 232)))
+                {
+                    cursor = limit - v_1;
+                    goto lab0_brk;
+                }
+                cursor = limit - v_2;
+                // delete, line 190
+                slice_del();
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			// setlimit, line 191
-			v_3 = limit - cursor;
-			// tomark, line 191
-			if (cursor < I_pV)
-			{
-				return false;
-			}
-			cursor = I_pV;
-			v_4 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_3;
-			// (, line 191
-			// [, line 192
-			ket = cursor;
-			// substring, line 192
-			among_var = find_among_b(a_6, 7);
-			if (among_var == 0)
-			{
-				limit_backward = v_4;
-				return false;
-			}
-			// ], line 192
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					limit_backward = v_4;
-					return false;
-				
-				case 1: 
-					// (, line 193
-					// call R2, line 193
-					if (!r_R2())
-					{
-						limit_backward = v_4;
-						return false;
-					}
-					// or, line 193
-					do 
-					{
-						v_5 = limit - cursor;
-						do 
-						{
-							// literal, line 193
-							if (!(eq_s_b(1, "s")))
-							{
-								goto lab2_brk;
-							}
-							goto lab1_brk;
-						}
-						while (false);
+            
+            // setlimit, line 191
+            v_3 = limit - cursor;
+            // tomark, line 191
+            if (cursor < I_pV)
+            {
+                return false;
+            }
+            cursor = I_pV;
+            v_4 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_3;
+            // (, line 191
+            // [, line 192
+            ket = cursor;
+            // substring, line 192
+            among_var = find_among_b(a_6, 7);
+            if (among_var == 0)
+            {
+                limit_backward = v_4;
+                return false;
+            }
+            // ], line 192
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    limit_backward = v_4;
+                    return false;
+                
+                case 1: 
+                    // (, line 193
+                    // call R2, line 193
+                    if (!r_R2())
+                    {
+                        limit_backward = v_4;
+                        return false;
+                    }
+                    // or, line 193
+                    do 
+                    {
+                        v_5 = limit - cursor;
+                        do 
+                        {
+                            // literal, line 193
+                            if (!(eq_s_b(1, "s")))
+                            {
+                                goto lab2_brk;
+                            }
+                            goto lab1_brk;
+                        }
+                        while (false);
 
 lab2_brk: ;
-						
-						cursor = limit - v_5;
-						// literal, line 193
-						if (!(eq_s_b(1, "t")))
-						{
-							limit_backward = v_4;
-							return false;
-						}
-					}
-					while (false);
+                        
+                        cursor = limit - v_5;
+                        // literal, line 193
+                        if (!(eq_s_b(1, "t")))
+                        {
+                            limit_backward = v_4;
+                            return false;
+                        }
+                    }
+                    while (false);
 
 lab1_brk: ;
 
-					// delete, line 193
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 195
-					// <-, line 195
-					slice_from("i");
-					break;
-				
-				case 3: 
-					// (, line 196
-					// delete, line 196
-					slice_del();
-					break;
-				
-				case 4: 
-					// (, line 197
-					// literal, line 197
-					if (!(eq_s_b(2, "gu")))
-					{
-						limit_backward = v_4;
-						return false;
-					}
-					// delete, line 197
-					slice_del();
-					break;
-				}
-			limit_backward = v_4;
-			return true;
-		}
-		
-		private bool r_un_double()
-		{
-			int v_1;
-			// (, line 202
-			// test, line 203
-			v_1 = limit - cursor;
-			// among, line 203
-			if (find_among_b(a_7, 5) == 0)
-			{
-				return false;
-			}
-			cursor = limit - v_1;
-			// [, line 203
-			ket = cursor;
-			// next, line 203
-			if (cursor <= limit_backward)
-			{
-				return false;
-			}
-			cursor--;
-			// ], line 203
-			bra = cursor;
-			// delete, line 203
-			slice_del();
-			return true;
-		}
-		
-		private bool r_un_accent()
-		{
-			int v_3;
-			// (, line 206
-			// atleast, line 207
-			{
-				int v_1 = 1;
-				// atleast, line 207
-				while (true)
-				{
-					do 
-					{
-						if (!(out_grouping_b(g_v, 97, 251)))
-						{
-							goto lab16_brk;
-						}
-						v_1--;
-						goto replab1;
-					}
-					while (false);
+                    // delete, line 193
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 195
+                    // <-, line 195
+                    slice_from("i");
+                    break;
+                
+                case 3: 
+                    // (, line 196
+                    // delete, line 196
+                    slice_del();
+                    break;
+                
+                case 4: 
+                    // (, line 197
+                    // literal, line 197
+                    if (!(eq_s_b(2, "gu")))
+                    {
+                        limit_backward = v_4;
+                        return false;
+                    }
+                    // delete, line 197
+                    slice_del();
+                    break;
+                }
+            limit_backward = v_4;
+            return true;
+        }
+        
+        private bool r_un_double()
+        {
+            int v_1;
+            // (, line 202
+            // test, line 203
+            v_1 = limit - cursor;
+            // among, line 203
+            if (find_among_b(a_7, 5) == 0)
+            {
+                return false;
+            }
+            cursor = limit - v_1;
+            // [, line 203
+            ket = cursor;
+            // next, line 203
+            if (cursor <= limit_backward)
+            {
+                return false;
+            }
+            cursor--;
+            // ], line 203
+            bra = cursor;
+            // delete, line 203
+            slice_del();
+            return true;
+        }
+        
+        private bool r_un_accent()
+        {
+            int v_3;
+            // (, line 206
+            // atleast, line 207
+            {
+                int v_1 = 1;
+                // atleast, line 207
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(out_grouping_b(g_v, 97, 251)))
+                        {
+                            goto lab16_brk;
+                        }
+                        v_1--;
+                        goto replab1;
+                    }
+                    while (false);
 
 lab16_brk: ;
-					
-					goto replab1_brk;
+                    
+                    goto replab1_brk;
 
 replab1: ;
-				}
+                }
 
 replab1_brk: ;
-				
-				if (v_1 > 0)
-				{
-					return false;
-				}
-			}
-			// [, line 208
-			ket = cursor;
-			// or, line 208
+                
+                if (v_1 > 0)
+                {
+                    return false;
+                }
+            }
+            // [, line 208
+            ket = cursor;
+            // or, line 208
 lab16: 
-			do 
-			{
-				v_3 = limit - cursor;
-				do 
-				{
-					// literal, line 208
-					if (!(eq_s_b(1, "\u00E9")))
-					{
-						goto lab16_brk;
-					}
-					goto lab16_brk;
-				}
-				while (false);
+            do 
+            {
+                v_3 = limit - cursor;
+                do 
+                {
+                    // literal, line 208
+                    if (!(eq_s_b(1, "\u00E9")))
+                    {
+                        goto lab16_brk;
+                    }
+                    goto lab16_brk;
+                }
+                while (false);
 
 lab16_brk: ;
-				
-				cursor = limit - v_3;
-				// literal, line 208
-				if (!(eq_s_b(1, "\u00E8")))
-				{
-					return false;
-				}
-			}
-			while (false);
-			// ], line 208
-			bra = cursor;
-			// <-, line 208
-			slice_from("e");
-			return true;
-		}
-		
-		public override bool Stem()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			int v_6;
-			int v_7;
-			int v_8;
-			int v_9;
-			int v_10;
-			int v_11;
-			// (, line 212
-			// do, line 214
-			v_1 = cursor;
-			do 
-			{
-				// call prelude, line 214
-				if (!r_prelude())
-				{
-					goto lab0_brk;
-				}
-			}
-			while (false);
+                
+                cursor = limit - v_3;
+                // literal, line 208
+                if (!(eq_s_b(1, "\u00E8")))
+                {
+                    return false;
+                }
+            }
+            while (false);
+            // ], line 208
+            bra = cursor;
+            // <-, line 208
+            slice_from("e");
+            return true;
+        }
+        
+        public override bool Stem()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            int v_6;
+            int v_7;
+            int v_8;
+            int v_9;
+            int v_10;
+            int v_11;
+            // (, line 212
+            // do, line 214
+            v_1 = cursor;
+            do 
+            {
+                // call prelude, line 214
+                if (!r_prelude())
+                {
+                    goto lab0_brk;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = v_1;
-			// do, line 215
-			v_2 = cursor;
-			do 
-			{
-				// call mark_regions, line 215
-				if (!r_mark_regions())
-				{
-					goto lab1_brk;
-				}
-			}
-			while (false);
+            
+            cursor = v_1;
+            // do, line 215
+            v_2 = cursor;
+            do 
+            {
+                // call mark_regions, line 215
+                if (!r_mark_regions())
+                {
+                    goto lab1_brk;
+                }
+            }
+            while (false);
 
 lab1_brk: ;
-			
-			cursor = v_2;
-			// backwards, line 216
-			limit_backward = cursor; cursor = limit;
-			// (, line 216
-			// do, line 218
-			v_3 = limit - cursor;
-			do 
-			{
-				// (, line 218
-				// or, line 228
-				do 
-				{
-					v_4 = limit - cursor;
-					do 
-					{
-						// (, line 219
-						// and, line 224
-						v_5 = limit - cursor;
-						// (, line 220
-						// or, line 220
-						do 
-						{
-							v_6 = limit - cursor;
-							do 
-							{
-								// call standard_suffix, line 220
-								if (!r_standard_suffix())
-								{
-									goto lab6_brk;
-								}
-								goto lab5_brk;
-							}
-							while (false);
+            
+            cursor = v_2;
+            // backwards, line 216
+            limit_backward = cursor; cursor = limit;
+            // (, line 216
+            // do, line 218
+            v_3 = limit - cursor;
+            do 
+            {
+                // (, line 218
+                // or, line 228
+                do 
+                {
+                    v_4 = limit - cursor;
+                    do 
+                    {
+                        // (, line 219
+                        // and, line 224
+                        v_5 = limit - cursor;
+                        // (, line 220
+                        // or, line 220
+                        do 
+                        {
+                            v_6 = limit - cursor;
+                            do 
+                            {
+                                // call standard_suffix, line 220
+                                if (!r_standard_suffix())
+                                {
+                                    goto lab6_brk;
+                                }
+                                goto lab5_brk;
+                            }
+                            while (false);
 
 lab6_brk: ;
-							
-							cursor = limit - v_6;
-							do 
-							{
-								// call i_verb_suffix, line 221
-								if (!r_i_verb_suffix())
-								{
-									goto lab7_brk;
-								}
-								goto lab5_brk;
-							}
-							while (false);
+                            
+                            cursor = limit - v_6;
+                            do 
+                            {
+                                // call i_verb_suffix, line 221
+                                if (!r_i_verb_suffix())
+                                {
+                                    goto lab7_brk;
+                                }
+                                goto lab5_brk;
+                            }
+                            while (false);
 
 lab7_brk: ;
-							
-							cursor = limit - v_6;
-							// call verb_suffix, line 222
-							if (!r_verb_suffix())
-							{
-								goto lab4_brk;
-							}
-						}
-						while (false);
+                            
+                            cursor = limit

<TRUNCATED>
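
The stemmer diff above leans on a single C# idiom throughout: a do { ... } while (false) block paired with goto labN_brk. C# has no labeled break, so the Snowball code generator emits each breakable scope as a one-pass loop; jumping to the label is the early exit, and falling out of the loop is the backtracking path for Snowball's or/try constructs. A minimal, self-contained sketch of the pattern (cursor and the two alternative methods are illustrative stand-ins, not stemmer routines):

    internal class SnowballIdiomSketch
    {
        private int cursor;

        // Stand-ins for generated tests such as eq_s_b(...) or r_R2().
        private bool FirstAlternative() { return false; }
        private bool SecondAlternative() { return true; }

        internal bool OrOfTwoAlternatives()
        {
            int v_1 = cursor;            // snapshot, like "v_1 = limit - cursor"
            do
            {
                if (FirstAlternative())
                    goto lab0_brk;       // success: skip the fallback branch
            }
            while (false);

            cursor = v_1;                // backtrack, then try the other branch
            if (!SecondAlternative())
                return false;

        lab0_brk: ;
            return true;
        }
    }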

[07/51] [partial] Mass convert mixed tabs to spaces

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/MergePolicy.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/MergePolicy.cs b/src/core/Index/MergePolicy.cs
index cdc2060..63876c8 100644
--- a/src/core/Index/MergePolicy.cs
+++ b/src/core/Index/MergePolicy.cs
@@ -22,319 +22,319 @@ using Directory = Lucene.Net.Store.Directory;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> <p/>Expert: a MergePolicy determines the sequence of
-	/// primitive merge operations to be used for overall merge
-	/// and optimize operations.<p/>
-	/// 
-	/// <p/>Whenever the segments in an index have been altered by
-	/// <see cref="IndexWriter" />, either the addition of a newly
-	/// flushed segment, addition of many segments from
-	/// addIndexes* calls, or a previous merge that may now need
-	/// to cascade, <see cref="IndexWriter" /> invokes <see cref="FindMerges" />
-	/// to give the MergePolicy a chance to pick
-	/// merges that are now required.  This method returns a
-	/// <see cref="MergeSpecification" /> instance describing the set of
-	/// merges that should be done, or null if no merges are
-	/// necessary.  When IndexWriter.optimize is called, it calls
-	/// <see cref="FindMergesForOptimize" /> and the MergePolicy should
-	/// then return the necessary merges.<p/>
-	/// 
-	/// <p/>Note that the policy can return more than one merge at
-	/// a time.  In this case, if the writer is using <see cref="SerialMergeScheduler" />
-	///, the merges will be run
-	/// sequentially but if it is using <see cref="ConcurrentMergeScheduler" />
-	/// they will be run concurrently.<p/>
-	/// 
-	/// <p/>The default MergePolicy is <see cref="LogByteSizeMergePolicy" />
-	///.<p/>
-	/// 
-	/// <p/><b>NOTE:</b> This API is new and still experimental
-	/// (subject to change suddenly in the next release)<p/>
-	/// 
-	/// <p/><b>NOTE</b>: This class typically requires access to
-	/// package-private APIs (e.g. <c>SegmentInfos</c>) to do its job;
-	/// if you implement your own MergePolicy, you'll need to put
-	/// it in package Lucene.Net.Index in order to use
-	/// these APIs.
-	/// </summary>
-	
-	public abstract class MergePolicy : IDisposable
-	{
-		
-		/// <summary>OneMerge provides the information necessary to perform
-		/// an individual primitive merge operation, resulting in
-		/// a single new segment.  The merge spec includes the
-		/// subset of segments to be merged as well as whether the
-		/// new segment should use the compound file format. 
-		/// </summary>
-		
-		public class OneMerge
-		{
-			
-			internal SegmentInfo info;              // used by IndexWriter
-			internal bool mergeDocStores;           // used by IndexWriter
-			internal bool optimize;                 // used by IndexWriter
-			internal bool registerDone;             // used by IndexWriter
-			internal long mergeGen;                 // used by IndexWriter
-			internal bool isExternal;               // used by IndexWriter
-			internal int maxNumSegmentsOptimize;    // used by IndexWriter
-			internal SegmentReader[] readers;       // used by IndexWriter
-			internal SegmentReader[] readersClone;  // used by IndexWriter
-			internal SegmentInfos segments;
-			internal bool useCompoundFile;
-			internal bool aborted;
-			internal System.Exception error;
-			
-			public OneMerge(SegmentInfos segments, bool useCompoundFile)
-			{
-				if (0 == segments.Count)
-					throw new ArgumentException("segments must include at least one segment", "segments");
-				this.segments = segments;
-				this.useCompoundFile = useCompoundFile;
-			}
-			
-			/// <summary>Record that an exception occurred while executing
-			/// this merge 
-			/// </summary>
-			internal virtual void  SetException(System.Exception error)
-			{
-				lock (this)
-				{
-					this.error = error;
-				}
-			}
-			
-			/// <summary>Retrieve previous exception set by <see cref="SetException" />
-			///. 
-			/// </summary>
-			internal virtual System.Exception GetException()
-			{
-				lock (this)
-				{
-					return error;
-				}
-			}
-			
-			/// <summary>Mark this merge as aborted.  If this is called
-			/// before the merge is committed then the merge will
-			/// not be committed. 
-			/// </summary>
-			internal virtual void  Abort()
-			{
-				lock (this)
-				{
-					aborted = true;
-				}
-			}
-			
-			/// <summary>Returns true if this merge was aborted. </summary>
-			internal virtual bool IsAborted()
-			{
-				lock (this)
-				{
-					return aborted;
-				}
-			}
-			
-			internal virtual void  CheckAborted(Directory dir)
-			{
-				lock (this)
-				{
-					if (aborted)
-						throw new MergeAbortedException("merge is aborted: " + SegString(dir));
-				}
-			}
-			
-			internal virtual String SegString(Directory dir)
-			{
-				var b = new System.Text.StringBuilder();
-				int numSegments = segments.Count;
-				for (int i = 0; i < numSegments; i++)
-				{
-					if (i > 0)
-						b.Append(' ');
-					b.Append(segments.Info(i).SegString(dir));
-				}
-				if (info != null)
-					b.Append(" into ").Append(info.name);
-				if (optimize)
-					b.Append(" [optimize]");
-				if (mergeDocStores)
-				{
-					b.Append(" [mergeDocStores]");
-				}
-				return b.ToString();
-			}
+    
+    /// <summary> <p/>Expert: a MergePolicy determines the sequence of
+    /// primitive merge operations to be used for overall merge
+    /// and optimize operations.<p/>
+    /// 
+    /// <p/>Whenever the segments in an index have been altered by
+    /// <see cref="IndexWriter" />, either the addition of a newly
+    /// flushed segment, addition of many segments from
+    /// addIndexes* calls, or a previous merge that may now need
+    /// to cascade, <see cref="IndexWriter" /> invokes <see cref="FindMerges" />
+    /// to give the MergePolicy a chance to pick
+    /// merges that are now required.  This method returns a
+    /// <see cref="MergeSpecification" /> instance describing the set of
+    /// merges that should be done, or null if no merges are
+    /// necessary.  When IndexWriter.optimize is called, it calls
+    /// <see cref="FindMergesForOptimize" /> and the MergePolicy should
+    /// then return the necessary merges.<p/>
+    /// 
+    /// <p/>Note that the policy can return more than one merge at
+    /// a time.  In this case, if the writer is using <see cref="SerialMergeScheduler" />
+    ///, the merges will be run
+    /// sequentially but if it is using <see cref="ConcurrentMergeScheduler" />
+    /// they will be run concurrently.<p/>
+    /// 
+    /// <p/>The default MergePolicy is <see cref="LogByteSizeMergePolicy" />
+    ///.<p/>
+    /// 
+    /// <p/><b>NOTE:</b> This API is new and still experimental
+    /// (subject to change suddenly in the next release)<p/>
+    /// 
+    /// <p/><b>NOTE</b>: This class typically requires access to
+    /// package-private APIs (e.g. <c>SegmentInfos</c>) to do its job;
+    /// if you implement your own MergePolicy, you'll need to put
+    /// it in package Lucene.Net.Index in order to use
+    /// these APIs.
+    /// </summary>
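
A minimal configuration sketch for the contract just described, assuming the 2.9-era surface: IndexWriter.SetMergePolicy and the writer-taking LogByteSizeMergePolicy constructor are assumptions from that API generation, not verified against this revision.

    // Hedged sketch: installing the documented default policy explicitly.
    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Index;
    using Lucene.Net.Store;

    static class MergePolicySketch
    {
        static void Configure(string indexPath)
        {
            var dir = FSDirectory.Open(new System.IO.DirectoryInfo(indexPath));
            var writer = new IndexWriter(dir, new StandardAnalyzer(),
                                         IndexWriter.MaxFieldLength.UNLIMITED);
            writer.SetMergePolicy(new LogByteSizeMergePolicy(writer)); // assumed ctor
            // Every flush or AddIndexes call now routes through the policy's
            // FindMerges, which hands a MergeSpecification (or null) to the
            // merge scheduler.
        }
    }
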
+    
+    public abstract class MergePolicy : IDisposable
+    {
+        
+        /// <summary>OneMerge provides the information necessary to perform
+        /// an individual primitive merge operation, resulting in
+        /// a single new segment.  The merge spec includes the
+        /// subset of segments to be merged as well as whether the
+        /// new segment should use the compound file format. 
+        /// </summary>
+        
+        public class OneMerge
+        {
+            
+            internal SegmentInfo info;              // used by IndexWriter
+            internal bool mergeDocStores;           // used by IndexWriter
+            internal bool optimize;                 // used by IndexWriter
+            internal bool registerDone;             // used by IndexWriter
+            internal long mergeGen;                 // used by IndexWriter
+            internal bool isExternal;               // used by IndexWriter
+            internal int maxNumSegmentsOptimize;    // used by IndexWriter
+            internal SegmentReader[] readers;       // used by IndexWriter
+            internal SegmentReader[] readersClone;  // used by IndexWriter
+            internal SegmentInfos segments;
+            internal bool useCompoundFile;
+            internal bool aborted;
+            internal System.Exception error;
+            
+            public OneMerge(SegmentInfos segments, bool useCompoundFile)
+            {
+                if (0 == segments.Count)
+                    throw new ArgumentException("segments must include at least one segment", "segments");
+                this.segments = segments;
+                this.useCompoundFile = useCompoundFile;
+            }
+            
+            /// <summary>Record that an exception occurred while executing
+            /// this merge 
+            /// </summary>
+            internal virtual void  SetException(System.Exception error)
+            {
+                lock (this)
+                {
+                    this.error = error;
+                }
+            }
+            
+            /// <summary>Retrieve previous exception set by <see cref="SetException" />
+            ///. 
+            /// </summary>
+            internal virtual System.Exception GetException()
+            {
+                lock (this)
+                {
+                    return error;
+                }
+            }
+            
+            /// <summary>Mark this merge as aborted.  If this is called
+            /// before the merge is committed then the merge will
+            /// not be committed. 
+            /// </summary>
+            internal virtual void  Abort()
+            {
+                lock (this)
+                {
+                    aborted = true;
+                }
+            }
+            
+            /// <summary>Returns true if this merge was aborted. </summary>
+            internal virtual bool IsAborted()
+            {
+                lock (this)
+                {
+                    return aborted;
+                }
+            }
+            
+            internal virtual void  CheckAborted(Directory dir)
+            {
+                lock (this)
+                {
+                    if (aborted)
+                        throw new MergeAbortedException("merge is aborted: " + SegString(dir));
+                }
+            }
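
CheckAborted is the cooperative half of the abort protocol: Abort() merely flips the flag under the lock, and a running merge is expected to poll so that the exception unwinds it before anything is committed. A sketch of the polling shape (HasMoreWork and DoSomeWork are hypothetical stand-ins; CheckAborted is internal, so this applies only to code inside Lucene.Net.Index):

    // while (HasMoreWork())
    // {
    //     merge.CheckAborted(dir);   // throws MergeAbortedException once
    //     DoSomeWork();              // Abort() has been called
    // }
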
+            
+            internal virtual String SegString(Directory dir)
+            {
+                var b = new System.Text.StringBuilder();
+                int numSegments = segments.Count;
+                for (int i = 0; i < numSegments; i++)
+                {
+                    if (i > 0)
+                        b.Append(' ');
+                    b.Append(segments.Info(i).SegString(dir));
+                }
+                if (info != null)
+                    b.Append(" into ").Append(info.name);
+                if (optimize)
+                    b.Append(" [optimize]");
+                if (mergeDocStores)
+                {
+                    b.Append(" [mergeDocStores]");
+                }
+                return b.ToString();
+            }
 
             public SegmentInfos segments_ForNUnit
             {
                 get { return segments; }
             }
-		}
-		
-		/// <summary> A MergeSpecification instance provides the information
-		/// necessary to perform multiple merges.  It simply
-		/// contains a list of <see cref="OneMerge" /> instances.
-		/// </summary>
-		
-		public class MergeSpecification
-		{
-			
-			/// <summary> The subset of segments to be included in the primitive merge.</summary>
-			
-			public IList<OneMerge> merges = new List<OneMerge>();
-			
-			public virtual void  Add(OneMerge merge)
-			{
-				merges.Add(merge);
-			}
-			
-			public virtual String SegString(Directory dir)
-			{
-				var b = new System.Text.StringBuilder();
-				b.Append("MergeSpec:\n");
-				int count = merges.Count;
-				for (int i = 0; i < count; i++)
-					b.Append("  ").Append(1 + i).Append(": ").Append(merges[i].SegString(dir));
-				return b.ToString();
-			}
-		}
-		
-		/// <summary>Exception thrown if there are any problems while
-		/// executing a merge. 
-		/// </summary>
-		[Serializable]
-		public class MergeException:System.SystemException
-		{
-			private readonly Directory dir;
+        }
+        
+        /// <summary> A MergeSpecification instance provides the information
+        /// necessary to perform multiple merges.  It simply
+        /// contains a list of <see cref="OneMerge" /> instances.
+        /// </summary>
+        
+        public class MergeSpecification
+        {
+            
+            /// <summary> The subset of segments to be included in the primitive merge.</summary>
+            
+            public IList<OneMerge> merges = new List<OneMerge>();
+            
+            public virtual void  Add(OneMerge merge)
+            {
+                merges.Add(merge);
+            }
+            
+            public virtual String SegString(Directory dir)
+            {
+                var b = new System.Text.StringBuilder();
+                b.Append("MergeSpec:\n");
+                int count = merges.Count;
+                for (int i = 0; i < count; i++)
+                    b.Append("  ").Append(1 + i).Append(": ").Append(merges[i].SegString(dir));
+                return b.ToString();
+            }
+        }
+        
+        /// <summary>Exception thrown if there are any problems while
+        /// executing a merge. 
+        /// </summary>
+        [Serializable]
+        public class MergeException:System.SystemException
+        {
+            private readonly Directory dir;
 
-			public MergeException(System.String message, Directory dir):base(message)
-			{
-				this.dir = dir;
-			}
+            public MergeException(System.String message, Directory dir):base(message)
+            {
+                this.dir = dir;
+            }
 
-			public MergeException(System.Exception exc, Directory dir):base(null, exc)
-			{
-				this.dir = dir;
-			}
+            public MergeException(System.Exception exc, Directory dir):base(null, exc)
+            {
+                this.dir = dir;
+            }
 
-		    public MergeException()
-		    {
+            public MergeException()
+            {
             }
 
-		    public MergeException(string message) : base(message)
-		    {
+            public MergeException(string message) : base(message)
+            {
             }
 
-		    public MergeException(string message, Exception ex) : base(message, ex)
-		    {
+            public MergeException(string message, Exception ex) : base(message, ex)
+            {
             }
 
             protected MergeException(
-	                SerializationInfo info,
-	                StreamingContext context) : base(info, context)
-	        {
-	        }
+                    SerializationInfo info,
+                    StreamingContext context) : base(info, context)
+            {
+            }
 
-		    /// <summary>Returns the <see cref="Directory" /> of the index that hit
-		    /// the exception. 
-		    /// </summary>
-		    public virtual Directory Directory
-		    {
-		        get { return dir; }
-		    }
-		}
+            /// <summary>Returns the <see cref="Directory" /> of the index that hit
+            /// the exception. 
+            /// </summary>
+            public virtual Directory Directory
+            {
+                get { return dir; }
+            }
+        }
 
-	    [Serializable]
+        [Serializable]
         public class MergeAbortedException : System.IO.IOException
-	    {
+        {
             public MergeAbortedException()
                 : base("merge is aborted")
-	        {
-	        }
+            {
+            }
 
-	        public MergeAbortedException(string message) : base(message)
-	        {
-	        }
+            public MergeAbortedException(string message) : base(message)
+            {
+            }
 
-	        public MergeAbortedException(string message, Exception inner) : base(message, inner)
-	        {
-	        }
+            public MergeAbortedException(string message, Exception inner) : base(message, inner)
+            {
+            }
 
-	        protected MergeAbortedException(
-	                SerializationInfo info,
-	                StreamingContext context) : base(info, context)
-	        {
-	        }
-	    }
-		
-		protected internal IndexWriter writer;
+            protected MergeAbortedException(
+                    SerializationInfo info,
+                    StreamingContext context) : base(info, context)
+            {
+            }
+        }
+        
+        protected internal IndexWriter writer;
 
-	    protected MergePolicy(IndexWriter writer)
-		{
-			this.writer = writer;
-		}
-		
-		/// <summary> Determine what set of merge operations are now necessary on the index.
-		/// <see cref="IndexWriter" /> calls this whenever there is a change to the segments.
-		/// This call is always synchronized on the <see cref="IndexWriter" /> instance so
-		/// only one thread at a time will call this method.
-		/// 
-		/// </summary>
-		/// <param name="segmentInfos">the total set of segments in the index
-		/// </param>
-		public abstract MergeSpecification FindMerges(SegmentInfos segmentInfos);
+        protected MergePolicy(IndexWriter writer)
+        {
+            this.writer = writer;
+        }
+        
+        /// <summary> Determine what set of merge operations are now necessary on the index.
+        /// <see cref="IndexWriter" /> calls this whenever there is a change to the segments.
+        /// This call is always synchronized on the <see cref="IndexWriter" /> instance so
+        /// only one thread at a time will call this method.
+        /// 
+        /// </summary>
+        /// <param name="segmentInfos">the total set of segments in the index
+        /// </param>
+        public abstract MergeSpecification FindMerges(SegmentInfos segmentInfos);
 
-	    /// <summary> Determine what set of merge operations is necessary in order to optimize
-	    /// the index. <see cref="IndexWriter" /> calls this when its
-	    /// <see cref="IndexWriter.Optimize()" /> method is called. This call is always
-	    /// synchronized on the <see cref="IndexWriter" /> instance so only one thread at a
-	    /// time will call this method.
-	    /// 
-	    /// </summary>
-	    /// <param name="segmentInfos">the total set of segments in the index
-	    /// </param>
-	    /// <param name="maxSegmentCount">requested maximum number of segments in the index (currently this
-	    /// is always 1)
-	    /// </param>
-	    /// <param name="segmentsToOptimize">contains the specific SegmentInfo instances that must be merged
-	    /// away. This may be a subset of all SegmentInfos.
-	    /// </param>
-	    public abstract MergeSpecification FindMergesForOptimize(SegmentInfos segmentInfos, int maxSegmentCount,
-	                                                             ISet<SegmentInfo> segmentsToOptimize);
-		
-		/// <summary> Determine what set of merge operations is necessary in order to expunge all
-		/// deletes from the index.
-		/// 
-		/// </summary>
-		/// <param name="segmentInfos">the total set of segments in the index
-		/// </param>
-		public abstract MergeSpecification FindMergesToExpungeDeletes(SegmentInfos segmentInfos);
+        /// <summary> Determine what set of merge operations is necessary in order to optimize
+        /// the index. <see cref="IndexWriter" /> calls this when its
+        /// <see cref="IndexWriter.Optimize()" /> method is called. This call is always
+        /// synchronized on the <see cref="IndexWriter" /> instance so only one thread at a
+        /// time will call this method.
+        /// 
+        /// </summary>
+        /// <param name="segmentInfos">the total set of segments in the index
+        /// </param>
+        /// <param name="maxSegmentCount">requested maximum number of segments in the index (currently this
+        /// is always 1)
+        /// </param>
+        /// <param name="segmentsToOptimize">contains the specific SegmentInfo instances that must be merged
+        /// away. This may be a subset of all SegmentInfos.
+        /// </param>
+        public abstract MergeSpecification FindMergesForOptimize(SegmentInfos segmentInfos, int maxSegmentCount,
+                                                                 ISet<SegmentInfo> segmentsToOptimize);
+        
+        /// <summary> Determine what set of merge operations is necessary in order to expunge all
+        /// deletes from the index.
+        /// 
+        /// </summary>
+        /// <param name="segmentInfos">the total set of segments in the index
+        /// </param>
+        public abstract MergeSpecification FindMergesToExpungeDeletes(SegmentInfos segmentInfos);
 
         /// <summary> Release all resources for the policy.</summary>
         [Obsolete("Use Dispose() instead")]
-		public void Close()
+        public void Close()
         {
             Dispose();
         }
 
         /// <summary> Release all resources for the policy.</summary>
-	    public void Dispose()
-	    {
-	        Dispose(true);
-	    }
+        public void Dispose()
+        {
+            Dispose(true);
+        }
 
-	    protected abstract void Dispose(bool disposing);
-		
-		/// <summary> Returns true if a newly flushed (not from merge)
-		/// segment should use the compound file format.
-		/// </summary>
-		public abstract bool UseCompoundFile(SegmentInfos segments, SegmentInfo newSegment);
-		
-		/// <summary> Returns true if the doc store files should use the
-		/// compound file format.
-		/// </summary>
-		public abstract bool UseCompoundDocStore(SegmentInfos segments);
-	}
+        protected abstract void Dispose(bool disposing);
+        
+        /// <summary> Returns true if a newly flushed (not from merge)
+        /// segment should use the compound file format.
+        /// </summary>
+        public abstract bool UseCompoundFile(SegmentInfos segments, SegmentInfo newSegment);
+        
+        /// <summary> Returns true if the doc store files should use the
+        /// compound file format.
+        /// </summary>
+        public abstract bool UseCompoundDocStore(SegmentInfos segments);
+    }
 }
\ No newline at end of file
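
For orientation, here is what a concrete policy built on this contract can
look like. This is a minimal sketch, not the shipped LogMergePolicy: the
batch-of-mergeFactor heuristic is invented for illustration, and
SegmentInfos.Range(first, last) is assumed to slice the segment list the way
LogMergePolicy uses it internally.

    using System;
    using System.Collections.Generic;

    namespace Lucene.Net.Index
    {
        // Hypothetical policy: merge every run of 'mergeFactor' adjacent
        // segments into one new segment.
        internal class SimpleBatchMergePolicy : MergePolicy
        {
            private readonly int mergeFactor;

            public SimpleBatchMergePolicy(IndexWriter writer, int mergeFactor)
                : base(writer)
            {
                this.mergeFactor = mergeFactor;
            }

            public override MergeSpecification FindMerges(SegmentInfos segmentInfos)
            {
                if (segmentInfos.Count < mergeFactor)
                    return null; // nothing worth merging yet

                var spec = new MergeSpecification();
                for (int start = 0; start + mergeFactor <= segmentInfos.Count; start += mergeFactor)
                {
                    // Range(first, last) is assumed to return the [first, last)
                    // slice of the segment list.
                    spec.Add(new OneMerge(segmentInfos.Range(start, start + mergeFactor), true));
                }
                return spec.merges.Count == 0 ? null : spec;
            }

            public override MergeSpecification FindMergesForOptimize(SegmentInfos segmentInfos,
                int maxSegmentCount, ISet<SegmentInfo> segmentsToOptimize)
            {
                if (segmentInfos.Count <= maxSegmentCount)
                    return null;
                // For brevity this sketch ignores segmentsToOptimize and
                // simply merges everything into one segment.
                var spec = new MergeSpecification();
                spec.Add(new OneMerge(segmentInfos.Range(0, segmentInfos.Count), true));
                return spec;
            }

            public override MergeSpecification FindMergesToExpungeDeletes(SegmentInfos segmentInfos)
            {
                return null; // this sketch never schedules expunge-only merges
            }

            public override bool UseCompoundFile(SegmentInfos segments, SegmentInfo newSegment)
            {
                return true;
            }

            public override bool UseCompoundDocStore(SegmentInfos segments)
            {
                return true;
            }

            protected override void Dispose(bool disposing)
            {
                // no unmanaged state to release in this sketch
            }
        }
    }
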

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/MergeScheduler.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/MergeScheduler.cs b/src/core/Index/MergeScheduler.cs
index 7fbf83d..b619db2 100644
--- a/src/core/Index/MergeScheduler.cs
+++ b/src/core/Index/MergeScheduler.cs
@@ -19,30 +19,30 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary><p/>Expert: <see cref="IndexWriter" /> uses an instance
-	/// implementing this interface to execute the merges
-	/// selected by a <see cref="MergePolicy" />.  The default
-	/// MergeScheduler is <see cref="ConcurrentMergeScheduler" />.<p/>
-	/// 
-	/// <p/><b>NOTE:</b> This API is new and still experimental
-	/// (subject to change suddenly in the next release)<p/>
-	/// 
-	/// <p/><b>NOTE</b>: This class typically requires access to
-	/// package-private APIs (eg, SegmentInfos) to do its job;
-	/// if you implement your own MergeScheduler, you'll need to put
-	/// it in package Lucene.Net.Index in order to use
-	/// these APIs.
-	/// </summary>
-	
-	public abstract class MergeScheduler : IDisposable
-	{
-		
-		/// <summary>Run the merges provided by <see cref="IndexWriter.GetNextMerge()" />. </summary>
-		public abstract void  Merge(IndexWriter writer);
-		
+    
+    /// <summary><p/>Expert: <see cref="IndexWriter" /> uses an instance
+    /// implementing this interface to execute the merges
+    /// selected by a <see cref="MergePolicy" />.  The default
+    /// MergeScheduler is <see cref="ConcurrentMergeScheduler" />.<p/>
+    /// 
+    /// <p/><b>NOTE:</b> This API is new and still experimental
+    /// (subject to change suddenly in the next release)<p/>
+    /// 
+    /// <p/><b>NOTE</b>: This class typically requires access to
+    /// package-private APIs (eg, SegmentInfos) to do its job;
+    /// if you implement your own MergeScheduler, you'll need to put
+    /// it in package Lucene.Net.Index in order to use
+    /// these APIs.
+    /// </summary>
+    
+    public abstract class MergeScheduler : IDisposable
+    {
+        
+        /// <summary>Run the merges provided by <see cref="IndexWriter.GetNextMerge()" />. </summary>
+        public abstract void  Merge(IndexWriter writer);
+        
         [Obsolete("Use Dispose() instead")]
-		public void Close()
+        public void Close()
         {
             Dispose();
         }
@@ -52,7 +52,7 @@ namespace Lucene.Net.Index
             Dispose(true);
         }
 
-	    /// <summary>Close this MergeScheduler. </summary>
-	    protected abstract void Dispose(bool disposing);
-	}
+        /// <summary>Close this MergeScheduler. </summary>
+        protected abstract void Dispose(bool disposing);
+    }
 }
\ No newline at end of file
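
The simplest scheduler just drains the writer's pending-merge queue on the
calling thread; in sketch form that is what the shipped SerialMergeScheduler
does. Note that IndexWriter.GetNextMerge() and the IndexWriter.Merge(OneMerge)
entry point it pairs with are internal, which is why the NOTE above says a
custom scheduler has to live alongside Lucene.Net.Index (in the port,
"package-private" effectively means same assembly).

    namespace Lucene.Net.Index
    {
        // Sketch of a single-threaded scheduler: run each pending merge
        // on the calling thread, one at a time.
        internal class InlineMergeScheduler : MergeScheduler
        {
            public override void Merge(IndexWriter writer)
            {
                lock (this) // serialize concurrent callers
                {
                    while (true)
                    {
                        MergePolicy.OneMerge merge = writer.GetNextMerge();
                        if (merge == null)
                            break; // queue drained
                        writer.Merge(merge);
                    }
                }
            }

            protected override void Dispose(bool disposing)
            {
                // nothing to release: no threads or streams are owned here
            }
        }
    }
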

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/MultiLevelSkipListReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/MultiLevelSkipListReader.cs b/src/core/Index/MultiLevelSkipListReader.cs
index 28b4fd5..408ecfa 100644
--- a/src/core/Index/MultiLevelSkipListReader.cs
+++ b/src/core/Index/MultiLevelSkipListReader.cs
@@ -22,161 +22,161 @@ using IndexInput = Lucene.Net.Store.IndexInput;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> This abstract class reads skip lists with multiple levels.
-	/// 
-	/// See <see cref="MultiLevelSkipListWriter" /> for the information about the encoding 
-	/// of the multi level skip lists. 
-	/// 
-	/// Subclasses must implement the abstract method <see cref="ReadSkipData(int, IndexInput)" />
-	/// which defines the actual format of the skip data.
-	/// </summary>
-	abstract class MultiLevelSkipListReader : IDisposable
-	{
-		// the maximum number of skip levels possible for this index
-		private readonly int maxNumberOfSkipLevels;
-		
-		// number of levels in this skip list
-		private int numberOfSkipLevels;
-		
-		// Expert: defines the number of top skip levels to buffer in memory.
-		// Reducing this number results in less memory usage, but possibly
-		// slower performance due to more random I/Os.
-		// Please notice that the space each level occupies is limited by
-		// the skipInterval. The top level cannot contain more than
-		// skipInterval entries, the second-highest level cannot contain more
-		// than skipInterval^2 entries, and so forth.
-		private const int numberOfLevelsToBuffer = 1;
+    
+    /// <summary> This abstract class reads skip lists with multiple levels.
+    /// 
+    /// See <see cref="MultiLevelSkipListWriter" /> for the information about the encoding 
+    /// of the multi level skip lists. 
+    /// 
+    /// Subclasses must implement the abstract method <see cref="ReadSkipData(int, IndexInput)" />
+    /// which defines the actual format of the skip data.
+    /// </summary>
+    abstract class MultiLevelSkipListReader : IDisposable
+    {
+        // the maximum number of skip levels possible for this index
+        private readonly int maxNumberOfSkipLevels;
+        
+        // number of levels in this skip list
+        private int numberOfSkipLevels;
+        
+        // Expert: defines the number of top skip levels to buffer in memory.
+        // Reducing this number results in less memory usage, but possibly
+        // slower performance due to more random I/Os.
+        // Please notice that the space each level occupies is limited by
+        // the skipInterval. The top level cannot contain more than
+        // skipInterval entries, the second-highest level cannot contain more
+        // than skipInterval^2 entries, and so forth.
+        private const int numberOfLevelsToBuffer = 1;
 
-		private int docCount;
-		private bool haveSkipped;
+        private int docCount;
+        private bool haveSkipped;
 
-	    private bool isDisposed;
-		
-		private readonly IndexInput[] skipStream; // skipStream for each level
-		private readonly long[] skipPointer; // the start pointer of each skip level
-		private readonly int[] skipInterval; // skipInterval of each level
-		private readonly int[] numSkipped; // number of docs skipped per level
-		
-		private readonly int[] skipDoc; // doc id of current skip entry per level 
-		private int lastDoc; // doc id of last read skip entry with docId <= target
-		private readonly long[] childPointer; // child pointer of current skip entry per level
-		private long lastChildPointer; // childPointer of last read skip entry with docId <= target
-		
-		private readonly bool inputIsBuffered;
+        private bool isDisposed;
+        
+        private readonly IndexInput[] skipStream; // skipStream for each level
+        private readonly long[] skipPointer; // the start pointer of each skip level
+        private readonly int[] skipInterval; // skipInterval of each level
+        private readonly int[] numSkipped; // number of docs skipped per level
+        
+        private readonly int[] skipDoc; // doc id of current skip entry per level 
+        private int lastDoc; // doc id of last read skip entry with docId <= target
+        private readonly long[] childPointer; // child pointer of current skip entry per level
+        private long lastChildPointer; // childPointer of last read skip entry with docId <= target
+        
+        private readonly bool inputIsBuffered;
 
-		protected MultiLevelSkipListReader(IndexInput skipStream, int maxSkipLevels, int skipInterval)
-		{
-			this.skipStream = new IndexInput[maxSkipLevels];
-			this.skipPointer = new long[maxSkipLevels];
-			this.childPointer = new long[maxSkipLevels];
-			this.numSkipped = new int[maxSkipLevels];
-			this.maxNumberOfSkipLevels = maxSkipLevels;
-			this.skipInterval = new int[maxSkipLevels];
-			this.skipStream[0] = skipStream;
-			this.inputIsBuffered = (skipStream is BufferedIndexInput);
-			this.skipInterval[0] = skipInterval;
-			for (int i = 1; i < maxSkipLevels; i++)
-			{
-				// cache skip intervals
-				this.skipInterval[i] = this.skipInterval[i - 1] * skipInterval;
-			}
-			skipDoc = new int[maxSkipLevels];
-		}
-		
-		
-		/// <summary>Returns the id of the doc to which the last call of <see cref="SkipTo(int)" />
-		/// has skipped.  
-		/// </summary>
-		internal virtual int GetDoc()
-		{
-			return lastDoc;
-		}
-		
-		
-		/// <summary>Skips entries to the first beyond the current whose document number is
-		/// greater than or equal to <i>target</i>. Returns the current doc count. 
-		/// </summary>
-		internal virtual int SkipTo(int target)
-		{
-			if (!haveSkipped)
-			{
-				// first time, load skip levels
-				LoadSkipLevels();
-				haveSkipped = true;
-			}
-			
-			// walk up the levels until highest level is found that has a skip
-			// for this target
-			int level = 0;
-			while (level < numberOfSkipLevels - 1 && target > skipDoc[level + 1])
-			{
-				level++;
-			}
-			
-			while (level >= 0)
-			{
-				if (target > skipDoc[level])
-				{
-					if (!LoadNextSkip(level))
-					{
-						continue;
-					}
-				}
-				else
-				{
-					// no more skips on this level, go down one level
-					if (level > 0 && lastChildPointer > skipStream[level - 1].FilePointer)
-					{
-						SeekChild(level - 1);
-					}
-					level--;
-				}
-			}
-			
-			return numSkipped[0] - skipInterval[0] - 1;
-		}
-		
-		private bool LoadNextSkip(int level)
-		{
-			// we have to skip, the target document is greater than the current
-			// skip list entry        
-			SetLastSkipData(level);
-			
-			numSkipped[level] += skipInterval[level];
-			
-			if (numSkipped[level] > docCount)
-			{
-				// this skip list is exhausted
-				skipDoc[level] = System.Int32.MaxValue;
-				if (numberOfSkipLevels > level)
-					numberOfSkipLevels = level;
-				return false;
-			}
-			
-			// read next skip entry
-			skipDoc[level] += ReadSkipData(level, skipStream[level]);
-			
-			if (level != 0)
-			{
-				// read the child pointer if we are not on the leaf level
-				childPointer[level] = skipStream[level].ReadVLong() + skipPointer[level - 1];
-			}
-			
-			return true;
-		}
-		
-		/// <summary>Seeks the skip entry on the given level </summary>
-		protected internal virtual void  SeekChild(int level)
-		{
-			skipStream[level].Seek(lastChildPointer);
-			numSkipped[level] = numSkipped[level + 1] - skipInterval[level + 1];
-			skipDoc[level] = lastDoc;
-			if (level > 0)
-			{
-				childPointer[level] = skipStream[level].ReadVLong() + skipPointer[level - 1];
-			}
-		}
+        protected MultiLevelSkipListReader(IndexInput skipStream, int maxSkipLevels, int skipInterval)
+        {
+            this.skipStream = new IndexInput[maxSkipLevels];
+            this.skipPointer = new long[maxSkipLevels];
+            this.childPointer = new long[maxSkipLevels];
+            this.numSkipped = new int[maxSkipLevels];
+            this.maxNumberOfSkipLevels = maxSkipLevels;
+            this.skipInterval = new int[maxSkipLevels];
+            this.skipStream[0] = skipStream;
+            this.inputIsBuffered = (skipStream is BufferedIndexInput);
+            this.skipInterval[0] = skipInterval;
+            for (int i = 1; i < maxSkipLevels; i++)
+            {
+                // cache skip intervals
+                this.skipInterval[i] = this.skipInterval[i - 1] * skipInterval;
+            }
+            skipDoc = new int[maxSkipLevels];
+        }
+        
+        
+        /// <summary>Returns the id of the doc to which the last call of <see cref="SkipTo(int)" />
+        /// has skipped.  
+        /// </summary>
+        internal virtual int GetDoc()
+        {
+            return lastDoc;
+        }
+        
+        
+        /// <summary>Skips entries to the first beyond the current whose document number is
+        /// greater than or equal to <i>target</i>. Returns the current doc count. 
+        /// </summary>
+        internal virtual int SkipTo(int target)
+        {
+            if (!haveSkipped)
+            {
+                // first time, load skip levels
+                LoadSkipLevels();
+                haveSkipped = true;
+            }
+            
+            // walk up the levels until highest level is found that has a skip
+            // for this target
+            int level = 0;
+            while (level < numberOfSkipLevels - 1 && target > skipDoc[level + 1])
+            {
+                level++;
+            }
+            
+            while (level >= 0)
+            {
+                if (target > skipDoc[level])
+                {
+                    if (!LoadNextSkip(level))
+                    {
+                        continue;
+                    }
+                }
+                else
+                {
+                    // no more skips on this level, go down one level
+                    if (level > 0 && lastChildPointer > skipStream[level - 1].FilePointer)
+                    {
+                        SeekChild(level - 1);
+                    }
+                    level--;
+                }
+            }
+            
+            return numSkipped[0] - skipInterval[0] - 1;
+        }
+        
+        private bool LoadNextSkip(int level)
+        {
+            // we have to skip, the target document is greater than the current
+            // skip list entry        
+            SetLastSkipData(level);
+            
+            numSkipped[level] += skipInterval[level];
+            
+            if (numSkipped[level] > docCount)
+            {
+                // this skip list is exhausted
+                skipDoc[level] = System.Int32.MaxValue;
+                if (numberOfSkipLevels > level)
+                    numberOfSkipLevels = level;
+                return false;
+            }
+            
+            // read next skip entry
+            skipDoc[level] += ReadSkipData(level, skipStream[level]);
+            
+            if (level != 0)
+            {
+                // read the child pointer if we are not on the leaf level
+                childPointer[level] = skipStream[level].ReadVLong() + skipPointer[level - 1];
+            }
+            
+            return true;
+        }
+        
+        /// <summary>Seeks the skip entry on the given level </summary>
+        protected internal virtual void  SeekChild(int level)
+        {
+            skipStream[level].Seek(lastChildPointer);
+            numSkipped[level] = numSkipped[level + 1] - skipInterval[level + 1];
+            skipDoc[level] = lastDoc;
+            if (level > 0)
+            {
+                childPointer[level] = skipStream[level].ReadVLong() + skipPointer[level - 1];
+            }
+        }
 
         public void Dispose()
         {
@@ -200,99 +200,99 @@ namespace Lucene.Net.Index
 
             isDisposed = true;
         }
-		
-		/// <summary>initializes the reader </summary>
-		internal virtual void  Init(long skipPointer, int df)
-		{
-			this.skipPointer[0] = skipPointer;
-			this.docCount = df;
+        
+        /// <summary>initializes the reader </summary>
+        internal virtual void  Init(long skipPointer, int df)
+        {
+            this.skipPointer[0] = skipPointer;
+            this.docCount = df;
             System.Array.Clear(skipDoc, 0, skipDoc.Length);
-			System.Array.Clear(numSkipped, 0, numSkipped.Length);
+            System.Array.Clear(numSkipped, 0, numSkipped.Length);
             System.Array.Clear(childPointer, 0, childPointer.Length);
-			
-			haveSkipped = false;
-			for (int i = 1; i < numberOfSkipLevels; i++)
-			{
-				skipStream[i] = null;
-			}
-		}
-		
-		/// <summary>Loads the skip levels  </summary>
-		private void  LoadSkipLevels()
-		{
-			numberOfSkipLevels = docCount == 0?0:(int) System.Math.Floor(System.Math.Log(docCount) / System.Math.Log(skipInterval[0]));
-			if (numberOfSkipLevels > maxNumberOfSkipLevels)
-			{
-				numberOfSkipLevels = maxNumberOfSkipLevels;
-			}
-			
-			skipStream[0].Seek(skipPointer[0]);
-			
-			int toBuffer = numberOfLevelsToBuffer;
-			
-			for (int i = numberOfSkipLevels - 1; i > 0; i--)
-			{
-				// the length of the current level
-				long length = skipStream[0].ReadVLong();
-				
-				// the start pointer of the current level
-				skipPointer[i] = skipStream[0].FilePointer;
-				if (toBuffer > 0)
-				{
-					// buffer this level
-					skipStream[i] = new SkipBuffer(skipStream[0], (int) length);
-					toBuffer--;
-				}
-				else
-				{
-					// clone this stream, it is already at the start of the current level
-					skipStream[i] = (IndexInput) skipStream[0].Clone();
-					if (inputIsBuffered && length < BufferedIndexInput.BUFFER_SIZE)
-					{
-						((BufferedIndexInput) skipStream[i]).SetBufferSize((int) length);
-					}
-					
-					// move base stream beyond the current level
-					skipStream[0].Seek(skipStream[0].FilePointer + length);
-				}
-			}
-			
-			// use base stream for the lowest level
-			skipPointer[0] = skipStream[0].FilePointer;
-		}
-		
-		/// <summary> Subclasses must implement the actual skip data encoding in this method.
-		/// 
-		/// </summary>
-		/// <param name="level">the level skip data shall be read from
-		/// </param>
-		/// <param name="skipStream">the skip stream to read from
-		/// </param>
-		protected internal abstract int ReadSkipData(int level, IndexInput skipStream);
-		
-		/// <summary>Copies the values of the last read skip entry on this level </summary>
-		protected internal virtual void  SetLastSkipData(int level)
-		{
-			lastDoc = skipDoc[level];
-			lastChildPointer = childPointer[level];
-		}
-		
-		
-		/// <summary>used to buffer the top skip levels </summary>
-		private sealed class SkipBuffer : IndexInput
-		{
-			private byte[] data;
-			private readonly long pointer;
-			private int pos;
+            
+            haveSkipped = false;
+            for (int i = 1; i < numberOfSkipLevels; i++)
+            {
+                skipStream[i] = null;
+            }
+        }
+        
+        /// <summary>Loads the skip levels  </summary>
+        private void  LoadSkipLevels()
+        {
+            numberOfSkipLevels = docCount == 0?0:(int) System.Math.Floor(System.Math.Log(docCount) / System.Math.Log(skipInterval[0]));
+            if (numberOfSkipLevels > maxNumberOfSkipLevels)
+            {
+                numberOfSkipLevels = maxNumberOfSkipLevels;
+            }
+            
+            skipStream[0].Seek(skipPointer[0]);
+            
+            int toBuffer = numberOfLevelsToBuffer;
+            
+            for (int i = numberOfSkipLevels - 1; i > 0; i--)
+            {
+                // the length of the current level
+                long length = skipStream[0].ReadVLong();
+                
+                // the start pointer of the current level
+                skipPointer[i] = skipStream[0].FilePointer;
+                if (toBuffer > 0)
+                {
+                    // buffer this level
+                    skipStream[i] = new SkipBuffer(skipStream[0], (int) length);
+                    toBuffer--;
+                }
+                else
+                {
+                    // clone this stream, it is already at the start of the current level
+                    skipStream[i] = (IndexInput) skipStream[0].Clone();
+                    if (inputIsBuffered && length < BufferedIndexInput.BUFFER_SIZE)
+                    {
+                        ((BufferedIndexInput) skipStream[i]).SetBufferSize((int) length);
+                    }
+                    
+                    // move base stream beyond the current level
+                    skipStream[0].Seek(skipStream[0].FilePointer + length);
+                }
+            }
+            
+            // use base stream for the lowest level
+            skipPointer[0] = skipStream[0].FilePointer;
+        }
+        
+        /// <summary> Subclasses must implement the actual skip data encoding in this method.
+        /// 
+        /// </summary>
+        /// <param name="level">the level skip data shall be read from
+        /// </param>
+        /// <param name="skipStream">the skip stream to read from
+        /// </param>
+        protected internal abstract int ReadSkipData(int level, IndexInput skipStream);
+        
+        /// <summary>Copies the values of the last read skip entry on this level </summary>
+        protected internal virtual void  SetLastSkipData(int level)
+        {
+            lastDoc = skipDoc[level];
+            lastChildPointer = childPointer[level];
+        }
+        
+        
+        /// <summary>used to buffer the top skip levels </summary>
+        private sealed class SkipBuffer : IndexInput
+        {
+            private byte[] data;
+            private readonly long pointer;
+            private int pos;
 
-		    private bool isDisposed;
-			
-			internal SkipBuffer(IndexInput input, int length)
-			{
-				data = new byte[length];
-				pointer = input.FilePointer;
-				input.ReadBytes(data, 0, length);
-			}
+            private bool isDisposed;
+            
+            internal SkipBuffer(IndexInput input, int length)
+            {
+                data = new byte[length];
+                pointer = input.FilePointer;
+                input.ReadBytes(data, 0, length);
+            }
 
             protected override void Dispose(bool disposing)
             {
@@ -305,37 +305,37 @@ namespace Lucene.Net.Index
                 isDisposed = true;
             }
 
-		    public override long FilePointer
-		    {
-		        get { return pointer + pos; }
-		    }
+            public override long FilePointer
+            {
+                get { return pointer + pos; }
+            }
 
-		    public override long Length()
-			{
-				return data.Length;
-			}
-			
-			public override byte ReadByte()
-			{
-				return data[pos++];
-			}
-			
-			public override void  ReadBytes(byte[] b, int offset, int len)
-			{
-				Array.Copy(data, pos, b, offset, len);
-				pos += len;
-			}
-			
-			public override void  Seek(long pos)
-			{
-				this.pos = (int) (pos - pointer);
-			}
-			
-			override public System.Object Clone()
-			{
+            public override long Length()
+            {
+                return data.Length;
+            }
+            
+            public override byte ReadByte()
+            {
+                return data[pos++];
+            }
+            
+            public override void  ReadBytes(byte[] b, int offset, int len)
+            {
+                Array.Copy(data, pos, b, offset, len);
+                pos += len;
+            }
+            
+            public override void  Seek(long pos)
+            {
+                this.pos = (int) (pos - pointer);
+            }
+            
+            override public System.Object Clone()
+            {
                 System.Diagnostics.Debug.Fail("Port issue:", "Lets see if we need this FilterIndexReader.Clone()"); // {{Aroush-2.9}}
-				return null;
-			}
-		}
-	}
+                return null;
+            }
+        }
+    }
 }
\ No newline at end of file
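
A concrete reader only supplies the per-entry decoding; level selection,
buffering, and child seeking are the base class's job. Below is a minimal
sketch for a hypothetical skip format in which each entry is a single VInt
document delta (the real DefaultSkipListReader additionally decodes freq/prox
file pointers and payload lengths here). Since the base class is internal,
a real subclass would have to live inside the Lucene.Net assembly.

    using IndexInput = Lucene.Net.Store.IndexInput;

    namespace Lucene.Net.Index
    {
        internal class DocDeltaSkipListReader : MultiLevelSkipListReader
        {
            internal DocDeltaSkipListReader(IndexInput skipStream, int maxSkipLevels, int skipInterval)
                : base(skipStream, maxSkipLevels, skipInterval)
            {
            }

            // Called once per skip entry; the returned doc delta is
            // accumulated into skipDoc[level] by the base class.
            protected internal override int ReadSkipData(int level, IndexInput skipStream)
            {
                return skipStream.ReadVInt();
            }
        }
    }

Usage follows the base class's protocol: Init(skipPointer, df) positions the
reader, SkipTo(target) walks down from the highest useful level, and GetDoc()
reports the last doc skipped to.
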

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/MultiLevelSkipListWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/MultiLevelSkipListWriter.cs b/src/core/Index/MultiLevelSkipListWriter.cs
index 00543f2..4afe04f 100644
--- a/src/core/Index/MultiLevelSkipListWriter.cs
+++ b/src/core/Index/MultiLevelSkipListWriter.cs
@@ -22,150 +22,150 @@ using RAMOutputStream = Lucene.Net.Store.RAMOutputStream;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> This abstract class writes skip lists with multiple levels.
-	/// 
-	/// Example for skipInterval = 3:
-	/// c            (skip level 2)
-	/// c                 c                 c            (skip level 1) 
-	/// x     x     x     x     x     x     x     x     x     x      (skip level 0)
-	/// d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d  (posting list)
-	/// 3     6     9     12    15    18    21    24    27    30     (df)
-	/// 
-	/// d - document
-	/// x - skip data
-	/// c - skip data with child pointer
-	/// 
-	/// Skip level i contains every skipInterval-th entry from skip level i-1.
-	/// Therefore the number of entries on level i is: floor(df / (skipInterval ^ (i + 1))).
-	/// 
-	/// Each skip entry on a level i>0 contains a pointer to the corresponding skip entry in list i-1.
-	/// This guarantees a logarithmic number of skips to find the target document.
-	/// 
-	/// While this class takes care of writing the different skip levels,
-	/// subclasses must define the actual format of the skip data.
-	/// 
-	/// </summary>
-	abstract class MultiLevelSkipListWriter
-	{
-		// number of levels in this skip list
-		private int numberOfSkipLevels;
-		
-		// the skip interval in the list with level = 0
-		private int skipInterval;
-		
-		// for every skip level a different buffer is used 
-		private RAMOutputStream[] skipBuffer;
-		
-		protected internal MultiLevelSkipListWriter(int skipInterval, int maxSkipLevels, int df)
-		{
-			this.skipInterval = skipInterval;
-			
-			// calculate the maximum number of skip levels for this document frequency
-			numberOfSkipLevels = df == 0?0:(int) System.Math.Floor(System.Math.Log(df) / System.Math.Log(skipInterval));
-			
-			// make sure it does not exceed maxSkipLevels
-			if (numberOfSkipLevels > maxSkipLevels)
-			{
-				numberOfSkipLevels = maxSkipLevels;
-			}
-		}
-		
-		protected internal virtual void  Init()
-		{
-			skipBuffer = new RAMOutputStream[numberOfSkipLevels];
-			for (int i = 0; i < numberOfSkipLevels; i++)
-			{
-				skipBuffer[i] = new RAMOutputStream();
-			}
-		}
-		
-		protected internal virtual void  ResetSkip()
-		{
-			// creates new buffers or empties the existing ones
-			if (skipBuffer == null)
-			{
-				Init();
-			}
-			else
-			{
-				for (int i = 0; i < skipBuffer.Length; i++)
-				{
-					skipBuffer[i].Reset();
-				}
-			}
-		}
-		
-		/// <summary> Subclasses must implement the actual skip data encoding in this method.
-		/// 
-		/// </summary>
-		/// <param name="level">the level the skip data shall be written for
-		/// </param>
-		/// <param name="skipBuffer">the skip buffer to write to
-		/// </param>
-		protected internal abstract void  WriteSkipData(int level, IndexOutput skipBuffer);
-		
-		/// <summary> Writes the current skip data to the buffers. The current document frequency determines
-		/// the max level the skip data is to be written to. 
-		/// 
-		/// </summary>
-		/// <param name="df">the current document frequency 
-		/// </param>
-		/// <throws>  IOException </throws>
-		internal virtual void  BufferSkip(int df)
-		{
-			int numLevels;
-			
-			// determine max level
-			for (numLevels = 0; (df % skipInterval) == 0 && numLevels < numberOfSkipLevels; df /= skipInterval)
-			{
-				numLevels++;
-			}
-			
-			long childPointer = 0;
-			
-			for (int level = 0; level < numLevels; level++)
-			{
-				WriteSkipData(level, skipBuffer[level]);
-				
-				long newChildPointer = skipBuffer[level].FilePointer;
-				
-				if (level != 0)
-				{
-					// store child pointers for all levels except the lowest
-					skipBuffer[level].WriteVLong(childPointer);
-				}
-				
-				//remember the childPointer for the next level
-				childPointer = newChildPointer;
-			}
-		}
-		
-		/// <summary> Writes the buffered skip lists to the given output.
-		/// 
-		/// </summary>
-		/// <param name="output">the IndexOutput the skip lists shall be written to 
-		/// </param>
-		/// <returns> the pointer the skip list starts
-		/// </returns>
-		internal virtual long WriteSkip(IndexOutput output)
-		{
-			long skipPointer = output.FilePointer;
-			if (skipBuffer == null || skipBuffer.Length == 0)
-				return skipPointer;
-			
-			for (int level = numberOfSkipLevels - 1; level > 0; level--)
-			{
-				long length = skipBuffer[level].FilePointer;
-				if (length > 0)
-				{
-					output.WriteVLong(length);
-					skipBuffer[level].WriteTo(output);
-				}
-			}
-			skipBuffer[0].WriteTo(output);
-			
-			return skipPointer;
-		}
-	}
+    
+    /// <summary> This abstract class writes skip lists with multiple levels.
+    /// 
+    /// Example for skipInterval = 3:
+    /// c            (skip level 2)
+    /// c                 c                 c            (skip level 1) 
+    /// x     x     x     x     x     x     x     x     x     x      (skip level 0)
+    /// d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d  (posting list)
+    /// 3     6     9     12    15    18    21    24    27    30     (df)
+    /// 
+    /// d - document
+    /// x - skip data
+    /// c - skip data with child pointer
+    /// 
+    /// Skip level i contains every skipInterval-th entry from skip level i-1.
+    /// Therefore the number of entries on level i is: floor(df / (skipInterval ^ (i + 1))).
+    /// 
+    /// Each skip entry on a level i>0 contains a pointer to the corresponding skip entry in list i-1.
+    /// This guarantees a logarithmic number of skips to find the target document.
+    /// 
+    /// While this class takes care of writing the different skip levels,
+    /// subclasses must define the actual format of the skip data.
+    /// 
+    /// </summary>
+    abstract class MultiLevelSkipListWriter
+    {
+        // number of levels in this skip list
+        private int numberOfSkipLevels;
+        
+        // the skip interval in the list with level = 0
+        private int skipInterval;
+        
+        // for every skip level a different buffer is used 
+        private RAMOutputStream[] skipBuffer;
+        
+        protected internal MultiLevelSkipListWriter(int skipInterval, int maxSkipLevels, int df)
+        {
+            this.skipInterval = skipInterval;
+            
+            // calculate the maximum number of skip levels for this document frequency
+            numberOfSkipLevels = df == 0?0:(int) System.Math.Floor(System.Math.Log(df) / System.Math.Log(skipInterval));
+            
+            // make sure it does not exceed maxSkipLevels
+            if (numberOfSkipLevels > maxSkipLevels)
+            {
+                numberOfSkipLevels = maxSkipLevels;
+            }
+        }
+        
+        protected internal virtual void  Init()
+        {
+            skipBuffer = new RAMOutputStream[numberOfSkipLevels];
+            for (int i = 0; i < numberOfSkipLevels; i++)
+            {
+                skipBuffer[i] = new RAMOutputStream();
+            }
+        }
+        
+        protected internal virtual void  ResetSkip()
+        {
+            // creates new buffers or empties the existing ones
+            if (skipBuffer == null)
+            {
+                Init();
+            }
+            else
+            {
+                for (int i = 0; i < skipBuffer.Length; i++)
+                {
+                    skipBuffer[i].Reset();
+                }
+            }
+        }
+        
+        /// <summary> Subclasses must implement the actual skip data encoding in this method.
+        /// 
+        /// </summary>
+        /// <param name="level">the level the skip data shall be written for
+        /// </param>
+        /// <param name="skipBuffer">the skip buffer to write to
+        /// </param>
+        protected internal abstract void  WriteSkipData(int level, IndexOutput skipBuffer);
+        
+        /// <summary> Writes the current skip data to the buffers. The current document frequency determines
+        /// the max level the skip data is to be written to. 
+        /// 
+        /// </summary>
+        /// <param name="df">the current document frequency 
+        /// </param>
+        /// <throws>  IOException </throws>
+        internal virtual void  BufferSkip(int df)
+        {
+            int numLevels;
+            
+            // determine max level
+            for (numLevels = 0; (df % skipInterval) == 0 && numLevels < numberOfSkipLevels; df /= skipInterval)
+            {
+                numLevels++;
+            }
+            
+            long childPointer = 0;
+            
+            for (int level = 0; level < numLevels; level++)
+            {
+                WriteSkipData(level, skipBuffer[level]);
+                
+                long newChildPointer = skipBuffer[level].FilePointer;
+                
+                if (level != 0)
+                {
+                    // store child pointers for all levels except the lowest
+                    skipBuffer[level].WriteVLong(childPointer);
+                }
+                
+                //remember the childPointer for the next level
+                childPointer = newChildPointer;
+            }
+        }
+        
+        /// <summary> Writes the buffered skip lists to the given output.
+        /// 
+        /// </summary>
+        /// <param name="output">the IndexOutput the skip lists shall be written to 
+        /// </param>
+        /// <returns> the pointer the skip list starts
+        /// </returns>
+        internal virtual long WriteSkip(IndexOutput output)
+        {
+            long skipPointer = output.FilePointer;
+            if (skipBuffer == null || skipBuffer.Length == 0)
+                return skipPointer;
+            
+            for (int level = numberOfSkipLevels - 1; level > 0; level--)
+            {
+                long length = skipBuffer[level].FilePointer;
+                if (length > 0)
+                {
+                    output.WriteVLong(length);
+                    skipBuffer[level].WriteTo(output);
+                }
+            }
+            skipBuffer[0].WriteTo(output);
+            
+            return skipPointer;
+        }
+    }
 }
\ No newline at end of file
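
The writing side mirrors the reader: the base class decides which levels
receive an entry (BufferSkip) and concatenates the per-level buffers
(WriteSkip), while the subclass encodes a single entry. Here is the writer
counterpart of the doc-delta sketch above; the shipped DefaultSkipListWriter
additionally records freq/prox file pointers and payload lengths per entry.

    using IndexOutput = Lucene.Net.Store.IndexOutput;

    namespace Lucene.Net.Index
    {
        internal class DocDeltaSkipListWriter : MultiLevelSkipListWriter
        {
            private readonly int[] lastSkipDoc; // last doc written per level
            private int curDoc;                 // doc for the next entry

            internal DocDeltaSkipListWriter(int skipInterval, int maxSkipLevels, int df)
                : base(skipInterval, maxSkipLevels, df)
            {
                lastSkipDoc = new int[maxSkipLevels];
            }

            // The caller sets the current doc and then invokes
            // BufferSkip(df) once every skipInterval documents.
            internal void SetSkipData(int doc)
            {
                curDoc = doc;
            }

            protected internal override void ResetSkip()
            {
                base.ResetSkip();
                System.Array.Clear(lastSkipDoc, 0, lastSkipDoc.Length);
            }

            protected internal override void WriteSkipData(int level, IndexOutput skipBuffer)
            {
                // store the delta against the previous entry on this level
                skipBuffer.WriteVInt(curDoc - lastSkipDoc[level]);
                lastSkipDoc[level] = curDoc;
            }
        }
    }
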

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/MultipleTermPositions.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/MultipleTermPositions.cs b/src/core/Index/MultipleTermPositions.cs
index eab3dd5..34f3d65 100644
--- a/src/core/Index/MultipleTermPositions.cs
+++ b/src/core/Index/MultipleTermPositions.cs
@@ -21,175 +21,175 @@ using Lucene.Net.Util;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> Allows you to iterate over the <see cref="TermPositions" /> for multiple <see cref="Term" />s as
-	/// a single <see cref="TermPositions" />.
-	/// 
-	/// </summary>
-	public class MultipleTermPositions : TermPositions
-	{
-		private sealed class TermPositionsQueue : PriorityQueue<TermPositions>
-		{
-			internal TermPositionsQueue(LinkedList<TermPositions> termPositions)
-			{
-				Initialize(termPositions.Count);
-				
-				foreach(TermPositions tp in termPositions)
-					if (tp.Next())
-						Add(tp);
-			}
-			
-			internal TermPositions Peek()
-			{
-				return Top();
-			}
-			
-			public override bool LessThan(TermPositions a, TermPositions b)
-			{
-				return a.Doc < b.Doc;
-			}
-		}
-		
-		private sealed class IntQueue
-		{
-			public IntQueue()
-			{
-				InitBlock();
-			}
-			private void  InitBlock()
-			{
-				_array = new int[_arraySize];
-			}
-			private int _arraySize = 16;
-			private int _index = 0;
-			private int _lastIndex = 0;
-			private int[] _array;
-			
-			internal void  add(int i)
-			{
-				if (_lastIndex == _arraySize)
-					growArray();
-				
-				_array[_lastIndex++] = i;
-			}
-			
-			internal int next()
-			{
-				return _array[_index++];
-			}
-			
-			internal void  sort()
-			{
-				System.Array.Sort(_array, _index, _lastIndex - _index);
-			}
-			
-			internal void  clear()
-			{
-				_index = 0;
-				_lastIndex = 0;
-			}
-			
-			internal int size()
-			{
-				return (_lastIndex - _index);
-			}
-			
-			private void  growArray()
-			{
-				int[] newArray = new int[_arraySize * 2];
-				Array.Copy(_array, 0, newArray, 0, _arraySize);
-				_array = newArray;
-				_arraySize *= 2;
-			}
-		}
-		
-		private int _doc;
-		private int _freq;
-		private TermPositionsQueue _termPositionsQueue;
-		private IntQueue _posList;
+    
+    /// <summary> Allows you to iterate over the <see cref="TermPositions" /> for multiple <see cref="Term" />s as
+    /// a single <see cref="TermPositions" />.
+    /// 
+    /// </summary>
+    public class MultipleTermPositions : TermPositions
+    {
+        private sealed class TermPositionsQueue : PriorityQueue<TermPositions>
+        {
+            internal TermPositionsQueue(LinkedList<TermPositions> termPositions)
+            {
+                Initialize(termPositions.Count);
+                
+                foreach(TermPositions tp in termPositions)
+                    if (tp.Next())
+                        Add(tp);
+            }
+            
+            internal TermPositions Peek()
+            {
+                return Top();
+            }
+            
+            public override bool LessThan(TermPositions a, TermPositions b)
+            {
+                return a.Doc < b.Doc;
+            }
+        }
+        
+        private sealed class IntQueue
+        {
+            public IntQueue()
+            {
+                InitBlock();
+            }
+            private void  InitBlock()
+            {
+                _array = new int[_arraySize];
+            }
+            private int _arraySize = 16;
+            private int _index = 0;
+            private int _lastIndex = 0;
+            private int[] _array;
+            
+            internal void  add(int i)
+            {
+                if (_lastIndex == _arraySize)
+                    growArray();
+                
+                _array[_lastIndex++] = i;
+            }
+            
+            internal int next()
+            {
+                return _array[_index++];
+            }
+            
+            internal void  sort()
+            {
+                System.Array.Sort(_array, _index, _lastIndex - _index);
+            }
+            
+            internal void  clear()
+            {
+                _index = 0;
+                _lastIndex = 0;
+            }
+            
+            internal int size()
+            {
+                return (_lastIndex - _index);
+            }
+            
+            private void  growArray()
+            {
+                int[] newArray = new int[_arraySize * 2];
+                Array.Copy(_array, 0, newArray, 0, _arraySize);
+                _array = newArray;
+                _arraySize *= 2;
+            }
+        }
+        
+        private int _doc;
+        private int _freq;
+        private TermPositionsQueue _termPositionsQueue;
+        private IntQueue _posList;
 
-	    private bool isDisposed;
-		/// <summary> Creates a new <c>MultipleTermPositions</c> instance.
-		/// 
-		/// </summary>
-		/// <exception cref="System.IO.IOException">
-		/// </exception>
-		public MultipleTermPositions(IndexReader indexReader, Term[] terms)
-		{
-			var termPositions = new System.Collections.Generic.LinkedList<TermPositions>();
-			
-			for (int i = 0; i < terms.Length; i++)
-				termPositions.AddLast(indexReader.TermPositions(terms[i]));
-			
-			_termPositionsQueue = new TermPositionsQueue(termPositions);
-			_posList = new IntQueue();
-		}
-		
-		public bool Next()
-		{
-			if (_termPositionsQueue.Size() == 0)
-				return false;
-			
-			_posList.clear();
-			_doc = _termPositionsQueue.Peek().Doc;
-			
-			TermPositions tp;
-			do 
-			{
-				tp = _termPositionsQueue.Peek();
-				
-				for (int i = 0; i < tp.Freq; i++)
-					_posList.add(tp.NextPosition());
-				
-				if (tp.Next())
-					_termPositionsQueue.UpdateTop();
-				else
-				{
-					_termPositionsQueue.Pop();
-					tp.Close();
-				}
-			}
-			while (_termPositionsQueue.Size() > 0 && _termPositionsQueue.Peek().Doc == _doc);
-			
-			_posList.sort();
-			_freq = _posList.size();
-			
-			return true;
-		}
-		
-		public int NextPosition()
-		{
-			return _posList.next();
-		}
-		
-		public bool SkipTo(int target)
-		{
-			while (_termPositionsQueue.Peek() != null && target > _termPositionsQueue.Peek().Doc)
-			{
-				TermPositions tp = _termPositionsQueue.Pop();
-				if (tp.SkipTo(target))
-					_termPositionsQueue.Add(tp);
-				else
-					tp.Close();
-			}
-			return Next();
-		}
+        private bool isDisposed;
+        /// <summary> Creates a new <c>MultipleTermPositions</c> instance.
+        /// 
+        /// </summary>
+        /// <exception cref="System.IO.IOException">
+        /// </exception>
+        public MultipleTermPositions(IndexReader indexReader, Term[] terms)
+        {
+            var termPositions = new System.Collections.Generic.LinkedList<TermPositions>();
+            
+            for (int i = 0; i < terms.Length; i++)
+                termPositions.AddLast(indexReader.TermPositions(terms[i]));
+            
+            _termPositionsQueue = new TermPositionsQueue(termPositions);
+            _posList = new IntQueue();
+        }
+        
+        public bool Next()
+        {
+            if (_termPositionsQueue.Size() == 0)
+                return false;
+            
+            _posList.clear();
+            _doc = _termPositionsQueue.Peek().Doc;
+            
+            TermPositions tp;
+            do 
+            {
+                tp = _termPositionsQueue.Peek();
+                
+                for (int i = 0; i < tp.Freq; i++)
+                    _posList.add(tp.NextPosition());
+                
+                if (tp.Next())
+                    _termPositionsQueue.UpdateTop();
+                else
+                {
+                    _termPositionsQueue.Pop();
+                    tp.Close();
+                }
+            }
+            while (_termPositionsQueue.Size() > 0 && _termPositionsQueue.Peek().Doc == _doc);
+            
+            _posList.sort();
+            _freq = _posList.size();
+            
+            return true;
+        }
+        
+        public int NextPosition()
+        {
+            return _posList.next();
+        }
+        
+        public bool SkipTo(int target)
+        {
+            while (_termPositionsQueue.Peek() != null && target > _termPositionsQueue.Peek().Doc)
+            {
+                TermPositions tp = _termPositionsQueue.Pop();
+                if (tp.SkipTo(target))
+                    _termPositionsQueue.Add(tp);
+                else
+                    tp.Close();
+            }
+            return Next();
+        }
 
-	    public int Doc
-	    {
-	        get { return _doc; }
-	    }
+        public int Doc
+        {
+            get { return _doc; }
+        }
 
-	    public int Freq
-	    {
-	        get { return _freq; }
-	    }
+        public int Freq
+        {
+            get { return _freq; }
+        }
 
-	    [Obsolete("Use Dispose() instead")]
-		public void  Close()
-		{
-		    Dispose();
-		}
+        [Obsolete("Use Dispose() instead")]
+        public void  Close()
+        {
+            Dispose();
+        }
 
         public void Dispose()
         {
@@ -208,49 +208,49 @@ namespace Lucene.Net.Index
 
             isDisposed = true;
         }
-		
-		/// <summary> Not implemented.</summary>
-		/// <throws>  UnsupportedOperationException </throws>
-		public virtual void Seek(Term arg0)
-		{
-			throw new System.NotSupportedException();
-		}
-		
-		/// <summary> Not implemented.</summary>
-		/// <throws>  UnsupportedOperationException </throws>
-		public virtual void Seek(TermEnum termEnum)
-		{
-			throw new System.NotSupportedException();
-		}
-		
-		/// <summary> Not implemented.</summary>
-		/// <throws>  UnsupportedOperationException </throws>
-		public virtual int Read(int[] arg0, int[] arg1)
-		{
-			throw new System.NotSupportedException();
-		}
+        
+        /// <summary> Not implemented.</summary>
+        /// <throws>  UnsupportedOperationException </throws>
+        public virtual void Seek(Term arg0)
+        {
+            throw new System.NotSupportedException();
+        }
+        
+        /// <summary> Not implemented.</summary>
+        /// <throws>  UnsupportedOperationException </throws>
+        public virtual void Seek(TermEnum termEnum)
+        {
+            throw new System.NotSupportedException();
+        }
+        
+        /// <summary> Not implemented.</summary>
+        /// <throws>  UnsupportedOperationException </throws>
+        public virtual int Read(int[] arg0, int[] arg1)
+        {
+            throw new System.NotSupportedException();
+        }
 
 
-	    /// <summary> Not implemented.</summary>
-	    /// <throws>  UnsupportedOperationException </throws>
-	    public virtual int PayloadLength
-	    {
-	        get { throw new System.NotSupportedException(); }
-	    }
+        /// <summary> Not implemented.</summary>
+        /// <throws>  UnsupportedOperationException </throws>
+        public virtual int PayloadLength
+        {
+            get { throw new System.NotSupportedException(); }
+        }
 
-	    /// <summary> Not implemented.</summary>
-		/// <throws>  UnsupportedOperationException </throws>
-		public virtual byte[] GetPayload(byte[] data, int offset)
-		{
-			throw new System.NotSupportedException();
-		}
+        /// <summary> Not implemented.</summary>
+        /// <throws>  UnsupportedOperationException </throws>
+        public virtual byte[] GetPayload(byte[] data, int offset)
+        {
+            throw new System.NotSupportedException();
+        }
 
-	    /// <summary> </summary>
-	    /// <value> false </value>
+        /// <summary> </summary>
+        /// <value> false </value>
 // TODO: Remove warning after API has been finalized
-	    public virtual bool IsPayloadAvailable
-	    {
-	        get { return false; }
-	    }
-	}
+        public virtual bool IsPayloadAvailable
+        {
+            get { return false; }
+        }
+    }
 }
\ No newline at end of file
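
A note on the term-positions union logic that closes above: Next() drains every enumerator currently positioned on the smallest document, pools and sorts their positions, and Freq becomes the pooled position count. Below is a minimal self-contained sketch of that union-by-minimum-doc pattern; the Postings class is a hypothetical stand-in, not the Lucene.NET TermPositions API.

    using System;
    using System.Collections.Generic;
    using System.Linq;

    // Hypothetical stand-in for one term's enumerator: a sorted stream
    // of (doc, positions) entries that can be peeked and advanced.
    class Postings
    {
        private readonly Queue<KeyValuePair<int, int[]>> _entries;

        public Postings(params KeyValuePair<int, int[]>[] entries)
        {
            _entries = new Queue<KeyValuePair<int, int[]>>(entries);
        }

        public bool Done { get { return _entries.Count == 0; } }
        public int Doc { get { return _entries.Peek().Key; } }
        public int[] Positions { get { return _entries.Peek().Value; } }
        public void Advance() { _entries.Dequeue(); }
    }

    static class UnionDemo
    {
        static void Main()
        {
            var a = new Postings(
                new KeyValuePair<int, int[]>(1, new[] { 4 }),
                new KeyValuePair<int, int[]>(3, new[] { 0, 7 }));
            var b = new Postings(
                new KeyValuePair<int, int[]>(1, new[] { 9 }),
                new KeyValuePair<int, int[]>(2, new[] { 5 }));
            var streams = new List<Postings> { a, b };

            while (streams.Any(x => !x.Done))
            {
                // drain every stream sitting on the smallest current doc
                int minDoc = streams.Where(x => !x.Done).Min(x => x.Doc);
                var positions = new List<int>();
                foreach (var s in streams.Where(x => !x.Done && x.Doc == minDoc).ToList())
                {
                    positions.AddRange(s.Positions);
                    s.Advance();
                }
                positions.Sort(); // matches _posList.sort() above
                Console.WriteLine("doc=" + minDoc + " freq=" + positions.Count);
            }
        }
    }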

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/NormsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/NormsWriter.cs b/src/core/Index/NormsWriter.cs
index 507d69c..46bb941 100644
--- a/src/core/Index/NormsWriter.cs
+++ b/src/core/Index/NormsWriter.cs
@@ -23,184 +23,184 @@ using Similarity = Lucene.Net.Search.Similarity;
 
 namespace Lucene.Net.Index
 {
-	
-	// TODO FI: norms could actually be stored as doc store
-	
-	/// <summary>Writes norms.  Each thread X field accumulates the norms
-	/// for the doc/fields it saw, then the flush method below
-	/// merges all of these together into a single _X.nrm file.
-	/// </summary>
-	
-	sealed class NormsWriter : InvertedDocEndConsumer
-	{
-		
-		private static readonly byte defaultNorm;
-		private FieldInfos fieldInfos;
-		public override InvertedDocEndConsumerPerThread AddThread(DocInverterPerThread docInverterPerThread)
-		{
-			return new NormsWriterPerThread(docInverterPerThread, this);
-		}
-		
-		public override void  Abort()
-		{
-		}
-		
-		// We only write the _X.nrm file at flush
-		internal void  Files(ICollection<string> files)
-		{
-		}
-		
-		internal override void  SetFieldInfos(FieldInfos fieldInfos)
-		{
-			this.fieldInfos = fieldInfos;
-		}
-		
-		/// <summary>Produce _X.nrm if any document had a field with norms
-		/// not disabled 
-		/// </summary>
+    
+    // TODO FI: norms could actually be stored as doc store
+    
+    /// <summary>Writes norms.  Each thread X field accumulates the norms
+    /// for the doc/fields it saw, then the flush method below
+    /// merges all of these together into a single _X.nrm file.
+    /// </summary>
+    
+    sealed class NormsWriter : InvertedDocEndConsumer
+    {
+        
+        private static readonly byte defaultNorm;
+        private FieldInfos fieldInfos;
+        public override InvertedDocEndConsumerPerThread AddThread(DocInverterPerThread docInverterPerThread)
+        {
+            return new NormsWriterPerThread(docInverterPerThread, this);
+        }
+        
+        public override void  Abort()
+        {
+        }
+        
+        // We only write the _X.nrm file at flush
+        internal void  Files(ICollection<string> files)
+        {
+        }
+        
+        internal override void  SetFieldInfos(FieldInfos fieldInfos)
+        {
+            this.fieldInfos = fieldInfos;
+        }
+        
+        /// <summary>Produce _X.nrm if any document had a field with norms
+        /// not disabled 
+        /// </summary>
         public override void Flush(IDictionary<InvertedDocEndConsumerPerThread,ICollection<InvertedDocEndConsumerPerField>> threadsAndFields, SegmentWriteState state)
-		{
+        {
 
             IDictionary<FieldInfo, IList<NormsWriterPerField>> byField = new HashMap<FieldInfo, IList<NormsWriterPerField>>();
-			
-			// Typically, each thread will have encountered the same
-			// field.  So first we collate by field, ie, all
-			// per-thread field instances that correspond to the
-			// same FieldInfo
-			foreach(var entry in threadsAndFields)
-			{
-				ICollection<InvertedDocEndConsumerPerField> fields = entry.Value;
-				IEnumerator<InvertedDocEndConsumerPerField> fieldsIt = fields.GetEnumerator();
-			    var fieldsToRemove = new HashSet<NormsWriterPerField>();
-				while (fieldsIt.MoveNext())
-				{
-					NormsWriterPerField perField = (NormsWriterPerField) fieldsIt.Current;
-					
-					if (perField.upto > 0)
-					{
-						// It has some norms
-						IList<NormsWriterPerField> l = byField[perField.fieldInfo];
-						if (l == null)
-						{
-							l = new List<NormsWriterPerField>();
-							byField[perField.fieldInfo] = l;
-						}
-						l.Add(perField);
-					}
-					// Remove this field since we haven't seen it
-					// since the previous flush
-					else
-					{
+            
+            // Typically, each thread will have encountered the same
+            // field.  So first we collate by field, ie, all
+            // per-thread field instances that correspond to the
+            // same FieldInfo
+            foreach(var entry in threadsAndFields)
+            {
+                ICollection<InvertedDocEndConsumerPerField> fields = entry.Value;
+                IEnumerator<InvertedDocEndConsumerPerField> fieldsIt = fields.GetEnumerator();
+                var fieldsToRemove = new HashSet<NormsWriterPerField>();
+                while (fieldsIt.MoveNext())
+                {
+                    NormsWriterPerField perField = (NormsWriterPerField) fieldsIt.Current;
+                    
+                    if (perField.upto > 0)
+                    {
+                        // It has some norms
+                        IList<NormsWriterPerField> l = byField[perField.fieldInfo];
+                        if (l == null)
+                        {
+                            l = new List<NormsWriterPerField>();
+                            byField[perField.fieldInfo] = l;
+                        }
+                        l.Add(perField);
+                    }
+                    // Remove this field since we haven't seen it
+                    // since the previous flush
+                    else
+                    {
                         fieldsToRemove.Add(perField);
-					}
-				}
+                    }
+                }
                 foreach (var field in fieldsToRemove)
                 {
                     fields.Remove(field);
                 }
-			}
-			
-			System.String normsFileName = state.segmentName + "." + IndexFileNames.NORMS_EXTENSION;
-			state.flushedFiles.Add(normsFileName);
-			IndexOutput normsOut = state.directory.CreateOutput(normsFileName);
-			
-			try
-			{
-				normsOut.WriteBytes(SegmentMerger.NORMS_HEADER, 0, SegmentMerger.NORMS_HEADER.Length);
-				
-				int numField = fieldInfos.Size();
-				
-				int normCount = 0;
-				
-				for (int fieldNumber = 0; fieldNumber < numField; fieldNumber++)
-				{
-					
-					FieldInfo fieldInfo = fieldInfos.FieldInfo(fieldNumber);
-					
-					IList<NormsWriterPerField> toMerge = byField[fieldInfo];
-					int upto = 0;
-					if (toMerge != null)
-					{
-						
-						int numFields = toMerge.Count;
-						
-						normCount++;
-						
-						NormsWriterPerField[] fields = new NormsWriterPerField[numFields];
-						int[] uptos = new int[numFields];
-						
-						for (int j = 0; j < numFields; j++)
-							fields[j] = toMerge[j];
-						
-						int numLeft = numFields;
-						
-						while (numLeft > 0)
-						{
-							
-							System.Diagnostics.Debug.Assert(uptos [0] < fields [0].docIDs.Length, " uptos[0]=" + uptos [0] + " len=" +(fields [0].docIDs.Length));
-							
-							int minLoc = 0;
-							int minDocID = fields[0].docIDs[uptos[0]];
-							
-							for (int j = 1; j < numLeft; j++)
-							{
-								int docID = fields[j].docIDs[uptos[j]];
-								if (docID < minDocID)
-								{
-									minDocID = docID;
-									minLoc = j;
-								}
-							}
-							
-							System.Diagnostics.Debug.Assert(minDocID < state.numDocs);
-							
-							// Fill hole
-							for (; upto < minDocID; upto++)
-								normsOut.WriteByte(defaultNorm);
-							
-							normsOut.WriteByte(fields[minLoc].norms[uptos[minLoc]]);
-							(uptos[minLoc])++;
-							upto++;
-							
-							if (uptos[minLoc] == fields[minLoc].upto)
-							{
-								fields[minLoc].Reset();
-								if (minLoc != numLeft - 1)
-								{
-									fields[minLoc] = fields[numLeft - 1];
-									uptos[minLoc] = uptos[numLeft - 1];
-								}
-								numLeft--;
-							}
-						}
-						
-						// Fill final hole with defaultNorm
-						for (; upto < state.numDocs; upto++)
-							normsOut.WriteByte(defaultNorm);
-					}
-					else if (fieldInfo.isIndexed && !fieldInfo.omitNorms)
-					{
-						normCount++;
-						// Fill entire field with default norm:
-						for (; upto < state.numDocs; upto++)
-							normsOut.WriteByte(defaultNorm);
-					}
-					
-					System.Diagnostics.Debug.Assert(4 + normCount * state.numDocs == normsOut.FilePointer, ".nrm file size mismatch: expected=" +(4 + normCount * state.numDocs) + " actual=" + normsOut.FilePointer);
-				}
-			}
-			finally
-			{
-				normsOut.Close();
-			}
-		}
-		
-		internal override void  CloseDocStore(SegmentWriteState state)
-		{
-		}
-		static NormsWriter()
-		{
-			defaultNorm = Similarity.EncodeNorm(1.0f);
-		}
-	}
+            }
+            
+            System.String normsFileName = state.segmentName + "." + IndexFileNames.NORMS_EXTENSION;
+            state.flushedFiles.Add(normsFileName);
+            IndexOutput normsOut = state.directory.CreateOutput(normsFileName);
+            
+            try
+            {
+                normsOut.WriteBytes(SegmentMerger.NORMS_HEADER, 0, SegmentMerger.NORMS_HEADER.Length);
+                
+                int numField = fieldInfos.Size();
+                
+                int normCount = 0;
+                
+                for (int fieldNumber = 0; fieldNumber < numField; fieldNumber++)
+                {
+                    
+                    FieldInfo fieldInfo = fieldInfos.FieldInfo(fieldNumber);
+                    
+                    IList<NormsWriterPerField> toMerge = byField[fieldInfo];
+                    int upto = 0;
+                    if (toMerge != null)
+                    {
+                        
+                        int numFields = toMerge.Count;
+                        
+                        normCount++;
+                        
+                        NormsWriterPerField[] fields = new NormsWriterPerField[numFields];
+                        int[] uptos = new int[numFields];
+                        
+                        for (int j = 0; j < numFields; j++)
+                            fields[j] = toMerge[j];
+                        
+                        int numLeft = numFields;
+                        
+                        while (numLeft > 0)
+                        {
+                            
+                            System.Diagnostics.Debug.Assert(uptos [0] < fields [0].docIDs.Length, " uptos[0]=" + uptos [0] + " len=" +(fields [0].docIDs.Length));
+                            
+                            int minLoc = 0;
+                            int minDocID = fields[0].docIDs[uptos[0]];
+                            
+                            for (int j = 1; j < numLeft; j++)
+                            {
+                                int docID = fields[j].docIDs[uptos[j]];
+                                if (docID < minDocID)
+                                {
+                                    minDocID = docID;
+                                    minLoc = j;
+                                }
+                            }
+                            
+                            System.Diagnostics.Debug.Assert(minDocID < state.numDocs);
+                            
+                            // Fill hole
+                            for (; upto < minDocID; upto++)
+                                normsOut.WriteByte(defaultNorm);
+                            
+                            normsOut.WriteByte(fields[minLoc].norms[uptos[minLoc]]);
+                            (uptos[minLoc])++;
+                            upto++;
+                            
+                            if (uptos[minLoc] == fields[minLoc].upto)
+                            {
+                                fields[minLoc].Reset();
+                                if (minLoc != numLeft - 1)
+                                {
+                                    fields[minLoc] = fields[numLeft - 1];
+                                    uptos[minLoc] = uptos[numLeft - 1];
+                                }
+                                numLeft--;
+                            }
+                        }
+                        
+                        // Fill final hole with defaultNorm
+                        for (; upto < state.numDocs; upto++)
+                            normsOut.WriteByte(defaultNorm);
+                    }
+                    else if (fieldInfo.isIndexed && !fieldInfo.omitNorms)
+                    {
+                        normCount++;
+                        // Fill entire field with default norm:
+                        for (; upto < state.numDocs; upto++)
+                            normsOut.WriteByte(defaultNorm);
+                    }
+                    
+                    System.Diagnostics.Debug.Assert(4 + normCount * state.numDocs == normsOut.FilePointer, ".nrm file size mismatch: expected=" +(4 + normCount * state.numDocs) + " actual=" + normsOut.FilePointer);
+                }
+            }
+            finally
+            {
+                normsOut.Close();
+            }
+        }
+        
+        internal override void  CloseDocStore(SegmentWriteState state)
+        {
+        }
+        static NormsWriter()
+        {
+            defaultNorm = Similarity.EncodeNorm(1.0f);
+        }
+    }
 }
\ No newline at end of file
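
The core of Flush above is a k-way merge: repeatedly pick the per-field run with the smallest pending docID, write its norm byte, and pad any docID gap with defaultNorm so the .nrm stream stays dense. Below is a standalone sketch of that merge over plain arrays; the 0x7C constant is an assumed stand-in for Similarity.EncodeNorm(1.0f), not a value read from the file format.

    using System;
    using System.Collections.Generic;

    static class NormsMergeDemo
    {
        const byte DefaultNorm = 0x7C; // assumed stand-in for Similarity.EncodeNorm(1.0f)

        // Streaming k-way merge of sorted (docID, norm) runs into a dense
        // sequence of numDocs bytes, padding docID gaps with DefaultNorm --
        // the same shape as the while (numLeft > 0) loop in Flush above.
        static IEnumerable<byte> Merge(int[][] docIDs, byte[][] norms, int numDocs)
        {
            var uptos = new int[docIDs.Length];
            int upto = 0;
            while (true)
            {
                // pick the run whose next docID is smallest
                int minLoc = -1;
                for (int j = 0; j < docIDs.Length; j++)
                {
                    if (uptos[j] == docIDs[j].Length)
                        continue; // run exhausted
                    if (minLoc == -1 || docIDs[j][uptos[j]] < docIDs[minLoc][uptos[minLoc]])
                        minLoc = j;
                }
                if (minLoc == -1)
                    break; // all runs exhausted

                int minDocID = docIDs[minLoc][uptos[minLoc]];
                while (upto < minDocID) { yield return DefaultNorm; upto++; } // fill hole
                yield return norms[minLoc][uptos[minLoc]];
                uptos[minLoc]++;
                upto++;
            }
            while (upto < numDocs) { yield return DefaultNorm; upto++; } // final hole
        }

        static void Main()
        {
            // two per-thread runs over a six-doc segment
            var docIDs = new[] { new[] { 0, 3 }, new[] { 1, 4 } };
            var norms = new[] { new byte[] { 10, 30 }, new byte[] { 11, 41 } };
            foreach (byte b in Merge(docIDs, norms, 6))
                Console.Write(b + " "); // prints: 10 11 124 30 41 124
            Console.WriteLine();
        }
    }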

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/NormsWriterPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/NormsWriterPerField.cs b/src/core/Index/NormsWriterPerField.cs
index 81d45df..9d2153d 100644
--- a/src/core/Index/NormsWriterPerField.cs
+++ b/src/core/Index/NormsWriterPerField.cs
@@ -22,69 +22,69 @@ using Similarity = Lucene.Net.Search.Similarity;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>Taps into DocInverter, as an InvertedDocEndConsumer,
-	/// which is called at the end of inverting each field.  We
-	/// just look at the length for the field (docState.length)
-	/// and record the norm. 
-	/// </summary>
-	
-	sealed class NormsWriterPerField:InvertedDocEndConsumerPerField, System.IComparable<NormsWriterPerField>
-	{
-		
-		internal NormsWriterPerThread perThread;
-		internal FieldInfo fieldInfo;
-		internal DocumentsWriter.DocState docState;
-		
-		// Holds all docID/norm pairs we've seen
-		internal int[] docIDs = new int[1];
-		internal byte[] norms = new byte[1];
-		internal int upto;
-		
-		internal FieldInvertState fieldState;
-		
-		public void  Reset()
-		{
-			// Shrink back if we are overallocated now:
-			docIDs = ArrayUtil.Shrink(docIDs, upto);
-			norms = ArrayUtil.Shrink(norms, upto);
-			upto = 0;
-		}
-		
-		public NormsWriterPerField(DocInverterPerField docInverterPerField, NormsWriterPerThread perThread, FieldInfo fieldInfo)
-		{
-			this.perThread = perThread;
-			this.fieldInfo = fieldInfo;
-			docState = perThread.docState;
-			fieldState = docInverterPerField.fieldState;
-		}
-		
-		internal override void  Abort()
-		{
-			upto = 0;
-		}
-		
-		public int CompareTo(NormsWriterPerField other)
-		{
-			return String.CompareOrdinal(fieldInfo.name, other.fieldInfo.name);
-		}
-		
-		internal override void  Finish()
-		{
-			System.Diagnostics.Debug.Assert(docIDs.Length == norms.Length);
-			if (fieldInfo.isIndexed && !fieldInfo.omitNorms)
-			{
-				if (docIDs.Length <= upto)
-				{
-					System.Diagnostics.Debug.Assert(docIDs.Length == upto);
-					docIDs = ArrayUtil.Grow(docIDs, 1 + upto);
-					norms = ArrayUtil.Grow(norms, 1 + upto);
-				}
-				float norm = docState.similarity.ComputeNorm(fieldInfo.name, fieldState);
-				norms[upto] = Similarity.EncodeNorm(norm);
-				docIDs[upto] = docState.docID;
-				upto++;
-			}
-		}
-	}
+    
+    /// <summary>Taps into DocInverter, as an InvertedDocEndConsumer,
+    /// which is called at the end of inverting each field.  We
+    /// just look at the length for the field (docState.length)
+    /// and record the norm. 
+    /// </summary>
+    
+    sealed class NormsWriterPerField:InvertedDocEndConsumerPerField, System.IComparable<NormsWriterPerField>
+    {
+        
+        internal NormsWriterPerThread perThread;
+        internal FieldInfo fieldInfo;
+        internal DocumentsWriter.DocState docState;
+        
+        // Holds all docID/norm pairs we've seen
+        internal int[] docIDs = new int[1];
+        internal byte[] norms = new byte[1];
+        internal int upto;
+        
+        internal FieldInvertState fieldState;
+        
+        public void  Reset()
+        {
+            // Shrink back if we are overallocated now:
+            docIDs = ArrayUtil.Shrink(docIDs, upto);
+            norms = ArrayUtil.Shrink(norms, upto);
+            upto = 0;
+        }
+        
+        public NormsWriterPerField(DocInverterPerField docInverterPerField, NormsWriterPerThread perThread, FieldInfo fieldInfo)
+        {
+            this.perThread = perThread;
+            this.fieldInfo = fieldInfo;
+            docState = perThread.docState;
+            fieldState = docInverterPerField.fieldState;
+        }
+        
+        internal override void  Abort()
+        {
+            upto = 0;
+        }
+        
+        public int CompareTo(NormsWriterPerField other)
+        {
+            return String.CompareOrdinal(fieldInfo.name, other.fieldInfo.name);
+        }
+        
+        internal override void  Finish()
+        {
+            System.Diagnostics.Debug.Assert(docIDs.Length == norms.Length);
+            if (fieldInfo.isIndexed && !fieldInfo.omitNorms)
+            {
+                if (docIDs.Length <= upto)
+                {
+                    System.Diagnostics.Debug.Assert(docIDs.Length == upto);
+                    docIDs = ArrayUtil.Grow(docIDs, 1 + upto);
+                    norms = ArrayUtil.Grow(norms, 1 + upto);
+                }
+                float norm = docState.similarity.ComputeNorm(fieldInfo.name, fieldState);
+                norms[upto] = Similarity.EncodeNorm(norm);
+                docIDs[upto] = docState.docID;
+                upto++;
+            }
+        }
+    }
 }
\ No newline at end of file
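
NormsWriterPerField above appends one (docID, encoded norm) pair per inverted document and grows its parallel arrays on demand. A small sketch of that append-and-grow pattern, with Array.Resize standing in for ArrayUtil.Grow:

    using System;

    class NormRecorder
    {
        // parallel arrays, grown together -- mirrors docIDs/norms/upto above
        private int[] _docIDs = new int[1];
        private byte[] _norms = new byte[1];
        private int _upto;

        public void Record(int docID, byte encodedNorm)
        {
            if (_docIDs.Length <= _upto)
            {
                // grow both arrays in lockstep (ArrayUtil.Grow in the real code)
                Array.Resize(ref _docIDs, _docIDs.Length * 2);
                Array.Resize(ref _norms, _norms.Length * 2);
            }
            _docIDs[_upto] = docID;
            _norms[_upto] = encodedNorm;
            _upto++;
        }

        // Reset truncates logically; the real code also shrinks the arrays.
        public void Reset() { _upto = 0; }

        static void Main()
        {
            var r = new NormRecorder();
            for (int doc = 0; doc < 5; doc++)
                r.Record(doc, (byte)(100 + doc));
            Console.WriteLine("recorded " + r._upto + " norms");
        }
    }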

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/NormsWriterPerThread.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/NormsWriterPerThread.cs b/src/core/Index/NormsWriterPerThread.cs
index d5cd5ed..6d83f84 100644
--- a/src/core/Index/NormsWriterPerThread.cs
+++ b/src/core/Index/NormsWriterPerThread.cs
@@ -19,37 +19,37 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	sealed class NormsWriterPerThread:InvertedDocEndConsumerPerThread
-	{
-		internal NormsWriter normsWriter;
-		internal DocumentsWriter.DocState docState;
-		
-		public NormsWriterPerThread(DocInverterPerThread docInverterPerThread, NormsWriter normsWriter)
-		{
-			this.normsWriter = normsWriter;
-			docState = docInverterPerThread.docState;
-		}
-		
-		internal override InvertedDocEndConsumerPerField AddField(DocInverterPerField docInverterPerField, FieldInfo fieldInfo)
-		{
-			return new NormsWriterPerField(docInverterPerField, this, fieldInfo);
-		}
-		
-		internal override void  Abort()
-		{
-		}
-		
-		internal override void  StartDocument()
-		{
-		}
-		internal override void  FinishDocument()
-		{
-		}
-		
-		internal bool FreeRAM()
-		{
-			return false;
-		}
-	}
+    
+    sealed class NormsWriterPerThread:InvertedDocEndConsumerPerThread
+    {
+        internal NormsWriter normsWriter;
+        internal DocumentsWriter.DocState docState;
+        
+        public NormsWriterPerThread(DocInverterPerThread docInverterPerThread, NormsWriter normsWriter)
+        {
+            this.normsWriter = normsWriter;
+            docState = docInverterPerThread.docState;
+        }
+        
+        internal override InvertedDocEndConsumerPerField AddField(DocInverterPerField docInverterPerField, FieldInfo fieldInfo)
+        {
+            return new NormsWriterPerField(docInverterPerField, this, fieldInfo);
+        }
+        
+        internal override void  Abort()
+        {
+        }
+        
+        internal override void  StartDocument()
+        {
+        }
+        internal override void  FinishDocument()
+        {
+        }
+        
+        internal bool FreeRAM()
+        {
+            return false;
+        }
+    }
 }
\ No newline at end of file
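
NormsWriterPerThread is plumbing for the consumer fan-out: the writer gives each indexing thread its own consumer, and every per-field consumer created through AddField shares that thread's DocState. A minimal sketch of that sharing; all names here are hypothetical, not the Lucene.NET types:

    using System;

    class DocState
    {
        public int DocID;
    }

    class PerFieldConsumer
    {
        public readonly string Field;
        public readonly DocState DocState;

        public PerFieldConsumer(string field, DocState docState)
        {
            Field = field;
            DocState = docState; // shared with every field of this thread
        }
    }

    class PerThreadConsumer
    {
        public readonly DocState DocState = new DocState();

        public PerFieldConsumer AddField(string field)
        {
            return new PerFieldConsumer(field, DocState);
        }
    }

    static class FanOutDemo
    {
        static void Main()
        {
            var thread = new PerThreadConsumer();
            PerFieldConsumer body = thread.AddField("body");
            PerFieldConsumer title = thread.AddField("title");
            thread.DocState.DocID = 42;
            // both per-field consumers observe the same doc state
            Console.WriteLine(body.DocState.DocID + " " + title.DocState.DocID); // 42 42
        }
    }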


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocumentsWriterThreadState.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocumentsWriterThreadState.cs b/src/core/Index/DocumentsWriterThreadState.cs
index e20fbee..255a2ae 100644
--- a/src/core/Index/DocumentsWriterThreadState.cs
+++ b/src/core/Index/DocumentsWriterThreadState.cs
@@ -19,38 +19,38 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>Used by DocumentsWriter to maintain per-thread state.
-	/// We keep a separate Posting hash and other state for each
-	/// thread and then merge postings hashes from all threads
-	/// when writing the segment. 
-	/// </summary>
-	sealed class DocumentsWriterThreadState
-	{
-		
-		internal bool isIdle = true; // false if this is currently in use by a thread
-		internal int numThreads = 1; // Number of threads that share this instance
-		internal bool doFlushAfter; // true if we should flush after processing current doc
-		internal DocConsumerPerThread consumer;
-		internal DocumentsWriter.DocState docState;
-		
-		internal DocumentsWriter docWriter;
-		
-		public DocumentsWriterThreadState(DocumentsWriter docWriter)
-		{
-			this.docWriter = docWriter;
-			docState = new DocumentsWriter.DocState();
-			docState.maxFieldLength = docWriter.maxFieldLength;
-			docState.infoStream = docWriter.infoStream;
-			docState.similarity = docWriter.similarity;
-			docState.docWriter = docWriter;
-			consumer = docWriter.consumer.AddThread(this);
-		}
-		
-		internal void  DoAfterFlush()
-		{
-			numThreads = 0;
-			doFlushAfter = false;
-		}
-	}
+    
+    /// <summary>Used by DocumentsWriter to maintain per-thread state.
+    /// We keep a separate Posting hash and other state for each
+    /// thread and then merge postings hashes from all threads
+    /// when writing the segment. 
+    /// </summary>
+    sealed class DocumentsWriterThreadState
+    {
+        
+        internal bool isIdle = true; // false if this is currently in use by a thread
+        internal int numThreads = 1; // Number of threads that share this instance
+        internal bool doFlushAfter; // true if we should flush after processing current doc
+        internal DocConsumerPerThread consumer;
+        internal DocumentsWriter.DocState docState;
+        
+        internal DocumentsWriter docWriter;
+        
+        public DocumentsWriterThreadState(DocumentsWriter docWriter)
+        {
+            this.docWriter = docWriter;
+            docState = new DocumentsWriter.DocState();
+            docState.maxFieldLength = docWriter.maxFieldLength;
+            docState.infoStream = docWriter.infoStream;
+            docState.similarity = docWriter.similarity;
+            docState.docWriter = docWriter;
+            consumer = docWriter.consumer.AddThread(this);
+        }
+        
+        internal void  DoAfterFlush()
+        {
+            numThreads = 0;
+            doFlushAfter = false;
+        }
+    }
 }
\ No newline at end of file
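
DocumentsWriter itself is not part of this diff, but it hands each incoming thread one of a small pool of these states: reuse an idle state, grow the pool up to a cap, otherwise share the least-loaded state. A rough sketch under that assumption; the pool cap and the omitted check-in/release logic are illustrative only:

    using System;
    using System.Collections.Generic;
    using System.Linq;

    class ThreadState
    {
        public bool IsIdle = true; // false while a thread is using it
        public int NumThreads = 1; // number of threads sharing this instance
    }

    static class ThreadStatePool
    {
        private static readonly List<ThreadState> States = new List<ThreadState>();
        private const int MaxStates = 5; // assumed cap, for illustration

        static ThreadState Checkout()
        {
            var idle = States.FirstOrDefault(s => s.IsIdle);
            if (idle != null)
            {
                idle.IsIdle = false;
                return idle;
            }
            if (States.Count < MaxStates)
            {
                var st = new ThreadState { IsIdle = false };
                States.Add(st);
                return st;
            }
            // every state is busy: share the least-loaded one
            var shared = States.OrderBy(s => s.NumThreads).First();
            shared.NumThreads++;
            return shared;
        }

        static void Main()
        {
            for (int i = 0; i < 7; i++)
                Checkout();
            Console.WriteLine("states=" + States.Count); // capped at 5
        }
    }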

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FieldInfo.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FieldInfo.cs b/src/core/Index/FieldInfo.cs
index bfca8af..d74ac4e 100644
--- a/src/core/Index/FieldInfo.cs
+++ b/src/core/Index/FieldInfo.cs
@@ -19,89 +19,89 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	public sealed class FieldInfo : System.ICloneable
-	{
-		internal System.String name;
-		internal bool isIndexed;
-		internal int number;
-		
-		// true if term vector for this field should be stored
-		internal bool storeTermVector;
-		internal bool storeOffsetWithTermVector;
-		internal bool storePositionWithTermVector;
-		
-		internal bool omitNorms; // omit norms associated with indexed fields  
-		internal bool omitTermFreqAndPositions;
-		
-		internal bool storePayloads; // whether this field stores payloads together with term positions
-		
-		internal FieldInfo(System.String na, bool tk, int nu, bool storeTermVector, bool storePositionWithTermVector, bool storeOffsetWithTermVector, bool omitNorms, bool storePayloads, bool omitTermFreqAndPositions)
-		{
-			name = na;
-			isIndexed = tk;
-			number = nu;
-			if (isIndexed)
-			{
-				this.storeTermVector = storeTermVector;
-				this.storeOffsetWithTermVector = storeOffsetWithTermVector;
-				this.storePositionWithTermVector = storePositionWithTermVector;
-				this.storePayloads = storePayloads;
-				this.omitNorms = omitNorms;
-				this.omitTermFreqAndPositions = omitTermFreqAndPositions;
-			}
-			else
-			{
-				// for non-indexed fields, leave defaults
-				this.storeTermVector = false;
-				this.storeOffsetWithTermVector = false;
-				this.storePositionWithTermVector = false;
-				this.storePayloads = false;
-				this.omitNorms = true;
-				this.omitTermFreqAndPositions = false;
-			}
-		}
-		
-		public System.Object Clone()
-		{
-			return new FieldInfo(name, isIndexed, number, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
-		}
-		
-		internal void  Update(bool isIndexed, bool storeTermVector, bool storePositionWithTermVector, bool storeOffsetWithTermVector, bool omitNorms, bool storePayloads, bool omitTermFreqAndPositions)
-		{
-			if (this.isIndexed != isIndexed)
-			{
-				this.isIndexed = true; // once indexed, always index
-			}
-			if (isIndexed)
-			{
-				// if updated field data is not for indexing, leave the updates out
-				if (this.storeTermVector != storeTermVector)
-				{
-					this.storeTermVector = true; // once vector, always vector
-				}
-				if (this.storePositionWithTermVector != storePositionWithTermVector)
-				{
-					this.storePositionWithTermVector = true; // once vector, always vector
-				}
-				if (this.storeOffsetWithTermVector != storeOffsetWithTermVector)
-				{
-					this.storeOffsetWithTermVector = true; // once vector, always vector
-				}
-				if (this.storePayloads != storePayloads)
-				{
-					this.storePayloads = true;
-				}
-				if (this.omitNorms != omitNorms)
-				{
-					this.omitNorms = false; // once norms are stored, always store
-				}
-				if (this.omitTermFreqAndPositions != omitTermFreqAndPositions)
-				{
-					this.omitTermFreqAndPositions = true; // if one require omitTermFreqAndPositions at least once, it remains off for life
-				}
-			}
-		}
+    
+    public sealed class FieldInfo : System.ICloneable
+    {
+        internal System.String name;
+        internal bool isIndexed;
+        internal int number;
+        
+        // true if term vector for this field should be stored
+        internal bool storeTermVector;
+        internal bool storeOffsetWithTermVector;
+        internal bool storePositionWithTermVector;
+        
+        internal bool omitNorms; // omit norms associated with indexed fields  
+        internal bool omitTermFreqAndPositions;
+        
+        internal bool storePayloads; // whether this field stores payloads together with term positions
+        
+        internal FieldInfo(System.String na, bool tk, int nu, bool storeTermVector, bool storePositionWithTermVector, bool storeOffsetWithTermVector, bool omitNorms, bool storePayloads, bool omitTermFreqAndPositions)
+        {
+            name = na;
+            isIndexed = tk;
+            number = nu;
+            if (isIndexed)
+            {
+                this.storeTermVector = storeTermVector;
+                this.storeOffsetWithTermVector = storeOffsetWithTermVector;
+                this.storePositionWithTermVector = storePositionWithTermVector;
+                this.storePayloads = storePayloads;
+                this.omitNorms = omitNorms;
+                this.omitTermFreqAndPositions = omitTermFreqAndPositions;
+            }
+            else
+            {
+                // for non-indexed fields, leave defaults
+                this.storeTermVector = false;
+                this.storeOffsetWithTermVector = false;
+                this.storePositionWithTermVector = false;
+                this.storePayloads = false;
+                this.omitNorms = true;
+                this.omitTermFreqAndPositions = false;
+            }
+        }
+        
+        public System.Object Clone()
+        {
+            return new FieldInfo(name, isIndexed, number, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
+        }
+        
+        internal void  Update(bool isIndexed, bool storeTermVector, bool storePositionWithTermVector, bool storeOffsetWithTermVector, bool omitNorms, bool storePayloads, bool omitTermFreqAndPositions)
+        {
+            if (this.isIndexed != isIndexed)
+            {
+                this.isIndexed = true; // once indexed, always index
+            }
+            if (isIndexed)
+            {
+                // if updated field data is not for indexing, leave the updates out
+                if (this.storeTermVector != storeTermVector)
+                {
+                    this.storeTermVector = true; // once vector, always vector
+                }
+                if (this.storePositionWithTermVector != storePositionWithTermVector)
+                {
+                    this.storePositionWithTermVector = true; // once vector, always vector
+                }
+                if (this.storeOffsetWithTermVector != storeOffsetWithTermVector)
+                {
+                    this.storeOffsetWithTermVector = true; // once vector, always vector
+                }
+                if (this.storePayloads != storePayloads)
+                {
+                    this.storePayloads = true;
+                }
+                if (this.omitNorms != omitNorms)
+                {
+                    this.omitNorms = false; // once norms are stored, always store
+                }
+                if (this.omitTermFreqAndPositions != omitTermFreqAndPositions)
+                {
+                    this.omitTermFreqAndPositions = true; // once omitTermFreqAndPositions is requested for a field, term freqs and positions stay omitted for its lifetime
+                }
+            }
+        }
 
         public bool storePayloads_ForNUnit
         {
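
Update above merges field flags monotonically: isIndexed and the term-vector options only ever turn on, omitNorms turns back off once any occurrence stores norms, and omitTermFreqAndPositions sticks once requested. The same rules expressed as plain boolean algebra (note the real code only folds in the vector and payload flags when the incoming occurrence is indexed):

    using System;

    struct Flags
    {
        public bool IsIndexed, StoreTermVector, OmitNorms, OmitTf;
    }

    static class FieldInfoMergeDemo
    {
        // Sticky merge, same direction as FieldInfo.Update above.
        static Flags Merge(Flags a, Flags b)
        {
            return new Flags
            {
                IsIndexed = a.IsIndexed | b.IsIndexed,                   // once indexed, always indexed
                StoreTermVector = a.StoreTermVector | b.StoreTermVector, // once vector, always vector
                OmitNorms = a.OmitNorms & b.OmitNorms,                   // norms return once any occurrence has them
                OmitTf = a.OmitTf | b.OmitTf                             // omitTermFreqAndPositions sticks
            };
        }

        static void Main()
        {
            var x = new Flags { IsIndexed = true, OmitNorms = true };
            var y = new Flags { StoreTermVector = true, OmitNorms = false };
            Flags m = Merge(x, y);
            Console.WriteLine("indexed=" + m.IsIndexed
                + " vector=" + m.StoreTermVector
                + " omitNorms=" + m.OmitNorms
                + " omitTf=" + m.OmitTf);
            // indexed=True vector=True omitNorms=False omitTf=False
        }
    }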

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FieldInfos.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FieldInfos.cs b/src/core/Index/FieldInfos.cs
index 8c9cae6..7ff523d 100644
--- a/src/core/Index/FieldInfos.cs
+++ b/src/core/Index/FieldInfos.cs
@@ -26,102 +26,102 @@ using StringHelper = Lucene.Net.Util.StringHelper;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>Access to the Fieldable Info file that describes document fields and whether or
-	/// not they are indexed. Each segment has a separate Fieldable Info file. Objects
-	/// of this class are thread-safe for multiple readers, but only one thread can
-	/// be adding documents at a time, with no other reader or writer threads
-	/// accessing this object.
-	/// </summary>
-	public sealed class FieldInfos : ICloneable
-	{
-		
-		// Used internally (ie not written to *.fnm files) for pre-2.9 files
-		public const int FORMAT_PRE = - 1;
-		
-		// First used in 2.9; prior to 2.9 there was no format header
-		public const int FORMAT_START = - 2;
-		
-		internal static readonly int CURRENT_FORMAT = FORMAT_START;
-		
-		internal const byte IS_INDEXED = (0x1);
-		internal const byte STORE_TERMVECTOR = (0x2);
-		internal const byte STORE_POSITIONS_WITH_TERMVECTOR =(0x4);
-		internal const byte STORE_OFFSET_WITH_TERMVECTOR = (0x8);
-		internal const byte OMIT_NORMS = (0x10);
-		internal const byte STORE_PAYLOADS = (0x20);
-		internal const byte OMIT_TERM_FREQ_AND_POSITIONS = (0x40);
+    
+    /// <summary>Access to the Fieldable Info file that describes document fields and whether or
+    /// not they are indexed. Each segment has a separate Fieldable Info file. Objects
+    /// of this class are thread-safe for multiple readers, but only one thread can
+    /// be adding documents at a time, with no other reader or writer threads
+    /// accessing this object.
+    /// </summary>
+    public sealed class FieldInfos : ICloneable
+    {
+        
+        // Used internally (ie not written to *.fnm files) for pre-2.9 files
+        public const int FORMAT_PRE = - 1;
+        
+        // First used in 2.9; prior to 2.9 there was no format header
+        public const int FORMAT_START = - 2;
+        
+        internal static readonly int CURRENT_FORMAT = FORMAT_START;
+        
+        internal const byte IS_INDEXED = (0x1);
+        internal const byte STORE_TERMVECTOR = (0x2);
+        internal const byte STORE_POSITIONS_WITH_TERMVECTOR =(0x4);
+        internal const byte STORE_OFFSET_WITH_TERMVECTOR = (0x8);
+        internal const byte OMIT_NORMS = (0x10);
+        internal const byte STORE_PAYLOADS = (0x20);
+        internal const byte OMIT_TERM_FREQ_AND_POSITIONS = (0x40);
 
         private readonly System.Collections.Generic.List<FieldInfo> byNumber = new System.Collections.Generic.List<FieldInfo>();
         private readonly HashMap<string, FieldInfo> byName = new HashMap<string, FieldInfo>();
-		private int format;
-		
-		public /*internal*/ FieldInfos()
-		{
-		}
-		
-		/// <summary> Construct a FieldInfos object using the directory and the name of the file
-		/// IndexInput
-		/// </summary>
-		/// <param name="d">The directory to open the IndexInput from
-		/// </param>
-		/// <param name="name">The name of the file to open the IndexInput from in the Directory
-		/// </param>
-		/// <throws>  IOException </throws>
-		public /*internal*/ FieldInfos(Directory d, String name)
-		{
-			IndexInput input = d.OpenInput(name);
-			try
-			{
-				try
-				{
-					Read(input, name);
-				}
-				catch (System.IO.IOException)
-				{
-					if (format == FORMAT_PRE)
-					{
-						// LUCENE-1623: FORMAT_PRE (before there was a
-						// format) may be 2.3.2 (pre-utf8) or 2.4.x (utf8)
-						// encoding; retry with input set to pre-utf8
-						input.Seek(0);
-						input.SetModifiedUTF8StringsMode();
-						byNumber.Clear();
-						byName.Clear();
+        private int format;
+        
+        public /*internal*/ FieldInfos()
+        {
+        }
+        
+        /// <summary> Construct a FieldInfos object using the directory and the name of the file
+        /// IndexInput
+        /// </summary>
+        /// <param name="d">The directory to open the IndexInput from
+        /// </param>
+        /// <param name="name">The name of the file to open the IndexInput from in the Directory
+        /// </param>
+        /// <throws>  IOException </throws>
+        public /*internal*/ FieldInfos(Directory d, String name)
+        {
+            IndexInput input = d.OpenInput(name);
+            try
+            {
+                try
+                {
+                    Read(input, name);
+                }
+                catch (System.IO.IOException)
+                {
+                    if (format == FORMAT_PRE)
+                    {
+                        // LUCENE-1623: FORMAT_PRE (before there was a
+                        // format) may be 2.3.2 (pre-utf8) or 2.4.x (utf8)
+                        // encoding; retry with input set to pre-utf8
+                        input.Seek(0);
+                        input.SetModifiedUTF8StringsMode();
+                        byNumber.Clear();
+                        byName.Clear();
 
-					    bool rethrow = false;
-						try
-						{
-							Read(input, name);
-						}
-						catch (Exception)
+                        bool rethrow = false;
+                        try
+                        {
+                            Read(input, name);
+                        }
+                        catch (Exception)
                         {
                             // Ignore any new exception & set to throw original IOE
-						    rethrow = true;
-						}
+                            rethrow = true;
+                        }
                         if(rethrow)
                         {
                             // Preserve stack trace
                             throw;
                         }
-					}
-					else
-					{
-						// The IOException cannot be caused by
-						// LUCENE-1623, so re-throw it
-						throw;
-					}
-				}
-			}
-			finally
-			{
-				input.Close();
-			}
-		}
-		
-		/// <summary> Returns a deep clone of this FieldInfos instance.</summary>
-		public Object Clone()
-		{
+                    }
+                    else
+                    {
+                        // The IOException cannot be caused by
+                        // LUCENE-1623, so re-throw it
+                        throw;
+                    }
+                }
+            }
+            finally
+            {
+                input.Close();
+            }
+        }
+        
+        /// <summary> Returns a deep clone of this FieldInfos instance.</summary>
+        public Object Clone()
+        {
             lock (this)
             {
                 var fis = new FieldInfos();
@@ -134,358 +134,358 @@ namespace Lucene.Net.Index
                 }
                 return fis;
             }
-		}
-		
-		/// <summary>Adds field info for a Document. </summary>
-		public void  Add(Document doc)
-		{
-			lock (this)
-			{
-				System.Collections.Generic.IList<IFieldable> fields = doc.GetFields();
+        }
+        
+        /// <summary>Adds field info for a Document. </summary>
+        public void  Add(Document doc)
+        {
+            lock (this)
+            {
+                System.Collections.Generic.IList<IFieldable> fields = doc.GetFields();
                 foreach(IFieldable field in fields)
                 {
                     Add(field.Name, field.IsIndexed, field.IsTermVectorStored,
                         field.IsStorePositionWithTermVector, field.IsStoreOffsetWithTermVector, field.OmitNorms,
                         false, field.OmitTermFreqAndPositions);
                 }
-			}
-		}
-		
-		/// <summary>Returns true if any fields do not omitTermFreqAndPositions </summary>
-		internal bool HasProx()
-		{
-			int numFields = byNumber.Count;
-			for (int i = 0; i < numFields; i++)
-			{
-				FieldInfo fi = FieldInfo(i);
-				if (fi.isIndexed && !fi.omitTermFreqAndPositions)
-				{
-					return true;
-				}
-			}
-			return false;
-		}
-		
-		/// <summary> Add fields that are indexed. Whether they have termvectors has to be specified.
-		/// 
-		/// </summary>
-		/// <param name="names">The names of the fields
-		/// </param>
-		/// <param name="storeTermVectors">Whether the fields store term vectors or not
-		/// </param>
-		/// <param name="storePositionWithTermVector">true if positions should be stored.
-		/// </param>
-		/// <param name="storeOffsetWithTermVector">true if offsets should be stored
-		/// </param>
-		public void  AddIndexed(System.Collections.Generic.ICollection<string> names, bool storeTermVectors, bool storePositionWithTermVector, bool storeOffsetWithTermVector)
-		{
-			lock (this)
-			{
-				foreach(string name in names)
-				{
-					Add(name, true, storeTermVectors, storePositionWithTermVector, storeOffsetWithTermVector);
-				}
-			}
-		}
-		
-		/// <summary> Assumes the fields are not storing term vectors.
-		/// 
-		/// </summary>
-		/// <param name="names">The names of the fields
-		/// </param>
-		/// <param name="isIndexed">Whether the fields are indexed or not
-		/// 
-		/// </param>
-		/// <seealso cref="Add(String, bool)">
-		/// </seealso>
+            }
+        }
+        
+        /// <summary>Returns true if any indexed field does not omit term freqs and positions. </summary>
+        internal bool HasProx()
+        {
+            int numFields = byNumber.Count;
+            for (int i = 0; i < numFields; i++)
+            {
+                FieldInfo fi = FieldInfo(i);
+                if (fi.isIndexed && !fi.omitTermFreqAndPositions)
+                {
+                    return true;
+                }
+            }
+            return false;
+        }
+        
+        /// <summary> Add fields that are indexed. Whether they have termvectors has to be specified.
+        /// 
+        /// </summary>
+        /// <param name="names">The names of the fields
+        /// </param>
+        /// <param name="storeTermVectors">Whether the fields store term vectors or not
+        /// </param>
+        /// <param name="storePositionWithTermVector">true if positions should be stored.
+        /// </param>
+        /// <param name="storeOffsetWithTermVector">true if offsets should be stored
+        /// </param>
+        public void  AddIndexed(System.Collections.Generic.ICollection<string> names, bool storeTermVectors, bool storePositionWithTermVector, bool storeOffsetWithTermVector)
+        {
+            lock (this)
+            {
+                foreach(string name in names)
+                {
+                    Add(name, true, storeTermVectors, storePositionWithTermVector, storeOffsetWithTermVector);
+                }
+            }
+        }
+        
+        /// <summary> Assumes the fields are not storing term vectors.
+        /// 
+        /// </summary>
+        /// <param name="names">The names of the fields
+        /// </param>
+        /// <param name="isIndexed">Whether the fields are indexed or not
+        /// 
+        /// </param>
+        /// <seealso cref="Add(String, bool)">
+        /// </seealso>
         public void Add(System.Collections.Generic.ICollection<string> names, bool isIndexed)
-		{
-			lock (this)
-			{
-				foreach(string name in names)
-				{
-					Add(name, isIndexed);
-				}
-			}
-		}
-		
-		/// <summary> Calls 5 parameter add with false for all TermVector parameters.
-		/// 
-		/// </summary>
-		/// <param name="name">The name of the Fieldable
-		/// </param>
-		/// <param name="isIndexed">true if the field is indexed
-		/// </param>
+        {
+            lock (this)
+            {
+                foreach(string name in names)
+                {
+                    Add(name, isIndexed);
+                }
+            }
+        }
+        
+        /// <summary> Calls 5 parameter add with false for all TermVector parameters.
+        /// 
+        /// </summary>
+        /// <param name="name">The name of the Fieldable
+        /// </param>
+        /// <param name="isIndexed">true if the field is indexed
+        /// </param>
         /// <seealso cref="Add(String, bool, bool, bool, bool)">
-		/// </seealso>
-		public void  Add(String name, bool isIndexed)
-		{
-			lock (this)
-			{
-				Add(name, isIndexed, false, false, false, false);
-			}
-		}
-		
-		/// <summary> Calls 5 parameter add with false for term vector positions and offsets.
-		/// 
-		/// </summary>
-		/// <param name="name">The name of the field
-		/// </param>
-		/// <param name="isIndexed"> true if the field is indexed
-		/// </param>
-		/// <param name="storeTermVector">true if the term vector should be stored
-		/// </param>
-		public void  Add(System.String name, bool isIndexed, bool storeTermVector)
-		{
-			lock (this)
-			{
-				Add(name, isIndexed, storeTermVector, false, false, false);
-			}
-		}
-		
-		/// <summary>If the field is not yet known, adds it. If it is known, checks to make
-		/// sure that the isIndexed flag is the same as was given previously for this
-		/// field. If not - marks it as being indexed.  Same goes for the TermVector
-		/// parameters.
-		/// 
-		/// </summary>
-		/// <param name="name">The name of the field
-		/// </param>
-		/// <param name="isIndexed">true if the field is indexed
-		/// </param>
-		/// <param name="storeTermVector">true if the term vector should be stored
-		/// </param>
-		/// <param name="storePositionWithTermVector">true if the term vector with positions should be stored
-		/// </param>
-		/// <param name="storeOffsetWithTermVector">true if the term vector with offsets should be stored
-		/// </param>
-		public void  Add(System.String name, bool isIndexed, bool storeTermVector, bool storePositionWithTermVector, bool storeOffsetWithTermVector)
-		{
-			lock (this)
-			{
-				
-				Add(name, isIndexed, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, false);
-			}
-		}
-		
-		/// <summary>If the field is not yet known, adds it. If it is known, checks to make
-		/// sure that the isIndexed flag is the same as was given previously for this
-		/// field. If not - marks it as being indexed.  Same goes for the TermVector
-		/// parameters.
-		/// 
-		/// </summary>
-		/// <param name="name">The name of the field
-		/// </param>
-		/// <param name="isIndexed">true if the field is indexed
-		/// </param>
-		/// <param name="storeTermVector">true if the term vector should be stored
-		/// </param>
-		/// <param name="storePositionWithTermVector">true if the term vector with positions should be stored
-		/// </param>
-		/// <param name="storeOffsetWithTermVector">true if the term vector with offsets should be stored
-		/// </param>
-		/// <param name="omitNorms">true if the norms for the indexed field should be omitted
-		/// </param>
-		public void  Add(System.String name, bool isIndexed, bool storeTermVector, bool storePositionWithTermVector, bool storeOffsetWithTermVector, bool omitNorms)
-		{
-			lock (this)
-			{
-				Add(name, isIndexed, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, false, false);
-			}
-		}
-		
-		/// <summary>If the field is not yet known, adds it. If it is known, checks to make
-		/// sure that the isIndexed flag is the same as was given previously for this
-		/// field. If not - marks it as being indexed.  Same goes for the TermVector
-		/// parameters.
-		/// 
-		/// </summary>
-		/// <param name="name">The name of the field
-		/// </param>
-		/// <param name="isIndexed">true if the field is indexed
-		/// </param>
-		/// <param name="storeTermVector">true if the term vector should be stored
-		/// </param>
-		/// <param name="storePositionWithTermVector">true if the term vector with positions should be stored
-		/// </param>
-		/// <param name="storeOffsetWithTermVector">true if the term vector with offsets should be stored
-		/// </param>
-		/// <param name="omitNorms">true if the norms for the indexed field should be omitted
-		/// </param>
-		/// <param name="storePayloads">true if payloads should be stored for this field
-		/// </param>
-		/// <param name="omitTermFreqAndPositions">true if term freqs should be omitted for this field
-		/// </param>
-		public FieldInfo Add(System.String name, bool isIndexed, bool storeTermVector, bool storePositionWithTermVector, bool storeOffsetWithTermVector, bool omitNorms, bool storePayloads, bool omitTermFreqAndPositions)
-		{
-			lock (this)
-			{
-				FieldInfo fi = FieldInfo(name);
-				if (fi == null)
-				{
-					return AddInternal(name, isIndexed, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
-				}
-				else
-				{
-					fi.Update(isIndexed, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
-				}
-				return fi;
-			}
-		}
-		
-		private FieldInfo AddInternal(String name, bool isIndexed, bool storeTermVector, bool storePositionWithTermVector, bool storeOffsetWithTermVector, bool omitNorms, bool storePayloads, bool omitTermFreqAndPositions)
-		{
-			name = StringHelper.Intern(name);
-			var fi = new FieldInfo(name, isIndexed, byNumber.Count, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
-			byNumber.Add(fi);
-			byName[name] = fi;
-			return fi;
-		}
-		
-		public int FieldNumber(System.String fieldName)
-		{
-			FieldInfo fi = FieldInfo(fieldName);
-			return (fi != null)?fi.number:- 1;
-		}
-		
-		public FieldInfo FieldInfo(System.String fieldName)
-		{
-			return byName[fieldName];
-		}
-		
-		/// <summary> Return the fieldName identified by its number.
-		/// 
-		/// </summary>
-		/// <param name="fieldNumber">
-		/// </param>
-		/// <returns> the fieldName or an empty string when the field
-		/// with the given number doesn't exist.
-		/// </returns>
-		public System.String FieldName(int fieldNumber)
-		{
-		    FieldInfo fi = FieldInfo(fieldNumber);
-		    return (fi != null) ? fi.name : "";
-		}
-		
-		/// <summary> Return the fieldinfo object referenced by the fieldNumber.</summary>
-		/// <param name="fieldNumber">
-		/// </param>
-		/// <returns> the FieldInfo object or null when the given fieldNumber
-		/// doesn't exist.
-		/// </returns>
-		public FieldInfo FieldInfo(int fieldNumber)
-		{
-		    return (fieldNumber >= 0) ? byNumber[fieldNumber] : null;
-		}
-		
-		public int Size()
-		{
-			return byNumber.Count;
-		}
-		
-		public bool HasVectors()
-		{
-			bool hasVectors = false;
-			for (int i = 0; i < Size(); i++)
-			{
-				if (FieldInfo(i).storeTermVector)
-				{
-					hasVectors = true;
-					break;
-				}
-			}
-			return hasVectors;
-		}
-		
-		public void  Write(Directory d, System.String name)
-		{
-			IndexOutput output = d.CreateOutput(name);
-			try
-			{
-				Write(output);
-			}
-			finally
-			{
-				output.Close();
-			}
-		}
-		
-		public void  Write(IndexOutput output)
-		{
-			output.WriteVInt(CURRENT_FORMAT);
-			output.WriteVInt(Size());
-			for (int i = 0; i < Size(); i++)
-			{
-				FieldInfo fi = FieldInfo(i);
-				var bits = (byte) (0x0);
-				if (fi.isIndexed)
-					bits |= IS_INDEXED;
-				if (fi.storeTermVector)
-					bits |= STORE_TERMVECTOR;
-				if (fi.storePositionWithTermVector)
-					bits |= STORE_POSITIONS_WITH_TERMVECTOR;
-				if (fi.storeOffsetWithTermVector)
-					bits |= STORE_OFFSET_WITH_TERMVECTOR;
-				if (fi.omitNorms)
-					bits |= OMIT_NORMS;
-				if (fi.storePayloads)
-					bits |= STORE_PAYLOADS;
-				if (fi.omitTermFreqAndPositions)
-					bits |= OMIT_TERM_FREQ_AND_POSITIONS;
-				
-				output.WriteString(fi.name);
-				output.WriteByte(bits);
-			}
-		}
-		
-		private void  Read(IndexInput input, String fileName)
-		{
-			int firstInt = input.ReadVInt();
-			
-			if (firstInt < 0)
-			{
-				// This is a real format
-				format = firstInt;
-			}
-			else
-			{
-				format = FORMAT_PRE;
-			}
-			
-			if (format != FORMAT_PRE & format != FORMAT_START)
-			{
-				throw new CorruptIndexException("unrecognized format " + format + " in file \"" + fileName + "\"");
-			}
-			
-			int size;
-			if (format == FORMAT_PRE)
-			{
-				size = firstInt;
-			}
-			else
-			{
-				size = input.ReadVInt(); //read in the size
-			}
-			
-			for (int i = 0; i < size; i++)
-			{
-				String name = StringHelper.Intern(input.ReadString());
-				byte bits = input.ReadByte();
-				bool isIndexed = (bits & IS_INDEXED) != 0;
-				bool storeTermVector = (bits & STORE_TERMVECTOR) != 0;
-				bool storePositionsWithTermVector = (bits & STORE_POSITIONS_WITH_TERMVECTOR) != 0;
-				bool storeOffsetWithTermVector = (bits & STORE_OFFSET_WITH_TERMVECTOR) != 0;
-				bool omitNorms = (bits & OMIT_NORMS) != 0;
-				bool storePayloads = (bits & STORE_PAYLOADS) != 0;
-				bool omitTermFreqAndPositions = (bits & OMIT_TERM_FREQ_AND_POSITIONS) != 0;
-				
-				AddInternal(name, isIndexed, storeTermVector, storePositionsWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
-			}
-			
-			if (input.FilePointer != input.Length())
-			{
-				throw new CorruptIndexException("did not read all bytes from file \"" + fileName + "\": read " + input.FilePointer + " vs size " + input.Length());
-			}
-		}
-	}
+        /// </seealso>
+        public void  Add(String name, bool isIndexed)
+        {
+            lock (this)
+            {
+                Add(name, isIndexed, false, false, false, false);
+            }
+        }
+        
+        /// <summary> Calls 5 parameter add with false for term vector positions and offsets.
+        /// 
+        /// </summary>
+        /// <param name="name">The name of the field
+        /// </param>
+        /// <param name="isIndexed"> true if the field is indexed
+        /// </param>
+        /// <param name="storeTermVector">true if the term vector should be stored
+        /// </param>
+        public void  Add(System.String name, bool isIndexed, bool storeTermVector)
+        {
+            lock (this)
+            {
+                Add(name, isIndexed, storeTermVector, false, false, false);
+            }
+        }
+        
+        /// <summary>If the field is not yet known, adds it. If it is known, checks to make
+        /// sure that the isIndexed flag is the same as was given previously for this
+        /// field. If not - marks it as being indexed.  Same goes for the TermVector
+        /// parameters.
+        /// 
+        /// </summary>
+        /// <param name="name">The name of the field
+        /// </param>
+        /// <param name="isIndexed">true if the field is indexed
+        /// </param>
+        /// <param name="storeTermVector">true if the term vector should be stored
+        /// </param>
+        /// <param name="storePositionWithTermVector">true if the term vector with positions should be stored
+        /// </param>
+        /// <param name="storeOffsetWithTermVector">true if the term vector with offsets should be stored
+        /// </param>
+        public void  Add(System.String name, bool isIndexed, bool storeTermVector, bool storePositionWithTermVector, bool storeOffsetWithTermVector)
+        {
+            lock (this)
+            {
+                
+                Add(name, isIndexed, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, false);
+            }
+        }
+        
+        /// <summary>If the field is not yet known, adds it. If it is known, checks to make
+        /// sure that the isIndexed flag is the same as was given previously for this
+        /// field. If not - marks it as being indexed.  Same goes for the TermVector
+        /// parameters.
+        /// 
+        /// </summary>
+        /// <param name="name">The name of the field
+        /// </param>
+        /// <param name="isIndexed">true if the field is indexed
+        /// </param>
+        /// <param name="storeTermVector">true if the term vector should be stored
+        /// </param>
+        /// <param name="storePositionWithTermVector">true if the term vector with positions should be stored
+        /// </param>
+        /// <param name="storeOffsetWithTermVector">true if the term vector with offsets should be stored
+        /// </param>
+        /// <param name="omitNorms">true if the norms for the indexed field should be omitted
+        /// </param>
+        public void  Add(System.String name, bool isIndexed, bool storeTermVector, bool storePositionWithTermVector, bool storeOffsetWithTermVector, bool omitNorms)
+        {
+            lock (this)
+            {
+                Add(name, isIndexed, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, false, false);
+            }
+        }
+        
+        /// <summary>If the field is not yet known, adds it. If it is known, checks to make
+        /// sure that the isIndexed flag is the same as was given previously for this
+        /// field. If not - marks it as being indexed.  Same goes for the TermVector
+        /// parameters.
+        /// 
+        /// </summary>
+        /// <param name="name">The name of the field
+        /// </param>
+        /// <param name="isIndexed">true if the field is indexed
+        /// </param>
+        /// <param name="storeTermVector">true if the term vector should be stored
+        /// </param>
+        /// <param name="storePositionWithTermVector">true if the term vector with positions should be stored
+        /// </param>
+        /// <param name="storeOffsetWithTermVector">true if the term vector with offsets should be stored
+        /// </param>
+        /// <param name="omitNorms">true if the norms for the indexed field should be omitted
+        /// </param>
+        /// <param name="storePayloads">true if payloads should be stored for this field
+        /// </param>
+        /// <param name="omitTermFreqAndPositions">true if term freqs should be omitted for this field
+        /// </param>
+        public FieldInfo Add(System.String name, bool isIndexed, bool storeTermVector, bool storePositionWithTermVector, bool storeOffsetWithTermVector, bool omitNorms, bool storePayloads, bool omitTermFreqAndPositions)
+        {
+            lock (this)
+            {
+                FieldInfo fi = FieldInfo(name);
+                if (fi == null)
+                {
+                    return AddInternal(name, isIndexed, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
+                }
+                else
+                {
+                    fi.Update(isIndexed, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
+                }
+                return fi;
+            }
+        }
+        
+        private FieldInfo AddInternal(String name, bool isIndexed, bool storeTermVector, bool storePositionWithTermVector, bool storeOffsetWithTermVector, bool omitNorms, bool storePayloads, bool omitTermFreqAndPositions)
+        {
+            name = StringHelper.Intern(name);
+            var fi = new FieldInfo(name, isIndexed, byNumber.Count, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
+            byNumber.Add(fi);
+            byName[name] = fi;
+            return fi;
+        }
+        
+        public int FieldNumber(System.String fieldName)
+        {
+            FieldInfo fi = FieldInfo(fieldName);
+            return (fi != null) ? fi.number : -1;
+        }
+        
+        public FieldInfo FieldInfo(System.String fieldName)
+        {
+            return byName[fieldName];
+        }
+        
+        /// <summary> Return the fieldName identified by its number.
+        /// 
+        /// </summary>
+        /// <param name="fieldNumber">
+        /// </param>
+        /// <returns> the fieldName or an empty string when the field
+        /// with the given number doesn't exist.
+        /// </returns>
+        public System.String FieldName(int fieldNumber)
+        {
+            FieldInfo fi = FieldInfo(fieldNumber);
+            return (fi != null) ? fi.name : "";
+        }
+        
+        /// <summary> Return the FieldInfo object referenced by the fieldNumber.</summary>
+        /// <param name="fieldNumber">the field's number
+        /// </param>
+        /// <returns> the FieldInfo object or null when the given fieldNumber
+        /// doesn't exist.
+        /// </returns>
+        public FieldInfo FieldInfo(int fieldNumber)
+        {
+            return (fieldNumber >= 0) ? byNumber[fieldNumber] : null;
+        }
+        
+        public int Size()
+        {
+            return byNumber.Count;
+        }
+        
+        public bool HasVectors()
+        {
+            bool hasVectors = false;
+            for (int i = 0; i < Size(); i++)
+            {
+                if (FieldInfo(i).storeTermVector)
+                {
+                    hasVectors = true;
+                    break;
+                }
+            }
+            return hasVectors;
+        }
+        
+        public void  Write(Directory d, System.String name)
+        {
+            IndexOutput output = d.CreateOutput(name);
+            try
+            {
+                Write(output);
+            }
+            finally
+            {
+                output.Close();
+            }
+        }
+        
+        public void  Write(IndexOutput output)
+        {
+            output.WriteVInt(CURRENT_FORMAT);
+            output.WriteVInt(Size());
+            for (int i = 0; i < Size(); i++)
+            {
+                FieldInfo fi = FieldInfo(i);
+                var bits = (byte) (0x0);
+                if (fi.isIndexed)
+                    bits |= IS_INDEXED;
+                if (fi.storeTermVector)
+                    bits |= STORE_TERMVECTOR;
+                if (fi.storePositionWithTermVector)
+                    bits |= STORE_POSITIONS_WITH_TERMVECTOR;
+                if (fi.storeOffsetWithTermVector)
+                    bits |= STORE_OFFSET_WITH_TERMVECTOR;
+                if (fi.omitNorms)
+                    bits |= OMIT_NORMS;
+                if (fi.storePayloads)
+                    bits |= STORE_PAYLOADS;
+                if (fi.omitTermFreqAndPositions)
+                    bits |= OMIT_TERM_FREQ_AND_POSITIONS;
+                
+                output.WriteString(fi.name);
+                output.WriteByte(bits);
+            }
+        }
+        
+        private void  Read(IndexInput input, String fileName)
+        {
+            int firstInt = input.ReadVInt();
+            
+            if (firstInt < 0)
+            {
+                // This is a real format
+                format = firstInt;
+            }
+            else
+            {
+                format = FORMAT_PRE;
+            }
+            
+            if (format != FORMAT_PRE && format != FORMAT_START)
+            {
+                throw new CorruptIndexException("unrecognized format " + format + " in file \"" + fileName + "\"");
+            }
+            
+            int size;
+            if (format == FORMAT_PRE)
+            {
+                size = firstInt;
+            }
+            else
+            {
+                size = input.ReadVInt(); //read in the size
+            }
+            
+            for (int i = 0; i < size; i++)
+            {
+                String name = StringHelper.Intern(input.ReadString());
+                byte bits = input.ReadByte();
+                bool isIndexed = (bits & IS_INDEXED) != 0;
+                bool storeTermVector = (bits & STORE_TERMVECTOR) != 0;
+                bool storePositionsWithTermVector = (bits & STORE_POSITIONS_WITH_TERMVECTOR) != 0;
+                bool storeOffsetWithTermVector = (bits & STORE_OFFSET_WITH_TERMVECTOR) != 0;
+                bool omitNorms = (bits & OMIT_NORMS) != 0;
+                bool storePayloads = (bits & STORE_PAYLOADS) != 0;
+                bool omitTermFreqAndPositions = (bits & OMIT_TERM_FREQ_AND_POSITIONS) != 0;
+                
+                AddInternal(name, isIndexed, storeTermVector, storePositionsWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
+            }
+            
+            if (input.FilePointer != input.Length())
+            {
+                throw new CorruptIndexException("did not read all bytes from file \"" + fileName + "\": read " + input.FilePointer + " vs size " + input.Length());
+            }
+        }
+    }
 }
\ No newline at end of file

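A note on the FieldInfos file format above: Write packs all of a field's option
booleans into a single flags byte, and Read recovers them by masking that byte.
A minimal self-contained sketch of the round trip follows; the constant values
are assumptions chosen to match the names above, not the shipped declarations.

using System;

// Sketch of the one-byte flags encoding used by FieldInfos.Write/Read.
// Constant values are illustrative assumptions; the masking pattern is the point.
internal static class FieldFlagsSketch
{
    private const byte IS_INDEXED = 0x01;
    private const byte STORE_TERMVECTOR = 0x02;
    private const byte OMIT_NORMS = 0x10;

    private static byte Encode(bool isIndexed, bool storeTermVector, bool omitNorms)
    {
        byte bits = 0x0;
        if (isIndexed) bits |= IS_INDEXED;
        if (storeTermVector) bits |= STORE_TERMVECTOR;
        if (omitNorms) bits |= OMIT_NORMS;
        return bits;
    }

    private static void Main()
    {
        byte bits = Encode(true, false, true);
        // Decoding tests each flag independently, exactly as Read does.
        Console.WriteLine((bits & IS_INDEXED) != 0);       // True
        Console.WriteLine((bits & STORE_TERMVECTOR) != 0); // False
        Console.WriteLine((bits & OMIT_NORMS) != 0);       // True
    }
}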
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FieldInvertState.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FieldInvertState.cs b/src/core/Index/FieldInvertState.cs
index 96d6c83..24d63d7 100644
--- a/src/core/Index/FieldInvertState.cs
+++ b/src/core/Index/FieldInvertState.cs
@@ -21,90 +21,90 @@ using AttributeSource = Lucene.Net.Util.AttributeSource;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> This class tracks the number and position / offset parameters of terms
-	/// being added to the index. The information collected in this class is
-	/// also used to calculate the normalization factor for a field.
-	/// 
-	/// <p/><b>WARNING</b>: This API is new and experimental, and may suddenly
-	/// change.<p/>
-	/// </summary>
-	public sealed class FieldInvertState
-	{
-		internal int position;
-		internal int length;
-		internal int numOverlap;
-		internal int offset;
-		internal float boost;
-		internal AttributeSource attributeSource;
-		
-		public FieldInvertState()
-		{
-		}
-		
-		public FieldInvertState(int position, int length, int numOverlap, int offset, float boost)
-		{
-			this.position = position;
-			this.length = length;
-			this.numOverlap = numOverlap;
-			this.offset = offset;
-			this.boost = boost;
-		}
-		
-		/// <summary> Re-initialize the state, using this boost value.</summary>
-		/// <param name="docBoost">boost value to use.
-		/// </param>
-		internal void  Reset(float docBoost)
-		{
-			position = 0;
-			length = 0;
-			numOverlap = 0;
-			offset = 0;
-			boost = docBoost;
-			attributeSource = null;
-		}
+    
+    /// <summary> This class tracks the number and position / offset parameters of terms
+    /// being added to the index. The information collected in this class is
+    /// also used to calculate the normalization factor for a field.
+    /// 
+    /// <p/><b>WARNING</b>: This API is new and experimental, and may suddenly
+    /// change.<p/>
+    /// </summary>
+    public sealed class FieldInvertState
+    {
+        internal int position;
+        internal int length;
+        internal int numOverlap;
+        internal int offset;
+        internal float boost;
+        internal AttributeSource attributeSource;
+        
+        public FieldInvertState()
+        {
+        }
+        
+        public FieldInvertState(int position, int length, int numOverlap, int offset, float boost)
+        {
+            this.position = position;
+            this.length = length;
+            this.numOverlap = numOverlap;
+            this.offset = offset;
+            this.boost = boost;
+        }
+        
+        /// <summary> Re-initialize the state, using this boost value.</summary>
+        /// <param name="docBoost">boost value to use.
+        /// </param>
+        internal void  Reset(float docBoost)
+        {
+            position = 0;
+            length = 0;
+            numOverlap = 0;
+            offset = 0;
+            boost = docBoost;
+            attributeSource = null;
+        }
 
-	    /// <summary> Get the last processed term position.</summary>
-	    /// <value> the position </value>
-	    public int Position
-	    {
-	        get { return position; }
-	    }
+        /// <summary> Get the last processed term position.</summary>
+        /// <value> the position </value>
+        public int Position
+        {
+            get { return position; }
+        }
 
-	    /// <summary> Get total number of terms in this field.</summary>
-	    /// <value> the length </value>
-	    public int Length
-	    {
-	        get { return length; }
-	    }
+        /// <summary> Get total number of terms in this field.</summary>
+        /// <value> the length </value>
+        public int Length
+        {
+            get { return length; }
+        }
 
-	    /// <summary> Get the number of terms with <c>positionIncrement == 0</c>.</summary>
-	    /// <value> the numOverlap </value>
-	    public int NumOverlap
-	    {
-	        get { return numOverlap; }
-	    }
+        /// <summary> Get the number of terms with <c>positionIncrement == 0</c>.</summary>
+        /// <value> the numOverlap </value>
+        public int NumOverlap
+        {
+            get { return numOverlap; }
+        }
 
-	    /// <summary> Get end offset of the last processed term.</summary>
-	    /// <value> the offset </value>
-	    public int Offset
-	    {
-	        get { return offset; }
-	    }
+        /// <summary> Get end offset of the last processed term.</summary>
+        /// <value> the offset </value>
+        public int Offset
+        {
+            get { return offset; }
+        }
 
-	    /// <summary> Get boost value. This is the cumulative product of
-	    /// document boost and field boost for all field instances
-	    /// sharing the same field name.
-	    /// </summary>
-	    /// <value> the boost </value>
-	    public float Boost
-	    {
-	        get { return boost; }
-	    }
+        /// <summary> Get boost value. This is the cumulative product of
+        /// document boost and field boost for all field instances
+        /// sharing the same field name.
+        /// </summary>
+        /// <value> the boost </value>
+        public float Boost
+        {
+            get { return boost; }
+        }
 
-	    public AttributeSource AttributeSource
-	    {
-	        get { return attributeSource; }
-	    }
-	}
+        public AttributeSource AttributeSource
+        {
+            get { return attributeSource; }
+        }
+    }
 }
\ No newline at end of file

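FieldInvertState exists mainly to feed norm computation, so a hedged sketch of
the usual consumer may help: a custom Similarity that discounts overlapping
tokens. It assumes the Lucene.Net 3.x ComputeNorm(string, FieldInvertState)
override point on DefaultSimilarity; treat it as an illustration, not the
shipped implementation.

using Lucene.Net.Index;
using Lucene.Net.Search;

// Uses FieldInvertState.Length and NumOverlap so that tokens added with
// positionIncrement == 0 do not inflate the length normalization.
public class OverlapAwareSimilarity : DefaultSimilarity
{
    public override float ComputeNorm(string field, FieldInvertState state)
    {
        int effectiveLength = state.Length - state.NumOverlap;
        if (effectiveLength < 1) effectiveLength = 1; // guard against all-overlap fields
        return state.Boost * LengthNorm(field, effectiveLength);
    }
}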
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FieldSortedTermVectorMapper.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FieldSortedTermVectorMapper.cs b/src/core/Index/FieldSortedTermVectorMapper.cs
index 6c1915e..4d193f5 100644
--- a/src/core/Index/FieldSortedTermVectorMapper.cs
+++ b/src/core/Index/FieldSortedTermVectorMapper.cs
@@ -20,59 +20,59 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> For each Field, store a sorted collection of <see cref="TermVectorEntry" />s
-	/// <p/>
-	/// This is not thread-safe.
-	/// </summary>
-	public class FieldSortedTermVectorMapper:TermVectorMapper
-	{
+    
+    /// <summary> For each Field, store a sorted collection of <see cref="TermVectorEntry" />s
+    /// <p/>
+    /// This is not thread-safe.
+    /// </summary>
+    public class FieldSortedTermVectorMapper:TermVectorMapper
+    {
         private readonly IDictionary<string, SortedSet<TermVectorEntry>> fieldToTerms = new HashMap<string, SortedSet<TermVectorEntry>>();
-		private SortedSet<TermVectorEntry> currentSet;
-		private System.String currentField;
+        private SortedSet<TermVectorEntry> currentSet;
+        private System.String currentField;
         private readonly IComparer<TermVectorEntry> comparator;
-		
-		/// <summary> </summary>
-		/// <param name="comparator">A Comparator for sorting <see cref="TermVectorEntry" />s
-		/// </param>
+        
+        /// <summary> </summary>
+        /// <param name="comparator">A Comparator for sorting <see cref="TermVectorEntry" />s
+        /// </param>
         public FieldSortedTermVectorMapper(IComparer<TermVectorEntry> comparator)
             : this(false, false, comparator)
-		{
-		}
+        {
+        }
 
 
         public FieldSortedTermVectorMapper(bool ignoringPositions, bool ignoringOffsets, IComparer<TermVectorEntry> comparator)
             : base(ignoringPositions, ignoringOffsets)
-		{
-			this.comparator = comparator;
-		}
-		
-		public override void  Map(System.String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions)
-		{
-			var entry = new TermVectorEntry(currentField, term, frequency, offsets, positions);
-			currentSet.Add(entry);
-		}
-		
-		public override void  SetExpectations(System.String field, int numTerms, bool storeOffsets, bool storePositions)
-		{
-			currentSet = new SortedSet<TermVectorEntry>(comparator);
-			currentField = field;
-			fieldToTerms[field] = currentSet;
-		}
+        {
+            this.comparator = comparator;
+        }
+        
+        public override void  Map(System.String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions)
+        {
+            var entry = new TermVectorEntry(currentField, term, frequency, offsets, positions);
+            currentSet.Add(entry);
+        }
+        
+        public override void  SetExpectations(System.String field, int numTerms, bool storeOffsets, bool storePositions)
+        {
+            currentSet = new SortedSet<TermVectorEntry>(comparator);
+            currentField = field;
+            fieldToTerms[field] = currentSet;
+        }
 
-	    /// <summary> Get the mapping between fields and terms, sorted by the comparator
-	    /// 
-	    /// </summary>
-	    /// <value> A map between field names and &lt;see cref=&quot;System.Collections.Generic.SortedDictionary{Object,Object}&quot; /&gt;s per field. SortedSet entries are &lt;see cref=&quot;TermVectorEntry&quot; /&gt; </value>
-	    public virtual IDictionary<string, SortedSet<TermVectorEntry>> FieldToTerms
-	    {
-	        get { return fieldToTerms; }
-	    }
+        /// <summary> Get the mapping between fields and terms, sorted by the comparator
+        /// 
+        /// </summary>
+        /// <value> A map between field names and <see cref="System.Collections.Generic.SortedSet{T}" />s per field. SortedSet entries are <see cref="TermVectorEntry" />. </value>
+        public virtual IDictionary<string, SortedSet<TermVectorEntry>> FieldToTerms
+        {
+            get { return fieldToTerms; }
+        }
 
 
-	    public virtual IComparer<TermVectorEntry> Comparator
-	    {
-	        get { return comparator; }
-	    }
-	}
+        public virtual IComparer<TermVectorEntry> Comparator
+        {
+            get { return comparator; }
+        }
+    }
 }
\ No newline at end of file


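For context, a hedged usage sketch for the mapper above: hand it to
IndexReader.GetTermFreqVector and read the per-field sorted sets back out.
The comparator class, the "body" field name, and the Term/Frequency property
names are assumptions following the 3.x .NET port's conventions.

using System;
using System.Collections.Generic;
using Lucene.Net.Index;

public static class TermVectorDump
{
    // Prints each field's term vector entries in comparator order for one document.
    public static void Dump(IndexReader reader, int docId)
    {
        var mapper = new FieldSortedTermVectorMapper(
            new TermVectorEntryFreqSortedComparator()); // assumed stock comparator
        reader.GetTermFreqVector(docId, "body", mapper); // "body" is an assumed field

        foreach (KeyValuePair<string, SortedSet<TermVectorEntry>> kv in mapper.FieldToTerms)
        {
            foreach (TermVectorEntry entry in kv.Value)
            {
                Console.WriteLine(kv.Key + ": " + entry.Term + " x" + entry.Frequency);
            }
        }
    }
}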
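The FinnishStemmer diff below is generated Snowball code, so its shape (the
Among tables, the r_* routines, the goto-based backtracking) is machine-produced
rather than hand-written. A hedged sketch of driving such a stemmer directly,
assuming the SnowballProgram SetCurrent/Stem/GetCurrent surface; production code
normally reaches it through SnowballFilter instead:

using SF.Snowball.Ext;

public static class StemDemo
{
    // Stems one Finnish surface form to its Snowball stem.
    public static string StemWord(string word)
    {
        var stemmer = new FinnishStemmer();
        stemmer.SetCurrent(word);   // load the buffer the r_* routines operate on
        stemmer.Stem();             // runs r_mark_regions, r_case, r_tidy, ...
        return stemmer.GetCurrent();
    }
}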
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/FinnishStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/FinnishStemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/FinnishStemmer.cs
index eb111f6..ef8b971 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/FinnishStemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/FinnishStemmer.cs
@@ -23,1128 +23,1128 @@ namespace SF.Snowball.Ext
 {
 #pragma warning disable 162,164
 
-	/// <summary> Generated class implementing code defined by a snowball script.</summary>
-	public class FinnishStemmer:SnowballProgram
-	{
-		public FinnishStemmer()
-		{
-			InitBlock();
-		}
-		private void  InitBlock()
-		{
-			a_0 = new Among[]{new Among("pa", - 1, 1, "", this), new Among("sti", - 1, 2, "", this), new Among("kaan", - 1, 1, "", this), new Among("han", - 1, 1, "", this), new Among("kin", - 1, 1, "", this), new Among("h\u00E4n", - 1, 1, "", this), new Among("k\u00E4\u00E4n", - 1, 1, "", this), new Among("ko", - 1, 1, "", this), new Among("p\u00E4", - 1, 1, "", this), new Among("k\u00F6", - 1, 1, "", this)};
-			a_1 = new Among[]{new Among("lla", - 1, - 1, "", this), new Among("na", - 1, - 1, "", this), new Among("ssa", - 1, - 1, "", this), new Among("ta", - 1, - 1, "", this), new Among("lta", 3, - 1, "", this), new Among("sta", 3, - 1, "", this)};
-			a_2 = new Among[]{new Among("ll\u00E4", - 1, - 1, "", this), new Among("n\u00E4", - 1, - 1, "", this), new Among("ss\u00E4", - 1, - 1, "", this), new Among("t\u00E4", - 1, - 1, "", this), new Among("lt\u00E4", 3, - 1, "", this), new Among("st\u00E4", 3, - 1, "", this)};
-			a_3 = new Among[]{new Among("lle", - 1, - 1, "", this), new Among("ine", - 1, - 1, "", this)};
-			a_4 = new Among[]{new Among("nsa", - 1, 3, "", this), new Among("mme", - 1, 3, "", this), new Among("nne", - 1, 3, "", this), new Among("ni", - 1, 2, "", this), new Among("si", - 1, 1, "", this), new Among("an", - 1, 4, "", this), new Among("en", - 1, 6, "", this), new Among("\u00E4n", - 1, 5, "", this), new Among("ns\u00E4", - 1, 3, "", this)};
-			a_5 = new Among[]{new Among("aa", - 1, - 1, "", this), new Among("ee", - 1, - 1, "", this), new Among("ii", - 1, - 1, "", this), new Among("oo", - 1, - 1, "", this), new Among("uu", - 1, - 1, "", this), new Among("\u00E4\u00E4", - 1, - 1, "", this), new Among("\u00F6\u00F6", - 1, - 1, "", this)};
-			a_6 = new Among[]{new Among("a", - 1, 8, "", this), new Among("lla", 0, - 1, "", this), new Among("na", 0, - 1, "", this), new Among("ssa", 0, - 1, "", this), new Among("ta", 0, - 1, "", this), new Among("lta", 4, - 1, "", this), new Among("sta", 4, - 1, "", this), new Among("tta", 4, 9, "", this), new Among("lle", - 1, - 1, "", this), new Among("ine", - 1, - 1, "", this), new Among("ksi", - 1, - 1, "", this), new Among("n", - 1, 7, "", this), new Among("han", 11, 1, "", this), new Among("den", 11, - 1, "r_VI", this), new Among("seen", 11, - 1, "r_LONG", this), new Among("hen", 11, 2, "", this), new Among("tten", 11, - 1, "r_VI", this), new Among("hin", 11, 3, "", this), new Among("siin", 11, - 1, "r_VI", this), new Among("hon", 11, 4, "", this), new Among("h\u00E4n", 11, 5, "", this), new Among("h\u00F6n", 11, 6, "", this), new Among("\u00E4", - 1, 8, "", this), new Among("ll\u00E4", 22, - 1, "", this), new Among("n\u00E4", 22, - 1, "", this), new Among("ss\u00E4", 22, - 1, "", this), new Among("t\u00E4", 22, - 1, "", this), new Among("lt\u00E4", 26, - 1, "", this), new Among("st\u00E4", 26, - 1, "", this), new Among("tt\u00E4", 26, 9, "", this)};
-			a_7 = new Among[]{new Among("eja", - 1, - 1, "", this), new Among("mma", - 1, 1, "", this), new Among("imma", 1, - 1, "", this), new Among("mpa", - 1, 1, "", this), new Among("impa", 3, - 1, "", this), new Among("mmi", - 1, 1, "", this), new Among("immi", 5, - 1, "", this), new Among("mpi", - 1, 1, "", this), new Among("impi", 7, - 1, "", this), new Among("ej\u00E4", - 1, - 1, "", this), new Among("mm\u00E4", - 1, 1, "", this), new Among("imm\u00E4", 10, - 1, "", this), new Among("mp\u00E4", - 1, 1, "", this), new Among("imp\u00E4", 12, - 1, "", this)};
-			a_8 = new Among[]{new Among("i", - 1, - 1, "", this), new Among("j", - 1, - 1, "", this)};
-			a_9 = new Among[]{new Among("mma", - 1, 1, "", this), new Among("imma", 0, - 1, "", this)};
-		}
-		
-		private Among[] a_0;
-		private Among[] a_1;
-		private Among[] a_2;
-		private Among[] a_3;
-		private Among[] a_4;
-		private Among[] a_5;
-		private Among[] a_6;
-		private Among[] a_7;
-		private Among[] a_8;
-		private Among[] a_9;
+    /// <summary> Generated class implementing code defined by a snowball script.</summary>
+    public class FinnishStemmer:SnowballProgram
+    {
+        public FinnishStemmer()
+        {
+            InitBlock();
+        }
+        private void  InitBlock()
+        {
+            a_0 = new Among[]{new Among("pa", - 1, 1, "", this), new Among("sti", - 1, 2, "", this), new Among("kaan", - 1, 1, "", this), new Among("han", - 1, 1, "", this), new Among("kin", - 1, 1, "", this), new Among("h\u00E4n", - 1, 1, "", this), new Among("k\u00E4\u00E4n", - 1, 1, "", this), new Among("ko", - 1, 1, "", this), new Among("p\u00E4", - 1, 1, "", this), new Among("k\u00F6", - 1, 1, "", this)};
+            a_1 = new Among[]{new Among("lla", - 1, - 1, "", this), new Among("na", - 1, - 1, "", this), new Among("ssa", - 1, - 1, "", this), new Among("ta", - 1, - 1, "", this), new Among("lta", 3, - 1, "", this), new Among("sta", 3, - 1, "", this)};
+            a_2 = new Among[]{new Among("ll\u00E4", - 1, - 1, "", this), new Among("n\u00E4", - 1, - 1, "", this), new Among("ss\u00E4", - 1, - 1, "", this), new Among("t\u00E4", - 1, - 1, "", this), new Among("lt\u00E4", 3, - 1, "", this), new Among("st\u00E4", 3, - 1, "", this)};
+            a_3 = new Among[]{new Among("lle", - 1, - 1, "", this), new Among("ine", - 1, - 1, "", this)};
+            a_4 = new Among[]{new Among("nsa", - 1, 3, "", this), new Among("mme", - 1, 3, "", this), new Among("nne", - 1, 3, "", this), new Among("ni", - 1, 2, "", this), new Among("si", - 1, 1, "", this), new Among("an", - 1, 4, "", this), new Among("en", - 1, 6, "", this), new Among("\u00E4n", - 1, 5, "", this), new Among("ns\u00E4", - 1, 3, "", this)};
+            a_5 = new Among[]{new Among("aa", - 1, - 1, "", this), new Among("ee", - 1, - 1, "", this), new Among("ii", - 1, - 1, "", this), new Among("oo", - 1, - 1, "", this), new Among("uu", - 1, - 1, "", this), new Among("\u00E4\u00E4", - 1, - 1, "", this), new Among("\u00F6\u00F6", - 1, - 1, "", this)};
+            a_6 = new Among[]{new Among("a", - 1, 8, "", this), new Among("lla", 0, - 1, "", this), new Among("na", 0, - 1, "", this), new Among("ssa", 0, - 1, "", this), new Among("ta", 0, - 1, "", this), new Among("lta", 4, - 1, "", this), new Among("sta", 4, - 1, "", this), new Among("tta", 4, 9, "", this), new Among("lle", - 1, - 1, "", this), new Among("ine", - 1, - 1, "", this), new Among("ksi", - 1, - 1, "", this), new Among("n", - 1, 7, "", this), new Among("han", 11, 1, "", this), new Among("den", 11, - 1, "r_VI", this), new Among("seen", 11, - 1, "r_LONG", this), new Among("hen", 11, 2, "", this), new Among("tten", 11, - 1, "r_VI", this), new Among("hin", 11, 3, "", this), new Among("siin", 11, - 1, "r_VI", this), new Among("hon", 11, 4, "", this), new Among("h\u00E4n", 11, 5, "", this), new Among("h\u00F6n", 11, 6, "", this), new Among("\u00E4", - 1, 8, "", this), new Among("ll\u00E4", 22, - 1, "", this), new Among("n\u00E4", 22, - 1, "", this), new Among("ss\u00E4", 22, 
 - 1, "", this), new Among("t\u00E4", 22, - 1, "", this), new Among("lt\u00E4", 26, - 1, "", this), new Among("st\u00E4", 26, - 1, "", this), new Among("tt\u00E4", 26, 9, "", this)};
+            a_7 = new Among[]{new Among("eja", - 1, - 1, "", this), new Among("mma", - 1, 1, "", this), new Among("imma", 1, - 1, "", this), new Among("mpa", - 1, 1, "", this), new Among("impa", 3, - 1, "", this), new Among("mmi", - 1, 1, "", this), new Among("immi", 5, - 1, "", this), new Among("mpi", - 1, 1, "", this), new Among("impi", 7, - 1, "", this), new Among("ej\u00E4", - 1, - 1, "", this), new Among("mm\u00E4", - 1, 1, "", this), new Among("imm\u00E4", 10, - 1, "", this), new Among("mp\u00E4", - 1, 1, "", this), new Among("imp\u00E4", 12, - 1, "", this)};
+            a_8 = new Among[]{new Among("i", - 1, - 1, "", this), new Among("j", - 1, - 1, "", this)};
+            a_9 = new Among[]{new Among("mma", - 1, 1, "", this), new Among("imma", 0, - 1, "", this)};
+        }
+        
+        private Among[] a_0;
+        private Among[] a_1;
+        private Among[] a_2;
+        private Among[] a_3;
+        private Among[] a_4;
+        private Among[] a_5;
+        private Among[] a_6;
+        private Among[] a_7;
+        private Among[] a_8;
+        private Among[] a_9;
 
         private static readonly char[] g_AEI = new char[]{(char) (17), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (8)};
-		private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (8), (char) (0), (char) (32)};
-		private static readonly char[] g_V = new char[]{(char) (17), (char) (65), (char) (16), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (8), (char) (0), (char) (32)};
-		private static readonly char[] g_particle_end = new char[]{(char) (17), (char) (97), (char) (24), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (8), (char) (0), (char) (32)};
-		
-		private bool B_ending_removed;
-		private System.Text.StringBuilder S_x = new System.Text.StringBuilder();
-		private int I_p2;
-		private int I_p1;
-		
-		protected internal virtual void  copy_from(FinnishStemmer other)
-		{
-			B_ending_removed = other.B_ending_removed;
-			S_x = other.S_x;
-			I_p2 = other.I_p2;
-			I_p1 = other.I_p1;
-			base.copy_from(other);
-		}
-		
-		private bool r_mark_regions()
-		{
-			int v_1;
-			int v_3;
-			// (, line 41
-			I_p1 = limit;
-			I_p2 = limit;
-			// goto, line 46
-			while (true)
-			{
-				v_1 = cursor;
-				do 
-				{
-					if (!(in_grouping(g_v, 97, 246)))
-					{
-						goto lab1_brk;
-					}
-					cursor = v_1;
-					goto golab0_brk;
-				}
-				while (false);
+        private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (8), (char) (0), (char) (32)};
+        private static readonly char[] g_V = new char[]{(char) (17), (char) (65), (char) (16), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (8), (char) (0), (char) (32)};
+        private static readonly char[] g_particle_end = new char[]{(char) (17), (char) (97), (char) (24), (char) (1), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (8), (char) (0), (char) (32)};
+        
+        private bool B_ending_removed;
+        private System.Text.StringBuilder S_x = new System.Text.StringBuilder();
+        private int I_p2;
+        private int I_p1;
+        
+        protected internal virtual void  copy_from(FinnishStemmer other)
+        {
+            B_ending_removed = other.B_ending_removed;
+            S_x = other.S_x;
+            I_p2 = other.I_p2;
+            I_p1 = other.I_p1;
+            base.copy_from(other);
+        }
+        
+        private bool r_mark_regions()
+        {
+            int v_1;
+            int v_3;
+            // (, line 41
+            I_p1 = limit;
+            I_p2 = limit;
+            // goto, line 46
+            while (true)
+            {
+                v_1 = cursor;
+                do 
+                {
+                    if (!(in_grouping(g_v, 97, 246)))
+                    {
+                        goto lab1_brk;
+                    }
+                    cursor = v_1;
+                    goto golab0_brk;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = v_1;
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                cursor = v_1;
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab0_brk: ;
-			
-			// gopast, line 46
-			while (true)
-			{
-				do 
-				{
-					if (!(out_grouping(g_v, 97, 246)))
-					{
-						goto lab3_brk;
-					}
-					goto golab2_brk;
-				}
-				while (false);
+            
+            // gopast, line 46
+            while (true)
+            {
+                do 
+                {
+                    if (!(out_grouping(g_v, 97, 246)))
+                    {
+                        goto lab3_brk;
+                    }
+                    goto golab2_brk;
+                }
+                while (false);
 
 lab3_brk: ;
-				
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab2_brk: ;
-			
-			// setmark p1, line 46
-			I_p1 = cursor;
-			// goto, line 47
-			while (true)
-			{
-				v_3 = cursor;
-				do 
-				{
-					if (!(in_grouping(g_v, 97, 246)))
-					{
-						goto lab5_brk;
-					}
-					cursor = v_3;
-					goto golab4_brk;
-				}
-				while (false);
+            
+            // setmark p1, line 46
+            I_p1 = cursor;
+            // goto, line 47
+            while (true)
+            {
+                v_3 = cursor;
+                do 
+                {
+                    if (!(in_grouping(g_v, 97, 246)))
+                    {
+                        goto lab5_brk;
+                    }
+                    cursor = v_3;
+                    goto golab4_brk;
+                }
+                while (false);
 
 lab5_brk: ;
-				
-				cursor = v_3;
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                cursor = v_3;
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab4_brk: ;
-			
-			// gopast, line 47
-			while (true)
-			{
-				do 
-				{
-					if (!(out_grouping(g_v, 97, 246)))
-					{
-						goto lab7_brk;
-					}
-					goto golab6_brk;
-				}
-				while (false);
+            
+            // gopast, line 47
+            while (true)
+            {
+                do 
+                {
+                    if (!(out_grouping(g_v, 97, 246)))
+                    {
+                        goto lab7_brk;
+                    }
+                    goto golab6_brk;
+                }
+                while (false);
 
 lab7_brk: ;
-				
-				if (cursor >= limit)
-				{
-					return false;
-				}
-				cursor++;
-			}
+                
+                if (cursor >= limit)
+                {
+                    return false;
+                }
+                cursor++;
+            }
 
 golab6_brk: ;
-			
-			// setmark p2, line 47
-			I_p2 = cursor;
-			return true;
-		}
-		
-		private bool r_R2()
-		{
-			if (!(I_p2 <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_particle_etc()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			// (, line 54
-			// setlimit, line 55
-			v_1 = limit - cursor;
-			// tomark, line 55
-			if (cursor < I_p1)
-			{
-				return false;
-			}
-			cursor = I_p1;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 55
-			// [, line 55
-			ket = cursor;
-			// substring, line 55
-			among_var = find_among_b(a_0, 10);
-			if (among_var == 0)
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			// ], line 55
-			bra = cursor;
-			limit_backward = v_2;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 62
-					if (!(in_grouping_b(g_particle_end, 97, 246)))
-					{
-						return false;
-					}
-					break;
-				
-				case 2: 
-					// (, line 64
-					// call R2, line 64
-					if (!r_R2())
-					{
-						return false;
-					}
-					break;
-				}
-			// delete, line 66
-			slice_del();
-			return true;
-		}
-		
-		private bool r_possessive()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			// (, line 68
-			// setlimit, line 69
-			v_1 = limit - cursor;
-			// tomark, line 69
-			if (cursor < I_p1)
-			{
-				return false;
-			}
-			cursor = I_p1;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 69
-			// [, line 69
-			ket = cursor;
-			// substring, line 69
-			among_var = find_among_b(a_4, 9);
-			if (among_var == 0)
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			// ], line 69
-			bra = cursor;
-			limit_backward = v_2;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 72
-					// not, line 72
-					{
-						v_3 = limit - cursor;
-						do 
-						{
-							// literal, line 72
-							if (!(eq_s_b(1, "k")))
-							{
-								goto lab0_brk;
-							}
-							return false;
-						}
-						while (false);
+            
+            // setmark p2, line 47
+            I_p2 = cursor;
+            return true;
+        }
+        
+        private bool r_R2()
+        {
+            if (!(I_p2 <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_particle_etc()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            // (, line 54
+            // setlimit, line 55
+            v_1 = limit - cursor;
+            // tomark, line 55
+            if (cursor < I_p1)
+            {
+                return false;
+            }
+            cursor = I_p1;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 55
+            // [, line 55
+            ket = cursor;
+            // substring, line 55
+            among_var = find_among_b(a_0, 10);
+            if (among_var == 0)
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            // ], line 55
+            bra = cursor;
+            limit_backward = v_2;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 62
+                    if (!(in_grouping_b(g_particle_end, 97, 246)))
+                    {
+                        return false;
+                    }
+                    break;
+                
+                case 2: 
+                    // (, line 64
+                    // call R2, line 64
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    break;
+                }
+            // delete, line 66
+            slice_del();
+            return true;
+        }
+        
+        private bool r_possessive()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            int v_3;
+            // (, line 68
+            // setlimit, line 69
+            v_1 = limit - cursor;
+            // tomark, line 69
+            if (cursor < I_p1)
+            {
+                return false;
+            }
+            cursor = I_p1;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 69
+            // [, line 69
+            ket = cursor;
+            // substring, line 69
+            among_var = find_among_b(a_4, 9);
+            if (among_var == 0)
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            // ], line 69
+            bra = cursor;
+            limit_backward = v_2;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 72
+                    // not, line 72
+                    {
+                        v_3 = limit - cursor;
+                        do 
+                        {
+                            // literal, line 72
+                            if (!(eq_s_b(1, "k")))
+                            {
+                                goto lab0_brk;
+                            }
+                            return false;
+                        }
+                        while (false);
 
 lab0_brk: ;
-						
-						cursor = limit - v_3;
-					}
-					// delete, line 72
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 74
-					// delete, line 74
-					slice_del();
-					// [, line 74
-					ket = cursor;
-					// literal, line 74
-					if (!(eq_s_b(3, "kse")))
-					{
-						return false;
-					}
-					// ], line 74
-					bra = cursor;
-					// <-, line 74
-					slice_from("ksi");
-					break;
-				
-				case 3: 
-					// (, line 78
-					// delete, line 78
-					slice_del();
-					break;
-				
-				case 4: 
-					// (, line 81
-					// among, line 81
-					if (find_among_b(a_1, 6) == 0)
-					{
-						return false;
-					}
-					// delete, line 81
-					slice_del();
-					break;
-				
-				case 5: 
-					// (, line 83
-					// among, line 83
-					if (find_among_b(a_2, 6) == 0)
-					{
-						return false;
-					}
-					// delete, line 84
-					slice_del();
-					break;
-				
-				case 6: 
-					// (, line 86
-					// among, line 86
-					if (find_among_b(a_3, 2) == 0)
-					{
-						return false;
-					}
-					// delete, line 86
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_LONG()
-		{
-			// among, line 91
-			if (find_among_b(a_5, 7) == 0)
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_VI()
-		{
-			// (, line 93
-			// literal, line 93
-			if (!(eq_s_b(1, "i")))
-			{
-				return false;
-			}
-			if (!(in_grouping_b(g_V, 97, 246)))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_case()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			// (, line 95
-			// setlimit, line 96
-			v_1 = limit - cursor;
-			// tomark, line 96
-			if (cursor < I_p1)
-			{
-				return false;
-			}
-			cursor = I_p1;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 96
-			// [, line 96
-			ket = cursor;
-			// substring, line 96
-			among_var = find_among_b(a_6, 30);
-			if (among_var == 0)
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			// ], line 96
-			bra = cursor;
-			limit_backward = v_2;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 98
-					// literal, line 98
-					if (!(eq_s_b(1, "a")))
-					{
-						return false;
-					}
-					break;
-				
-				case 2: 
-					// (, line 99
-					// literal, line 99
-					if (!(eq_s_b(1, "e")))
-					{
-						return false;
-					}
-					break;
-				
-				case 3: 
-					// (, line 100
-					// literal, line 100
-					if (!(eq_s_b(1, "i")))
-					{
-						return false;
-					}
-					break;
-				
-				case 4: 
-					// (, line 101
-					// literal, line 101
-					if (!(eq_s_b(1, "o")))
-					{
-						return false;
-					}
-					break;
-				
-				case 5: 
-					// (, line 102
-					// literal, line 102
-					if (!(eq_s_b(1, "\u00E4")))
-					{
-						return false;
-					}
-					break;
-				
-				case 6: 
-					// (, line 103
-					// literal, line 103
-					if (!(eq_s_b(1, "\u00F6")))
-					{
-						return false;
-					}
-					break;
-				
-				case 7: 
-					// (, line 111
-					// try, line 111
-					v_3 = limit - cursor;
-					do 
-					{
-						// (, line 111
-						// and, line 113
-						v_4 = limit - cursor;
-						// or, line 112
-						do 
-						{
-							v_5 = limit - cursor;
-							do 
-							{
-								// call LONG, line 111
-								if (!r_LONG())
-								{
-									goto lab2_brk;
-								}
-								goto lab1_brk;
-							}
-							while (false);
+                        
+                        cursor = limit - v_3;
+                    }
+                    // delete, line 72
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 74
+                    // delete, line 74
+                    slice_del();
+                    // [, line 74
+                    ket = cursor;
+                    // literal, line 74
+                    if (!(eq_s_b(3, "kse")))
+                    {
+                        return false;
+                    }
+                    // ], line 74
+                    bra = cursor;
+                    // <-, line 74
+                    slice_from("ksi");
+                    break;
+                
+                case 3: 
+                    // (, line 78
+                    // delete, line 78
+                    slice_del();
+                    break;
+                
+                case 4: 
+                    // (, line 81
+                    // among, line 81
+                    if (find_among_b(a_1, 6) == 0)
+                    {
+                        return false;
+                    }
+                    // delete, line 81
+                    slice_del();
+                    break;
+                
+                case 5: 
+                    // (, line 83
+                    // among, line 83
+                    if (find_among_b(a_2, 6) == 0)
+                    {
+                        return false;
+                    }
+                    // delete, line 84
+                    slice_del();
+                    break;
+                
+                case 6: 
+                    // (, line 86
+                    // among, line 86
+                    if (find_among_b(a_3, 2) == 0)
+                    {
+                        return false;
+                    }
+                    // delete, line 86
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_LONG()
+        {
+            // among, line 91
+            if (find_among_b(a_5, 7) == 0)
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_VI()
+        {
+            // (, line 93
+            // literal, line 93
+            if (!(eq_s_b(1, "i")))
+            {
+                return false;
+            }
+            if (!(in_grouping_b(g_V, 97, 246)))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_case()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            // (, line 95
+            // setlimit, line 96
+            v_1 = limit - cursor;
+            // tomark, line 96
+            if (cursor < I_p1)
+            {
+                return false;
+            }
+            cursor = I_p1;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 96
+            // [, line 96
+            ket = cursor;
+            // substring, line 96
+            among_var = find_among_b(a_6, 30);
+            if (among_var == 0)
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            // ], line 96
+            bra = cursor;
+            limit_backward = v_2;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 98
+                    // literal, line 98
+                    if (!(eq_s_b(1, "a")))
+                    {
+                        return false;
+                    }
+                    break;
+                
+                case 2: 
+                    // (, line 99
+                    // literal, line 99
+                    if (!(eq_s_b(1, "e")))
+                    {
+                        return false;
+                    }
+                    break;
+                
+                case 3: 
+                    // (, line 100
+                    // literal, line 100
+                    if (!(eq_s_b(1, "i")))
+                    {
+                        return false;
+                    }
+                    break;
+                
+                case 4: 
+                    // (, line 101
+                    // literal, line 101
+                    if (!(eq_s_b(1, "o")))
+                    {
+                        return false;
+                    }
+                    break;
+                
+                case 5: 
+                    // (, line 102
+                    // literal, line 102
+                    if (!(eq_s_b(1, "\u00E4")))
+                    {
+                        return false;
+                    }
+                    break;
+                
+                case 6: 
+                    // (, line 103
+                    // literal, line 103
+                    if (!(eq_s_b(1, "\u00F6")))
+                    {
+                        return false;
+                    }
+                    break;
+                
+                case 7: 
+                    // (, line 111
+                    // try, line 111
+                    v_3 = limit - cursor;
+                    do 
+                    {
+                        // (, line 111
+                        // and, line 113
+                        v_4 = limit - cursor;
+                        // or, line 112
+                        do 
+                        {
+                            v_5 = limit - cursor;
+                            do 
+                            {
+                                // call LONG, line 111
+                                if (!r_LONG())
+                                {
+                                    goto lab2_brk;
+                                }
+                                goto lab1_brk;
+                            }
+                            while (false);
 
 lab2_brk: ;
-							
-							cursor = limit - v_5;
-							// literal, line 112
-							if (!(eq_s_b(2, "ie")))
-							{
-								cursor = limit - v_3;
-								goto lab0_brk;
-							}
-						}
-						while (false);
+                            
+                            cursor = limit - v_5;
+                            // literal, line 112
+                            if (!(eq_s_b(2, "ie")))
+                            {
+                                cursor = limit - v_3;
+                                goto lab0_brk;
+                            }
+                        }
+                        while (false);
 
 lab1_brk: ;
-						
-						cursor = limit - v_4;
-						// next, line 113
-						if (cursor <= limit_backward)
-						{
-							cursor = limit - v_3;
-							goto lab0_brk;
-						}
-						cursor--;
-						// ], line 113
-						bra = cursor;
-					}
-					while (false);
+                        
+                        cursor = limit - v_4;
+                        // next, line 113
+                        if (cursor <= limit_backward)
+                        {
+                            cursor = limit - v_3;
+                            goto lab0_brk;
+                        }
+                        cursor--;
+                        // ], line 113
+                        bra = cursor;
+                    }
+                    while (false);
 
 lab0_brk: ;
-					
-					break;
-				
-				case 8: 
-					// (, line 119
-					if (!(in_grouping_b(g_v, 97, 246)))
-					{
-						return false;
-					}
-					if (!(out_grouping_b(g_v, 97, 246)))
-					{
-						return false;
-					}
-					break;
-				
-				case 9: 
-					// (, line 121
-					// literal, line 121
-					if (!(eq_s_b(1, "e")))
-					{
-						return false;
-					}
-					break;
-				}
-			// delete, line 138
-			slice_del();
-			// set ending_removed, line 139
-			B_ending_removed = true;
-			return true;
-		}
-		
-		private bool r_other_endings()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			// (, line 141
-			// setlimit, line 142
-			v_1 = limit - cursor;
-			// tomark, line 142
-			if (cursor < I_p2)
-			{
-				return false;
-			}
-			cursor = I_p2;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 142
-			// [, line 142
-			ket = cursor;
-			// substring, line 142
-			among_var = find_among_b(a_7, 14);
-			if (among_var == 0)
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			// ], line 142
-			bra = cursor;
-			limit_backward = v_2;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 146
-					// not, line 146
-					{
-						v_3 = limit - cursor;
-						do 
-						{
-							// literal, line 146
-							if (!(eq_s_b(2, "po")))
-							{
-								goto lab4_brk;
-							}
-							return false;
-						}
-						while (false);
+                    
+                    break;
+                
+                case 8: 
+                    // (, line 119
+                    if (!(in_grouping_b(g_v, 97, 246)))
+                    {
+                        return false;
+                    }
+                    if (!(out_grouping_b(g_v, 97, 246)))
+                    {
+                        return false;
+                    }
+                    break;
+                
+                case 9: 
+                    // (, line 121
+                    // literal, line 121
+                    if (!(eq_s_b(1, "e")))
+                    {
+                        return false;
+                    }
+                    break;
+                }
+            // delete, line 138
+            slice_del();
+            // set ending_removed, line 139
+            B_ending_removed = true;
+            return true;
+        }
+        
+        private bool r_other_endings()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            int v_3;
+            // (, line 141
+            // setlimit, line 142
+            v_1 = limit - cursor;
+            // tomark, line 142
+            if (cursor < I_p2)
+            {
+                return false;
+            }
+            cursor = I_p2;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 142
+            // [, line 142
+            ket = cursor;
+            // substring, line 142
+            among_var = find_among_b(a_7, 14);
+            if (among_var == 0)
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            // ], line 142
+            bra = cursor;
+            limit_backward = v_2;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 146
+                    // not, line 146
+                    {
+                        v_3 = limit - cursor;
+                        do 
+                        {
+                            // literal, line 146
+                            if (!(eq_s_b(2, "po")))
+                            {
+                                goto lab4_brk;
+                            }
+                            return false;
+                        }
+                        while (false);
 
 lab4_brk: ;
-						
-						cursor = limit - v_3;
-					}
-					break;
-				}
-			// delete, line 151
-			slice_del();
-			return true;
-		}
-		
-		private bool r_i_plural()
-		{
-			int v_1;
-			int v_2;
-			// (, line 153
-			// setlimit, line 154
-			v_1 = limit - cursor;
-			// tomark, line 154
-			if (cursor < I_p1)
-			{
-				return false;
-			}
-			cursor = I_p1;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 154
-			// [, line 154
-			ket = cursor;
-			// substring, line 154
-			if (find_among_b(a_8, 2) == 0)
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			// ], line 154
-			bra = cursor;
-			limit_backward = v_2;
-			// delete, line 158
-			slice_del();
-			return true;
-		}
-		
-		private bool r_t_plural()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			int v_6;
-			// (, line 160
-			// setlimit, line 161
-			v_1 = limit - cursor;
-			// tomark, line 161
-			if (cursor < I_p1)
-			{
-				return false;
-			}
-			cursor = I_p1;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 161
-			// [, line 162
-			ket = cursor;
-			// literal, line 162
-			if (!(eq_s_b(1, "t")))
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			// ], line 162
-			bra = cursor;
-			// test, line 162
-			v_3 = limit - cursor;
-			if (!(in_grouping_b(g_v, 97, 246)))
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			cursor = limit - v_3;
-			// delete, line 163
-			slice_del();
-			limit_backward = v_2;
-			// setlimit, line 165
-			v_4 = limit - cursor;
-			// tomark, line 165
-			if (cursor < I_p2)
-			{
-				return false;
-			}
-			cursor = I_p2;
-			v_5 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_4;
-			// (, line 165
-			// [, line 165
-			ket = cursor;
-			// substring, line 165
-			among_var = find_among_b(a_9, 2);
-			if (among_var == 0)
-			{
-				limit_backward = v_5;
-				return false;
-			}
-			// ], line 165
-			bra = cursor;
-			limit_backward = v_5;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 167
-					// not, line 167
-					{
-						v_6 = limit - cursor;
-						do 
-						{
-							// literal, line 167
-							if (!(eq_s_b(2, "po")))
-							{
-								goto lab4_brk;
-							}
-							return false;
-						}
-						while (false);
+                        
+                        cursor = limit - v_3;
+                    }
+                    break;
+                }
+            // delete, line 151
+            slice_del();
+            return true;
+        }
+        
+        private bool r_i_plural()
+        {
+            int v_1;
+            int v_2;
+            // (, line 153
+            // setlimit, line 154
+            v_1 = limit - cursor;
+            // tomark, line 154
+            if (cursor < I_p1)
+            {
+                return false;
+            }
+            cursor = I_p1;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 154
+            // [, line 154
+            ket = cursor;
+            // substring, line 154
+            if (find_among_b(a_8, 2) == 0)
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            // ], line 154
+            bra = cursor;
+            limit_backward = v_2;
+            // delete, line 158
+            slice_del();
+            return true;
+        }
+        
+        private bool r_t_plural()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            int v_6;
+            // (, line 160
+            // setlimit, line 161
+            v_1 = limit - cursor;
+            // tomark, line 161
+            if (cursor < I_p1)
+            {
+                return false;
+            }
+            cursor = I_p1;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 161
+            // [, line 162
+            ket = cursor;
+            // literal, line 162
+            if (!(eq_s_b(1, "t")))
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            // ], line 162
+            bra = cursor;
+            // test, line 162
+            v_3 = limit - cursor;
+            if (!(in_grouping_b(g_v, 97, 246)))
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            cursor = limit - v_3;
+            // delete, line 163
+            slice_del();
+            limit_backward = v_2;
+            // setlimit, line 165
+            v_4 = limit - cursor;
+            // tomark, line 165
+            if (cursor < I_p2)
+            {
+                return false;
+            }
+            cursor = I_p2;
+            v_5 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_4;
+            // (, line 165
+            // [, line 165
+            ket = cursor;
+            // substring, line 165
+            among_var = find_among_b(a_9, 2);
+            if (among_var == 0)
+            {
+                limit_backward = v_5;
+                return false;
+            }
+            // ], line 165
+            bra = cursor;
+            limit_backward = v_5;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 167
+                    // not, line 167
+                    {
+                        v_6 = limit - cursor;
+                        do 
+                        {
+                            // literal, line 167
+                            if (!(eq_s_b(2, "po")))
+                            {
+                                goto lab4_brk;
+                            }
+                            return false;
+                        }
+                        while (false);
 
 lab4_brk: ;
-						
-						cursor = limit - v_6;
-					}
-					break;
-				}
-			// delete, line 170
-			slice_del();
-			return true;
-		}
-		
-		private bool r_tidy()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			int v_6;
-			int v_7;
-			int v_8;
-			int v_9;
-			// (, line 172
-			// setlimit, line 173
-			v_1 = limit - cursor;
-			// tomark, line 173
-			if (cursor < I_p1)
-			{
-				return false;
-			}
-			cursor = I_p1;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 173
-			// do, line 174
-			v_3 = limit - cursor;
-			do 
-			{
-				// (, line 174
-				// and, line 174
-				v_4 = limit - cursor;
-				// call LONG, line 174
-				if (!r_LONG())
-				{
-					goto lab0_brk;
-				}
-				cursor = limit - v_4;
-				// (, line 174
-				// [, line 174
-				ket = cursor;
-				// next, line 174
-				if (cursor <= limit_backward)
-				{
-					goto lab0_brk;
-				}
-				cursor--;
-				// ], line 174
-				bra = cursor;
-				// delete, line 174
-				slice_del();
-			}
-			while (false);
+                        
+                        cursor = limit - v_6;
+                    }
+                    break;
+                }
+            // delete, line 170
+            slice_del();
+            return true;
+        }
+        
+        private bool r_tidy()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            int v_6;
+            int v_7;
+            int v_8;
+            int v_9;
+            // (, line 172
+            // setlimit, line 173
+            v_1 = limit - cursor;
+            // tomark, line 173
+            if (cursor < I_p1)
+            {
+                return false;
+            }
+            cursor = I_p1;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 173
+            // do, line 174
+            v_3 = limit - cursor;
+            do 
+            {
+                // (, line 174
+                // and, line 174
+                v_4 = limit - cursor;
+                // call LONG, line 174
+                if (!r_LONG())
+                {
+                    goto lab0_brk;
+                }
+                cursor = limit - v_4;
+                // (, line 174
+                // [, line 174
+                ket = cursor;
+                // next, line 174
+                if (cursor <= limit_backward)
+                {
+                    goto lab0_brk;
+                }
+                cursor--;
+                // ], line 174
+                bra = cursor;
+                // delete, line 174
+                slice_del();
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = limit - v_3;
-			// do, line 175
-			v_5 = limit - cursor;
-			do 
-			{
-				// (, line 175
-				// [, line 175
-				ket = cursor;
-				if (!(in_grouping_b(g_AEI, 97, 228)))
-				{
-					goto lab1_brk;
-				}
-				// ], line 175
-				bra = cursor;
-				if (!(out_grouping_b(g_v, 97, 246)))
-				{
-					goto lab1_brk;
-				}
-				// delete, line 175
-				slice_del();
-			}
-			while (false);
+            
+            cursor = limit - v_3;
+            // do, line 175
+            v_5 = limit - cursor;
+            do 
+            {
+                // (, line 175
+                // [, line 175
+                ket = cursor;
+                if (!(in_grouping_b(g_AEI, 97, 228)))
+                {
+                    goto lab1_brk;
+                }
+                // ], line 175
+                bra = cursor;
+                if (!(out_grouping_b(g_v, 97, 246)))
+                {
+                    goto lab1_brk;
+                }
+                // delete, line 175
+                slice_del();
+            }
+            while (false);
 
 lab1_brk: ;
-			
-			cursor = limit - v_5;
-			// do, line 176
-			v_6 = limit - cursor;
-			do 
-			{
-				// (, line 176
-				// [, line 176
-				ket = cursor;
-				// literal, line 176
-				if (!(eq_s_b(1, "j")))
-				{
-					goto lab2_brk;
-				}
-				// ], line 176
-				bra = cursor;
-				// or, line 176
-				do 
-				{
-					v_7 = limit - cursor;
-					do 
-					{
-						// literal, line 176
-						if (!(eq_s_b(1, "o")))
-						{
-							goto lab4_brk;
-						}
-						goto lab3_brk;
-					}
-					while (false);
+            
+            cursor = limit - v_5;
+            // do, line 176
+            v_6 = limit - cursor;
+            do 
+            {
+                // (, line 176
+                // [, line 176
+                ket = cursor;
+                // literal, line 176
+                if (!(eq_s_b(1, "j")))
+                {
+                    goto lab2_brk;
+                }
+                // ], line 176
+                bra = cursor;
+                // or, line 176
+                do 
+                {
+                    v_7 = limit - cursor;
+                    do 
+                    {
+                        // literal, line 176
+                        if (!(eq_s_b(1, "o")))
+                        {
+                            goto lab4_brk;
+                        }
+                        goto lab3_brk;
+                    }
+                    while (false);
 
 lab4_brk: ;
-					
-					cursor = limit - v_7;
-					// literal, line 176
-					if (!(eq_s_b(1, "u")))
-					{
-						goto lab2_brk;
-					}
-				}
-				while (false);
+                    
+                    cursor = limit - v_7;
+                    // literal, line 176
+                    if (!(eq_s_b(1, "u")))
+                    {
+                        goto lab2_brk;
+                    }
+                }
+                while (false);
 
 lab3_brk: ;
-				
-				// delete, line 176
-				slice_del();
-			}
-			while (false);
+                
+                // delete, line 176
+                slice_del();
+            }
+            while (false);
 
 lab2_brk: ;
-			
-			cursor = limit - v_6;
-			// do, line 177
-			v_8 = limit - cursor;
-			do 
-			{
-				// (, line 177
-				// [, line 177
-				ket = cursor;
-				// literal, line 177
-				if (!(eq_s_b(1, "o")))
-				{
-					goto lab5_brk;
-				}
-				// ], line 177
-				bra = cursor;
-				// literal, line 177
-				if (!(eq_s_b(1, "j")))
-				{
-					goto lab5_brk;
-				}
-				// delete, line 177
-				slice_del();
-			}
-			while (false);
+            
+            cursor = limit - v_6;
+            // do, line 177
+            v_8 = limit - cursor;
+            do 
+            {
+                // (, line 177
+                // [, line 177
+                ket = cursor;
+                // literal, line 177
+                if (!(eq_s_b(1, "o")))
+                {
+                    goto lab5_brk;
+                }
+                // ], line 177
+                bra = cursor;
+                // literal, line 177
+                if (!(eq_s_b(1, "j")))
+                {
+                    goto lab5_brk;
+                }
+                // delete, line 177
+                slice_del();
+            }
+            while (false);
 
 lab5_brk: ;
-			
-			cursor = limit - v_8;
-			limit_backward = v_2;
-			// goto, line 179
-			while (true)
-			{
-				v_9 = limit - cursor;
-				do 
-				{
-					if (!(out_grouping_b(g_v, 97, 246)))
-					{
-						goto lab7_brk;
-					}
-					cursor = limit - v_9;
-					goto golab6_brk;
-				}
-				while (false);
+            
+            cursor = limit - v_8;
+            limit_backward = v_2;
+            // goto, line 179
+            while (true)
+            {
+                v_9 = limit - cursor;
+                do 
+                {
+                    if (!(out_grouping_b(g_v, 97, 246)))
+                    {
+                        goto lab7_brk;
+                    }
+                    cursor = limit - v_9;
+                    goto golab6_brk;
+                }
+                while (false);
 
 lab7_brk: ;
-				
-				cursor = limit - v_9;
-				if (cursor <= limit_backward)
-				{
-					return false;
-				}
-				cursor--;
-			}
+                
+                cursor = limit - v_9;
+                if (cursor <= limit_backward)
+                {
+                    return false;
+                }
+                cursor--;
+            }
 
 golab6_brk: ;
-			
-			// [, line 179
-			ket = cursor;
-			// next, line 179
-			if (cursor <= limit_backward)
-			{
-				return false;
-			}
-			cursor--;
-			// ], line 179
-			bra = cursor;
-			// -> x, line 179
-			S_x = slice_to(S_x);
-			// name x, line 179
-			if (!(eq_v_b(S_x)))
-			{
-				return false;
-			}
-			// delete, line 179
-			slice_del();
-			return true;
-		}
-		
-		public override bool Stem()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			int v_6;
-			int v_7;
-			int v_8;
-			int v_9;
-			// (, line 183
-			// do, line 185
-			v_1 = cursor;
-			do 
-			{
-				// call mark_regions, line 185
-				if (!r_mark_regions())
-				{
-					goto lab0_brk;
-				}
-			}
-			while (false);
+            
+            // [, line 179
+            ket = cursor;
+            // next, line 179
+            if (cursor <= limit_backward)
+            {
+                return false;
+            }
+            cursor--;
+            // ], line 179
+            bra = cursor;
+            // -> x, line 179
+            S_x = slice_to(S_x);
+            // name x, line 179
+            if (!(eq_v_b(S_x)))
+            {
+                return false;
+            }
+            // delete, line 179
+            slice_del();
+            return true;
+        }
+        
+        public override bool Stem()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            int v_6;
+            int v_7;
+            int v_8;
+            int v_9;
+            // (, line 183
+            // do, line 185
+            v_1 = cursor;
+            do 
+            {
+                // call mark_regions, line 185
+                if (!r_mark_regions())
+                {
+                    goto lab0_brk;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = v_1;
-			// unset ending_removed, line 186
-			B_ending_removed = false;
-			// backwards, line 187
-			limit_backward = cursor; cursor = limit;
-			// (, line 187
-			// do, line 188
-			v_2 = limit - cursor;
-			do 
-			{
-				// call particle_etc, line 188
-				if (!r_particle_etc())
-				{
-					goto lab1_brk;
-				}
-			}
-			while (false);
+            
+            cursor = v_1;
+            // unset ending_removed, line 186
+            B_ending_removed = false;
+            // backwards, line 187
+            limit_backward = cursor; cursor = limit;
+            // (, line 187
+            // do, line 188
+            v_2 = limit - cursor;
+            do 
+            {
+                // call particle_etc, line 188
+                if (!r_particle_etc())
+                {
+                    goto lab1_brk;
+                }
+            }
+            while (false);
 
 lab1_brk: ;
-			
-			cursor = limit - v_2;
-			// do, line 189
-			v_3 = limit - cursor;
-			do 
-			{
-				// call possessive, line 189
-				if (!r_possessive())
-				{
-					goto lab2_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_2;
+            // do, line 189
+            v_3 = limit - cursor;
+            do 
+            {
+                // call possessive, line 189
+                if (!r_possessive())
+                {
+                    goto lab2_brk;
+                }
+            }
+            while (false);
 
 lab2_brk: ;
-			
-			cursor = limit - v_3;
-			// do, line 190
-			v_4 = limit - cursor;
-			do 
-			{
-				// call case, line 190
-				if (!r_case())
-				{
-					goto lab3_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_3;
+            // do, line 190
+            v_4 = limit - cursor;
+            do 
+            {
+                // call case, line 190
+                if (!r_case())
+                {
+                    goto lab3_brk;
+                }
+            }
+            while (false);
 
 lab3_brk: ;
-			
-			cursor = limit - v_4;
-			// do, line 191
-			v_5 = limit - cursor;
-			do 
-			{
-				// call other_endings, line 191
-				if (!r_other_endings())
-				{
-					goto lab4_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_4;
+            // do, line 191
+            v_5 = limit - cursor;
+            do 
+            {
+                // call other_endings, line 191
+                if (!r_other_endings())
+                {
+                    goto lab4_brk;
+                }
+            }
+            while (false);
 
 lab4_brk: ;
-			
-			cursor = limit - v_5;
-			// or, line 192
-			do 
-			{
-				v_6 = limit - cursor;
-				do 
-				{
-					// (, line 192
-					// Boolean test ending_removed, line 192
-					if (!(B_ending_removed))
-					{
-						goto lab6_brk;
-					}
-					// do, line 192
-					v_7 = limit - cursor;
-					do 
-					{
-						// call i_plural, line 192
-						if (!r_i_plural())
-						{
-							goto lab7_brk;
-						}
-					}
-					while (false);
+            
+            cursor = limit - v_5;
+            // or, line 192
+            do 
+            {
+                v_6 = limit - cursor;
+                do 
+                {
+                    // (, line 192
+                    // Boolean test ending_removed, line 192
+                    if (!(B_ending_removed))
+                    {
+                        goto lab6_brk;
+                    }
+                    // do, line 192
+                    v_7 = limit - cursor;
+                    do 
+                    {
+                        // call i_plural, line 192
+                        if (!r_i_plural())
+                        {
+                            goto lab7_brk;
+                        }
+                    }
+                    while (false);
 
 lab7_brk: ;
-					
-					cursor = limit - v_7;
-					goto lab6_brk;
-				}
-				while (false);
+                    
+                    cursor = limit - v_7;
+                    goto lab6_brk;
+                }
+                while (false);
 
 lab6_brk: ;
-				
-				cursor = limit - v_6;
-				// do, line 192
-				v_8 = limit - cursor;
-				do 
-				{
-					// call t_plural, line 192
-					if (!r_t_plural())
-					{
-						goto lab8_brk;
-					}
-				}
-				while (false);
+                
+                cursor = limit - v_6;
+                // do, line 192
+                v_8 = limit - cursor;
+                do 
+                {
+                    // call t_plural, line 192
+                    if (!r_t_plural())
+                    {
+                        goto lab8_brk;
+                    }
+                }
+                while (false);
 
 lab8_brk: ;
-				
-				cursor = limit - v_8;
-			}
-			while (false);
+                
+                cursor = limit - v_8;
+            }
+            while (false);
 
 lab5_brk: ;
 
-			// do, line 193
-			v_9 = limit - cursor;
-			do 
-			{
-				// call tidy, line 193
-				if (!r_tidy())
-				{
-					goto lab9_brk;
-				}
-			}
-			while (false);
+            // do, line 193
+            v_9 = limit - cursor;
+            do 
+            {
+                // call tidy, line 193
+                if (!r_tidy())
+                {
+                    goto lab9_brk;
+                }
+            }
+            while (false);
 
 lab9_brk: ;
-			
-			cursor = limit - v_9;
-			cursor = limit_backward; return true;
-		}
-	}
+            
+            cursor = limit - v_9;
+            cursor = limit_backward; return true;
+        }
+    }
 }
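
A note on the generated code above: the Finnish stemmer is machine-generated from Snowball, which is why it leans on two unusual idioms -- a do/while(false) block exited via goto, emulating Java's labeled break, and a cursor saved as an offset from 'limit' so a failed optional rule can be undone. A minimal standalone sketch of both idioms (illustrative only, not taken from the Lucene.NET sources):

    // Illustrative only -- not taken from the Lucene.NET sources. The
    // Snowball-generated stemmer above leans on two idioms: (1) a
    // do/while(false) block exited via goto, emulating Java's labeled break,
    // and (2) a cursor saved as an offset from 'limit' so a failed optional
    // rule can be undone.
    using System;

    class SnowballIdiomsDemo
    {
        static void Main()
        {
            string word = "poika";
            int limit = word.Length;
            int cursor = word.Length; // backward mode: cursor starts at the end

            int v_1 = limit - cursor; // save the cursor relative to limit

            do
            {
                // Optional rule: on failure, jump out of the block (the
                // "labN_brk" pattern) instead of returning from the method.
                if (!word.EndsWith("po"))
                {
                    goto lab_brk;
                }
                cursor -= 2; // consume the matched suffix
                Console.WriteLine("matched optional suffix");
            }
            while (false);

    lab_brk: ;

            cursor = limit - v_1; // restore the cursor, matched or not
            Console.WriteLine("cursor restored to " + cursor);
        }
    }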


[17/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
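
The conversion itself is mechanical and can be scripted. As a rough illustration only -- this is not the tool actually used for this change -- a pass that expands each leading tab to four spaces might look like:

    // Hypothetical sketch of a leading-tab expansion pass; NOT the tool used
    // for this commit, just an illustration of the idea.
    using System;
    using System.IO;
    using System.Text;

    class TabsToSpaces
    {
        static void Main(string[] args)
        {
            // args[0] is assumed to be the root of the source tree.
            foreach (string path in Directory.GetFiles(args[0], "*.cs", SearchOption.AllDirectories))
            {
                string[] lines = File.ReadAllLines(path);
                for (int i = 0; i < lines.Length; i++)
                {
                    var expanded = new StringBuilder();
                    int j = 0;
                    // Expand only leading whitespace; tabs embedded in string
                    // literals or comments are left alone.
                    while (j < lines[i].Length && (lines[i][j] == '\t' || lines[i][j] == ' '))
                    {
                        expanded.Append(lines[i][j] == '\t' ? "    " : " ");
                        j++;
                    }
                    lines[i] = expanded.ToString() + lines[i].Substring(j);
                }
                File.WriteAllLines(path, lines);
            }
        }
    }
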
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DirectoryReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DirectoryReader.cs b/src/core/Index/DirectoryReader.cs
index 574448d..5fdd3c0 100644
--- a/src/core/Index/DirectoryReader.cs
+++ b/src/core/Index/DirectoryReader.cs
@@ -572,9 +572,9 @@ namespace Lucene.Net.Index
 
         class AnonymousFindSegmentsFile : SegmentInfos.FindSegmentsFile
         {
-        	readonly DirectoryReader enclosingInstance;
-        	readonly bool openReadOnly;
-        	readonly Directory dir;
+            readonly DirectoryReader enclosingInstance;
+            readonly bool openReadOnly;
+            readonly Directory dir;
             public AnonymousFindSegmentsFile(Directory directory, bool openReadOnly, DirectoryReader dirReader) : base(directory)
             {
                 this.dir = directory;
@@ -664,7 +664,7 @@ namespace Lucene.Net.Index
             {
                 // check cache
                 int n = subReaders.Sum(t => t.NumDocs()); // cache miss--recompute
-            	numDocs = n;
+                numDocs = n;
             }
             return numDocs;
         }
@@ -713,9 +713,9 @@ namespace Lucene.Net.Index
         protected internal override void  DoUndeleteAll()
         {
             foreach (SegmentReader t in subReaders)
-            	t.UndeleteAll();
+                t.UndeleteAll();
 
-        	hasDeletions = false;
+            hasDeletions = false;
             numDocs = - 1; // invalidate cache
         }
         
@@ -755,7 +755,7 @@ namespace Lucene.Net.Index
         public override bool HasNorms(System.String field)
         {
             EnsureOpen();
-        	return subReaders.Any(t => t.HasNorms(field));
+            return subReaders.Any(t => t.HasNorms(field));
         }
         
         public override byte[] Norms(System.String field)
@@ -926,9 +926,9 @@ namespace Lucene.Net.Index
                 try
                 {
                     foreach (SegmentReader t in subReaders)
-                    	t.Commit();
+                        t.Commit();
 
-                	// Sync all files we just wrote
+                    // Sync all files we just wrote
                     foreach(string fileName in segmentInfos.Files(internalDirectory, false))
                     {
                         if(!synced.Contains(fileName))
@@ -980,23 +980,23 @@ namespace Lucene.Net.Index
         
         internal virtual void  StartCommit()
         {
-        	rollbackHasChanges = hasChanges;
-        	foreach (SegmentReader t in subReaders)
-        	{
-        		t.StartCommit();
-        	}
+            rollbackHasChanges = hasChanges;
+            foreach (SegmentReader t in subReaders)
+            {
+                t.StartCommit();
+            }
         }
 
-    	internal virtual void  RollbackCommit()
-    	{
-    		hasChanges = rollbackHasChanges;
-    		foreach (SegmentReader t in subReaders)
-    		{
-    			t.RollbackCommit();
-    		}
-    	}
+        internal virtual void  RollbackCommit()
+        {
+            hasChanges = rollbackHasChanges;
+            foreach (SegmentReader t in subReaders)
+            {
+                t.RollbackCommit();
+            }
+        }
 
-    	public override IDictionary<string, string> CommitUserData
+        public override IDictionary<string, string> CommitUserData
         {
             get
             {
@@ -1027,19 +1027,19 @@ namespace Lucene.Net.Index
                 normsCache = null;
                 foreach (SegmentReader t in subReaders)
                 {
-					// try to close each reader, even if an exception is thrown
-                	try
-                	{
-                		t.DecRef();
-                	}
-                	catch (System.IO.IOException e)
-                	{
-                		if (ioe == null)
-                			ioe = e;
-                	}
+                    // try to close each reader, even if an exception is thrown
+                    try
+                    {
+                        t.DecRef();
+                    }
+                    catch (System.IO.IOException e)
+                    {
+                        if (ioe == null)
+                            ioe = e;
+                    }
                 }
 
-            	// NOTE: only needed in case someone had asked for
+                // NOTE: only needed in case someone had asked for
                 // FieldCache for top-level reader (which is generally
                 // not a good idea):
                 Search.FieldCache_Fields.DEFAULT.Purge(this);
@@ -1111,31 +1111,31 @@ namespace Lucene.Net.Index
             
             foreach (string fileName in files)
             {
-            	if (fileName.StartsWith(IndexFileNames.SEGMENTS) && !fileName.Equals(IndexFileNames.SEGMENTS_GEN) && SegmentInfos.GenerationFromSegmentsFileName(fileName) < currentGen)
-            	{
+                if (fileName.StartsWith(IndexFileNames.SEGMENTS) && !fileName.Equals(IndexFileNames.SEGMENTS_GEN) && SegmentInfos.GenerationFromSegmentsFileName(fileName) < currentGen)
+                {
                     
-            		var sis = new SegmentInfos();
-            		try
-            		{
-            			// IOException is allowed to be thrown here, in case
-            			// segments_N is corrupt
-            			sis.Read(dir, fileName);
-            		}
-            		catch (System.IO.FileNotFoundException)
-            		{
-            			// LUCENE-948: on NFS (and maybe others), if
-            			// you have writers switching back and forth
-            			// between machines, it's very likely that the
-            			// dir listing will be stale and will claim a
-            			// file segments_X exists when in fact it
-            			// doesn't.  So, we catch this and handle it
-            			// as if the file does not exist
-            			sis = null;
-            		}
+                    var sis = new SegmentInfos();
+                    try
+                    {
+                        // IOException is allowed to be thrown here, in case
+                        // segments_N is corrupt
+                        sis.Read(dir, fileName);
+                    }
+                    catch (System.IO.FileNotFoundException)
+                    {
+                        // LUCENE-948: on NFS (and maybe others), if
+                        // you have writers switching back and forth
+                        // between machines, it's very likely that the
+                        // dir listing will be stale and will claim a
+                        // file segments_X exists when in fact it
+                        // doesn't.  So, we catch this and handle it
+                        // as if the file does not exist
+                        sis = null;
+                    }
                     
-            		if (sis != null)
-            			commits.Add(new ReaderCommit(sis, dir));
-            	}
+                    if (sis != null)
+                        commits.Add(new ReaderCommit(sis, dir));
+                }
             }
             
             return commits;
@@ -1144,12 +1144,12 @@ namespace Lucene.Net.Index
         private sealed class ReaderCommit:IndexCommit
         {
             private readonly String segmentsFileName;
-        	private readonly ICollection<string> files;
-        	private readonly Directory dir;
-        	private readonly long generation;
-        	private readonly long version;
-        	private readonly bool isOptimized;
-        	private readonly IDictionary<string, string> userData;
+            private readonly ICollection<string> files;
+            private readonly Directory dir;
+            private readonly long generation;
+            private readonly long version;
+            private readonly bool isOptimized;
+            private readonly IDictionary<string, string> userData;
             
             internal ReaderCommit(SegmentInfos infos, Directory dir)
             {
@@ -1230,10 +1230,10 @@ namespace Lucene.Net.Index
                 {
                     IndexReader reader = readers[i];
 
-                	TermEnum termEnum = t != null ? reader.Terms(t) : reader.Terms();
+                    TermEnum termEnum = t != null ? reader.Terms(t) : reader.Terms();
 
-                	var smi = new SegmentMergeInfo(starts[i], termEnum, reader) {ord = i};
-                	if (t == null?smi.Next():termEnum.Term != null)
+                    var smi = new SegmentMergeInfo(starts[i], termEnum, reader) {ord = i};
+                    if (t == null?smi.Next():termEnum.Term != null)
                         queue.Add(smi);
                     // initialize queue
                     else
@@ -1250,12 +1250,12 @@ namespace Lucene.Net.Index
             {
                 foreach (SegmentMergeInfo smi in matchingSegments)
                 {
-                	if (smi == null)
-                		break;
-                	if (smi.Next())
-                		queue.Add(smi);
-                	else
-                		smi.Dispose(); // done with segment
+                    if (smi == null)
+                        break;
+                    if (smi.Next())
+                        queue.Add(smi);
+                    else
+                        smi.Dispose(); // done with segment
                 }
                 
                 int numMatchingSegments = 0;
@@ -1353,13 +1353,13 @@ namespace Lucene.Net.Index
             public virtual void  Seek(TermEnum termEnum)
             {
                 Seek(termEnum.Term);
-            	var multiTermEnum = termEnum as MultiTermEnum;
-            	if (multiTermEnum != null)
-            	{
-            		tenum = multiTermEnum;
-            		if (topReader != tenum.topReader)
-            			tenum = null;
-            	}
+                var multiTermEnum = termEnum as MultiTermEnum;
+                if (multiTermEnum != null)
+                {
+                    tenum = multiTermEnum;
+                    if (topReader != tenum.topReader)
+                        tenum = null;
+                }
             }
             
             public virtual bool Next()
@@ -1469,7 +1469,7 @@ namespace Lucene.Net.Index
             private TermDocs TermDocs(int i)
             {
                 TermDocs result = readerTermDocs[i] ?? (readerTermDocs[i] = TermDocs(readers[i]));
-            	if (smi != null)
+                if (smi != null)
                 {
                     System.Diagnostics.Debug.Assert((smi.ord == i));
                     System.Diagnostics.Debug.Assert((smi.termEnum.Term.Equals(term)));
@@ -1503,8 +1503,8 @@ namespace Lucene.Net.Index
                 {
                     foreach (TermDocs t in readerTermDocs)
                     {
-                    	if (t != null)
-                    		t.Close();
+                        if (t != null)
+                            t.Close();
                     }
                 }
             }
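
The DirectoryReader hunk above keeps the LUCENE-948 behavior: on NFS and similar filesystems a directory listing can be stale and name a segments_X file that no longer exists, so a FileNotFoundException while reading it is treated as "entry absent" rather than as an error. A minimal sketch of that defensive-read pattern (hypothetical helper, not a Lucene.Net API):

    // Minimal sketch of the defensive read above: a stale directory listing
    // (e.g. over NFS, see LUCENE-948) may name a segments_N file that is
    // already gone, so "not found" means "skip this entry", not "fail".
    // TryRead is a hypothetical helper, not part of Lucene.Net.
    using System;
    using System.IO;

    static class StaleListingDemo
    {
        // Returns the file's bytes, or null if the listing entry was stale.
        public static byte[] TryRead(string path)
        {
            try
            {
                return File.ReadAllBytes(path);
            }
            catch (FileNotFoundException)
            {
                return null; // stale entry; the caller simply ignores it
            }
        }

        static void Main()
        {
            byte[] data = TryRead("segments_4");
            Console.WriteLine(data == null ? "stale entry skipped" : "read " + data.Length + " bytes");
        }
    }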

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocConsumer.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocConsumer.cs b/src/core/Index/DocConsumer.cs
index 238e38c..e5ea817 100644
--- a/src/core/Index/DocConsumer.cs
+++ b/src/core/Index/DocConsumer.cs
@@ -19,13 +19,13 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	abstract class DocConsumer
-	{
-		public abstract DocConsumerPerThread AddThread(DocumentsWriterThreadState perThread);
-		public abstract void  Flush(System.Collections.Generic.ICollection<DocConsumerPerThread> threads, SegmentWriteState state);
-		public abstract void  CloseDocStore(SegmentWriteState state);
-		public abstract void  Abort();
-		public abstract bool FreeRAM();
-	}
+    
+    abstract class DocConsumer
+    {
+        public abstract DocConsumerPerThread AddThread(DocumentsWriterThreadState perThread);
+        public abstract void  Flush(System.Collections.Generic.ICollection<DocConsumerPerThread> threads, SegmentWriteState state);
+        public abstract void  CloseDocStore(SegmentWriteState state);
+        public abstract void  Abort();
+        public abstract bool FreeRAM();
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocConsumerPerThread.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocConsumerPerThread.cs b/src/core/Index/DocConsumerPerThread.cs
index 7c7ed02..d92457d 100644
--- a/src/core/Index/DocConsumerPerThread.cs
+++ b/src/core/Index/DocConsumerPerThread.cs
@@ -19,19 +19,19 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	abstract class DocConsumerPerThread
-	{
-		
-		/// <summary>Process the document. If there is
-		/// something for this document to be done in docID order,
-		/// you should encapsulate that as a
-		/// DocumentsWriter.DocWriter and return it.
-		/// DocumentsWriter then calls finish() on this object
-		/// when it's its turn. 
-		/// </summary>
-		public abstract DocumentsWriter.DocWriter ProcessDocument();
-		
-		public abstract void  Abort();
-	}
+    
+    abstract class DocConsumerPerThread
+    {
+        
+        /// <summary>Process the document. If there is
+        /// something for this document to be done in docID order,
+        /// you should encapsulate that as a
+        /// DocumentsWriter.DocWriter and return it.
+        /// DocumentsWriter then calls finish() on this object
+        /// when it's its turn. 
+        /// </summary>
+        public abstract DocumentsWriter.DocWriter ProcessDocument();
+        
+        public abstract void  Abort();
+    }
 }
\ No newline at end of file
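
The doc comment on ProcessDocument() above describes a deferred-write contract: a per-thread consumer may hand back an object representing work to be completed later, and the coordinator calls Finish() on it in docID order. A generic sketch of that contract, with all names hypothetical:

    // Generic illustration of the contract described above: per-thread
    // processing returns a deferred writer whose Finish() runs later, in
    // docID order. All names here are hypothetical.
    using System;
    using System.Collections.Generic;
    using System.Linq;

    abstract class DeferredDocWriter
    {
        public int DocID;
        public abstract void Finish(); // called when it is this doc's turn
    }

    class UppercaseWriter : DeferredDocWriter
    {
        private readonly string text;
        public UppercaseWriter(int docId, string t) { DocID = docId; text = t; }
        public override void Finish() { Console.WriteLine(DocID + ": " + text.ToUpper()); }
    }

    class OrderedFinishDemo
    {
        static void Main()
        {
            // Worker threads may complete documents out of order...
            var pending = new List<DeferredDocWriter>
            {
                new UppercaseWriter(2, "gamma"),
                new UppercaseWriter(0, "alpha"),
                new UppercaseWriter(1, "beta"),
            };

            // ...but the coordinator finishes them strictly by docID.
            foreach (DeferredDocWriter w in pending.OrderBy(d => d.DocID))
                w.Finish();
        }
    }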

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocFieldConsumer.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldConsumer.cs b/src/core/Index/DocFieldConsumer.cs
index 7fc59da..ef4abaf 100644
--- a/src/core/Index/DocFieldConsumer.cs
+++ b/src/core/Index/DocFieldConsumer.cs
@@ -20,37 +20,37 @@ using System.Collections.Generic;
 
 namespace Lucene.Net.Index
 {
-	
-	abstract class DocFieldConsumer
-	{
-		
-		internal FieldInfos fieldInfos;
-		
-		/// <summary>Called when DocumentsWriter decides to create a new
-		/// segment 
-		/// </summary>
+    
+    abstract class DocFieldConsumer
+    {
+        
+        internal FieldInfos fieldInfos;
+        
+        /// <summary>Called when DocumentsWriter decides to create a new
+        /// segment 
+        /// </summary>
         public abstract void Flush(IDictionary<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state);
-		
-		/// <summary>Called when DocumentsWriter decides to close the doc
-		/// stores 
-		/// </summary>
-		public abstract void  CloseDocStore(SegmentWriteState state);
-		
-		/// <summary>Called when an aborting exception is hit </summary>
-		public abstract void  Abort();
-		
-		/// <summary>Add a new thread </summary>
-		public abstract DocFieldConsumerPerThread AddThread(DocFieldProcessorPerThread docFieldProcessorPerThread);
-		
-		/// <summary>Called when DocumentsWriter is using too much RAM.
-		/// The consumer should free RAM, if possible, returning
-		/// true if any RAM was in fact freed. 
-		/// </summary>
-		public abstract bool FreeRAM();
-		
-		internal virtual void  SetFieldInfos(FieldInfos fieldInfos)
-		{
-			this.fieldInfos = fieldInfos;
-		}
-	}
+        
+        /// <summary>Called when DocumentsWriter decides to close the doc
+        /// stores 
+        /// </summary>
+        public abstract void  CloseDocStore(SegmentWriteState state);
+        
+        /// <summary>Called when an aborting exception is hit </summary>
+        public abstract void  Abort();
+        
+        /// <summary>Add a new thread </summary>
+        public abstract DocFieldConsumerPerThread AddThread(DocFieldProcessorPerThread docFieldProcessorPerThread);
+        
+        /// <summary>Called when DocumentsWriter is using too much RAM.
+        /// The consumer should free RAM, if possible, returning
+        /// true if any RAM was in fact freed. 
+        /// </summary>
+        public abstract bool FreeRAM();
+        
+        internal virtual void  SetFieldInfos(FieldInfos fieldInfos)
+        {
+            this.fieldInfos = fieldInfos;
+        }
+    }
 }
\ No newline at end of file
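
FreeRAM() in the hunk above is expected to release memory if it can and to report truthfully whether anything was freed. A minimal sketch of a consumer honoring that contract (names are hypothetical):

    // Minimal sketch of the FreeRAM() contract above: release what you can
    // and report truthfully whether anything was freed, so the caller knows
    // whether the call helped. Names are hypothetical.
    using System;
    using System.Collections.Generic;

    class BufferingConsumer
    {
        private readonly List<byte[]> recycledBuffers = new List<byte[]>();

        public bool FreeRAM()
        {
            bool anyFreed = recycledBuffers.Count > 0;
            recycledBuffers.Clear(); // drop only recycled buffers; live state stays
            return anyFreed;
        }

        static void Main()
        {
            var c = new BufferingConsumer();
            c.recycledBuffers.Add(new byte[1024]);
            Console.WriteLine(c.FreeRAM()); // True:  something was released
            Console.WriteLine(c.FreeRAM()); // False: nothing left to release
        }
    }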

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocFieldConsumerPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldConsumerPerField.cs b/src/core/Index/DocFieldConsumerPerField.cs
index 27636e2..54d4743 100644
--- a/src/core/Index/DocFieldConsumerPerField.cs
+++ b/src/core/Index/DocFieldConsumerPerField.cs
@@ -20,11 +20,11 @@ using Lucene.Net.Documents;
 
 namespace Lucene.Net.Index
 {
-	
-	abstract class DocFieldConsumerPerField
-	{
-		/// <summary>Processes all occurrences of a single field </summary>
-		public abstract void  ProcessFields(IFieldable[] fields, int count);
-		public abstract void  Abort();
-	}
+    
+    abstract class DocFieldConsumerPerField
+    {
+        /// <summary>Processes all occurrences of a single field </summary>
+        public abstract void  ProcessFields(IFieldable[] fields, int count);
+        public abstract void  Abort();
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocFieldConsumerPerThread.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldConsumerPerThread.cs b/src/core/Index/DocFieldConsumerPerThread.cs
index 8f533ac..1b8b58f 100644
--- a/src/core/Index/DocFieldConsumerPerThread.cs
+++ b/src/core/Index/DocFieldConsumerPerThread.cs
@@ -19,12 +19,12 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	abstract class DocFieldConsumerPerThread
-	{
-		public abstract void  StartDocument();
-		public abstract DocumentsWriter.DocWriter FinishDocument();
-		public abstract DocFieldConsumerPerField AddField(FieldInfo fi);
-		public abstract void  Abort();
-	}
+    
+    abstract class DocFieldConsumerPerThread
+    {
+        public abstract void  StartDocument();
+        public abstract DocumentsWriter.DocWriter FinishDocument();
+        public abstract DocFieldConsumerPerField AddField(FieldInfo fi);
+        public abstract void  Abort();
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocFieldConsumers.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldConsumers.cs b/src/core/Index/DocFieldConsumers.cs
index 61b9b1d..11db7ec 100644
--- a/src/core/Index/DocFieldConsumers.cs
+++ b/src/core/Index/DocFieldConsumers.cs
@@ -22,200 +22,200 @@ using ArrayUtil = Lucene.Net.Util.ArrayUtil;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>This is just a "splitter" class: it lets you wrap two
-	/// DocFieldConsumer instances as a single consumer. 
-	/// </summary>
-	
-	sealed class DocFieldConsumers : DocFieldConsumer
-	{
-		private void  InitBlock()
-		{
-			docFreeList = new PerDoc[1];
-		}
-		internal DocFieldConsumer one;
-		internal DocFieldConsumer two;
-		
-		public DocFieldConsumers(DocFieldConsumer one, DocFieldConsumer two)
-		{
-			InitBlock();
-			this.one = one;
-			this.two = two;
-		}
-		
-		internal override void  SetFieldInfos(FieldInfos fieldInfos)
-		{
-			base.SetFieldInfos(fieldInfos);
-			one.SetFieldInfos(fieldInfos);
-			two.SetFieldInfos(fieldInfos);
-		}
+    
+    /// <summary>This is just a "splitter" class: it lets you wrap two
+    /// DocFieldConsumer instances as a single consumer. 
+    /// </summary>
+    
+    sealed class DocFieldConsumers : DocFieldConsumer
+    {
+        private void  InitBlock()
+        {
+            docFreeList = new PerDoc[1];
+        }
+        internal DocFieldConsumer one;
+        internal DocFieldConsumer two;
+        
+        public DocFieldConsumers(DocFieldConsumer one, DocFieldConsumer two)
+        {
+            InitBlock();
+            this.one = one;
+            this.two = two;
+        }
+        
+        internal override void  SetFieldInfos(FieldInfos fieldInfos)
+        {
+            base.SetFieldInfos(fieldInfos);
+            one.SetFieldInfos(fieldInfos);
+            two.SetFieldInfos(fieldInfos);
+        }
 
         public override void Flush(IDictionary<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state)
-		{
+        {
 
             var oneThreadsAndFields = new HashMap<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>>();
-			var twoThreadsAndFields = new HashMap<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>>();
-			
-			foreach(var entry in threadsAndFields)
-			{
-				DocFieldConsumersPerThread perThread = (DocFieldConsumersPerThread) entry.Key;
+            var twoThreadsAndFields = new HashMap<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>>();
+            
+            foreach(var entry in threadsAndFields)
+            {
+                DocFieldConsumersPerThread perThread = (DocFieldConsumersPerThread) entry.Key;
                 ICollection<DocFieldConsumerPerField> fields = entry.Value;
 
                 IEnumerator<DocFieldConsumerPerField> fieldsIt = fields.GetEnumerator();
                 ICollection<DocFieldConsumerPerField> oneFields = new HashSet<DocFieldConsumerPerField>();
                 ICollection<DocFieldConsumerPerField> twoFields = new HashSet<DocFieldConsumerPerField>();
-				while (fieldsIt.MoveNext())
-				{
-					DocFieldConsumersPerField perField = (DocFieldConsumersPerField) fieldsIt.Current;
-					oneFields.Add(perField.one);
-					twoFields.Add(perField.two);
-				}
-				
-				oneThreadsAndFields[perThread.one] = oneFields;
-				twoThreadsAndFields[perThread.two] = twoFields;
-			}
-			
-			
-			one.Flush(oneThreadsAndFields, state);
-			two.Flush(twoThreadsAndFields, state);
-		}
+                while (fieldsIt.MoveNext())
+                {
+                    DocFieldConsumersPerField perField = (DocFieldConsumersPerField) fieldsIt.Current;
+                    oneFields.Add(perField.one);
+                    twoFields.Add(perField.two);
+                }
+                
+                oneThreadsAndFields[perThread.one] = oneFields;
+                twoThreadsAndFields[perThread.two] = twoFields;
+            }
+            
+            
+            one.Flush(oneThreadsAndFields, state);
+            two.Flush(twoThreadsAndFields, state);
+        }
 
-	    public override void  CloseDocStore(SegmentWriteState state)
-		{
-			try
-			{
-				one.CloseDocStore(state);
-			}
-			finally
-			{
-				two.CloseDocStore(state);
-			}
-		}
-		
-		public override void  Abort()
-		{
-			try
-			{
-				one.Abort();
-			}
-			finally
-			{
-				two.Abort();
-			}
-		}
-		
-		public override bool FreeRAM()
-		{
-			bool any = one.FreeRAM();
-			any |= two.FreeRAM();
-			return any;
-		}
-		
-		public override DocFieldConsumerPerThread AddThread(DocFieldProcessorPerThread docFieldProcessorPerThread)
-		{
-			return new DocFieldConsumersPerThread(docFieldProcessorPerThread, this, one.AddThread(docFieldProcessorPerThread), two.AddThread(docFieldProcessorPerThread));
-		}
-		
-		internal PerDoc[] docFreeList;
-		internal int freeCount;
-		internal int allocCount;
-		
-		internal PerDoc GetPerDoc()
-		{
-			lock (this)
-			{
-				if (freeCount == 0)
-				{
-					allocCount++;
-					if (allocCount > docFreeList.Length)
-					{
-						// Grow our free list up front to make sure we have
-						// enough space to recycle all outstanding PerDoc
-						// instances
-						System.Diagnostics.Debug.Assert(allocCount == 1 + docFreeList.Length);
-						docFreeList = new PerDoc[ArrayUtil.GetNextSize(allocCount)];
-					}
-					return new PerDoc(this);
-				}
-				else
-					return docFreeList[--freeCount];
-			}
-		}
-		
-		internal void  FreePerDoc(PerDoc perDoc)
-		{
-			lock (this)
-			{
-				System.Diagnostics.Debug.Assert(freeCount < docFreeList.Length);
-				docFreeList[freeCount++] = perDoc;
-			}
-		}
-		
-		internal class PerDoc:DocumentsWriter.DocWriter
-		{
-			public PerDoc(DocFieldConsumers enclosingInstance)
-			{
-				InitBlock(enclosingInstance);
-			}
-			private void  InitBlock(DocFieldConsumers enclosingInstance)
-			{
-				this.enclosingInstance = enclosingInstance;
-			}
-			private DocFieldConsumers enclosingInstance;
-			public DocFieldConsumers Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			
-			internal DocumentsWriter.DocWriter one;
-			internal DocumentsWriter.DocWriter two;
-			
-			public override long SizeInBytes()
-			{
-				return one.SizeInBytes() + two.SizeInBytes();
-			}
-			
-			public override void  Finish()
-			{
-				try
-				{
-					try
-					{
-						one.Finish();
-					}
-					finally
-					{
-						two.Finish();
-					}
-				}
-				finally
-				{
-					Enclosing_Instance.FreePerDoc(this);
-				}
-			}
-			
-			public override void  Abort()
-			{
-				try
-				{
-					try
-					{
-						one.Abort();
-					}
-					finally
-					{
-						two.Abort();
-					}
-				}
-				finally
-				{
-					Enclosing_Instance.FreePerDoc(this);
-				}
-			}
-		}
-	}
+        public override void  CloseDocStore(SegmentWriteState state)
+        {
+            try
+            {
+                one.CloseDocStore(state);
+            }
+            finally
+            {
+                two.CloseDocStore(state);
+            }
+        }
+        
+        public override void  Abort()
+        {
+            try
+            {
+                one.Abort();
+            }
+            finally
+            {
+                two.Abort();
+            }
+        }
+        
+        public override bool FreeRAM()
+        {
+            bool any = one.FreeRAM();
+            any |= two.FreeRAM();
+            return any;
+        }
+        
+        public override DocFieldConsumerPerThread AddThread(DocFieldProcessorPerThread docFieldProcessorPerThread)
+        {
+            return new DocFieldConsumersPerThread(docFieldProcessorPerThread, this, one.AddThread(docFieldProcessorPerThread), two.AddThread(docFieldProcessorPerThread));
+        }
+        
+        internal PerDoc[] docFreeList;
+        internal int freeCount;
+        internal int allocCount;
+        
+        internal PerDoc GetPerDoc()
+        {
+            lock (this)
+            {
+                if (freeCount == 0)
+                {
+                    allocCount++;
+                    if (allocCount > docFreeList.Length)
+                    {
+                        // Grow our free list up front to make sure we have
+                        // enough space to recycle all outstanding PerDoc
+                        // instances
+                        System.Diagnostics.Debug.Assert(allocCount == 1 + docFreeList.Length);
+                        docFreeList = new PerDoc[ArrayUtil.GetNextSize(allocCount)];
+                    }
+                    return new PerDoc(this);
+                }
+                else
+                    return docFreeList[--freeCount];
+            }
+        }
+        
+        internal void  FreePerDoc(PerDoc perDoc)
+        {
+            lock (this)
+            {
+                System.Diagnostics.Debug.Assert(freeCount < docFreeList.Length);
+                docFreeList[freeCount++] = perDoc;
+            }
+        }
+        
+        internal class PerDoc:DocumentsWriter.DocWriter
+        {
+            public PerDoc(DocFieldConsumers enclosingInstance)
+            {
+                InitBlock(enclosingInstance);
+            }
+            private void  InitBlock(DocFieldConsumers enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
+            private DocFieldConsumers enclosingInstance;
+            public DocFieldConsumers Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+                
+            }
+            
+            internal DocumentsWriter.DocWriter one;
+            internal DocumentsWriter.DocWriter two;
+            
+            public override long SizeInBytes()
+            {
+                return one.SizeInBytes() + two.SizeInBytes();
+            }
+            
+            public override void  Finish()
+            {
+                try
+                {
+                    try
+                    {
+                        one.Finish();
+                    }
+                    finally
+                    {
+                        two.Finish();
+                    }
+                }
+                finally
+                {
+                    Enclosing_Instance.FreePerDoc(this);
+                }
+            }
+            
+            public override void  Abort()
+            {
+                try
+                {
+                    try
+                    {
+                        one.Abort();
+                    }
+                    finally
+                    {
+                        two.Abort();
+                    }
+                }
+                finally
+                {
+                    Enclosing_Instance.FreePerDoc(this);
+                }
+            }
+        }
+    }
 }
\ No newline at end of file
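
GetPerDoc()/FreePerDoc() above implement a lock-protected free list that is grown ahead of demand so that every PerDoc ever handed out can be recycled. A generic sketch of the same scheme; the names and the doubling growth policy are assumptions (the original sizes the array with ArrayUtil.GetNextSize):

    // Generic sketch of the GetPerDoc()/FreePerDoc() recycling scheme above:
    // a lock-protected free list grown ahead of demand so that every object
    // handed out can later be returned. The doubling policy is an assumption.
    using System;

    class PerDoc
    {
        public void Reset() { /* clear per-document state before reuse */ }
    }

    class PerDocPool
    {
        private PerDoc[] freeList = new PerDoc[1];
        private int freeCount;
        private int allocCount;

        public PerDoc Get()
        {
            lock (this)
            {
                if (freeCount == 0)
                {
                    allocCount++;
                    if (allocCount > freeList.Length)
                    {
                        // Grow up front so the list can hold every outstanding
                        // instance once they all come back.
                        var grown = new PerDoc[allocCount * 2];
                        Array.Copy(freeList, grown, freeList.Length);
                        freeList = grown;
                    }
                    return new PerDoc();
                }
                return freeList[--freeCount];
            }
        }

        public void Free(PerDoc doc)
        {
            lock (this)
            {
                doc.Reset();
                freeList[freeCount++] = doc;
            }
        }
    }

    class PoolDemo
    {
        static void Main()
        {
            var pool = new PerDocPool();
            PerDoc a = pool.Get();
            pool.Free(a);
            Console.WriteLine(ReferenceEquals(a, pool.Get())); // True: recycled
        }
    }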

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocFieldConsumersPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldConsumersPerField.cs b/src/core/Index/DocFieldConsumersPerField.cs
index 71e96e0..e8ae3ea 100644
--- a/src/core/Index/DocFieldConsumersPerField.cs
+++ b/src/core/Index/DocFieldConsumersPerField.cs
@@ -20,37 +20,37 @@ using Lucene.Net.Documents;
 
 namespace Lucene.Net.Index
 {
-	
-	sealed class DocFieldConsumersPerField:DocFieldConsumerPerField
-	{
-		
-		internal DocFieldConsumerPerField one;
-		internal DocFieldConsumerPerField two;
-		internal DocFieldConsumersPerThread perThread;
-		
-		public DocFieldConsumersPerField(DocFieldConsumersPerThread perThread, DocFieldConsumerPerField one, DocFieldConsumerPerField two)
-		{
-			this.perThread = perThread;
-			this.one = one;
-			this.two = two;
-		}
-		
-		public override void  ProcessFields(IFieldable[] fields, int count)
-		{
-			one.ProcessFields(fields, count);
-			two.ProcessFields(fields, count);
-		}
-		
-		public override void  Abort()
-		{
-			try
-			{
-				one.Abort();
-			}
-			finally
-			{
-				two.Abort();
-			}
-		}
-	}
+    
+    sealed class DocFieldConsumersPerField:DocFieldConsumerPerField
+    {
+        
+        internal DocFieldConsumerPerField one;
+        internal DocFieldConsumerPerField two;
+        internal DocFieldConsumersPerThread perThread;
+        
+        public DocFieldConsumersPerField(DocFieldConsumersPerThread perThread, DocFieldConsumerPerField one, DocFieldConsumerPerField two)
+        {
+            this.perThread = perThread;
+            this.one = one;
+            this.two = two;
+        }
+        
+        public override void  ProcessFields(IFieldable[] fields, int count)
+        {
+            one.ProcessFields(fields, count);
+            two.ProcessFields(fields, count);
+        }
+        
+        public override void  Abort()
+        {
+            try
+            {
+                one.Abort();
+            }
+            finally
+            {
+                two.Abort();
+            }
+        }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocFieldConsumersPerThread.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldConsumersPerThread.cs b/src/core/Index/DocFieldConsumersPerThread.cs
index 7098966..eea1378 100644
--- a/src/core/Index/DocFieldConsumersPerThread.cs
+++ b/src/core/Index/DocFieldConsumersPerThread.cs
@@ -19,64 +19,64 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	sealed class DocFieldConsumersPerThread:DocFieldConsumerPerThread
-	{
-		
-		internal DocFieldConsumerPerThread one;
-		internal DocFieldConsumerPerThread two;
-		internal DocFieldConsumers parent;
-		internal DocumentsWriter.DocState docState;
-		
-		public DocFieldConsumersPerThread(DocFieldProcessorPerThread docFieldProcessorPerThread, DocFieldConsumers parent, DocFieldConsumerPerThread one, DocFieldConsumerPerThread two)
-		{
-			this.parent = parent;
-			this.one = one;
-			this.two = two;
-			docState = docFieldProcessorPerThread.docState;
-		}
-		
-		public override void  StartDocument()
-		{
-			one.StartDocument();
-			two.StartDocument();
-		}
-		
-		public override void  Abort()
-		{
-			try
-			{
-				one.Abort();
-			}
-			finally
-			{
-				two.Abort();
-			}
-		}
-		
-		public override DocumentsWriter.DocWriter FinishDocument()
-		{
-			DocumentsWriter.DocWriter oneDoc = one.FinishDocument();
-			DocumentsWriter.DocWriter twoDoc = two.FinishDocument();
-			if (oneDoc == null)
-				return twoDoc;
-			else if (twoDoc == null)
-				return oneDoc;
-			else
-			{
-				DocFieldConsumers.PerDoc both = parent.GetPerDoc();
-				both.docID = docState.docID;
-				System.Diagnostics.Debug.Assert(oneDoc.docID == docState.docID);
-				System.Diagnostics.Debug.Assert(twoDoc.docID == docState.docID);
-				both.one = oneDoc;
-				both.two = twoDoc;
-				return both;
-			}
-		}
-		
-		public override DocFieldConsumerPerField AddField(FieldInfo fi)
-		{
-			return new DocFieldConsumersPerField(this, one.AddField(fi), two.AddField(fi));
-		}
-	}
+    
+    sealed class DocFieldConsumersPerThread:DocFieldConsumerPerThread
+    {
+        
+        internal DocFieldConsumerPerThread one;
+        internal DocFieldConsumerPerThread two;
+        internal DocFieldConsumers parent;
+        internal DocumentsWriter.DocState docState;
+        
+        public DocFieldConsumersPerThread(DocFieldProcessorPerThread docFieldProcessorPerThread, DocFieldConsumers parent, DocFieldConsumerPerThread one, DocFieldConsumerPerThread two)
+        {
+            this.parent = parent;
+            this.one = one;
+            this.two = two;
+            docState = docFieldProcessorPerThread.docState;
+        }
+        
+        public override void  StartDocument()
+        {
+            one.StartDocument();
+            two.StartDocument();
+        }
+        
+        public override void  Abort()
+        {
+            try
+            {
+                one.Abort();
+            }
+            finally
+            {
+                two.Abort();
+            }
+        }
+        
+        public override DocumentsWriter.DocWriter FinishDocument()
+        {
+            DocumentsWriter.DocWriter oneDoc = one.FinishDocument();
+            DocumentsWriter.DocWriter twoDoc = two.FinishDocument();
+            if (oneDoc == null)
+                return twoDoc;
+            else if (twoDoc == null)
+                return oneDoc;
+            else
+            {
+                DocFieldConsumers.PerDoc both = parent.GetPerDoc();
+                both.docID = docState.docID;
+                System.Diagnostics.Debug.Assert(oneDoc.docID == docState.docID);
+                System.Diagnostics.Debug.Assert(twoDoc.docID == docState.docID);
+                both.one = oneDoc;
+                both.two = twoDoc;
+                return both;
+            }
+        }
+        
+        public override DocFieldConsumerPerField AddField(FieldInfo fi)
+        {
+            return new DocFieldConsumersPerField(this, one.AddField(fi), two.AddField(fi));
+        }
+    }
 }
\ No newline at end of file
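
FinishDocument() above forwards whichever child produced output and allocates a pairing object only when both did. The combine step, reduced to a tiny standalone sketch (types are hypothetical stand-ins):

    // Tiny illustration of the combine step in FinishDocument() above:
    // forward whichever child result exists, and only pair them up when both
    // children produced one. Types here are hypothetical stand-ins.
    using System;

    class CombineDemo
    {
        static string CombineDocs(string oneDoc, string twoDoc)
        {
            if (oneDoc == null)
                return twoDoc;
            if (twoDoc == null)
                return oneDoc;
            return "(" + oneDoc + " + " + twoDoc + ")"; // stand-in for the PerDoc pairing
        }

        static void Main()
        {
            Console.WriteLine(CombineDocs(null, "b"));  // b
            Console.WriteLine(CombineDocs("a", null));  // a
            Console.WriteLine(CombineDocs("a", "b"));   // (a + b)
        }
    }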

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocFieldProcessor.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldProcessor.cs b/src/core/Index/DocFieldProcessor.cs
index 4289118..0fce156 100644
--- a/src/core/Index/DocFieldProcessor.cs
+++ b/src/core/Index/DocFieldProcessor.cs
@@ -22,71 +22,71 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> This is a DocConsumer that gathers all fields under the
-	/// same name, and calls per-field consumers to process field
-	/// by field.  This class doesn't do any "real" work
-	/// of its own: it just forwards the fields to a
-	/// DocFieldConsumer.
-	/// </summary>
-	
-	sealed class DocFieldProcessor : DocConsumer
-	{
-		
-		internal DocumentsWriter docWriter;
-		internal FieldInfos fieldInfos = new FieldInfos();
-		internal DocFieldConsumer consumer;
-		internal StoredFieldsWriter fieldsWriter;
-		
-		public DocFieldProcessor(DocumentsWriter docWriter, DocFieldConsumer consumer)
-		{
-			this.docWriter = docWriter;
-			this.consumer = consumer;
-			consumer.SetFieldInfos(fieldInfos);
-			fieldsWriter = new StoredFieldsWriter(docWriter, fieldInfos);
-		}
-		
-		public override void  CloseDocStore(SegmentWriteState state)
-		{
-			consumer.CloseDocStore(state);
-			fieldsWriter.CloseDocStore(state);
-		}
-		
-		public override void Flush(ICollection<DocConsumerPerThread> threads, SegmentWriteState state)
-		{
-			var childThreadsAndFields = new HashMap<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>>();
-			foreach(DocConsumerPerThread thread in threads)
-			{
+    
+    /// <summary> This is a DocConsumer that gathers all fields under the
+    /// same name, and calls per-field consumers to process field
+    /// by field.  This class doesn't do any "real" work
+    /// of its own: it just forwards the fields to a
+    /// DocFieldConsumer.
+    /// </summary>
+    
+    sealed class DocFieldProcessor : DocConsumer
+    {
+        
+        internal DocumentsWriter docWriter;
+        internal FieldInfos fieldInfos = new FieldInfos();
+        internal DocFieldConsumer consumer;
+        internal StoredFieldsWriter fieldsWriter;
+        
+        public DocFieldProcessor(DocumentsWriter docWriter, DocFieldConsumer consumer)
+        {
+            this.docWriter = docWriter;
+            this.consumer = consumer;
+            consumer.SetFieldInfos(fieldInfos);
+            fieldsWriter = new StoredFieldsWriter(docWriter, fieldInfos);
+        }
+        
+        public override void  CloseDocStore(SegmentWriteState state)
+        {
+            consumer.CloseDocStore(state);
+            fieldsWriter.CloseDocStore(state);
+        }
+        
+        public override void Flush(ICollection<DocConsumerPerThread> threads, SegmentWriteState state)
+        {
+            var childThreadsAndFields = new HashMap<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>>();
+            foreach(DocConsumerPerThread thread in threads)
+            {
                 DocFieldProcessorPerThread perThread = (DocFieldProcessorPerThread)thread;
-				childThreadsAndFields[perThread.consumer] = perThread.Fields();
-				perThread.TrimFields(state);
-			}
-			fieldsWriter.Flush(state);
-			consumer.Flush(childThreadsAndFields, state);
-			
-			// Important to save after asking consumer to flush so
-			// consumer can alter the FieldInfo* if necessary.  EG,
-			// FreqProxTermsWriter does this with
-			// FieldInfo.storePayload.
-			System.String fileName = state.SegmentFileName(IndexFileNames.FIELD_INFOS_EXTENSION);
-			fieldInfos.Write(state.directory, fileName);
+                childThreadsAndFields[perThread.consumer] = perThread.Fields();
+                perThread.TrimFields(state);
+            }
+            fieldsWriter.Flush(state);
+            consumer.Flush(childThreadsAndFields, state);
+            
+            // Important to save after asking consumer to flush so
+            // consumer can alter the FieldInfo* if necessary.  EG,
+            // FreqProxTermsWriter does this with
+            // FieldInfo.storePayload.
+            System.String fileName = state.SegmentFileName(IndexFileNames.FIELD_INFOS_EXTENSION);
+            fieldInfos.Write(state.directory, fileName);
             state.flushedFiles.Add(fileName);
-		}
-		
-		public override void  Abort()
-		{
-			fieldsWriter.Abort();
-			consumer.Abort();
-		}
-		
-		public override bool FreeRAM()
-		{
-			return consumer.FreeRAM();
-		}
-		
-		public override DocConsumerPerThread AddThread(DocumentsWriterThreadState threadState)
-		{
-			return new DocFieldProcessorPerThread(threadState, this);
-		}
-	}
+        }
+        
+        public override void  Abort()
+        {
+            fieldsWriter.Abort();
+            consumer.Abort();
+        }
+        
+        public override bool FreeRAM()
+        {
+            return consumer.FreeRAM();
+        }
+        
+        public override DocConsumerPerThread AddThread(DocumentsWriterThreadState threadState)
+        {
+            return new DocFieldProcessorPerThread(threadState, this);
+        }
+    }
 }
\ No newline at end of file
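
Worth calling out in Flush above: the FieldInfos file is written only after the consumer chain has flushed, because flushing can still flip per-field flags (the comment cites FieldInfo.storePayload via FreqProxTermsWriter). A hedged sketch of that ordering constraint, with hypothetical IConsumer/IFieldInfosFile stand-ins:

    using System.Collections.Generic;

    static class FlushOrderSketch
    {
        // Hypothetical stand-ins for the consumer chain and FieldInfos.
        interface IConsumer { void Flush(); }
        interface IFieldInfosFile { void Write(string fileName); }

        static void Flush(IConsumer consumer, IFieldInfosFile fieldInfos,
                          ICollection<string> flushedFiles, string fileName)
        {
            // 1) Flush the consumer first: it may still change per-field
            //    flags (e.g. storePayloads) while writing postings.
            consumer.Flush();

            // 2) Only then persist the field infos, so the file on disk
            //    reflects whatever the flush just changed.
            fieldInfos.Write(fileName);
            flushedFiles.Add(fileName);
        }
    }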

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocFieldProcessorPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldProcessorPerField.cs b/src/core/Index/DocFieldProcessorPerField.cs
index 1078988..86a03e7 100644
--- a/src/core/Index/DocFieldProcessorPerField.cs
+++ b/src/core/Index/DocFieldProcessorPerField.cs
@@ -20,30 +20,30 @@ using Lucene.Net.Documents;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> Holds all per thread, per field state.</summary>
-	
-	sealed class DocFieldProcessorPerField
-	{
-		
-		internal DocFieldConsumerPerField consumer;
-		internal FieldInfo fieldInfo;
-		
-		internal DocFieldProcessorPerField next;
-		internal int lastGen = - 1;
-		
-		internal int fieldCount;
-		internal IFieldable[] fields = new IFieldable[1];
-		
-		public DocFieldProcessorPerField(DocFieldProcessorPerThread perThread, FieldInfo fieldInfo)
-		{
-			this.consumer = perThread.consumer.AddField(fieldInfo);
-			this.fieldInfo = fieldInfo;
-		}
-		
-		public void  Abort()
-		{
-			consumer.Abort();
-		}
-	}
+    
+    /// <summary> Holds all per thread, per field state.</summary>
+    
+    sealed class DocFieldProcessorPerField
+    {
+        
+        internal DocFieldConsumerPerField consumer;
+        internal FieldInfo fieldInfo;
+        
+        internal DocFieldProcessorPerField next;
+        internal int lastGen = - 1;
+        
+        internal int fieldCount;
+        internal IFieldable[] fields = new IFieldable[1];
+        
+        public DocFieldProcessorPerField(DocFieldProcessorPerThread perThread, FieldInfo fieldInfo)
+        {
+            this.consumer = perThread.consumer.AddField(fieldInfo);
+            this.fieldInfo = fieldInfo;
+        }
+        
+        public void  Abort()
+        {
+            consumer.Abort();
+        }
+    }
 }
\ No newline at end of file
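
The next field above is what turns DocFieldProcessorPerField into a hash-chain node: the per-thread processor (next file) buckets fields by name hash into a power-of-two table, walks the chain on lookup, and doubles the table in Rehash. A compact sketch of that scheme; Node is a hypothetical stand-in, and the grow threshold is an assumption here since the hunk that triggers Rehash is not shown:

    sealed class Node
    {
        public string Name;
        public Node Next;   // chain link, like DocFieldProcessorPerField.next
    }

    sealed class FieldHashSketch
    {
        Node[] buckets = new Node[2];
        int mask = 1;       // buckets.Length - 1; power-of-two table, so AND replaces %
        int count;

        public Node GetOrAdd(string name)
        {
            int pos = name.GetHashCode() & mask;
            for (Node n = buckets[pos]; n != null; n = n.Next)
                if (n.Name == name) return n;

            var node = new Node { Name = name, Next = buckets[pos] };
            buckets[pos] = node;
            if (++count >= buckets.Length / 2) Rehash();   // assumed load factor
            return node;
        }

        void Rehash()
        {
            var bigger = new Node[buckets.Length * 2];
            int newMask = bigger.Length - 1;
            foreach (var head in buckets)
            {
                for (Node n = head; n != null; )
                {
                    Node next = n.Next;                    // unlink, push onto new bucket
                    int pos = n.Name.GetHashCode() & newMask;
                    n.Next = bigger[pos];
                    bigger[pos] = n;
                    n = next;
                }
            }
            buckets = bigger;
            mask = newMask;
        }
    }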

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocFieldProcessorPerThread.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldProcessorPerThread.cs b/src/core/Index/DocFieldProcessorPerThread.cs
index d108116..45eaff3 100644
--- a/src/core/Index/DocFieldProcessorPerThread.cs
+++ b/src/core/Index/DocFieldProcessorPerThread.cs
@@ -23,186 +23,186 @@ using ArrayUtil = Lucene.Net.Util.ArrayUtil;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> Gathers all Fieldables for a document under the same
-	/// name, updates FieldInfos, and calls per-field consumers
-	/// to process field by field.
-	/// 
-	/// Currently, only a single thread visits the fields,
-	/// sequentially, for processing.
-	/// </summary>
-	
-	sealed class DocFieldProcessorPerThread:DocConsumerPerThread
-	{
-		private void  InitBlock()
-		{
-			docFreeList = new PerDoc[1];
-		}
-		
-		internal float docBoost;
-		internal int fieldGen;
-		internal DocFieldProcessor docFieldProcessor;
-		internal FieldInfos fieldInfos;
-		internal DocFieldConsumerPerThread consumer;
-		
-		// Holds all fields seen in current doc
-		internal DocFieldProcessorPerField[] fields = new DocFieldProcessorPerField[1];
-		internal int fieldCount;
-		
-		// Hash table for all fields ever seen
-		internal DocFieldProcessorPerField[] fieldHash = new DocFieldProcessorPerField[2];
-		internal int hashMask = 1;
-		internal int totalFieldCount;
-		
-		internal StoredFieldsWriterPerThread fieldsWriter;
-		
-		internal DocumentsWriter.DocState docState;
-		
-		public DocFieldProcessorPerThread(DocumentsWriterThreadState threadState, DocFieldProcessor docFieldProcessor)
-		{
-			InitBlock();
-			this.docState = threadState.docState;
-			this.docFieldProcessor = docFieldProcessor;
-			this.fieldInfos = docFieldProcessor.fieldInfos;
-			this.consumer = docFieldProcessor.consumer.AddThread(this);
-			fieldsWriter = docFieldProcessor.fieldsWriter.AddThread(docState);
-		}
-		
-		public override void  Abort()
-		{
-			for (int i = 0; i < fieldHash.Length; i++)
-			{
-				DocFieldProcessorPerField field = fieldHash[i];
-				while (field != null)
-				{
-					DocFieldProcessorPerField next = field.next;
-					field.Abort();
-					field = next;
-				}
-			}
-			fieldsWriter.Abort();
-			consumer.Abort();
-		}
-		
-		public System.Collections.Generic.ICollection<DocFieldConsumerPerField> Fields()
-		{
-		    System.Collections.Generic.ICollection<DocFieldConsumerPerField> fields =
-		        new System.Collections.Generic.HashSet<DocFieldConsumerPerField>();
-			for (int i = 0; i < fieldHash.Length; i++)
-			{
-				DocFieldProcessorPerField field = fieldHash[i];
-				while (field != null)
-				{
-					fields.Add(field.consumer);
-					field = field.next;
-				}
-			}
-			System.Diagnostics.Debug.Assert(fields.Count == totalFieldCount);
-			return fields;
-		}
-		
-		/// <summary>If there are fields we've seen but did not see again
-		/// in the last run, then free them up. 
-		/// </summary>
-		
-		internal void  TrimFields(SegmentWriteState state)
-		{
-			
-			for (int i = 0; i < fieldHash.Length; i++)
-			{
-				DocFieldProcessorPerField perField = fieldHash[i];
-				DocFieldProcessorPerField lastPerField = null;
-				
-				while (perField != null)
-				{
-					
-					if (perField.lastGen == - 1)
-					{
-						
-						// This field was not seen since the previous
-						// flush, so, free up its resources now
-						
-						// Unhash
-						if (lastPerField == null)
-							fieldHash[i] = perField.next;
-						else
-							lastPerField.next = perField.next;
-						
-						if (state.docWriter.infoStream != null)
-							state.docWriter.infoStream.WriteLine("  purge field=" + perField.fieldInfo.name);
-						
-						totalFieldCount--;
-					}
-					else
-					{
-						// Reset
-						perField.lastGen = - 1;
-						lastPerField = perField;
-					}
-					
-					perField = perField.next;
-				}
-			}
-		}
-		
-		private void  Rehash()
-		{
-			int newHashSize = (fieldHash.Length * 2);
-			System.Diagnostics.Debug.Assert(newHashSize > fieldHash.Length);
-			
-			DocFieldProcessorPerField[] newHashArray = new DocFieldProcessorPerField[newHashSize];
-			
-			// Rehash
-			int newHashMask = newHashSize - 1;
-			for (int j = 0; j < fieldHash.Length; j++)
-			{
-				DocFieldProcessorPerField fp0 = fieldHash[j];
-				while (fp0 != null)
-				{
-					int hashPos2 = fp0.fieldInfo.name.GetHashCode() & newHashMask;
-					DocFieldProcessorPerField nextFP0 = fp0.next;
-					fp0.next = newHashArray[hashPos2];
-					newHashArray[hashPos2] = fp0;
-					fp0 = nextFP0;
-				}
-			}
-			
-			fieldHash = newHashArray;
-			hashMask = newHashMask;
-		}
-		
-		public override DocumentsWriter.DocWriter ProcessDocument()
-		{
-			
-			consumer.StartDocument();
-			fieldsWriter.StartDocument();
-			
-			Document doc = docState.doc;
-			
-			System.Diagnostics.Debug.Assert(docFieldProcessor.docWriter.writer.TestPoint("DocumentsWriter.ThreadState.init start"));
-			
-			fieldCount = 0;
-			
-			int thisFieldGen = fieldGen++;
-			
-			System.Collections.Generic.IList<IFieldable> docFields = doc.GetFields();
-			int numDocFields = docFields.Count;
-			
-			// Absorb any new fields first seen in this document.
-			// Also absorb any changes to fields we had already
-			// seen before (eg suddenly turning on norms or
-			// vectors, etc.):
-			
-			for (int i = 0; i < numDocFields; i++)
-			{
-				IFieldable field = docFields[i];
-				System.String fieldName = field.Name;
-				
-				// Make sure we have a PerField allocated
-				int hashPos = fieldName.GetHashCode() & hashMask;
-				DocFieldProcessorPerField fp = fieldHash[hashPos];
-				while (fp != null && !fp.fieldInfo.name.Equals(fieldName))
-					fp = fp.next;
+    
+    /// <summary> Gathers all Fieldables for a document under the same
+    /// name, updates FieldInfos, and calls per-field consumers
+    /// to process field by field.
+    /// 
+    /// Currently, only a single thread visits the fields,
+    /// sequentially, for processing.
+    /// </summary>
+    
+    sealed class DocFieldProcessorPerThread:DocConsumerPerThread
+    {
+        private void  InitBlock()
+        {
+            docFreeList = new PerDoc[1];
+        }
+        
+        internal float docBoost;
+        internal int fieldGen;
+        internal DocFieldProcessor docFieldProcessor;
+        internal FieldInfos fieldInfos;
+        internal DocFieldConsumerPerThread consumer;
+        
+        // Holds all fields seen in current doc
+        internal DocFieldProcessorPerField[] fields = new DocFieldProcessorPerField[1];
+        internal int fieldCount;
+        
+        // Hash table for all fields ever seen
+        internal DocFieldProcessorPerField[] fieldHash = new DocFieldProcessorPerField[2];
+        internal int hashMask = 1;
+        internal int totalFieldCount;
+        
+        internal StoredFieldsWriterPerThread fieldsWriter;
+        
+        internal DocumentsWriter.DocState docState;
+        
+        public DocFieldProcessorPerThread(DocumentsWriterThreadState threadState, DocFieldProcessor docFieldProcessor)
+        {
+            InitBlock();
+            this.docState = threadState.docState;
+            this.docFieldProcessor = docFieldProcessor;
+            this.fieldInfos = docFieldProcessor.fieldInfos;
+            this.consumer = docFieldProcessor.consumer.AddThread(this);
+            fieldsWriter = docFieldProcessor.fieldsWriter.AddThread(docState);
+        }
+        
+        public override void  Abort()
+        {
+            for (int i = 0; i < fieldHash.Length; i++)
+            {
+                DocFieldProcessorPerField field = fieldHash[i];
+                while (field != null)
+                {
+                    DocFieldProcessorPerField next = field.next;
+                    field.Abort();
+                    field = next;
+                }
+            }
+            fieldsWriter.Abort();
+            consumer.Abort();
+        }
+        
+        public System.Collections.Generic.ICollection<DocFieldConsumerPerField> Fields()
+        {
+            System.Collections.Generic.ICollection<DocFieldConsumerPerField> fields =
+                new System.Collections.Generic.HashSet<DocFieldConsumerPerField>();
+            for (int i = 0; i < fieldHash.Length; i++)
+            {
+                DocFieldProcessorPerField field = fieldHash[i];
+                while (field != null)
+                {
+                    fields.Add(field.consumer);
+                    field = field.next;
+                }
+            }
+            System.Diagnostics.Debug.Assert(fields.Count == totalFieldCount);
+            return fields;
+        }
+        
+        /// <summary>If there are fields we've seen but did not see again
+        /// in the last run, then free them up. 
+        /// </summary>
+        
+        internal void  TrimFields(SegmentWriteState state)
+        {
+            
+            for (int i = 0; i < fieldHash.Length; i++)
+            {
+                DocFieldProcessorPerField perField = fieldHash[i];
+                DocFieldProcessorPerField lastPerField = null;
+                
+                while (perField != null)
+                {
+                    
+                    if (perField.lastGen == - 1)
+                    {
+                        
+                        // This field was not seen since the previous
+                        // flush, so, free up its resources now
+                        
+                        // Unhash
+                        if (lastPerField == null)
+                            fieldHash[i] = perField.next;
+                        else
+                            lastPerField.next = perField.next;
+                        
+                        if (state.docWriter.infoStream != null)
+                            state.docWriter.infoStream.WriteLine("  purge field=" + perField.fieldInfo.name);
+                        
+                        totalFieldCount--;
+                    }
+                    else
+                    {
+                        // Reset
+                        perField.lastGen = - 1;
+                        lastPerField = perField;
+                    }
+                    
+                    perField = perField.next;
+                }
+            }
+        }
+        
+        private void  Rehash()
+        {
+            int newHashSize = (fieldHash.Length * 2);
+            System.Diagnostics.Debug.Assert(newHashSize > fieldHash.Length);
+            
+            DocFieldProcessorPerField[] newHashArray = new DocFieldProcessorPerField[newHashSize];
+            
+            // Rehash
+            int newHashMask = newHashSize - 1;
+            for (int j = 0; j < fieldHash.Length; j++)
+            {
+                DocFieldProcessorPerField fp0 = fieldHash[j];
+                while (fp0 != null)
+                {
+                    int hashPos2 = fp0.fieldInfo.name.GetHashCode() & newHashMask;
+                    DocFieldProcessorPerField nextFP0 = fp0.next;
+                    fp0.next = newHashArray[hashPos2];
+                    newHashArray[hashPos2] = fp0;
+                    fp0 = nextFP0;
+                }
+            }
+            
+            fieldHash = newHashArray;
+            hashMask = newHashMask;
+        }
+        
+        public override DocumentsWriter.DocWriter ProcessDocument()
+        {
+            
+            consumer.StartDocument();
+            fieldsWriter.StartDocument();
+            
+            Document doc = docState.doc;
+            
+            System.Diagnostics.Debug.Assert(docFieldProcessor.docWriter.writer.TestPoint("DocumentsWriter.ThreadState.init start"));
+            
+            fieldCount = 0;
+            
+            int thisFieldGen = fieldGen++;
+            
+            System.Collections.Generic.IList<IFieldable> docFields = doc.GetFields();
+            int numDocFields = docFields.Count;
+            
+            // Absorb any new fields first seen in this document.
+            // Also absorb any changes to fields we had already
+            // seen before (eg suddenly turning on norms or
+            // vectors, etc.):
+            
+            for (int i = 0; i < numDocFields; i++)
+            {
+                IFieldable field = docFields[i];
+                System.String fieldName = field.Name;
+                
+                // Make sure we have a PerField allocated
+                int hashPos = fieldName.GetHashCode() & hashMask;
+                DocFieldProcessorPerField fp = fieldHash[hashPos];
+                while (fp != null && !fp.fieldInfo.name.Equals(fieldName))
+                    fp = fp.next;
 
                 if (fp == null)
                 {
@@ -231,248 +231,248 @@ namespace Lucene.Net.Index
                                         field.OmitNorms, false, field.OmitTermFreqAndPositions);
                 }
 
-			    if (thisFieldGen != fp.lastGen)
-				{
-					
-					// First time we're seeing this field for this doc
-					fp.fieldCount = 0;
-					
-					if (fieldCount == fields.Length)
-					{
-						int newSize = fields.Length * 2;
-						DocFieldProcessorPerField[] newArray = new DocFieldProcessorPerField[newSize];
-						Array.Copy(fields, 0, newArray, 0, fieldCount);
-						fields = newArray;
-					}
-					
-					fields[fieldCount++] = fp;
-					fp.lastGen = thisFieldGen;
-				}
-				
-				if (fp.fieldCount == fp.fields.Length)
-				{
-					IFieldable[] newArray = new IFieldable[fp.fields.Length * 2];
-					Array.Copy(fp.fields, 0, newArray, 0, fp.fieldCount);
-					fp.fields = newArray;
-				}
-				
-				fp.fields[fp.fieldCount++] = field;
-				if (field.IsStored)
-				{
-					fieldsWriter.AddField(field, fp.fieldInfo);
-				}
-			}
-			
-			// If we are writing vectors then we must visit
-			// fields in sorted order so they are written in
-			// sorted order.  TODO: we actually only need to
-			// sort the subset of fields that have vectors
-			// enabled; we could save [small amount of] CPU
-			// here.
-			QuickSort(fields, 0, fieldCount - 1);
-			
-			for (int i = 0; i < fieldCount; i++)
-				fields[i].consumer.ProcessFields(fields[i].fields, fields[i].fieldCount);
+                if (thisFieldGen != fp.lastGen)
+                {
+                    
+                    // First time we're seeing this field for this doc
+                    fp.fieldCount = 0;
+                    
+                    if (fieldCount == fields.Length)
+                    {
+                        int newSize = fields.Length * 2;
+                        DocFieldProcessorPerField[] newArray = new DocFieldProcessorPerField[newSize];
+                        Array.Copy(fields, 0, newArray, 0, fieldCount);
+                        fields = newArray;
+                    }
+                    
+                    fields[fieldCount++] = fp;
+                    fp.lastGen = thisFieldGen;
+                }
+                
+                if (fp.fieldCount == fp.fields.Length)
+                {
+                    IFieldable[] newArray = new IFieldable[fp.fields.Length * 2];
+                    Array.Copy(fp.fields, 0, newArray, 0, fp.fieldCount);
+                    fp.fields = newArray;
+                }
+                
+                fp.fields[fp.fieldCount++] = field;
+                if (field.IsStored)
+                {
+                    fieldsWriter.AddField(field, fp.fieldInfo);
+                }
+            }
+            
+            // If we are writing vectors then we must visit
+            // fields in sorted order so they are written in
+            // sorted order.  TODO: we actually only need to
+            // sort the subset of fields that have vectors
+            // enabled; we could save [small amount of] CPU
+            // here.
+            QuickSort(fields, 0, fieldCount - 1);
+            
+            for (int i = 0; i < fieldCount; i++)
+                fields[i].consumer.ProcessFields(fields[i].fields, fields[i].fieldCount);
 
             if (docState.maxTermPrefix != null && docState.infoStream != null)
             {
                 docState.infoStream.WriteLine("WARNING: document contains at least one immense term (longer than the max length " + DocumentsWriter.MAX_TERM_LENGTH + "), all of which were skipped.  Please correct the analyzer to not produce such terms.  The prefix of the first immense term is: '" + docState.maxTermPrefix + "...'");
                 docState.maxTermPrefix = null;
             }
-			
-			DocumentsWriter.DocWriter one = fieldsWriter.FinishDocument();
-			DocumentsWriter.DocWriter two = consumer.FinishDocument();
-			if (one == null)
-			{
-				return two;
-			}
-			else if (two == null)
-			{
-				return one;
-			}
-			else
-			{
-				PerDoc both = GetPerDoc();
-				both.docID = docState.docID;
-				System.Diagnostics.Debug.Assert(one.docID == docState.docID);
-				System.Diagnostics.Debug.Assert(two.docID == docState.docID);
-				both.one = one;
-				both.two = two;
-				return both;
-			}
-		}
-		
-		internal void  QuickSort(DocFieldProcessorPerField[] array, int lo, int hi)
-		{
-			if (lo >= hi)
-				return ;
-			else if (hi == 1 + lo)
-			{
-				if (String.CompareOrdinal(array[lo].fieldInfo.name, array[hi].fieldInfo.name) > 0)
-				{
-					DocFieldProcessorPerField tmp = array[lo];
-					array[lo] = array[hi];
-					array[hi] = tmp;
-				}
-				return ;
-			}
-			
-			int mid = Number.URShift((lo + hi), 1);
-			
-			if (String.CompareOrdinal(array[lo].fieldInfo.name, array[mid].fieldInfo.name) > 0)
-			{
-				DocFieldProcessorPerField tmp = array[lo];
-				array[lo] = array[mid];
-				array[mid] = tmp;
-			}
-			
-			if (String.CompareOrdinal(array[mid].fieldInfo.name, array[hi].fieldInfo.name) > 0)
-			{
-				DocFieldProcessorPerField tmp = array[mid];
-				array[mid] = array[hi];
-				array[hi] = tmp;
-				
-				if (String.CompareOrdinal(array[lo].fieldInfo.name, array[mid].fieldInfo.name) > 0)
-				{
-					DocFieldProcessorPerField tmp2 = array[lo];
-					array[lo] = array[mid];
-					array[mid] = tmp2;
-				}
-			}
-			
-			int left = lo + 1;
-			int right = hi - 1;
-			
-			if (left >= right)
-				return ;
-			
-			DocFieldProcessorPerField partition = array[mid];
-			
-			for (; ; )
-			{
-				while (String.CompareOrdinal(array[right].fieldInfo.name, partition.fieldInfo.name) > 0)
-					--right;
-				
-				while (left < right && String.CompareOrdinal(array[left].fieldInfo.name, partition.fieldInfo.name) <= 0)
-					++left;
-				
-				if (left < right)
-				{
-					DocFieldProcessorPerField tmp = array[left];
-					array[left] = array[right];
-					array[right] = tmp;
-					--right;
-				}
-				else
-				{
-					break;
-				}
-			}
-			
-			QuickSort(array, lo, left);
-			QuickSort(array, left + 1, hi);
-		}
-		
-		internal PerDoc[] docFreeList;
-		internal int freeCount;
-		internal int allocCount;
-		
-		internal PerDoc GetPerDoc()
-		{
-			lock (this)
-			{
-				if (freeCount == 0)
-				{
-					allocCount++;
-					if (allocCount > docFreeList.Length)
-					{
-						// Grow our free list up front to make sure we have
-						// enough space to recycle all outstanding PerDoc
-						// instances
-						System.Diagnostics.Debug.Assert(allocCount == 1 + docFreeList.Length);
-						docFreeList = new PerDoc[ArrayUtil.GetNextSize(allocCount)];
-					}
-					return new PerDoc(this);
-				}
-				else
-					return docFreeList[--freeCount];
-			}
-		}
-		
-		internal void  FreePerDoc(PerDoc perDoc)
-		{
-			lock (this)
-			{
-				System.Diagnostics.Debug.Assert(freeCount < docFreeList.Length);
-				docFreeList[freeCount++] = perDoc;
-			}
-		}
-		
-		internal class PerDoc:DocumentsWriter.DocWriter
-		{
-			public PerDoc(DocFieldProcessorPerThread enclosingInstance)
-			{
-				InitBlock(enclosingInstance);
-			}
-			private void  InitBlock(DocFieldProcessorPerThread enclosingInstance)
-			{
-				this.enclosingInstance = enclosingInstance;
-			}
-			private DocFieldProcessorPerThread enclosingInstance;
-			public DocFieldProcessorPerThread Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			
-			internal DocumentsWriter.DocWriter one;
-			internal DocumentsWriter.DocWriter two;
-			
-			public override long SizeInBytes()
-			{
-				return one.SizeInBytes() + two.SizeInBytes();
-			}
-			
-			public override void  Finish()
-			{
-				try
-				{
-					try
-					{
-						one.Finish();
-					}
-					finally
-					{
-						two.Finish();
-					}
-				}
-				finally
-				{
-					Enclosing_Instance.FreePerDoc(this);
-				}
-			}
-			
-			public override void  Abort()
-			{
-				try
-				{
-					try
-					{
-						one.Abort();
-					}
-					finally
-					{
-						two.Abort();
-					}
-				}
-				finally
-				{
-					Enclosing_Instance.FreePerDoc(this);
-				}
-			}
-		}
-	}
+            
+            DocumentsWriter.DocWriter one = fieldsWriter.FinishDocument();
+            DocumentsWriter.DocWriter two = consumer.FinishDocument();
+            if (one == null)
+            {
+                return two;
+            }
+            else if (two == null)
+            {
+                return one;
+            }
+            else
+            {
+                PerDoc both = GetPerDoc();
+                both.docID = docState.docID;
+                System.Diagnostics.Debug.Assert(one.docID == docState.docID);
+                System.Diagnostics.Debug.Assert(two.docID == docState.docID);
+                both.one = one;
+                both.two = two;
+                return both;
+            }
+        }
+        
+        internal void  QuickSort(DocFieldProcessorPerField[] array, int lo, int hi)
+        {
+            if (lo >= hi)
+                return ;
+            else if (hi == 1 + lo)
+            {
+                if (String.CompareOrdinal(array[lo].fieldInfo.name, array[hi].fieldInfo.name) > 0)
+                {
+                    DocFieldProcessorPerField tmp = array[lo];
+                    array[lo] = array[hi];
+                    array[hi] = tmp;
+                }
+                return ;
+            }
+            
+            int mid = Number.URShift((lo + hi), 1);
+            
+            if (String.CompareOrdinal(array[lo].fieldInfo.name, array[mid].fieldInfo.name) > 0)
+            {
+                DocFieldProcessorPerField tmp = array[lo];
+                array[lo] = array[mid];
+                array[mid] = tmp;
+            }
+            
+            if (String.CompareOrdinal(array[mid].fieldInfo.name, array[hi].fieldInfo.name) > 0)
+            {
+                DocFieldProcessorPerField tmp = array[mid];
+                array[mid] = array[hi];
+                array[hi] = tmp;
+                
+                if (String.CompareOrdinal(array[lo].fieldInfo.name, array[mid].fieldInfo.name) > 0)
+                {
+                    DocFieldProcessorPerField tmp2 = array[lo];
+                    array[lo] = array[mid];
+                    array[mid] = tmp2;
+                }
+            }
+            
+            int left = lo + 1;
+            int right = hi - 1;
+            
+            if (left >= right)
+                return ;
+            
+            DocFieldProcessorPerField partition = array[mid];
+            
+            for (; ; )
+            {
+                while (String.CompareOrdinal(array[right].fieldInfo.name, partition.fieldInfo.name) > 0)
+                    --right;
+                
+                while (left < right && String.CompareOrdinal(array[left].fieldInfo.name, partition.fieldInfo.name) <= 0)
+                    ++left;
+                
+                if (left < right)
+                {
+                    DocFieldProcessorPerField tmp = array[left];
+                    array[left] = array[right];
+                    array[right] = tmp;
+                    --right;
+                }
+                else
+                {
+                    break;
+                }
+            }
+            
+            QuickSort(array, lo, left);
+            QuickSort(array, left + 1, hi);
+        }
+        
+        internal PerDoc[] docFreeList;
+        internal int freeCount;
+        internal int allocCount;
+        
+        internal PerDoc GetPerDoc()
+        {
+            lock (this)
+            {
+                if (freeCount == 0)
+                {
+                    allocCount++;
+                    if (allocCount > docFreeList.Length)
+                    {
+                        // Grow our free list up front to make sure we have
+                        // enough space to recycle all outstanding PerDoc
+                        // instances
+                        System.Diagnostics.Debug.Assert(allocCount == 1 + docFreeList.Length);
+                        docFreeList = new PerDoc[ArrayUtil.GetNextSize(allocCount)];
+                    }
+                    return new PerDoc(this);
+                }
+                else
+                    return docFreeList[--freeCount];
+            }
+        }
+        
+        internal void  FreePerDoc(PerDoc perDoc)
+        {
+            lock (this)
+            {
+                System.Diagnostics.Debug.Assert(freeCount < docFreeList.Length);
+                docFreeList[freeCount++] = perDoc;
+            }
+        }
+        
+        internal class PerDoc:DocumentsWriter.DocWriter
+        {
+            public PerDoc(DocFieldProcessorPerThread enclosingInstance)
+            {
+                InitBlock(enclosingInstance);
+            }
+            private void  InitBlock(DocFieldProcessorPerThread enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
+            private DocFieldProcessorPerThread enclosingInstance;
+            public DocFieldProcessorPerThread Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+                
+            }
+            
+            internal DocumentsWriter.DocWriter one;
+            internal DocumentsWriter.DocWriter two;
+            
+            public override long SizeInBytes()
+            {
+                return one.SizeInBytes() + two.SizeInBytes();
+            }
+            
+            public override void  Finish()
+            {
+                try
+                {
+                    try
+                    {
+                        one.Finish();
+                    }
+                    finally
+                    {
+                        two.Finish();
+                    }
+                }
+                finally
+                {
+                    Enclosing_Instance.FreePerDoc(this);
+                }
+            }
+            
+            public override void  Abort()
+            {
+                try
+                {
+                    try
+                    {
+                        one.Abort();
+                    }
+                    finally
+                    {
+                        two.Abort();
+                    }
+                }
+                finally
+                {
+                    Enclosing_Instance.FreePerDoc(this);
+                }
+            }
+        }
+    }
 }
\ No newline at end of file
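
GetPerDoc/FreePerDoc above form a small synchronized object pool: finished PerDoc instances are pushed back onto docFreeList, and the list is grown eagerly (tracked by allocCount) so a returning instance always has a slot. A hedged sketch of the same pattern; Pooled is a hypothetical stand-in, and the doubling growth is an assumption in place of ArrayUtil.GetNextSize:

    using System;

    sealed class PerDocPoolSketch
    {
        public sealed class Pooled { }   // stand-in for PerDoc

        private Pooled[] freeList = new Pooled[1];
        private int freeCount;
        private int allocCount;
        private readonly object sync = new object();

        public Pooled Get()
        {
            lock (sync)
            {
                if (freeCount == 0)
                {
                    allocCount++;
                    if (allocCount > freeList.Length)
                    {
                        // Grow up front so Free() can never overflow, no
                        // matter how many instances are outstanding.
                        var bigger = new Pooled[allocCount * 2];
                        Array.Copy(freeList, 0, bigger, 0, freeList.Length);
                        freeList = bigger;
                    }
                    return new Pooled();
                }
                return freeList[--freeCount];
            }
        }

        public void Free(Pooled p)
        {
            lock (sync)
            {
                freeList[freeCount++] = p;   // guaranteed to fit; see Get()
            }
        }
    }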

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocInverter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocInverter.cs b/src/core/Index/DocInverter.cs
index 4153465..9a058aa 100644
--- a/src/core/Index/DocInverter.cs
+++ b/src/core/Index/DocInverter.cs
@@ -20,78 +20,78 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>This is a DocFieldConsumer that inverts each field,
-	/// separately, from a Document, and accepts an
-	/// InvertedDocConsumer to process those terms.
-	/// </summary>
-	
-	sealed class DocInverter : DocFieldConsumer
-	{
-		
-		internal InvertedDocConsumer consumer;
-		internal InvertedDocEndConsumer endConsumer;
-		
-		public DocInverter(InvertedDocConsumer consumer, InvertedDocEndConsumer endConsumer)
-		{
-			this.consumer = consumer;
-			this.endConsumer = endConsumer;
-		}
-		
-		internal override void  SetFieldInfos(FieldInfos fieldInfos)
-		{
-			base.SetFieldInfos(fieldInfos);
-			consumer.SetFieldInfos(fieldInfos);
-			endConsumer.SetFieldInfos(fieldInfos);
-		}
+    
+    /// <summary>This is a DocFieldConsumer that inverts each field,
+    /// separately, from a Document, and accepts an
+    /// InvertedDocConsumer to process those terms.
+    /// </summary>
+    
+    sealed class DocInverter : DocFieldConsumer
+    {
+        
+        internal InvertedDocConsumer consumer;
+        internal InvertedDocEndConsumer endConsumer;
+        
+        public DocInverter(InvertedDocConsumer consumer, InvertedDocEndConsumer endConsumer)
+        {
+            this.consumer = consumer;
+            this.endConsumer = endConsumer;
+        }
+        
+        internal override void  SetFieldInfos(FieldInfos fieldInfos)
+        {
+            base.SetFieldInfos(fieldInfos);
+            consumer.SetFieldInfos(fieldInfos);
+            endConsumer.SetFieldInfos(fieldInfos);
+        }
 
         public override void Flush(IDictionary<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state)
-		{
+        {
 
             var childThreadsAndFields = new HashMap<InvertedDocConsumerPerThread, ICollection<InvertedDocConsumerPerField>>();
             var endChildThreadsAndFields = new HashMap<InvertedDocEndConsumerPerThread, ICollection<InvertedDocEndConsumerPerField>>();
 
             foreach (var entry in threadsAndFields)
-			{
-				var perThread = (DocInverterPerThread) entry.Key;
+            {
+                var perThread = (DocInverterPerThread) entry.Key;
 
-				ICollection<InvertedDocConsumerPerField> childFields = new HashSet<InvertedDocConsumerPerField>();
-				ICollection<InvertedDocEndConsumerPerField> endChildFields = new HashSet<InvertedDocEndConsumerPerField>();
-				foreach(DocFieldConsumerPerField field in entry.Value)
-				{
+                ICollection<InvertedDocConsumerPerField> childFields = new HashSet<InvertedDocConsumerPerField>();
+                ICollection<InvertedDocEndConsumerPerField> endChildFields = new HashSet<InvertedDocEndConsumerPerField>();
+                foreach(DocFieldConsumerPerField field in entry.Value)
+                {
                     var perField = (DocInverterPerField)field;
-					childFields.Add(perField.consumer);
-					endChildFields.Add(perField.endConsumer);
-				}
-				
-				childThreadsAndFields[perThread.consumer] = childFields;
-				endChildThreadsAndFields[perThread.endConsumer] = endChildFields;
-			}
-			
-			consumer.Flush(childThreadsAndFields, state);
-			endConsumer.Flush(endChildThreadsAndFields, state);
-		}
+                    childFields.Add(perField.consumer);
+                    endChildFields.Add(perField.endConsumer);
+                }
+                
+                childThreadsAndFields[perThread.consumer] = childFields;
+                endChildThreadsAndFields[perThread.endConsumer] = endChildFields;
+            }
+            
+            consumer.Flush(childThreadsAndFields, state);
+            endConsumer.Flush(endChildThreadsAndFields, state);
+        }
 
-	    public override void  CloseDocStore(SegmentWriteState state)
-		{
-			consumer.CloseDocStore(state);
-			endConsumer.CloseDocStore(state);
-		}
-		
-		public override void  Abort()
-		{
-			consumer.Abort();
-			endConsumer.Abort();
-		}
-		
-		public override bool FreeRAM()
-		{
-			return consumer.FreeRAM();
-		}
-		
-		public override DocFieldConsumerPerThread AddThread(DocFieldProcessorPerThread docFieldProcessorPerThread)
-		{
-			return new DocInverterPerThread(docFieldProcessorPerThread, this);
-		}
-	}
+        public override void  CloseDocStore(SegmentWriteState state)
+        {
+            consumer.CloseDocStore(state);
+            endConsumer.CloseDocStore(state);
+        }
+        
+        public override void  Abort()
+        {
+            consumer.Abort();
+            endConsumer.Abort();
+        }
+        
+        public override bool FreeRAM()
+        {
+            return consumer.FreeRAM();
+        }
+        
+        public override DocFieldConsumerPerThread AddThread(DocFieldProcessorPerThread docFieldProcessorPerThread)
+        {
+            return new DocInverterPerThread(docFieldProcessorPerThread, this);
+        }
+    }
 }
\ No newline at end of file
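
DocInverter.Flush above is essentially an unzip: each DocInverterPerField carries a (consumer, endConsumer) pair, and the loop splits one map of pairs into two parallel maps, one flushed to the inverted-doc chain and one to the end-consumer chain. A small sketch of that split over hypothetical pair halves:

    using System.Collections.Generic;

    static class UnzipSketch
    {
        // Hypothetical stand-ins for the consumer/endConsumer halves.
        sealed class FieldPair { public object Consumer; public object EndConsumer; }

        static void Split(IEnumerable<FieldPair> fields,
                          ICollection<object> consumers,
                          ICollection<object> endConsumers)
        {
            foreach (var f in fields)
            {
                consumers.Add(f.Consumer);        // childThreadsAndFields side
                endConsumers.Add(f.EndConsumer);  // endChildThreadsAndFields side
            }
        }
    }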


[50/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/De/GermanStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/De/GermanStemmer.cs b/src/contrib/Analyzers/De/GermanStemmer.cs
index d94d604..4dc80e3 100644
--- a/src/contrib/Analyzers/De/GermanStemmer.cs
+++ b/src/contrib/Analyzers/De/GermanStemmer.cs
@@ -1,4 +1,4 @@
-/*
+/*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -26,167 +26,167 @@ using System.Collections;
 
 namespace Lucene.Net.Analysis.De
 {
-	/// <summary>
-	/// A stemmer for German words. The algorithm is based on the report
-	/// "A Fast and Simple Stemming Algorithm for German Words" by Jörg
-	/// Caumanns (joerg.caumanns@isst.fhg.de).
-	/// </summary>
-	public class GermanStemmer
-	{
-		/// <summary>
-		/// Buffer for the terms while stemming them. 
-		/// </summary>
-		private StringBuilder sb = new StringBuilder();
+    /// <summary>
+    /// A stemmer for German words. The algorithm is based on the report
+    /// "A Fast and Simple Stemming Algorithm for German Words" by Jörg
+    /// Caumanns (joerg.caumanns@isst.fhg.de).
+    /// </summary>
+    public class GermanStemmer
+    {
+        /// <summary>
+        /// Buffer for the terms while stemming them. 
+        /// </summary>
+        private StringBuilder sb = new StringBuilder();
 
-		/// <summary>
-		/// Number of characters that are removed with <tt>Substitute()</tt> while stemming.
-		/// </summary>
-		protected int substCount = 0;
+        /// <summary>
+        /// Number of characters that are removed with <tt>Substitute()</tt> while stemming.
+        /// </summary>
+        protected int substCount = 0;
 
-		/// <summary>
-		/// Stems the given term to a unique <tt>discriminator</tt>.
-		/// </summary>
-		/// <param name="term">The term that should be stemmed.</param>
-		/// <returns>Discriminator for <tt>term</tt></returns>
-		internal String Stem( String term )
-		{
-			// Use lowercase for medium stemming.
-			term = term.ToLower();
-			if ( !IsStemmable( term ) )
-				return term;
-			// Reset the StringBuilder.
-			sb.Remove(0, sb.Length);
-			sb.Insert(0, term);
-			// Stemming starts here...
-			Substitute( sb );
-			Strip( sb );
-			Optimize( sb );
-			Resubstitute( sb );
-			RemoveParticleDenotion( sb );
-			return sb.ToString();
-		}
+        /// <summary>
+        /// Stems the given term to a unique <tt>discriminator</tt>.
+        /// </summary>
+        /// <param name="term">The term that should be stemmed.</param>
+        /// <returns>Discriminator for <tt>term</tt></returns>
+        internal String Stem( String term )
+        {
+            // Use lowercase for medium stemming.
+            term = term.ToLower();
+            if ( !IsStemmable( term ) )
+                return term;
+            // Reset the StringBuilder.
+            sb.Remove(0, sb.Length);
+            sb.Insert(0, term);
+            // Stemming starts here...
+            Substitute( sb );
+            Strip( sb );
+            Optimize( sb );
+            Resubstitute( sb );
+            RemoveParticleDenotion( sb );
+            return sb.ToString();
+        }
 
-		/// <summary>
-		/// Checks if a term could be stemmed.
-		/// </summary>
-		/// <param name="term"></param>
-		/// <returns>true if, and only if, the given term consists only of letters.</returns>
-		private bool IsStemmable( String term )
-		{
-			for ( int c = 0; c < term.Length; c++ ) 
-			{
-				if ( !Char.IsLetter(term[c])) return false;
-			}
-			return true;
-		}
+        /// <summary>
+        /// Checks if a term could be stemmed.
+        /// </summary>
+        /// <param name="term"></param>
+        /// <returns>true if, and only if, the given term consists only of letters.</returns>
+        private bool IsStemmable( String term )
+        {
+            for ( int c = 0; c < term.Length; c++ ) 
+            {
+                if ( !Char.IsLetter(term[c])) return false;
+            }
+            return true;
+        }
 
-		/// <summary>
-		/// Suffix stripping (stemming) on the current term. The stripping is reduced
-		/// to the seven "base" suffixes "e", "s", "n", "t", "em", "er" and "nd",
-		/// from which all regular suffixes are built. The simplification causes
-		/// some overstemming and many more irregular stems, but still provides unique
-		/// discriminators in most of those cases.
-		/// The algorithm is context free, except for the length restrictions.
-		/// </summary>
-		/// <param name="buffer"></param>
-		private void Strip( StringBuilder buffer )
-		{
-			bool doMore = true;
-			while ( doMore && buffer.Length > 3 ) 
-			{
-				if ( ( buffer.Length + substCount > 5 ) &&
-					buffer.ToString().Substring(buffer.Length - 2, 2).Equals( "nd" ) )
-				{
-					buffer.Remove( buffer.Length - 2, 2 );
-				}
-				else if ( ( buffer.Length + substCount > 4 ) &&
-					buffer.ToString().Substring( buffer.Length - 2, 2).Equals( "em" ) ) 
-				{
-					buffer.Remove( buffer.Length - 2, 2 );
-				}
-				else if ( ( buffer.Length + substCount > 4 ) &&
-					buffer.ToString().Substring( buffer.Length - 2, 2).Equals( "er" ) ) 
-				{
-					buffer.Remove( buffer.Length - 2, 2 );
-				}
-				else if ( buffer[buffer.Length - 1] == 'e' ) 
-				{
-					buffer.Remove(buffer.Length - 1, 1);
-				}
-				else if ( buffer[buffer.Length - 1] == 's' ) 
-				{
-					buffer.Remove(buffer.Length - 1, 1);
-				}
-				else if ( buffer[buffer.Length - 1] == 'n' ) 
-				{
-					buffer.Remove(buffer.Length - 1, 1);
-				}
-					// "t" occurs only as suffix of verbs.
-				else if ( buffer[buffer.Length - 1] == 't') 
-				{
-					buffer.Remove(buffer.Length - 1, 1);
-				}
-				else 
-				{
-					doMore = false;
-				}
-			}
-		}
+        /// <summary>
+        /// Suffix stripping (stemming) on the current term. The stripping is reduced
+        /// to the seven "base" suffixes "e", "s", "n", "t", "em", "er" and "nd",
+        /// from which all regular suffixes are built. The simplification causes
+        /// some overstemming and many more irregular stems, but still provides unique
+        /// discriminators in most of those cases.
+        /// The algorithm is context free, except for the length restrictions.
+        /// </summary>
+        /// <param name="buffer"></param>
+        private void Strip( StringBuilder buffer )
+        {
+            bool doMore = true;
+            while ( doMore && buffer.Length > 3 ) 
+            {
+                if ( ( buffer.Length + substCount > 5 ) &&
+                    buffer.ToString().Substring(buffer.Length - 2, 2).Equals( "nd" ) )
+                {
+                    buffer.Remove( buffer.Length - 2, 2 );
+                }
+                else if ( ( buffer.Length + substCount > 4 ) &&
+                    buffer.ToString().Substring( buffer.Length - 2, 2).Equals( "em" ) ) 
+                {
+                    buffer.Remove( buffer.Length - 2, 2 );
+                }
+                else if ( ( buffer.Length + substCount > 4 ) &&
+                    buffer.ToString().Substring( buffer.Length - 2, 2).Equals( "er" ) ) 
+                {
+                    buffer.Remove( buffer.Length - 2, 2 );
+                }
+                else if ( buffer[buffer.Length - 1] == 'e' ) 
+                {
+                    buffer.Remove(buffer.Length - 1, 1);
+                }
+                else if ( buffer[buffer.Length - 1] == 's' ) 
+                {
+                    buffer.Remove(buffer.Length - 1, 1);
+                }
+                else if ( buffer[buffer.Length - 1] == 'n' ) 
+                {
+                    buffer.Remove(buffer.Length - 1, 1);
+                }
+                    // "t" occurs only as suffix of verbs.
+                else if ( buffer[buffer.Length - 1] == 't') 
+                {
+                    buffer.Remove(buffer.Length - 1, 1);
+                }
+                else 
+                {
+                    doMore = false;
+                }
+            }
+        }
 
-		/// <summary>
-		/// Performs some optimizations on the term. These optimizations are contextual.
-		/// </summary>
-		/// <param name="buffer"></param>
-		private void Optimize( StringBuilder buffer )
-		{
-			// Additional step for female plurals of professions and inhabitants.
-			if ( buffer.Length > 5 && buffer.ToString().Substring(buffer.Length - 5, 5).Equals( "erin*" )) 
-			{
-				buffer.Remove(buffer.Length - 1, 1);
-				Strip(buffer);
-			}
-			// Additional step for irregular plural nouns like "Matrizen -> Matrix".
-			if ( buffer[buffer.Length - 1] == ('z') ) 
-			{
-				buffer[buffer.Length - 1] = 'x';
-			}
-		}
+        /// <summary>
+        /// Performs some optimizations on the term. These optimizations are contextual.
+        /// </summary>
+        /// <param name="buffer"></param>
+        private void Optimize( StringBuilder buffer )
+        {
+            // Additional step for female plurals of professions and inhabitants.
+            if ( buffer.Length > 5 && buffer.ToString().Substring(buffer.Length - 5, 5).Equals( "erin*" )) 
+            {
+                buffer.Remove(buffer.Length - 1, 1);
+                Strip(buffer);
+            }
+            // Additional step for irregular plural nouns like "Matrizen -> Matrix".
+            if ( buffer[buffer.Length - 1] == ('z') ) 
+            {
+                buffer[buffer.Length - 1] = 'x';
+            }
+        }
 
-		/// <summary>
-		/// Removes a particle denotation ("ge") from a term.
-		/// </summary>
-		/// <param name="buffer"></param>
-		private void RemoveParticleDenotion( StringBuilder buffer )
-		{
-			if ( buffer.Length > 4 ) 
-			{
-				for ( int c = 0; c < buffer.Length - 3; c++ ) 
-				{
-					if ( buffer.ToString().Substring( c, 4 ).Equals( "gege" ) ) 
-					{
-						buffer.Remove(c, 2);
-						return;
-					}
-				}
-			}
-		}
+        /// <summary>
+        /// Removes a particle denotation ("ge") from a term.
+        /// </summary>
+        /// <param name="buffer"></param>
+        private void RemoveParticleDenotion( StringBuilder buffer )
+        {
+            if ( buffer.Length > 4 ) 
+            {
+                for ( int c = 0; c < buffer.Length - 3; c++ ) 
+                {
+                    if ( buffer.ToString().Substring( c, 4 ).Equals( "gege" ) ) 
+                    {
+                        buffer.Remove(c, 2);
+                        return;
+                    }
+                }
+            }
+        }
 
-		/// <summary>
-		/// Do some substitutions for the term to reduce overstemming:
-		///
-		/// - Substitute Umlauts with their corresponding vowel: äöü -> aou,
+        /// <summary>
+        /// Do some substitutions for the term to reduce overstemming:
+        ///
+        /// - Substitute Umlauts with their corresponding vowel: äöü -> aou,
         ///   "&#223;" is substituted by "ss"
-		/// - Substitute a second char of a pair of equal characters with
-		/// an asterisk: ?? -&gt; ?*
-		/// - Substitute some common character combinations with a token:
+        /// - Substitute a second char of a pair of equal characters with
+        /// an asterisk: ?? -&gt; ?*
+        /// - Substitute some common character combinations with a token:
         ///   sch/ch/ei/ie/ig/st -&gt; $/&#167;/%/&amp;/#/!
-		/// </summary>
-		protected virtual void Substitute( StringBuilder buffer )
-		{
-			substCount = 0;
-			for ( int c = 0; c < buffer.Length; c++ ) 
-			{
-				// Replace the second char of a pair of the equal characters with an asterisk
+        /// </summary>
+        protected virtual void Substitute( StringBuilder buffer )
+        {
+            substCount = 0;
+            for ( int c = 0; c < buffer.Length; c++ ) 
+            {
+                // Replace the second char of a pair of the equal characters with an asterisk
                 if (c > 0 && buffer[c] == buffer[c - 1])
                 {
                     buffer[c] = '*';
@@ -212,97 +212,97 @@ namespace Lucene.Net.Analysis.De
                     substCount++;
                 }
 
-			    // Take care that at least one character is left on the left side of the current one
-				if ( c < buffer.Length - 1 ) 
-				{
-					// Masking several common character combinations with a token
-					if ( ( c < buffer.Length - 2 ) && buffer[c] == 's' &&
-						buffer[c + 1] == 'c' && buffer[c + 2] == 'h' )
-					{
-						buffer[c] = '$';
-						buffer.Remove(c + 1, 2);
-						substCount += 2;
-					}
-					else if ( buffer[c] == 'c' && buffer[c + 1] == 'h' ) 
-					{
-						buffer[c] = '§';
-						buffer.Remove(c + 1, 1);
-						substCount++;
-					}
-					else if ( buffer[c] == 'e' && buffer[c + 1] == 'i' ) 
-					{
-						buffer[c] = '%';
-						buffer.Remove(c + 1, 1);
-						substCount++;
-					}
-					else if ( buffer[c] == 'i' && buffer[c + 1] == 'e' ) 
-					{
-						buffer[c] = '&';
-						buffer.Remove(c + 1, 1);
-						substCount++;
-					}
-					else if ( buffer[c] == 'i' && buffer[c + 1] == 'g' ) 
-					{
-						buffer[c] = '#';
-						buffer.Remove(c + 1, 1);
-						substCount++;
-					}
-					else if ( buffer[c] == 's' && buffer[c + 1] == 't' ) 
-					{
-						buffer[c] = '!';
-						buffer.Remove(c + 1, 1);
-						substCount++;
-					}
-				}
-			}
-		}
+                // Take care that at least one character is left on the left side of the current one
+                if ( c < buffer.Length - 1 ) 
+                {
+                    // Masking several common character combinations with a token
+                    if ( ( c < buffer.Length - 2 ) && buffer[c] == 's' &&
+                        buffer[c + 1] == 'c' && buffer[c + 2] == 'h' )
+                    {
+                        buffer[c] = '$';
+                        buffer.Remove(c + 1, 2);
+                        substCount += 2;
+                    }
+                    else if ( buffer[c] == 'c' && buffer[c + 1] == 'h' ) 
+                    {
+                        buffer[c] = '§';
+                        buffer.Remove(c + 1, 1);
+                        substCount++;
+                    }
+                    else if ( buffer[c] == 'e' && buffer[c + 1] == 'i' ) 
+                    {
+                        buffer[c] = '%';
+                        buffer.Remove(c + 1, 1);
+                        substCount++;
+                    }
+                    else if ( buffer[c] == 'i' && buffer[c + 1] == 'e' ) 
+                    {
+                        buffer[c] = '&';
+                        buffer.Remove(c + 1, 1);
+                        substCount++;
+                    }
+                    else if ( buffer[c] == 'i' && buffer[c + 1] == 'g' ) 
+                    {
+                        buffer[c] = '#';
+                        buffer.Remove(c + 1, 1);
+                        substCount++;
+                    }
+                    else if ( buffer[c] == 's' && buffer[c + 1] == 't' ) 
+                    {
+                        buffer[c] = '!';
+                        buffer.Remove(c + 1, 1);
+                        substCount++;
+                    }
+                }
+            }
+        }
 
-	    /// <summary>
-		/// Undoes the changes made by Substitute(). Those are character pairs and
-		/// character combinations. Umlauts will remain as their corresponding vowel,
-		/// as "ß" remains as "ss".
-		/// </summary>
-		/// <param name="buffer"></param>
-		private void Resubstitute( StringBuilder buffer )
-		{
-			for ( int c = 0; c < buffer.Length; c++ ) 
-			{
-				if ( buffer[c] == '*' ) 
-				{
-					char x = buffer[c - 1];
-					buffer[c] = x;
-				}
-				else if ( buffer[c] == '$' ) 
-				{
-					buffer[c] = 's';
-					buffer.Insert( c + 1, new char[]{'c', 'h'}, 0, 2);
-				}
-				else if ( buffer[c] == '§' ) 
-				{
-					buffer[c] = 'c';
-					buffer.Insert( c + 1, 'h' );
-				}
-				else if ( buffer[c] == '%' ) 
-				{
-					buffer[c] = 'e';
-					buffer.Insert( c + 1, 'i' );
-				}
-				else if ( buffer[c] == '&' ) 
-				{
-					buffer[c] = 'i';
-					buffer.Insert( c + 1, 'e' );
-				}
-				else if ( buffer[c] == '#' ) 
-				{
-					buffer[c] = 'i';
-					buffer.Insert( c + 1, 'g' );
-				}
-				else if ( buffer[c] == '!' ) 
-				{
-					buffer[c] = 's';
-					buffer.Insert( c + 1, 't' );
-				}
-			}
-		}
-	}
+        /// <summary>
+        /// Undoes the changes made by Substitute(). Those are character pairs and
+        /// character combinations. Umlauts will remain as their corresponding vowel,
+        /// as "ß" remains as "ss".
+        /// </summary>
+        /// <param name="buffer"></param>
+        private void Resubstitute( StringBuilder buffer )
+        {
+            for ( int c = 0; c < buffer.Length; c++ ) 
+            {
+                if ( buffer[c] == '*' ) 
+                {
+                    char x = buffer[c - 1];
+                    buffer[c] = x;
+                }
+                else if ( buffer[c] == '$' ) 
+                {
+                    buffer[c] = 's';
+                    buffer.Insert( c + 1, new char[]{'c', 'h'}, 0, 2);
+                }
+                else if ( buffer[c] == '§' ) 
+                {
+                    buffer[c] = 'c';
+                    buffer.Insert( c + 1, 'h' );
+                }
+                else if ( buffer[c] == '%' ) 
+                {
+                    buffer[c] = 'e';
+                    buffer.Insert( c + 1, 'i' );
+                }
+                else if ( buffer[c] == '&' ) 
+                {
+                    buffer[c] = 'i';
+                    buffer.Insert( c + 1, 'e' );
+                }
+                else if ( buffer[c] == '#' ) 
+                {
+                    buffer[c] = 'i';
+                    buffer.Insert( c + 1, 'g' );
+                }
+                else if ( buffer[c] == '!' ) 
+                {
+                    buffer[c] = 's';
+                    buffer.Insert( c + 1, 't' );
+                }
+            }
+        }
+    }
 }
\ No newline at end of file
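
The Substitute/Resubstitute pair above is a reversible masking scheme: multi-character groups such as "sch" collapse to single sentinel characters so the length-based stripping in Strip treats them as one unit, and Resubstitute expands them back. A self-contained sketch of one rule from that table ("sch" <-> '$'); the remaining rules follow the same shape:

    using System.Text;

    static class MaskSketch
    {
        static string Mask(string term)
        {
            var sb = new StringBuilder(term);
            for (int c = 0; c < sb.Length - 2; c++)
                if (sb[c] == 's' && sb[c + 1] == 'c' && sb[c + 2] == 'h')
                {
                    sb[c] = '$';         // sentinel, as in Substitute above
                    sb.Remove(c + 1, 2);
                }
            return sb.ToString();
        }

        static string Unmask(string term)
        {
            return term.Replace("$", "sch");   // as in Resubstitute above
        }
    }

    // Mask("fischen") == "fi$en"; Unmask("fi$en") == "fischen".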

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/El/GreekAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/El/GreekAnalyzer.cs b/src/contrib/Analyzers/El/GreekAnalyzer.cs
index 1242ec7..354bc0f 100644
--- a/src/contrib/Analyzers/El/GreekAnalyzer.cs
+++ b/src/contrib/Analyzers/El/GreekAnalyzer.cs
@@ -1,4 +1,4 @@
-/*
+/*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Filters/ChainedFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Filters/ChainedFilter.cs b/src/contrib/Analyzers/Filters/ChainedFilter.cs
index 8bc2ffd..0fa4e69 100644
--- a/src/contrib/Analyzers/Filters/ChainedFilter.cs
+++ b/src/contrib/Analyzers/Filters/ChainedFilter.cs
@@ -1,4 +1,4 @@
-/**
+/**
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Fr/ElisionFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Fr/ElisionFilter.cs b/src/contrib/Analyzers/Fr/ElisionFilter.cs
index cf2d2ae..630b29d 100644
--- a/src/contrib/Analyzers/Fr/ElisionFilter.cs
+++ b/src/contrib/Analyzers/Fr/ElisionFilter.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Fr/FrenchAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Fr/FrenchAnalyzer.cs b/src/contrib/Analyzers/Fr/FrenchAnalyzer.cs
index 9bdc94f..43bd1f9 100644
--- a/src/contrib/Analyzers/Fr/FrenchAnalyzer.cs
+++ b/src/contrib/Analyzers/Fr/FrenchAnalyzer.cs
@@ -1,4 +1,4 @@
-/*
+/*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Fr/FrenchStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Fr/FrenchStemmer.cs b/src/contrib/Analyzers/Fr/FrenchStemmer.cs
index e2decb5..2dc3a1c 100644
--- a/src/contrib/Analyzers/Fr/FrenchStemmer.cs
+++ b/src/contrib/Analyzers/Fr/FrenchStemmer.cs
@@ -1,4 +1,4 @@
-/*
+/*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -47,42 +47,42 @@ public class FrenchStemmer {
      */
      private StringBuilder tb = new StringBuilder();
 
-	/*
-	 * Region R0 is equal to the whole buffer
-	 */
-	private String R0;
-
-	/*
-	 * Region RV
-	 * "If the word begins with two vowels, RV is the region after the third letter,
-	 * otherwise the region after the first vowel not at the beginning of the word,
-	 * or the end of the word if these positions cannot be found."
-	 */
+    /*
+     * Region R0 is equal to the whole buffer
+     */
+    private String R0;
+
+    /*
+     * Region RV
+     * "If the word begins with two vowels, RV is the region after the third letter,
+     * otherwise the region after the first vowel not at the beginning of the word,
+     * or the end of the word if these positions cannot be found."
+     */
     private String RV;
 
-	/*
-	 * Region R1
-	 * "R1 is the region after the first non-vowel following a vowel
-	 * or is the null region at the end of the word if there is no such non-vowel"
-	 */
+    /*
+     * Region R1
+     * "R1 is the region after the first non-vowel following a vowel
+     * or is the null region at the end of the word if there is no such non-vowel"
+     */
     private String R1;
 
-	/*
-	 * Region R2
-	 * "R2 is the region after the first non-vowel in R1 following a vowel
-	 * or is the null region at the end of the word if there is no such non-vowel"
-	 */
+    /*
+     * Region R2
+     * "R2 is the region after the first non-vowel in R1 following a vowel
+     * or is the null region at the end of the word if there is no such non-vowel"
+     */
     private String R2;
 
 
-	/*
-	 * Set to true if we need to perform step 2
-	 */
+    /*
+     * Set to true if we need to perform step 2
+     */
     private bool suite;
 
-	/*
-	 * Set to true if the buffer was modified
-	 */
+    /*
+     * Set to true if the buffer was modified
+     */
     private bool modified;
 
 
@@ -93,599 +93,599 @@ public class FrenchStemmer {
      * @return java.lang.String  Discriminator for <tt>term</tt>
      */
     protected internal String Stem( String term ) {
-		if ( !IsStemmable( term ) ) {
-			return term;
-		}
+        if ( !IsStemmable( term ) ) {
+            return term;
+        }
+
+        // Use lowercase for medium stemming.
+        term = term.ToLower();
 
-		// Use lowercase for medium stemming.
-		term = term.ToLower();
+        // Reset the StringBuilder.
+        sb.Length =  0;
+        sb.Insert( 0, term );
 
-		// Reset the StringBuilder.
-		sb.Length =  0;
-		sb.Insert( 0, term );
+        // reset the bools
+        modified = false;
+        suite = false;
 
-		// reset the bools
-		modified = false;
-		suite = false;
+        sb = TreatVowels( sb );
 
-		sb = TreatVowels( sb );
+        SetStrings();
 
-		SetStrings();
+        Step1();
 
-		Step1();
+        if (!modified || suite)
+        {
+            if (RV != null)
+            {
+                suite = Step2A();
+                if (!suite)
+                    Step2B();
+            }
+        }
 
-		if (!modified || suite)
-		{
-			if (RV != null)
-			{
-				suite = Step2A();
-				if (!suite)
-					Step2B();
-			}
-		}
+        if (modified || suite)
+            Step3();
+        else
+            Step4();
 
-		if (modified || suite)
-			Step3();
-		else
-			Step4();
+        Step5();
 
-		Step5();
+        Step6();
 
-		Step6();
+        return sb.ToString();
+    }
 
-		return sb.ToString();
+    /*
+     * Sets the search region Strings<br>
+     * this needs to be done each time the buffer is modified
+     */
+    private void SetStrings() {
+        // set the strings
+        R0 = sb.ToString();
+        RV = RetrieveRV( sb );
+        R1 = RetrieveR( sb );
+        if ( R1 != null )
+        {
+            tb.Length =  0;
+            tb.Insert( 0, R1 );
+            R2 = RetrieveR( tb );
+        }
+        else
+            R2 = null;
     }
 
-	/*
-	 * Sets the search region Strings<br>
-	 * this needs to be done each time the buffer is modified
-	 */
-	private void SetStrings() {
-		// set the strings
-		R0 = sb.ToString();
-		RV = RetrieveRV( sb );
-		R1 = RetrieveR( sb );
-		if ( R1 != null )
-		{
-			tb.Length =  0;
-			tb.Insert( 0, R1 );
-			R2 = RetrieveR( tb );
-		}
-		else
-			R2 = null;
-	}
-
-	/*
-	 * First step of the Porter Algorithm<br>
-	 * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
-	 */
-	private void Step1( ) {
-		String[] suffix = { "ances", "iqUes", "ismes", "ables", "istes", "ance", "iqUe", "isme", "able", "iste" };
-		DeleteFrom( R2, suffix );
-
-		ReplaceFrom( R2, new String[] { "logies", "logie" }, "log" );
-		ReplaceFrom( R2, new String[] { "usions", "utions", "usion", "ution" }, "u" );
-		ReplaceFrom( R2, new String[] { "ences", "ence" }, "ent" );
-
-		String[] search = { "atrices", "ateurs", "ations", "atrice", "ateur", "ation"};
-		DeleteButSuffixFromElseReplace( R2, search, "ic",  true, R0, "iqU" );
-
-		DeleteButSuffixFromElseReplace( R2, new String[] { "ements", "ement" }, "eus", false, R0, "eux" );
-		DeleteButSuffixFrom( R2, new String[] { "ements", "ement" }, "ativ", false );
-		DeleteButSuffixFrom( R2, new String[] { "ements", "ement" }, "iv", false );
-		DeleteButSuffixFrom( R2, new String[] { "ements", "ement" }, "abl", false );
-		DeleteButSuffixFrom( R2, new String[] { "ements", "ement" }, "iqU", false );
-
-		DeleteFromIfTestVowelBeforeIn( R1, new String[] { "issements", "issement" }, false, R0 );
-		DeleteFrom( RV, new String[] { "ements", "ement" } );
+    /*
+     * First step of the Porter Algorithm<br>
+     * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
+     */
+    private void Step1( ) {
+        String[] suffix = { "ances", "iqUes", "ismes", "ables", "istes", "ance", "iqUe", "isme", "able", "iste" };
+        DeleteFrom( R2, suffix );
+
+        ReplaceFrom( R2, new String[] { "logies", "logie" }, "log" );
+        ReplaceFrom( R2, new String[] { "usions", "utions", "usion", "ution" }, "u" );
+        ReplaceFrom( R2, new String[] { "ences", "ence" }, "ent" );
+
+        String[] search = { "atrices", "ateurs", "ations", "atrice", "ateur", "ation"};
+        DeleteButSuffixFromElseReplace( R2, search, "ic",  true, R0, "iqU" );
+
+        DeleteButSuffixFromElseReplace( R2, new String[] { "ements", "ement" }, "eus", false, R0, "eux" );
+        DeleteButSuffixFrom( R2, new String[] { "ements", "ement" }, "ativ", false );
+        DeleteButSuffixFrom( R2, new String[] { "ements", "ement" }, "iv", false );
+        DeleteButSuffixFrom( R2, new String[] { "ements", "ement" }, "abl", false );
+        DeleteButSuffixFrom( R2, new String[] { "ements", "ement" }, "iqU", false );
+
+        DeleteFromIfTestVowelBeforeIn( R1, new String[] { "issements", "issement" }, false, R0 );
+        DeleteFrom( RV, new String[] { "ements", "ement" } );
 
         DeleteButSuffixFromElseReplace(R2, new [] { "it\u00e9s", "it\u00e9" }, "abil", false, R0, "abl");
         DeleteButSuffixFromElseReplace(R2, new [] { "it\u00e9s", "it\u00e9" }, "ic", false, R0, "iqU");
         DeleteButSuffixFrom(R2, new [] { "it\u00e9s", "it\u00e9" }, "iv", true);
 
-		String[] autre = { "ifs", "ives", "if", "ive" };
-		DeleteButSuffixFromElseReplace( R2, autre, "icat", false, R0, "iqU" );
-		DeleteButSuffixFromElseReplace( R2, autre, "at", true, R2, "iqU" );
-
-		ReplaceFrom( R0, new String[] { "eaux" }, "eau" );
-
-		ReplaceFrom( R1, new String[] { "aux" }, "al" );
-
-		DeleteButSuffixFromElseReplace( R2, new String[] { "euses", "euse" }, "", true, R1, "eux" );
-
-		DeleteFrom( R2, new String[] { "eux" } );
-
-		// if one of the next steps is performed, we will need to perform step2a
-		bool temp = false;
-		temp = ReplaceFrom( RV, new String[] { "amment" }, "ant" );
-		if (temp == true)
-			suite = true;
-		temp = ReplaceFrom( RV, new String[] { "emment" }, "ent" );
-		if (temp == true)
-			suite = true;
-		temp = DeleteFromIfTestVowelBeforeIn( RV, new String[] { "ments", "ment" }, true, RV );
-		if (temp == true)
-			suite = true;
-
-	}
-
-	/*
-	 * Second step (A) of the Porter Algorithm<br>
-	 * Will be performed if nothing changed in the first step,
-	 * or if changes were made to the amment, emment, ments or ment suffixes<br>
-	 * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
-	 *
-	 * @return bool - true if something changed in the StringBuilder
-	 */
-	private bool Step2A() {
+        String[] autre = { "ifs", "ives", "if", "ive" };
+        DeleteButSuffixFromElseReplace( R2, autre, "icat", false, R0, "iqU" );
+        DeleteButSuffixFromElseReplace( R2, autre, "at", true, R2, "iqU" );
+
+        ReplaceFrom( R0, new String[] { "eaux" }, "eau" );
+
+        ReplaceFrom( R1, new String[] { "aux" }, "al" );
+
+        DeleteButSuffixFromElseReplace( R2, new String[] { "euses", "euse" }, "", true, R1, "eux" );
+
+        DeleteFrom( R2, new String[] { "eux" } );
+
+        // if one of the next steps is performed, we will need to perform step2a
+        bool temp = false;
+        temp = ReplaceFrom( RV, new String[] { "amment" }, "ant" );
+        if (temp == true)
+            suite = true;
+        temp = ReplaceFrom( RV, new String[] { "emment" }, "ent" );
+        if (temp == true)
+            suite = true;
+        temp = DeleteFromIfTestVowelBeforeIn( RV, new String[] { "ments", "ment" }, true, RV );
+        if (temp == true)
+            suite = true;
+
+    }
+
+    /*
+     * Second step (A) of the Porter Algorithm<br>
+     * Will be performed if nothing changed in the first step,
+     * or if changes were made to the amment, emment, ments or ment suffixes<br>
+     * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
+     *
+     * @return bool - true if something changed in the StringBuilder
+     */
+    private bool Step2A() {
         String[] search = { "\u00eemes", "\u00eetes", "iraIent", "irait", "irais", "irai", "iras", "ira",
-							"irent", "iriez", "irez", "irions", "irons", "iront",
-							"issaIent", "issais", "issantes", "issante", "issants", "issant",
-							"issait", "issais", "issions", "issons", "issiez", "issez", "issent",
-							"isses", "isse", "ir", "is", "\u00eet", "it", "ies", "ie", "i" };
-		return DeleteFromIfTestVowelBeforeIn( RV, search, false, RV );
-	}
-
-	/*
-	 * Second step (B) of the Porter Algorithm<br>
-	 * Will be performed if step 2 A was performed unsuccessfully<br>
-	 * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
-	 */
-	private void Step2B() {
-		String[] suffix = { "eraIent", "erais", "erait", "erai", "eras", "erions", "eriez",
-							"erons", "eront","erez", "\u00e8rent", "era", "\u00e9es", "iez",
-							"\u00e9e", "\u00e9s", "er", "ez", "\u00e9" };
-		DeleteFrom( RV, suffix );
-
-		String[] search = { "assions", "assiez", "assent", "asses", "asse", "aIent",
-							"antes", "aIent", "Aient", "ante", "\u00e2mes", "\u00e2tes", "ants", "ant",
-							"ait", "a\u00eet", "ais", "Ait", "A\u00eet", "Ais", "\u00e2t", "as", "ai", "Ai", "a" };
-		DeleteButSuffixFrom( RV, search, "e", true );
-
-		DeleteFrom( R2, new String[] { "ions" } );
-	}
-
-	/*
-	 * Third step of the Porter Algorithm<br>
-	 * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
-	 */
-	private void Step3() {
-		if (sb.Length>0)
-		{
-			char ch = sb[ sb.Length -1];
-			if (ch == 'Y')
-			{
-				sb[sb.Length -1] = 'i' ;
-				SetStrings();
-			}
+                            "irent", "iriez", "irez", "irions", "irons", "iront",
+                            "issaIent", "issais", "issantes", "issante", "issants", "issant",
+                            "issait", "issais", "issions", "issons", "issiez", "issez", "issent",
+                            "isses", "isse", "ir", "is", "\u00eet", "it", "ies", "ie", "i" };
+        return DeleteFromIfTestVowelBeforeIn( RV, search, false, RV );
+    }
+
+    /*
+     * Second step (B) of the Porter Algorithm<br>
+     * Will be performed if step 2 A was performed unsuccessfully<br>
+     * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
+     */
+    private void Step2B() {
+        String[] suffix = { "eraIent", "erais", "erait", "erai", "eras", "erions", "eriez",
+                            "erons", "eront","erez", "\u00e8rent", "era", "\u00e9es", "iez",
+                            "\u00e9e", "\u00e9s", "er", "ez", "\u00e9" };
+        DeleteFrom( RV, suffix );
+
+        String[] search = { "assions", "assiez", "assent", "asses", "asse", "aIent",
+                            "antes", "aIent", "Aient", "ante", "\u00e2mes", "\u00e2tes", "ants", "ant",
+                            "ait", "a\u00eet", "ais", "Ait", "A\u00eet", "Ais", "\u00e2t", "as", "ai", "Ai", "a" };
+        DeleteButSuffixFrom( RV, search, "e", true );
+
+        DeleteFrom( R2, new String[] { "ions" } );
+    }
+
+    /*
+     * Third step of the Porter Algorithm<br>
+     * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
+     */
+    private void Step3() {
+        if (sb.Length>0)
+        {
+            char ch = sb[ sb.Length -1];
+            if (ch == 'Y')
+            {
+                sb[sb.Length -1] = 'i' ;
+                SetStrings();
+            }
             else if (ch == 'ç')
-			{
-				sb[sb.Length -1] = 'c';
-				SetStrings();
-			}
-		}
-	}
-
-	/*
-	 * Fourth step of the Porter Algorithm<br>
-	 * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
-	 */
-	private void Step4() {
-		if (sb.Length > 1)
-		{
-			char ch = sb[ sb.Length -1];
-			if (ch == 's')
-			{
-				char b = sb[ sb.Length -2];
-				if (b != 'a' && b != 'i' && b != 'o' && b != 'u' && b != 'è' && b != 's')
-				{
-					sb.Length = sb.Length - 1;
-					SetStrings();
-				}
-			}
-		}
-		bool found = DeleteFromIfPrecededIn( R2, new String[] { "ion" }, RV, "s" );
-		if (!found)
-			found = DeleteFromIfPrecededIn( R2, new String[] { "ion" }, RV, "t" );
+            {
+                sb[sb.Length -1] = 'c';
+                SetStrings();
+            }
+        }
+    }
+
+    /*
+     * Fourth step of the Porter Algorithm<br>
+     * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
+     */
+    private void Step4() {
+        if (sb.Length > 1)
+        {
+            char ch = sb[ sb.Length -1];
+            if (ch == 's')
+            {
+                char b = sb[ sb.Length -2];
+                if (b != 'a' && b != 'i' && b != 'o' && b != 'u' && b != 'è' && b != 's')
+                {
+                    sb.Length = sb.Length - 1;
+                    SetStrings();
+                }
+            }
+        }
+        bool found = DeleteFromIfPrecededIn( R2, new String[] { "ion" }, RV, "s" );
+        if (!found)
+            found = DeleteFromIfPrecededIn( R2, new String[] { "ion" }, RV, "t" );
 
         ReplaceFrom(RV, new String[] { "I\u00e8re", "i\u00e8re", "Ier", "ier" }, "i");
-		DeleteFrom( RV, new String[] { "e" } );
+        DeleteFrom( RV, new String[] { "e" } );
         DeleteFromIfPrecededIn(RV, new String[] { "\u00eb" }, R0, "gu");
-	}
-
-	/*
-	 * Fifth step of the Porter Algorithm<br>
-	 * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
-	 */
-	private void Step5() {
-		if (R0 != null)
-		{
-			if (R0.EndsWith("enn") || R0.EndsWith("onn") || R0.EndsWith("ett") || R0.EndsWith("ell") || R0.EndsWith("eill"))
-			{
-				sb.Length =  sb.Length - 1;
-				SetStrings();
-			}
-		}
-	}
-
-	/*
-	 * Sixth (and last!) step of the Porter Algorithm<br>
-	 * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
-	 */
-	private void Step6() {
-		if (R0!=null && R0.Length>0)
-		{
-			bool seenVowel = false;
-			bool seenConson = false;
-			int pos = -1;
-			for (int i = R0.Length-1; i > -1; i--)
-			{
-				char ch = R0[i] ;
-				if (IsVowel(ch))
-				{
-					if (!seenVowel)
-					{
-						if (ch == 'é' || ch == 'è')
-						{
-							pos = i;
-							break;
-						}
-					}
-					seenVowel = true;
-				}
-				else
-				{
-					if (seenVowel)
-						break;
-					else
-						seenConson = true;
-				}
-			}
-			if (pos > -1 && seenConson && !seenVowel)
-				sb[pos] = 'e';
-		}
-	}
-
-	/*
-	 * Delete a suffix searched in zone "source" if zone "from" contains prefix + search string
-	 *
-	 * @param source java.lang.String - the primary source zone for search
-	 * @param search java.lang.String[] - the strings to search for suppression
-	 * @param from java.lang.String - the secondary source zone for search
-	 * @param prefix java.lang.String - the prefix to add to the search string to test
-	 * @return bool - true if modified
-	 */
-	private bool DeleteFromIfPrecededIn( String source, String[] search, String from, String prefix ) {
-		bool found = false;
-		if (source!=null )
-		{
-			for (int i = 0; i < search.Length; i++) {
-				if ( source.EndsWith( search[i] ))
-				{
-					if (from!=null && from.EndsWith( prefix + search[i] ))
-					{
-						sb.Length =  sb.Length - search[i].Length;
-						found = true;
-						SetStrings();
-						break;
-					}
-				}
-			}
-		}
-		return found;
-	}
-
-	/*
-	 * Delete a suffix searched in zone "source" if the preceding letter is (or isn't) a vowel
-	 *
-	 * @param source java.lang.String - the primary source zone for search
-	 * @param search java.lang.String[] - the strings to search for suppression
-	 * @param vowel bool - true if we need a vowel before the search string
-	 * @param from java.lang.String - the secondary source zone for search (where vowel could be)
-	 * @return bool - true if modified
-	 */
-	private bool DeleteFromIfTestVowelBeforeIn( String source, String[] search, bool vowel, String from ) {
-		bool found = false;
-		if (source!=null && from!=null)
-		{
-			for (int i = 0; i < search.Length; i++) {
-				if ( source.EndsWith( search[i] ))
-				{
-					if ((search[i].Length + 1) <= from.Length)
-					{
-						bool test = IsVowel(sb[sb.Length -(search[i].Length+1)]);
-						if (test == vowel)
-						{
-							sb.Length =  sb.Length - search[i].Length;
-							modified = true;
-							found = true;
-							SetStrings();
-							break;
-						}
-					}
-				}
-			}
-		}
-		return found;
-	}
-
-	/*
-	 * Delete a suffix searched in zone "source" if preceded by the prefix
-	 *
-	 * @param source java.lang.String - the primary source zone for search
-	 * @param search java.lang.String[] - the strings to search for suppression
-	 * @param prefix java.lang.String - the prefix to add to the search string to test
-	 * @param without bool - true if it will be deleted even without prefix found
-	 */
-	private void DeleteButSuffixFrom( String source, String[] search, String prefix, bool without ) {
-		if (source!=null)
-		{
-			for (int i = 0; i < search.Length; i++) {
-				if ( source.EndsWith( prefix + search[i] ))
-				{
-					sb.Length =  sb.Length - (prefix.Length + search[i].Length);
-					modified = true;
-					SetStrings();
-					break;
-				}
-				else if ( without && source.EndsWith( search[i] ))
-				{
-					sb.Length =  sb.Length - search[i].Length;
-					modified = true;
-					SetStrings();
-					break;
-				}
-			}
-		}
-	}
-
-	/*
-	 * Delete a suffix searched in zone "source" if preceded by prefix<br>
-	 * or replace it with the replace string if preceded by the prefix in the zone "from"<br>
-	 * or delete the suffix if specified
-	 *
-	 * @param source java.lang.String - the primary source zone for search
-	 * @param search java.lang.String[] - the strings to search for suppression
-	 * @param prefix java.lang.String - the prefix to add to the search string to test
-	 * @param without bool - true if it will be deleted even without prefix found
-	 * @param from java.lang.String - the secondary source zone for search
-	 * @param replace java.lang.String - the replacement string
-	 */
-	private void DeleteButSuffixFromElseReplace( String source, String[] search, String prefix, bool without, String from, String replace ) {
-		if (source!=null)
-		{
-			for (int i = 0; i < search.Length; i++) {
-				if ( source.EndsWith( prefix + search[i] ))
-				{
-					sb.Length =  sb.Length - (prefix.Length + search[i].Length);
-					modified = true;
-					SetStrings();
-					break;
-				}
-				else if ( from!=null && from.EndsWith( prefix + search[i] ))
-				{
+    }
+
+    /*
+     * Fifth step of the Porter Algorithm<br>
+     * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
+     */
+    private void Step5() {
+        if (R0 != null)
+        {
+            if (R0.EndsWith("enn") || R0.EndsWith("onn") || R0.EndsWith("ett") || R0.EndsWith("ell") || R0.EndsWith("eill"))
+            {
+                sb.Length =  sb.Length - 1;
+                SetStrings();
+            }
+        }
+    }
+
+    /*
+     * Sixth (and last!) step of the Porter Algorithm<br>
+     * refer to http://snowball.sourceforge.net/french/stemmer.html for an explanation
+     */
+    private void Step6() {
+        if (R0!=null && R0.Length>0)
+        {
+            bool seenVowel = false;
+            bool seenConson = false;
+            int pos = -1;
+            for (int i = R0.Length-1; i > -1; i--)
+            {
+                char ch = R0[i] ;
+                if (IsVowel(ch))
+                {
+                    if (!seenVowel)
+                    {
+                        if (ch == 'é' || ch == 'è')
+                        {
+                            pos = i;
+                            break;
+                        }
+                    }
+                    seenVowel = true;
+                }
+                else
+                {
+                    if (seenVowel)
+                        break;
+                    else
+                        seenConson = true;
+                }
+            }
+            if (pos > -1 && seenConson && !seenVowel)
+                sb[pos] = 'e';
+        }
+    }
+
+    /*
+     * Delete a suffix searched in zone "source" if zone "from" contains prefix + search string
+     *
+     * @param source java.lang.String - the primary source zone for search
+     * @param search java.lang.String[] - the strings to search for suppression
+     * @param from java.lang.String - the secondary source zone for search
+     * @param prefix java.lang.String - the prefix to add to the search string to test
+     * @return bool - true if modified
+     */
+    private bool DeleteFromIfPrecededIn( String source, String[] search, String from, String prefix ) {
+        bool found = false;
+        if (source!=null )
+        {
+            for (int i = 0; i < search.Length; i++) {
+                if ( source.EndsWith( search[i] ))
+                {
+                    if (from!=null && from.EndsWith( prefix + search[i] ))
+                    {
+                        sb.Length =  sb.Length - search[i].Length;
+                        found = true;
+                        SetStrings();
+                        break;
+                    }
+                }
+            }
+        }
+        return found;
+    }
+
+    /*
+     * Delete a suffix searched in zone "source" if the preceding letter is (or isn't) a vowel
+     *
+     * @param source java.lang.String - the primary source zone for search
+     * @param search java.lang.String[] - the strings to search for suppression
+     * @param vowel bool - true if we need a vowel before the search string
+     * @param from java.lang.String - the secondary source zone for search (where vowel could be)
+     * @return bool - true if modified
+     */
+    private bool DeleteFromIfTestVowelBeforeIn( String source, String[] search, bool vowel, String from ) {
+        bool found = false;
+        if (source!=null && from!=null)
+        {
+            for (int i = 0; i < search.Length; i++) {
+                if ( source.EndsWith( search[i] ))
+                {
+                    if ((search[i].Length + 1) <= from.Length)
+                    {
+                        bool test = IsVowel(sb[sb.Length -(search[i].Length+1)]);
+                        if (test == vowel)
+                        {
+                            sb.Length =  sb.Length - search[i].Length;
+                            modified = true;
+                            found = true;
+                            SetStrings();
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+        return found;
+    }
+
+    /*
+     * Delete a suffix searched in zone "source" if preceded by the prefix
+     *
+     * @param source java.lang.String - the primary source zone for search
+     * @param search java.lang.String[] - the strings to search for suppression
+     * @param prefix java.lang.String - the prefix to add to the search string to test
+     * @param without bool - true if it will be deleted even without prefix found
+     */
+    private void DeleteButSuffixFrom( String source, String[] search, String prefix, bool without ) {
+        if (source!=null)
+        {
+            for (int i = 0; i < search.Length; i++) {
+                if ( source.EndsWith( prefix + search[i] ))
+                {
+                    sb.Length =  sb.Length - (prefix.Length + search[i].Length);
+                    modified = true;
+                    SetStrings();
+                    break;
+                }
+                else if ( without && source.EndsWith( search[i] ))
+                {
+                    sb.Length =  sb.Length - search[i].Length;
+                    modified = true;
+                    SetStrings();
+                    break;
+                }
+            }
+        }
+    }
+
+    /*
+     * Delete a suffix searched in zone "source" if preceded by prefix<br>
+     * or replace it with the replace string if preceded by the prefix in the zone "from"<br>
+     * or delete the suffix if specified
+     *
+     * @param source java.lang.String - the primary source zone for search
+     * @param search java.lang.String[] - the strings to search for suppression
+     * @param prefix java.lang.String - the prefix to add to the search string to test
+     * @param without bool - true if it will be deleted even without prefix found
+     * @param from java.lang.String - the secondary source zone for search
+     * @param replace java.lang.String - the replacement string
+     */
+    private void DeleteButSuffixFromElseReplace( String source, String[] search, String prefix, bool without, String from, String replace ) {
+        if (source!=null)
+        {
+            for (int i = 0; i < search.Length; i++) {
+                if ( source.EndsWith( prefix + search[i] ))
+                {
+                    sb.Length =  sb.Length - (prefix.Length + search[i].Length);
+                    modified = true;
+                    SetStrings();
+                    break;
+                }
+                else if ( from!=null && from.EndsWith( prefix + search[i] ))
+                {
                     // java equivalent of replace
-				    sb.Length = sb.Length - (prefix.Length + search[i].Length);
+                    sb.Length = sb.Length - (prefix.Length + search[i].Length);
                     sb.Append(replace);
 
-					modified = true;
-					SetStrings();
-					break;
-				}
-				else if ( without && source.EndsWith( search[i] ))
-				{
-					sb.Length =  sb.Length - search[i].Length;
-					modified = true;
-					SetStrings();
-					break;
-				}
-			}
-		}
-	}
-
-	/*
-	 * Replace a search string with another within the source zone
-	 *
-	 * @param source java.lang.String - the source zone for search
-	 * @param search java.lang.String[] - the strings to search for replacement
-	 * @param replace java.lang.String - the replacement string
-	 */
-	private bool ReplaceFrom( String source, String[] search, String replace ) {
-		bool found = false;
-		if (source!=null)
-		{
-			for (int i = 0; i < search.Length; i++) {
-				if ( source.EndsWith( search[i] ))
-				{
+                    modified = true;
+                    SetStrings();
+                    break;
+                }
+                else if ( without && source.EndsWith( search[i] ))
+                {
+                    sb.Length =  sb.Length - search[i].Length;
+                    modified = true;
+                    SetStrings();
+                    break;
+                }
+            }
+        }
+    }
+
+    /*
+     * Replace a search string with another within the source zone
+     *
+     * @param source java.lang.String - the source zone for search
+     * @param search java.lang.String[] - the strings to search for replacement
+     * @param replace java.lang.String - the replacement string
+     */
+    private bool ReplaceFrom( String source, String[] search, String replace ) {
+        bool found = false;
+        if (source!=null)
+        {
+            for (int i = 0; i < search.Length; i++) {
+                if ( source.EndsWith( search[i] ))
+                {
                     // java equivalent for replace
-				    sb.Length = sb.Length - search[i].Length;
+                    sb.Length = sb.Length - search[i].Length;
                     sb.Append(replace);
 
-					modified = true;
-					found = true;
-					SetStrings();
-					break;
-				}
-			}
-		}
-		return found;
-	}
-
-	/*
-	 * Delete a search string within the source zone
-	 *
-	 * @param source the source zone for search
-	 * @param suffix the strings to search for suppression
-	 */
-	private void DeleteFrom(String source, String[] suffix ) {
-		if (source!=null)
-		{
-			for (int i = 0; i < suffix.Length; i++) {
-				if (source.EndsWith( suffix[i] ))
-				{
-					sb.Length = sb.Length - suffix[i].Length;
-					modified = true;
-					SetStrings();
-					break;
-				}
-			}
-		}
-	}
-
-	/*
-	 * Test if a char is a French vowel, including accented ones
-	 *
-	 * @param ch the char to test
-	 * @return bool - true if the char is a vowel
-	 */
-	private bool IsVowel(char ch) {
-		switch (ch)
-		{
-			case 'a':
-			case 'e':
-			case 'i':
-			case 'o':
-			case 'u':
-			case 'y':
-			case 'â':
-			case 'à':
-			case 'ë':
-			case 'é':
-			case 'ê':
-			case 'è':
-			case 'ï':
-			case 'î':
-			case 'ô':
-			case 'ü':
-			case 'ù':
-			case 'û':
-				return true;
-			default:
-				return false;
-		}
-	}
-
-	/*
-	 * Retrieve the "R zone" (1 or 2 depending on the buffer) and return the corresponding string<br>
-	 * "R is the region after the first non-vowel following a vowel
-	 * or is the null region at the end of the word if there is no such non-vowel"<br>
-	 * @param buffer java.lang.StringBuilder - the in buffer
-	 * @return java.lang.String - the resulting string
-	 */
-	private String RetrieveR( StringBuilder buffer ) {
-		int len = buffer.Length;
-		int pos = -1;
-		for (int c = 0; c < len; c++) {
-			if (IsVowel( buffer[ c ] ))
-			{
-				pos = c;
-				break;
-			}
-		}
-		if (pos > -1)
-		{
-			int consonne = -1;
-			for (int c = pos; c < len; c++) {
-				if (!IsVowel(buffer[ c ] ))
-				{
-					consonne = c;
-					break;
-				}
-			}
-			if (consonne > -1 && (consonne+1) < len)
+                    modified = true;
+                    found = true;
+                    SetStrings();
+                    break;
+                }
+            }
+        }
+        return found;
+    }
+
+    /*
+     * Delete a search string within the source zone
+     *
+     * @param source the source zone for search
+     * @param suffix the strings to search for suppression
+     */
+    private void DeleteFrom(String source, String[] suffix ) {
+        if (source!=null)
+        {
+            for (int i = 0; i < suffix.Length; i++) {
+                if (source.EndsWith( suffix[i] ))
+                {
+                    sb.Length = sb.Length - suffix[i].Length;
+                    modified = true;
+                    SetStrings();
+                    break;
+                }
+            }
+        }
+    }
+
+    /*
+     * Test if a char is a French vowel, including accented ones
+     *
+     * @param ch the char to test
+     * @return bool - true if the char is a vowel
+     */
+    private bool IsVowel(char ch) {
+        switch (ch)
+        {
+            case 'a':
+            case 'e':
+            case 'i':
+            case 'o':
+            case 'u':
+            case 'y':
+            case 'â':
+            case 'à':
+            case 'ë':
+            case 'é':
+            case 'ê':
+            case 'è':
+            case 'ï':
+            case 'î':
+            case 'ô':
+            case 'ü':
+            case 'ù':
+            case 'û':
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    /*
+     * Retrieve the "R zone" (1 or 2 depending on the buffer) and return the corresponding string<br>
+     * "R is the region after the first non-vowel following a vowel
+     * or is the null region at the end of the word if there is no such non-vowel"<br>
+     * @param buffer java.lang.StringBuilder - the in buffer
+     * @return java.lang.String - the resulting string
+     */
+    private String RetrieveR( StringBuilder buffer ) {
+        int len = buffer.Length;
+        int pos = -1;
+        for (int c = 0; c < len; c++) {
+            if (IsVowel( buffer[ c ] ))
+            {
+                pos = c;
+                break;
+            }
+        }
+        if (pos > -1)
+        {
+            int consonne = -1;
+            for (int c = pos; c < len; c++) {
+                if (!IsVowel(buffer[ c ] ))
+                {
+                    consonne = c;
+                    break;
+                }
+            }
+            if (consonne > -1 && (consonne+1) < len)
                 return buffer.ToString(consonne + 1, len - (consonne+1));
-			else
-				return null;
-		}
-		else
-			return null;
-	}
-
-	/*
-	 * Retrieve the "RV zone" from a buffer and return the corresponding string<br>
-	 * "If the word begins with two vowels, RV is the region after the third letter,
-	 * otherwise the region after the first vowel not at the beginning of the word,
-	 * or the end of the word if these positions cannot be found."<br>
-	 * @param buffer java.lang.StringBuilder - the in buffer
-	 * @return java.lang.String - the resulting string
-	 */
-	private String RetrieveRV( StringBuilder buffer ) {
-		int len = buffer.Length;
-		if ( buffer.Length > 3)
-		{
-			if ( IsVowel(buffer[ 0 ] ) && IsVowel(buffer[ 1 ] )) {
+            else
+                return null;
+        }
+        else
+            return null;
+    }
+
+    /*
+     * Retrieve the "RV zone" from a buffer and return the corresponding string<br>
+     * "If the word begins with two vowels, RV is the region after the third letter,
+     * otherwise the region after the first vowel not at the beginning of the word,
+     * or the end of the word if these positions cannot be found."<br>
+     * @param buffer java.lang.StringBuilder - the in buffer
+     * @return java.lang.String - the resulting string
+     */
+    private String RetrieveRV( StringBuilder buffer ) {
+        int len = buffer.Length;
+        if ( buffer.Length > 3)
+        {
+            if ( IsVowel(buffer[ 0 ] ) && IsVowel(buffer[ 1 ] )) {
                 return buffer.ToString(3, len - 3);
-			}
-			else
-			{
-				int pos = 0;
-				for (int c = 1; c < len; c++) {
-					if (IsVowel( buffer[ c ] ))
-					{
-						pos = c;
-						break;
-					}
-				}
-				if ( pos+1 < len )
+            }
+            else
+            {
+                int pos = 0;
+                for (int c = 1; c < len; c++) {
+                    if (IsVowel( buffer[ c ] ))
+                    {
+                        pos = c;
+                        break;
+                    }
+                }
+                if ( pos+1 < len )
                     return buffer.ToString(pos + 1, len - (pos+1));
-				else
-					return null;
-			}
-		}
-		else
-			return null;
-	}
+                else
+                    return null;
+            }
+        }
+        else
+            return null;
+    }
 
 
 
     /*
-	 * Turns u and i preceded AND followed by a vowel to UpperCase<br>
-	 * Turns y preceded OR followed by a vowel to UpperCase<br>
-	 * Turns u preceded by q to UpperCase<br>
+     * Turns u and i preceded AND followed by a vowel to UpperCase<br>
+     * Turns y preceded OR followed by a vowel to UpperCase<br>
+     * Turns u preceded by q to UpperCase<br>
      *
      * @param buffer java.util.StringBuilder - the buffer to treat
      * @return java.util.StringBuilder - the treated buffer
      */
     private StringBuilder TreatVowels( StringBuilder buffer ) {
-		for ( int c = 0; c < buffer.Length; c++ ) {
-			char ch = buffer[ c ] ;
-
-			if (c == 0) // first char
-			{
-				if (buffer.Length>1)
-				{
-					if (ch == 'y' && IsVowel(buffer[ c + 1 ] ))
-						buffer[c] = 'Y';
-				}
-			}
-			else if (c == buffer.Length-1) // last char
-			{
-				if (ch == 'u' && buffer[ c - 1 ] == 'q')
-					buffer[c] = 'U';
-				if (ch == 'y' && IsVowel(buffer[ c - 1 ] ))
-					buffer[c] = 'Y';
-			}
-			else // other cases
-			{
-				if (ch == 'u')
-				{
-					if (buffer[ c - 1]  == 'q')
-						buffer[c] = 'U';
-					else if (IsVowel(buffer[ c - 1 ] ) && IsVowel(buffer[ c + 1 ] ))
-						buffer[c] = 'U';
-				}
-				if (ch == 'i')
-				{
-					if (IsVowel(buffer[ c - 1 ] ) && IsVowel(buffer[ c + 1 ] ))
-						buffer[c] = 'I';
-				}
-				if (ch == 'y')
-				{
-					if (IsVowel(buffer[ c - 1 ] ) || IsVowel(buffer[ c + 1 ] ))
-						buffer[c] = 'Y';
-				}
-			}
-		}
-
-		return buffer;
+        for ( int c = 0; c < buffer.Length; c++ ) {
+            char ch = buffer[ c ] ;
+
+            if (c == 0) // first char
+            {
+                if (buffer.Length>1)
+                {
+                    if (ch == 'y' && IsVowel(buffer[ c + 1 ] ))
+                        buffer[c] = 'Y';
+                }
+            }
+            else if (c == buffer.Length-1) // last char
+            {
+                if (ch == 'u' && buffer[ c - 1 ] == 'q')
+                    buffer[c] = 'U';
+                if (ch == 'y' && IsVowel(buffer[ c - 1 ] ))
+                    buffer[c] = 'Y';
+            }
+            else // other cases
+            {
+                if (ch == 'u')
+                {
+                    if (buffer[ c - 1]  == 'q')
+                        buffer[c] = 'U';
+                    else if (IsVowel(buffer[ c - 1 ] ) && IsVowel(buffer[ c + 1 ] ))
+                        buffer[c] = 'U';
+                }
+                if (ch == 'i')
+                {
+                    if (IsVowel(buffer[ c - 1 ] ) && IsVowel(buffer[ c + 1 ] ))
+                        buffer[c] = 'I';
+                }
+                if (ch == 'y')
+                {
+                    if (IsVowel(buffer[ c - 1 ] ) || IsVowel(buffer[ c + 1 ] ))
+                        buffer[c] = 'Y';
+                }
+            }
+        }
+
+        return buffer;
     }
 
     /*
@@ -694,32 +694,32 @@ public class FrenchStemmer {
      * @return bool - true if, and only if, the given term consists only of letters.
      */
     private bool IsStemmable( String term ) {
-		bool upper = false;
-		int first = -1;
-		for ( int c = 0; c < term.Length; c++ ) {
-			// Discard terms that contain non-letter chars.
-			if ( !char.IsLetter( term[c] ) ) {
-				return false;
-			}
-			// Discard terms that contain multiple uppercase letters.
-			if ( char.IsUpper( term[ c] ) ) {
-				if ( upper ) {
-					return false;
-				}
-			// First encountered uppercase letter, set flag and save
-			// position.
-				else {
-					first = c;
-					upper = true;
-				}
-			}
-		}
-		// Discard the term if it contains a single uppercase letter that
-		// is not starting the term.
-		if ( first > 0 ) {
-			return false;
-		}
-		return true;
+        bool upper = false;
+        int first = -1;
+        for ( int c = 0; c < term.Length; c++ ) {
+            // Discard terms that contain non-letter chars.
+            if ( !char.IsLetter( term[c] ) ) {
+                return false;
+            }
+            // Discard terms that contain multiple uppercase letters.
+            if ( char.IsUpper( term[ c] ) ) {
+                if ( upper ) {
+                    return false;
+                }
+            // First encountered uppercase letter, set flag and save
+            // position.
+                else {
+                    first = c;
+                    upper = true;
+                }
+            }
+        }
+        // Discard the term if it contains a single uppercase letter that
+        // is not starting the term.
+        if ( first > 0 ) {
+            return false;
+        }
+        return true;
     }
 }
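
The RV region quoted in the FrenchStemmer comments above ("if the word begins
with two vowels, RV is the region after the third letter, otherwise the
region after the first vowel not at the beginning of the word...") is what
RetrieveRV() computes. A minimal sketch of that computation as this
implementation reads the definition -- IsVowel is simplified here to the
unaccented vowels, and the sample words are arbitrary:

    using System;

    internal static class RvDemo
    {
        private static bool IsVowel(char ch)
        {
            return "aeiouy".IndexOf(ch) >= 0;
        }

        private static string RetrieveRV(string word)
        {
            int len = word.Length;
            if (len <= 3) return null;
            // Two leading vowels: RV is everything after the third letter.
            if (IsVowel(word[0]) && IsVowel(word[1]))
                return word.Substring(3);
            // Otherwise: the region after the first vowel past position 0.
            int pos = 0;
            for (int c = 1; c < len; c++)
            {
                if (IsVowel(word[c])) { pos = c; break; }
            }
            return pos + 1 < len ? word.Substring(pos + 1) : null;
        }

        private static void Main()
        {
            Console.WriteLine(RetrieveRV("aimer")); // "er"  (two leading vowels)
            Console.WriteLine(RetrieveRV("venir")); // "nir" (first vowel is the "e")
        }
    }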
 
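TreatVowels() above marks certain u, i and y characters as uppercase so that
later steps treat them as consonants: u and i between two vowels, y next to a
vowel on either side, and u directly after q. A condensed sketch of the same
pass (unaccented vowels only; the sample words are arbitrary):

    using System;
    using System.Text;

    internal static class TreatVowelsDemo
    {
        private static bool IsVowel(char ch)
        {
            return "aeiouy".IndexOf(ch) >= 0;
        }

        private static string TreatVowels(string word)
        {
            StringBuilder b = new StringBuilder(word);
            for (int c = 0; c < b.Length; c++)
            {
                bool prevVowel = c > 0 && IsVowel(b[c - 1]);
                bool nextVowel = c < b.Length - 1 && IsVowel(b[c + 1]);
                if (b[c] == 'u' && ((c > 0 && b[c - 1] == 'q') || (prevVowel && nextVowel)))
                    b[c] = 'U';                // u after q, or u between vowels
                else if (b[c] == 'i' && prevVowel && nextVowel)
                    b[c] = 'I';                // i between two vowels
                else if (b[c] == 'y' && (prevVowel || nextVowel))
                    b[c] = 'Y';                // y next to a vowel, either side
            }
            return b.ToString();
        }

        private static void Main()
        {
            Console.WriteLine(TreatVowels("quand")); // "qUand"
            Console.WriteLine(TreatVowels("payer")); // "paYer"
        }
    }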

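Finally, the IsStemmable() guard in the last hunk accepts a term only when it
consists entirely of letters and any uppercase letter appears at position 0
alone. A compact sketch of the same rule, again with hypothetical inputs:

    using System;

    internal static class IsStemmableDemo
    {
        private static bool IsStemmable(string term)
        {
            bool sawUpper = false;
            int firstUpper = -1;
            for (int c = 0; c < term.Length; c++)
            {
                if (!char.IsLetter(term[c]))
                    return false;              // non-letter character: reject
                if (char.IsUpper(term[c]))
                {
                    if (sawUpper)
                        return false;          // more than one uppercase: reject
                    firstUpper = c;
                    sawUpper = true;
                }
            }
            return firstUpper <= 0;            // a lone uppercase must start the term
        }

        private static void Main()
        {
            Console.WriteLine(IsStemmable("Paris")); // True
            Console.WriteLine(IsStemmable("paRis")); // False: uppercase not at start
            Console.WriteLine(IsStemmable("l'eau")); // False: contains an apostrophe
        }
    }
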
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Hunspell/HunspellStem.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Hunspell/HunspellStem.cs b/src/contrib/Analyzers/Hunspell/HunspellStem.cs
index 379c52f..5664304 100644
--- a/src/contrib/Analyzers/Hunspell/HunspellStem.cs
+++ b/src/contrib/Analyzers/Hunspell/HunspellStem.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Miscellaneous/EmptyTokenStream.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Miscellaneous/EmptyTokenStream.cs b/src/contrib/Analyzers/Miscellaneous/EmptyTokenStream.cs
index 98d5a4b..bc70321 100644
--- a/src/contrib/Analyzers/Miscellaneous/EmptyTokenStream.cs
+++ b/src/contrib/Analyzers/Miscellaneous/EmptyTokenStream.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Miscellaneous/InjectablePrefixAwareTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Miscellaneous/InjectablePrefixAwareTokenFilter.cs b/src/contrib/Analyzers/Miscellaneous/InjectablePrefixAwareTokenFilter.cs
index e43353e..4d3c111 100644
--- a/src/contrib/Analyzers/Miscellaneous/InjectablePrefixAwareTokenFilter.cs
+++ b/src/contrib/Analyzers/Miscellaneous/InjectablePrefixAwareTokenFilter.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs b/src/contrib/Analyzers/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs
index 9ef8edc..0734d3c 100644
--- a/src/contrib/Analyzers/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs
+++ b/src/contrib/Analyzers/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Miscellaneous/PrefixAwareTokenStream.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Miscellaneous/PrefixAwareTokenStream.cs b/src/contrib/Analyzers/Miscellaneous/PrefixAwareTokenStream.cs
index 45e1d19..127a503 100644
--- a/src/contrib/Analyzers/Miscellaneous/PrefixAwareTokenStream.cs
+++ b/src/contrib/Analyzers/Miscellaneous/PrefixAwareTokenStream.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Miscellaneous/SingleTokenTokenStream.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Miscellaneous/SingleTokenTokenStream.cs b/src/contrib/Analyzers/Miscellaneous/SingleTokenTokenStream.cs
index b24c0f3..232e326 100644
--- a/src/contrib/Analyzers/Miscellaneous/SingleTokenTokenStream.cs
+++ b/src/contrib/Analyzers/Miscellaneous/SingleTokenTokenStream.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/NGram/EdgeNGramTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/NGram/EdgeNGramTokenFilter.cs b/src/contrib/Analyzers/NGram/EdgeNGramTokenFilter.cs
index b2ddd36..a4a027e 100644
--- a/src/contrib/Analyzers/NGram/EdgeNGramTokenFilter.cs
+++ b/src/contrib/Analyzers/NGram/EdgeNGramTokenFilter.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/NGram/EdgeNGramTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/NGram/EdgeNGramTokenizer.cs b/src/contrib/Analyzers/NGram/EdgeNGramTokenizer.cs
index a925d65..c174ff9 100644
--- a/src/contrib/Analyzers/NGram/EdgeNGramTokenizer.cs
+++ b/src/contrib/Analyzers/NGram/EdgeNGramTokenizer.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/NGram/NGramTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/NGram/NGramTokenFilter.cs b/src/contrib/Analyzers/NGram/NGramTokenFilter.cs
index be11de2..8bb5707 100644
--- a/src/contrib/Analyzers/NGram/NGramTokenFilter.cs
+++ b/src/contrib/Analyzers/NGram/NGramTokenFilter.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/NGram/NGramTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/NGram/NGramTokenizer.cs b/src/contrib/Analyzers/NGram/NGramTokenizer.cs
index 773bdb5..9616a22 100644
--- a/src/contrib/Analyzers/NGram/NGramTokenizer.cs
+++ b/src/contrib/Analyzers/NGram/NGramTokenizer.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Payloads/AbstractEncoder.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Payloads/AbstractEncoder.cs b/src/contrib/Analyzers/Payloads/AbstractEncoder.cs
index 37771b6..1c9ffe8 100644
--- a/src/contrib/Analyzers/Payloads/AbstractEncoder.cs
+++ b/src/contrib/Analyzers/Payloads/AbstractEncoder.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Payloads/DelimitedPayloadTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Payloads/DelimitedPayloadTokenFilter.cs b/src/contrib/Analyzers/Payloads/DelimitedPayloadTokenFilter.cs
index e17a5f0..b514735 100644
--- a/src/contrib/Analyzers/Payloads/DelimitedPayloadTokenFilter.cs
+++ b/src/contrib/Analyzers/Payloads/DelimitedPayloadTokenFilter.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Payloads/FloatEncoder.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Payloads/FloatEncoder.cs b/src/contrib/Analyzers/Payloads/FloatEncoder.cs
index ec5e386..ca9a8a9 100644
--- a/src/contrib/Analyzers/Payloads/FloatEncoder.cs
+++ b/src/contrib/Analyzers/Payloads/FloatEncoder.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Payloads/IdentityEncoder.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Payloads/IdentityEncoder.cs b/src/contrib/Analyzers/Payloads/IdentityEncoder.cs
index 9379db1..5a92eeb 100644
--- a/src/contrib/Analyzers/Payloads/IdentityEncoder.cs
+++ b/src/contrib/Analyzers/Payloads/IdentityEncoder.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Payloads/IntegerEncoder.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Payloads/IntegerEncoder.cs b/src/contrib/Analyzers/Payloads/IntegerEncoder.cs
index 1179955..7b16d50 100644
--- a/src/contrib/Analyzers/Payloads/IntegerEncoder.cs
+++ b/src/contrib/Analyzers/Payloads/IntegerEncoder.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Payloads/PayloadEncoder.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Payloads/PayloadEncoder.cs b/src/contrib/Analyzers/Payloads/PayloadEncoder.cs
index 5ff6637..5a8b6f6 100644
--- a/src/contrib/Analyzers/Payloads/PayloadEncoder.cs
+++ b/src/contrib/Analyzers/Payloads/PayloadEncoder.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Payloads/PayloadHelper.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Payloads/PayloadHelper.cs b/src/contrib/Analyzers/Payloads/PayloadHelper.cs
index fea6676..a3c5619 100644
--- a/src/contrib/Analyzers/Payloads/PayloadHelper.cs
+++ b/src/contrib/Analyzers/Payloads/PayloadHelper.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Properties/AssemblyInfo.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Properties/AssemblyInfo.cs b/src/contrib/Analyzers/Properties/AssemblyInfo.cs
index 8eb8d02..1263583 100644
--- a/src/contrib/Analyzers/Properties/AssemblyInfo.cs
+++ b/src/contrib/Analyzers/Properties/AssemblyInfo.cs
@@ -1,4 +1,4 @@
-/*
+/*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Query/QueryAutoStopWordAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Query/QueryAutoStopWordAnalyzer.cs b/src/contrib/Analyzers/Query/QueryAutoStopWordAnalyzer.cs
index 54e4755..ac358c5 100644
--- a/src/contrib/Analyzers/Query/QueryAutoStopWordAnalyzer.cs
+++ b/src/contrib/Analyzers/Query/QueryAutoStopWordAnalyzer.cs
@@ -294,7 +294,7 @@ public class QueryAutoStopWordAnalyzer : Analyzer {
       }
     }
     return allStopWords.ToArray();
-	}
+    }
 
 }
 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Ru/RussianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Ru/RussianAnalyzer.cs b/src/contrib/Analyzers/Ru/RussianAnalyzer.cs
index b37eade..21ad541 100644
--- a/src/contrib/Analyzers/Ru/RussianAnalyzer.cs
+++ b/src/contrib/Analyzers/Ru/RussianAnalyzer.cs
@@ -1,4 +1,4 @@
-/*
+/*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Shingle/Codec/OneDimensionalNonWeightedTokenSettingsCodec.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Shingle/Codec/OneDimensionalNonWeightedTokenSettingsCodec.cs b/src/contrib/Analyzers/Shingle/Codec/OneDimensionalNonWeightedTokenSettingsCodec.cs
index a5cec14..fb5f59d 100644
--- a/src/contrib/Analyzers/Shingle/Codec/OneDimensionalNonWeightedTokenSettingsCodec.cs
+++ b/src/contrib/Analyzers/Shingle/Codec/OneDimensionalNonWeightedTokenSettingsCodec.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Shingle/Codec/SimpleThreeDimensionalTokenSettingsCodec.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Shingle/Codec/SimpleThreeDimensionalTokenSettingsCodec.cs b/src/contrib/Analyzers/Shingle/Codec/SimpleThreeDimensionalTokenSettingsCodec.cs
index 5edb112..cdd401e 100644
--- a/src/contrib/Analyzers/Shingle/Codec/SimpleThreeDimensionalTokenSettingsCodec.cs
+++ b/src/contrib/Analyzers/Shingle/Codec/SimpleThreeDimensionalTokenSettingsCodec.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Shingle/Codec/TokenSettingsCodec.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Shingle/Codec/TokenSettingsCodec.cs b/src/contrib/Analyzers/Shingle/Codec/TokenSettingsCodec.cs
index b25a787..47777d5 100644
--- a/src/contrib/Analyzers/Shingle/Codec/TokenSettingsCodec.cs
+++ b/src/contrib/Analyzers/Shingle/Codec/TokenSettingsCodec.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Shingle/Codec/TwoDimensionalNonWeightedSynonymTokenSettingsCodec.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Shingle/Codec/TwoDimensionalNonWeightedSynonymTokenSettingsCodec.cs b/src/contrib/Analyzers/Shingle/Codec/TwoDimensionalNonWeightedSynonymTokenSettingsCodec.cs
index 5300bef..446cf26 100644
--- a/src/contrib/Analyzers/Shingle/Codec/TwoDimensionalNonWeightedSynonymTokenSettingsCodec.cs
+++ b/src/contrib/Analyzers/Shingle/Codec/TwoDimensionalNonWeightedSynonymTokenSettingsCodec.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Shingle/Matrix/Column.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Shingle/Matrix/Column.cs b/src/contrib/Analyzers/Shingle/Matrix/Column.cs
index 1680f9a..8d44300 100644
--- a/src/contrib/Analyzers/Shingle/Matrix/Column.cs
+++ b/src/contrib/Analyzers/Shingle/Matrix/Column.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Shingle/Matrix/Matrix.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Shingle/Matrix/Matrix.cs b/src/contrib/Analyzers/Shingle/Matrix/Matrix.cs
index f0a8ec9..0431026 100644
--- a/src/contrib/Analyzers/Shingle/Matrix/Matrix.cs
+++ b/src/contrib/Analyzers/Shingle/Matrix/Matrix.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Shingle/Matrix/MatrixPermutationIterator.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Shingle/Matrix/MatrixPermutationIterator.cs b/src/contrib/Analyzers/Shingle/Matrix/MatrixPermutationIterator.cs
index 1bc6f32..2790236 100644
--- a/src/contrib/Analyzers/Shingle/Matrix/MatrixPermutationIterator.cs
+++ b/src/contrib/Analyzers/Shingle/Matrix/MatrixPermutationIterator.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Shingle/Matrix/Row.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Shingle/Matrix/Row.cs b/src/contrib/Analyzers/Shingle/Matrix/Row.cs
index 464bf11..a841f50 100644
--- a/src/contrib/Analyzers/Shingle/Matrix/Row.cs
+++ b/src/contrib/Analyzers/Shingle/Matrix/Row.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Shingle/ShingleAnalyzerWrapper.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Shingle/ShingleAnalyzerWrapper.cs b/src/contrib/Analyzers/Shingle/ShingleAnalyzerWrapper.cs
index c057768..afa3d0b 100644
--- a/src/contrib/Analyzers/Shingle/ShingleAnalyzerWrapper.cs
+++ b/src/contrib/Analyzers/Shingle/ShingleAnalyzerWrapper.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Shingle/ShingleFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Shingle/ShingleFilter.cs b/src/contrib/Analyzers/Shingle/ShingleFilter.cs
index 38c5eec..28de576 100644
--- a/src/contrib/Analyzers/Shingle/ShingleFilter.cs
+++ b/src/contrib/Analyzers/Shingle/ShingleFilter.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Shingle/ShingleMatrixFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Shingle/ShingleMatrixFilter.cs b/src/contrib/Analyzers/Shingle/ShingleMatrixFilter.cs
index 0b242e5..f9130e8 100644
--- a/src/contrib/Analyzers/Shingle/ShingleMatrixFilter.cs
+++ b/src/contrib/Analyzers/Shingle/ShingleMatrixFilter.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/Shingle/TokenPositioner.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Shingle/TokenPositioner.cs b/src/contrib/Analyzers/Shingle/TokenPositioner.cs
index 8ca65c7..9146888 100644
--- a/src/contrib/Analyzers/Shingle/TokenPositioner.cs
+++ b/src/contrib/Analyzers/Shingle/TokenPositioner.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Analyzers/WordlistLoader.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/WordlistLoader.cs b/src/contrib/Analyzers/WordlistLoader.cs
index fe5d24e..16fdbae 100644
--- a/src/contrib/Analyzers/WordlistLoader.cs
+++ b/src/contrib/Analyzers/WordlistLoader.cs
@@ -71,7 +71,7 @@
 //            if ( wordfile == null ) 
 //            {
 //                return new Hashtable();
-//            }			
+//            }            
 //            StreamReader lnr = new StreamReader(wordfile.FullName);
 //            return GetWordSet(lnr);
 //        }
@@ -86,9 +86,9 @@
 //        /// <returns>A Hashtable with the reader's words</returns>
 //        public static Hashtable GetWordSet(TextReader reader)
 //        {
-//            Hashtable result = new Hashtable();			
+//            Hashtable result = new Hashtable();            
 //            try 
-//            {				
+//            {                
 //                ArrayList stopWords = new ArrayList();
 //                String word = null;
 //                while ( ( word = reader.ReadLine() ) != null ) 
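
The commented-out loader above reads one word per line into a Hashtable. For reference, a minimal sketch of the same idea with generic collections (my own illustration, not part of this commit):

    using System.Collections.Generic;
    using System.IO;

    public static class WordlistExample
    {
        // Reads one word per line into a set, mirroring the commented-out
        // GetWordSet above but using HashSet<string> instead of Hashtable.
        public static ISet<string> GetWordSet(TextReader reader)
        {
            var result = new HashSet<string>();
            string word;
            while ((word = reader.ReadLine()) != null)
            {
                result.Add(word.Trim());
            }
            return result;
        }
    }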


[23/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/IPayloadAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/IPayloadAttribute.cs b/src/core/Analysis/Tokenattributes/IPayloadAttribute.cs
index 7e313ce..6f6fe48 100644
--- a/src/core/Analysis/Tokenattributes/IPayloadAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/IPayloadAttribute.cs
@@ -21,11 +21,11 @@ using Payload = Lucene.Net.Index.Payload;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary> The payload of a Token. See also <see cref="Payload" />.</summary>
-	public interface IPayloadAttribute:IAttribute
-	{
-	    /// <summary> Returns this Token's payload.</summary>
-	    Payload Payload { get; set; }
-	}
+    
+    /// <summary> The payload of a Token. See also <see cref="Payload" />.</summary>
+    public interface IPayloadAttribute:IAttribute
+    {
+        /// <summary> Returns this Token's payload.</summary>
+        Payload Payload { get; set; }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/IPositionIncrementAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/IPositionIncrementAttribute.cs b/src/core/Analysis/Tokenattributes/IPositionIncrementAttribute.cs
index 6c2a131..2bb9af0 100644
--- a/src/core/Analysis/Tokenattributes/IPositionIncrementAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/IPositionIncrementAttribute.cs
@@ -20,40 +20,40 @@ using Lucene.Net.Util;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary>The positionIncrement determines the position of this token
-	/// relative to the previous Token in a TokenStream, used in phrase
-	/// searching.
-	/// 
-	/// <p/>The default value is one.
-	/// 
-	/// <p/>Some common uses for this are:<list>
-	/// 
-	/// <item>Set it to zero to put multiple terms in the same position.  This is
-	/// useful if, e.g., a word has multiple stems.  Searches for phrases
-	/// including either stem will match.  In this case, all but the first stem's
-	/// increment should be set to zero: the increment of the first instance
-	/// should be one.  Repeating a token with an increment of zero can also be
-	/// used to boost the scores of matches on that token.</item>
-	/// 
-	/// <item>Set it to values greater than one to inhibit exact phrase matches.
-	/// If, for example, one does not want phrases to match across removed stop
-	/// words, then one could build a stop word filter that removes stop words and
-	/// also sets the increment to the number of stop words removed before each
-	/// non-stop word.  Then exact phrase queries will only match when the terms
-	/// occur with no intervening stop words.</item>
-	/// 
-	/// </list>
-	/// 
-	/// </summary>
-	/// <seealso cref="Lucene.Net.Index.TermPositions">
-	/// </seealso>
-	public interface IPositionIncrementAttribute:IAttribute
-	{
-	    /// <summary>Gets or sets the position increment. The default value is one.
-	    /// 
-	    /// </summary>
-	    /// <value> the distance from the prior term </value>
-	    int PositionIncrement { set; get; }
-	}
+    
+    /// <summary>The positionIncrement determines the position of this token
+    /// relative to the previous Token in a TokenStream, used in phrase
+    /// searching.
+    /// 
+    /// <p/>The default value is one.
+    /// 
+    /// <p/>Some common uses for this are:<list>
+    /// 
+    /// <item>Set it to zero to put multiple terms in the same position.  This is
+    /// useful if, e.g., a word has multiple stems.  Searches for phrases
+    /// including either stem will match.  In this case, all but the first stem's
+    /// increment should be set to zero: the increment of the first instance
+    /// should be one.  Repeating a token with an increment of zero can also be
+    /// used to boost the scores of matches on that token.</item>
+    /// 
+    /// <item>Set it to values greater than one to inhibit exact phrase matches.
+    /// If, for example, one does not want phrases to match across removed stop
+    /// words, then one could build a stop word filter that removes stop words and
+    /// also sets the increment to the number of stop words removed before each
+    /// non-stop word.  Then exact phrase queries will only match when the terms
+    /// occur with no intervening stop words.</item>
+    /// 
+    /// </list>
+    /// 
+    /// </summary>
+    /// <seealso cref="Lucene.Net.Index.TermPositions">
+    /// </seealso>
+    public interface IPositionIncrementAttribute:IAttribute
+    {
+        /// <summary>Gets or sets the position increment. The default value is one.
+        /// 
+        /// </summary>
+        /// <value> the distance from the prior term </value>
+        int PositionIncrement { set; get; }
+    }
 }
\ No newline at end of file
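
As a concrete illustration of the stop-word use case described in the summary above, here is a minimal sketch (not part of this commit; it assumes the Lucene.Net 3.x TokenFilter and attribute API, and GapAwareStopFilter is a hypothetical name):

    using System.Collections.Generic;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;

    // Removes stop words and widens the next surviving token's position
    // increment by the number of removed positions, so exact phrase queries
    // will not match across the gap.
    public sealed class GapAwareStopFilter : TokenFilter
    {
        private readonly ISet<string> stopWords;
        private readonly ITermAttribute termAtt;
        private readonly IPositionIncrementAttribute posIncrAtt;

        public GapAwareStopFilter(TokenStream input, ISet<string> stopWords)
            : base(input)
        {
            this.stopWords = stopWords;
            termAtt = AddAttribute<ITermAttribute>();
            posIncrAtt = AddAttribute<IPositionIncrementAttribute>();
        }

        public override bool IncrementToken()
        {
            int skippedPositions = 0;
            while (input.IncrementToken())
            {
                if (stopWords.Contains(termAtt.Term))
                {
                    // remember how many positions we are removing
                    skippedPositions += posIncrAtt.PositionIncrement;
                    continue;
                }
                posIncrAtt.PositionIncrement += skippedPositions;
                return true;
            }
            return false;
        }
    }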

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/ITermAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/ITermAttribute.cs b/src/core/Analysis/Tokenattributes/ITermAttribute.cs
index 8f9b030..2e7db2a 100644
--- a/src/core/Analysis/Tokenattributes/ITermAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/ITermAttribute.cs
@@ -20,85 +20,85 @@ using Lucene.Net.Util;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary> The term text of a Token.</summary>
-	public interface ITermAttribute:IAttribute
-	{
-	    /// <summary>Returns the Token's term text.
-	    /// 
-	    /// This method has a performance penalty
-	    /// because the text is stored internally in a char[].  If
-	    /// possible, use <see cref="TermBuffer()" /> and <see cref="TermLength()" />
-	    /// directly instead.  If you really need a
-	    /// String, use this method, which is nothing more than
-	    /// a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
-	    /// </summary>
-	    string Term { get; }
+    
+    /// <summary> The term text of a Token.</summary>
+    public interface ITermAttribute:IAttribute
+    {
+        /// <summary>Returns the Token's term text.
+        /// 
+        /// This method has a performance penalty
+        /// because the text is stored internally in a char[].  If
+        /// possible, use <see cref="TermBuffer()" /> and <see cref="TermLength()" />
+        /// directly instead.  If you really need a
+        /// String, use this method, which is nothing more than
+        /// a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
+        /// </summary>
+        string Term { get; }
 
-	    /// <summary>Copies the contents of buffer, starting at offset for
-		/// length characters, into the termBuffer array.
-		/// </summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		/// <param name="offset">the index in the buffer of the first character to copy
-		/// </param>
-		/// <param name="length">the number of characters to copy
-		/// </param>
-		void  SetTermBuffer(char[] buffer, int offset, int length);
-		
-		/// <summary>Copies the contents of buffer into the termBuffer array.</summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		void  SetTermBuffer(System.String buffer);
-		
-		/// <summary>Copies the contents of buffer, starting at offset and continuing
-		/// for length characters, into the termBuffer array.
-		/// </summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		/// <param name="offset">the index in the buffer of the first character to copy
-		/// </param>
-		/// <param name="length">the number of characters to copy
-		/// </param>
-		void  SetTermBuffer(System.String buffer, int offset, int length);
-		
-		/// <summary>Returns the internal termBuffer character array which
-		/// you can then directly alter.  If the array is too
-		/// small for your token, use <see cref="ResizeTermBuffer(int)" />
-		/// to increase it.  After
-		/// altering the buffer be sure to call <see cref="SetTermLength" />
-		/// to record the number of valid
-		/// characters that were placed into the termBuffer. 
-		/// </summary>
-		char[] TermBuffer();
-		
-		/// <summary>Grows the termBuffer to at least size newSize, preserving the
-		/// existing content. Note: If the next operation is to change
-		/// the contents of the term buffer use
-		/// <see cref="SetTermBuffer(char[], int, int)" />,
-		/// <see cref="SetTermBuffer(String)" />, or
-		/// <see cref="SetTermBuffer(String, int, int)" />
-		/// to optimally combine the resize with the setting of the termBuffer.
-		/// </summary>
-		/// <param name="newSize">minimum size of the new termBuffer
-		/// </param>
-		/// <returns> newly created termBuffer with length >= newSize
-		/// </returns>
-		char[] ResizeTermBuffer(int newSize);
-		
-		/// <summary>Return number of valid characters (length of the term)
-		/// in the termBuffer array. 
-		/// </summary>
-		int TermLength();
-		
-		/// <summary>Set number of valid characters (length of the term) in
-		/// the termBuffer array. Use this to truncate the termBuffer
-		/// or to synchronize with external manipulation of the termBuffer.
-		/// Note: to grow the size of the array,
-		/// use <see cref="ResizeTermBuffer(int)" /> first.
-		/// </summary>
-		/// <param name="length">the truncated length
-		/// </param>
-		void  SetTermLength(int length);
-	}
+        /// <summary>Copies the contents of buffer, starting at offset for
+        /// length characters, into the termBuffer array.
+        /// </summary>
+        /// <param name="buffer">the buffer to copy
+        /// </param>
+        /// <param name="offset">the index in the buffer of the first character to copy
+        /// </param>
+        /// <param name="length">the number of characters to copy
+        /// </param>
+        void  SetTermBuffer(char[] buffer, int offset, int length);
+        
+        /// <summary>Copies the contents of buffer into the termBuffer array.</summary>
+        /// <param name="buffer">the buffer to copy
+        /// </param>
+        void  SetTermBuffer(System.String buffer);
+        
+        /// <summary>Copies the contents of buffer, starting at offset and continuing
+        /// for length characters, into the termBuffer array.
+        /// </summary>
+        /// <param name="buffer">the buffer to copy
+        /// </param>
+        /// <param name="offset">the index in the buffer of the first character to copy
+        /// </param>
+        /// <param name="length">the number of characters to copy
+        /// </param>
+        void  SetTermBuffer(System.String buffer, int offset, int length);
+        
+        /// <summary>Returns the internal termBuffer character array which
+        /// you can then directly alter.  If the array is too
+        /// small for your token, use <see cref="ResizeTermBuffer(int)" />
+        /// to increase it.  After
+        /// altering the buffer be sure to call <see cref="SetTermLength" />
+        /// to record the number of valid
+        /// characters that were placed into the termBuffer. 
+        /// </summary>
+        char[] TermBuffer();
+        
+        /// <summary>Grows the termBuffer to at least size newSize, preserving the
+        /// existing content. Note: If the next operation is to change
+        /// the contents of the term buffer use
+        /// <see cref="SetTermBuffer(char[], int, int)" />,
+        /// <see cref="SetTermBuffer(String)" />, or
+        /// <see cref="SetTermBuffer(String, int, int)" />
+        /// to optimally combine the resize with the setting of the termBuffer.
+        /// </summary>
+        /// <param name="newSize">minimum size of the new termBuffer
+        /// </param>
+        /// <returns> newly created termBuffer with length >= newSize
+        /// </returns>
+        char[] ResizeTermBuffer(int newSize);
+        
+        /// <summary>Return number of valid characters (length of the term)
+        /// in the termBuffer array. 
+        /// </summary>
+        int TermLength();
+        
+        /// <summary>Set number of valid characters (length of the term) in
+        /// the termBuffer array. Use this to truncate the termBuffer
+        /// or to synchronize with external manipulation of the termBuffer.
+        /// Note: to grow the size of the array,
+        /// use <see cref="ResizeTermBuffer(int)" /> first.
+        /// </summary>
+        /// <param name="length">the truncated length
+        /// </param>
+        void  SetTermLength(int length);
+    }
 }
\ No newline at end of file
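
The buffer-oriented workflow documented above (TermBuffer, then mutate, then SetTermLength) looks like this in practice. A sketch, not part of this commit; UpperCaseExampleFilter is a hypothetical name and the code assumes the Lucene.Net 3.x TokenFilter API:

    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;

    // Upper-cases each term in place via the term buffer, avoiding the string
    // allocation that reading the Term property would incur.
    public sealed class UpperCaseExampleFilter : TokenFilter
    {
        private readonly ITermAttribute termAtt;

        public UpperCaseExampleFilter(TokenStream input) : base(input)
        {
            termAtt = AddAttribute<ITermAttribute>();
        }

        public override bool IncrementToken()
        {
            if (!input.IncrementToken())
                return false;
            char[] buffer = termAtt.TermBuffer();   // direct access, no copy
            int length = termAtt.TermLength();
            for (int i = 0; i < length; i++)
                buffer[i] = char.ToUpperInvariant(buffer[i]);
            termAtt.SetTermLength(length);          // record the valid length
            return true;
        }
    }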

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/ITypeAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/ITypeAttribute.cs b/src/core/Analysis/Tokenattributes/ITypeAttribute.cs
index 48bcc10..81ccc62 100644
--- a/src/core/Analysis/Tokenattributes/ITypeAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/ITypeAttribute.cs
@@ -20,11 +20,11 @@ using Lucene.Net.Util;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary> A Token's lexical type. The Default value is "word". </summary>
-	public interface ITypeAttribute:IAttribute
-	{
-	    /// <summary>Gets or sets this Token's lexical type.  Defaults to "word". </summary>
-	    string Type { get; set; }
-	}
+    
+    /// <summary> A Token's lexical type. The Default value is "word". </summary>
+    public interface ITypeAttribute:IAttribute
+    {
+        /// <summary>Gets or sets this Token's lexical type.  Defaults to "word". </summary>
+        string Type { get; set; }
+    }
 }
\ No newline at end of file
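
A small usage sketch (my illustration only; it assumes a tokenStream variable and the type names emitted by StandardTokenizer, such as "<NUM>"): the lexical type travels with the token and can drive per-token decisions downstream.

    ITypeAttribute typeAtt = tokenStream.AddAttribute<ITypeAttribute>();
    while (tokenStream.IncrementToken())
    {
        if (typeAtt.Type == "<NUM>")   // numeric tokens, vs. the default "word"
        {
            // handle numbers differently from ordinary word tokens
        }
    }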

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/OffsetAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/OffsetAttribute.cs b/src/core/Analysis/Tokenattributes/OffsetAttribute.cs
index 5149559..f329b03 100644
--- a/src/core/Analysis/Tokenattributes/OffsetAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/OffsetAttribute.cs
@@ -20,87 +20,87 @@ using Attribute = Lucene.Net.Util.Attribute;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary> The start and end character offset of a Token. </summary>
-	[Serializable]
-	public class OffsetAttribute:Attribute, IOffsetAttribute, System.ICloneable
-	{
-		private int startOffset;
-		private int endOffset;
+    
+    /// <summary> The start and end character offset of a Token. </summary>
+    [Serializable]
+    public class OffsetAttribute:Attribute, IOffsetAttribute, System.ICloneable
+    {
+        private int startOffset;
+        private int endOffset;
 
-	    /// <summary>Returns this Token's starting offset, the position of the first character
-	    /// corresponding to this token in the source text.
-	    /// Note that the difference between endOffset() and startOffset() may not be
-	    /// equal to termText.length(), as the term text may have been altered by a
-	    /// stemmer or some other filter. 
-	    /// </summary>
-	    public virtual int StartOffset
-	    {
-	        get { return startOffset; }
-	    }
+        /// <summary>Returns this Token's starting offset, the position of the first character
+        /// corresponding to this token in the source text.
+        /// Note that the difference between endOffset() and startOffset() may not be
+        /// equal to termText.length(), as the term text may have been altered by a
+        /// stemmer or some other filter. 
+        /// </summary>
+        public virtual int StartOffset
+        {
+            get { return startOffset; }
+        }
 
 
-	    /// <summary>Set the starting and ending offset.
+        /// <summary>Set the starting and ending offset.
         /// See StartOffset() and EndOffset()
         /// </summary>
-		public virtual void  SetOffset(int startOffset, int endOffset)
-		{
-			this.startOffset = startOffset;
-			this.endOffset = endOffset;
-		}
+        public virtual void  SetOffset(int startOffset, int endOffset)
+        {
+            this.startOffset = startOffset;
+            this.endOffset = endOffset;
+        }
 
 
-	    /// <summary>Returns this Token's ending offset, one greater than the position of the
-	    /// last character corresponding to this token in the source text. The length
-	    /// of the token in the source text is (endOffset - startOffset). 
-	    /// </summary>
-	    public virtual int EndOffset
-	    {
-	        get { return endOffset; }
-	    }
+        /// <summary>Returns this Token's ending offset, one greater than the position of the
+        /// last character corresponding to this token in the source text. The length
+        /// of the token in the source text is (endOffset - startOffset). 
+        /// </summary>
+        public virtual int EndOffset
+        {
+            get { return endOffset; }
+        }
 
 
-	    public override void  Clear()
-		{
-			startOffset = 0;
-			endOffset = 0;
-		}
-		
-		public  override bool Equals(System.Object other)
-		{
-			if (other == this)
-			{
-				return true;
-			}
-			
-			if (other is OffsetAttribute)
-			{
-				OffsetAttribute o = (OffsetAttribute) other;
-				return o.startOffset == startOffset && o.endOffset == endOffset;
-			}
-			
-			return false;
-		}
-		
-		public override int GetHashCode()
-		{
-			int code = startOffset;
-			code = code * 31 + endOffset;
-			return code;
-		}
-		
-		public override void  CopyTo(Attribute target)
-		{
-			IOffsetAttribute t = (IOffsetAttribute) target;
-			t.SetOffset(startOffset, endOffset);
-		}
-		
-		override public System.Object Clone()
-		{
+        public override void  Clear()
+        {
+            startOffset = 0;
+            endOffset = 0;
+        }
+        
+        public  override bool Equals(System.Object other)
+        {
+            if (other == this)
+            {
+                return true;
+            }
+            
+            if (other is OffsetAttribute)
+            {
+                OffsetAttribute o = (OffsetAttribute) other;
+                return o.startOffset == startOffset && o.endOffset == endOffset;
+            }
+            
+            return false;
+        }
+        
+        public override int GetHashCode()
+        {
+            int code = startOffset;
+            code = code * 31 + endOffset;
+            return code;
+        }
+        
+        public override void  CopyTo(Attribute target)
+        {
+            IOffsetAttribute t = (IOffsetAttribute) target;
+            t.SetOffset(startOffset, endOffset);
+        }
+        
+        override public System.Object Clone()
+        {
             OffsetAttribute impl = new OffsetAttribute();
             impl.endOffset = endOffset;
             impl.startOffset = startOffset;
             return impl;
-		}
-	}
+        }
+    }
 }
\ No newline at end of file
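
A short sketch of reading these offsets while consuming a stream (my illustration, assuming the Lucene.Net 3.x Analyzer.TokenStream(fieldName, reader) signature); note that EndOffset - StartOffset is the token's length in the source text, which can differ from the term length after stemming or filtering:

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;

    public static class OffsetExample
    {
        // Prints the exact slice of the source text each token came from.
        public static void DumpOffsets(Analyzer analyzer, string text)
        {
            TokenStream ts = analyzer.TokenStream("body", new StringReader(text));
            IOffsetAttribute offsetAtt = ts.AddAttribute<IOffsetAttribute>();
            while (ts.IncrementToken())
            {
                string slice = text.Substring(
                    offsetAtt.StartOffset,
                    offsetAtt.EndOffset - offsetAtt.StartOffset);
                System.Console.WriteLine(slice);
            }
        }
    }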

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/PayloadAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/PayloadAttribute.cs b/src/core/Analysis/Tokenattributes/PayloadAttribute.cs
index ae1c4d9..7bd7cbe 100644
--- a/src/core/Analysis/Tokenattributes/PayloadAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/PayloadAttribute.cs
@@ -21,80 +21,80 @@ using Payload = Lucene.Net.Index.Payload;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary> The payload of a Token. See also <see cref="Payload" />.</summary>
-	[Serializable]
-	public class PayloadAttribute:Attribute, IPayloadAttribute, System.ICloneable
-	{
-		private Payload payload;
-		
-		/// <summary> Initialize this attribute with no payload.</summary>
-		public PayloadAttribute()
-		{
-		}
-		
-		/// <summary> Initialize this attribute with the given payload. </summary>
-		public PayloadAttribute(Payload payload)
-		{
-			this.payload = payload;
-		}
+    
+    /// <summary> The payload of a Token. See also <see cref="Payload" />.</summary>
+    [Serializable]
+    public class PayloadAttribute:Attribute, IPayloadAttribute, System.ICloneable
+    {
+        private Payload payload;
+        
+        /// <summary> Initialize this attribute with no payload.</summary>
+        public PayloadAttribute()
+        {
+        }
+        
+        /// <summary> Initialize this attribute with the given payload. </summary>
+        public PayloadAttribute(Payload payload)
+        {
+            this.payload = payload;
+        }
 
-	    /// <summary> Returns this Token's payload.</summary>
-	    public virtual Payload Payload
-	    {
-	        get { return this.payload; }
-	        set { this.payload = value; }
-	    }
+        /// <summary> Returns this Token's payload.</summary>
+        public virtual Payload Payload
+        {
+            get { return this.payload; }
+            set { this.payload = value; }
+        }
 
-	    public override void  Clear()
-		{
-			payload = null;
-		}
-		
-		public override System.Object Clone()
-		{
-		    var clone = (PayloadAttribute) base.Clone();
+        public override void  Clear()
+        {
+            payload = null;
+        }
+        
+        public override System.Object Clone()
+        {
+            var clone = (PayloadAttribute) base.Clone();
             if (payload != null)
             {
                 clone.payload = (Payload) payload.Clone();
             }
-		    return clone;
+            return clone;
             // TODO: This code use to be as below.  Any reason why?  the if(payload!=null) was missing...
-		    //PayloadAttributeImpl impl = new PayloadAttributeImpl();
-		    //impl.payload = new Payload(this.payload.data, this.payload.offset, this.payload.length);
-		    //return impl;
-		}
-		
-		public  override bool Equals(System.Object other)
-		{
-			if (other == this)
-			{
-				return true;
-			}
-			
-			if (other is IPayloadAttribute)
-			{
-				PayloadAttribute o = (PayloadAttribute) other;
-				if (o.payload == null || payload == null)
-				{
-					return o.payload == null && payload == null;
-				}
-				
-				return o.payload.Equals(payload);
-			}
-			
-			return false;
-		}
-		
-		public override int GetHashCode()
-		{
-			return (payload == null)?0:payload.GetHashCode();
-		}
-		
-		public override void  CopyTo(Attribute target)
-		{
-			IPayloadAttribute t = (IPayloadAttribute) target;
-			t.Payload = (payload == null)?null:(Payload) payload.Clone();
-		}
-	}
+            //PayloadAttributeImpl impl = new PayloadAttributeImpl();
+            //impl.payload = new Payload(this.payload.data, this.payload.offset, this.payload.length);
+            //return impl;
+        }
+        
+        public  override bool Equals(System.Object other)
+        {
+            if (other == this)
+            {
+                return true;
+            }
+            
+            if (other is IPayloadAttribute)
+            {
+                PayloadAttribute o = (PayloadAttribute) other;
+                if (o.payload == null || payload == null)
+                {
+                    return o.payload == null && payload == null;
+                }
+                
+                return o.payload.Equals(payload);
+            }
+            
+            return false;
+        }
+        
+        public override int GetHashCode()
+        {
+            return (payload == null)?0:payload.GetHashCode();
+        }
+        
+        public override void  CopyTo(Attribute target)
+        {
+            IPayloadAttribute t = (IPayloadAttribute) target;
+            t.Payload = (payload == null)?null:(Payload) payload.Clone();
+        }
+    }
 }
\ No newline at end of file
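
For reference, a sketch of setting this attribute from a filter (my illustration, not part of this commit; ConstantPayloadFilter is a hypothetical name and the single-byte[] Payload constructor is assumed from the Lucene.Net 3.x API):

    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;
    using Lucene.Net.Index;

    // Attaches the same payload bytes to every token; the payload is stored
    // per position in the index and can be read back at search time.
    public sealed class ConstantPayloadFilter : TokenFilter
    {
        private readonly IPayloadAttribute payloadAtt;
        private readonly byte[] data;

        public ConstantPayloadFilter(TokenStream input, byte[] data) : base(input)
        {
            this.data = data;
            payloadAtt = AddAttribute<IPayloadAttribute>();
        }

        public override bool IncrementToken()
        {
            if (!input.IncrementToken())
                return false;
            payloadAtt.Payload = new Payload(data);
            return true;
        }
    }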

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/PositionIncrementAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/PositionIncrementAttribute.cs b/src/core/Analysis/Tokenattributes/PositionIncrementAttribute.cs
index 4f7a04f..b2293ca 100644
--- a/src/core/Analysis/Tokenattributes/PositionIncrementAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/PositionIncrementAttribute.cs
@@ -21,87 +21,87 @@ using TokenStream = Lucene.Net.Analysis.TokenStream;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary>The positionIncrement determines the position of this token
-	/// relative to the previous Token in a <see cref="TokenStream" />, used in phrase
-	/// searching.
-	/// 
-	/// <p/>The default value is one.
-	/// 
-	/// <p/>Some common uses for this are:<list>
-	/// 
-	/// <item>Set it to zero to put multiple terms in the same position.  This is
-	/// useful if, e.g., a word has multiple stems.  Searches for phrases
-	/// including either stem will match.  In this case, all but the first stem's
-	/// increment should be set to zero: the increment of the first instance
-	/// should be one.  Repeating a token with an increment of zero can also be
-	/// used to boost the scores of matches on that token.</item>
-	/// 
-	/// <item>Set it to values greater than one to inhibit exact phrase matches.
-	/// If, for example, one does not want phrases to match across removed stop
-	/// words, then one could build a stop word filter that removes stop words and
-	/// also sets the increment to the number of stop words removed before each
-	/// non-stop word.  Then exact phrase queries will only match when the terms
-	/// occur with no intervening stop words.</item>
-	/// 
-	/// </list>
-	/// </summary>
-	[Serializable]
-	public class PositionIncrementAttribute:Attribute, IPositionIncrementAttribute, System.ICloneable
-	{
-		private int positionIncrement = 1;
+    
+    /// <summary>The positionIncrement determines the position of this token
+    /// relative to the previous Token in a <see cref="TokenStream" />, used in phrase
+    /// searching.
+    /// 
+    /// <p/>The default value is one.
+    /// 
+    /// <p/>Some common uses for this are:<list>
+    /// 
+    /// <item>Set it to zero to put multiple terms in the same position.  This is
+    /// useful if, e.g., a word has multiple stems.  Searches for phrases
+    /// including either stem will match.  In this case, all but the first stem's
+    /// increment should be set to zero: the increment of the first instance
+    /// should be one.  Repeating a token with an increment of zero can also be
+    /// used to boost the scores of matches on that token.</item>
+    /// 
+    /// <item>Set it to values greater than one to inhibit exact phrase matches.
+    /// If, for example, one does not want phrases to match across removed stop
+    /// words, then one could build a stop word filter that removes stop words and
+    /// also sets the increment to the number of stop words removed before each
+    /// non-stop word.  Then exact phrase queries will only match when the terms
+    /// occur with no intervening stop words.</item>
+    /// 
+    /// </list>
+    /// </summary>
+    [Serializable]
+    public class PositionIncrementAttribute:Attribute, IPositionIncrementAttribute, System.ICloneable
+    {
+        private int positionIncrement = 1;
 
-	    /// <summary>Set the position increment. The default value is one.
-	    /// 
-	    /// </summary>
-	    /// <value> the distance from the prior term </value>
-	    public virtual int PositionIncrement
-	    {
-	        set
-	        {
-	            if (value < 0)
-	                throw new System.ArgumentException("Increment must be zero or greater: " + value);
-	            this.positionIncrement = value;
-	        }
-	        get { return positionIncrement; }
-	    }
+        /// <summary>Set the position increment. The default value is one.
+        /// 
+        /// </summary>
+        /// <value> the distance from the prior term </value>
+        public virtual int PositionIncrement
+        {
+            set
+            {
+                if (value < 0)
+                    throw new System.ArgumentException("Increment must be zero or greater: " + value);
+                this.positionIncrement = value;
+            }
+            get { return positionIncrement; }
+        }
 
-	    public override void  Clear()
-		{
-			this.positionIncrement = 1;
-		}
-		
-		public  override bool Equals(System.Object other)
-		{
-			if (other == this)
-			{
-				return true;
-			}
-			
-			if (other is PositionIncrementAttribute)
-			{
-				return positionIncrement == ((PositionIncrementAttribute) other).positionIncrement;
-			}
-			
-			return false;
-		}
-		
-		public override int GetHashCode()
-		{
-			return positionIncrement;
-		}
-		
-		public override void  CopyTo(Attribute target)
-		{
-			IPositionIncrementAttribute t = (IPositionIncrementAttribute) target;
-			t.PositionIncrement = positionIncrement;
-		}
-		
-		override public System.Object Clone()
-		{
+        public override void  Clear()
+        {
+            this.positionIncrement = 1;
+        }
+        
+        public  override bool Equals(System.Object other)
+        {
+            if (other == this)
+            {
+                return true;
+            }
+            
+            if (other is PositionIncrementAttribute)
+            {
+                return positionIncrement == ((PositionIncrementAttribute) other).positionIncrement;
+            }
+            
+            return false;
+        }
+        
+        public override int GetHashCode()
+        {
+            return positionIncrement;
+        }
+        
+        public override void  CopyTo(Attribute target)
+        {
+            IPositionIncrementAttribute t = (IPositionIncrementAttribute) target;
+            t.PositionIncrement = positionIncrement;
+        }
+        
+        override public System.Object Clone()
+        {
             PositionIncrementAttribute impl = new PositionIncrementAttribute();
             impl.positionIncrement = positionIncrement;
             return impl;
-		}
-	}
+        }
+    }
 }
\ No newline at end of file
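
One behavioral detail of the setter above is worth spelling out: negative increments are rejected, while zero is legal and stacks the token on the previous position. A tiny sketch of the contract:

    var posIncr = new PositionIncrementAttribute();
    posIncr.PositionIncrement = 0;     // legal: same position as the prior term
    posIncr.Clear();                   // back to the default increment of 1
    // posIncr.PositionIncrement = -1; // would throw System.ArgumentException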

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/TermAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/TermAttribute.cs b/src/core/Analysis/Tokenattributes/TermAttribute.cs
index f95402c..3dad641 100644
--- a/src/core/Analysis/Tokenattributes/TermAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/TermAttribute.cs
@@ -22,247 +22,247 @@ using Attribute = Lucene.Net.Util.Attribute;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary> The term text of a Token.</summary>
-	[Serializable]
-	public class TermAttribute:Attribute, ITermAttribute, System.ICloneable
-	{
-		private static int MIN_BUFFER_SIZE = 10;
-		
-		private char[] termBuffer;
-		private int termLength;
+    
+    /// <summary> The term text of a Token.</summary>
+    [Serializable]
+    public class TermAttribute:Attribute, ITermAttribute, System.ICloneable
+    {
+        private static int MIN_BUFFER_SIZE = 10;
+        
+        private char[] termBuffer;
+        private int termLength;
 
-	    /// <summary>Returns the Token's term text.
-	    /// 
-	    /// This method has a performance penalty
-	    /// because the text is stored internally in a char[].  If
-	    /// possible, use <see cref="TermBuffer()" /> and 
-	    /// <see cref="TermLength()" /> directly instead.  If you 
-	    /// really need a String, use this method, which is nothing more than
-	    /// a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
-	    /// </summary>
-	    public virtual string Term
-	    {
-	        get
-	        {
-	            InitTermBuffer();
-	            return new System.String(termBuffer, 0, termLength);
-	        }
-	    }
+        /// <summary>Returns the Token's term text.
+        /// 
+        /// This method has a performance penalty
+        /// because the text is stored internally in a char[].  If
+        /// possible, use <see cref="TermBuffer()" /> and 
+        /// <see cref="TermLength()" /> directly instead.  If you 
+        /// really need a String, use this method, which is nothing more than
+        /// a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
+        /// </summary>
+        public virtual string Term
+        {
+            get
+            {
+                InitTermBuffer();
+                return new System.String(termBuffer, 0, termLength);
+            }
+        }
 
-	    /// <summary>Copies the contents of buffer, starting at offset for
-		/// length characters, into the termBuffer array.
-		/// </summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		/// <param name="offset">the index in the buffer of the first character to copy
-		/// </param>
-		/// <param name="length">the number of characters to copy
-		/// </param>
-		public virtual void  SetTermBuffer(char[] buffer, int offset, int length)
-		{
-			GrowTermBuffer(length);
-			Array.Copy(buffer, offset, termBuffer, 0, length);
-			termLength = length;
-		}
-		
-		/// <summary>Copies the contents of buffer into the termBuffer array.</summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		public virtual void  SetTermBuffer(System.String buffer)
-		{
-			int length = buffer.Length;
-			GrowTermBuffer(length);
-			TextSupport.GetCharsFromString(buffer, 0, length, termBuffer, 0);
-			termLength = length;
-		}
-		
-		/// <summary>Copies the contents of buffer, starting at offset and continuing
-		/// for length characters, into the termBuffer array.
-		/// </summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		/// <param name="offset">the index in the buffer of the first character to copy
-		/// </param>
-		/// <param name="length">the number of characters to copy
-		/// </param>
-		public virtual void  SetTermBuffer(System.String buffer, int offset, int length)
-		{
-			System.Diagnostics.Debug.Assert(offset <= buffer.Length);
-			System.Diagnostics.Debug.Assert(offset + length <= buffer.Length);
-			GrowTermBuffer(length);
-			TextSupport.GetCharsFromString(buffer, offset, offset + length, termBuffer, 0);
-			termLength = length;
-		}
-		
-		/// <summary>Returns the internal termBuffer character array which
-		/// you can then directly alter.  If the array is too
-		/// small for your token, use <see cref="ResizeTermBuffer(int)" />
-		/// to increase it.  After
-		/// altering the buffer be sure to call <see cref="SetTermLength" />
-		/// to record the number of valid
-		/// characters that were placed into the termBuffer. 
-		/// </summary>
-		public virtual char[] TermBuffer()
-		{
-			InitTermBuffer();
-			return termBuffer;
-		}
-		
-		/// <summary>Grows the termBuffer to at least size newSize, preserving the
-		/// existing content. Note: If the next operation is to change
-		/// the contents of the term buffer use
-		/// <see cref="SetTermBuffer(char[], int, int)" />,
-		/// <see cref="SetTermBuffer(String)" />, or
-		/// <see cref="SetTermBuffer(String, int, int)" />
-		/// to optimally combine the resize with the setting of the termBuffer.
-		/// </summary>
-		/// <param name="newSize">minimum size of the new termBuffer
-		/// </param>
-		/// <returns> newly created termBuffer with length >= newSize
-		/// </returns>
-		public virtual char[] ResizeTermBuffer(int newSize)
-		{
-			if (termBuffer == null)
-			{
-				// The buffer is always at least MIN_BUFFER_SIZE
-				termBuffer = new char[ArrayUtil.GetNextSize(newSize < MIN_BUFFER_SIZE?MIN_BUFFER_SIZE:newSize)];
-			}
-			else
-			{
-				if (termBuffer.Length < newSize)
-				{
-					// Not big enough; create a new array with slight
-					// over allocation and preserve content
-					char[] newCharBuffer = new char[ArrayUtil.GetNextSize(newSize)];
-					Array.Copy(termBuffer, 0, newCharBuffer, 0, termBuffer.Length);
-					termBuffer = newCharBuffer;
-				}
-			}
-			return termBuffer;
-		}
-		
-		
-		/// <summary>Allocates a buffer char[] of at least newSize, without preserving the existing content.
-		/// its always used in places that set the content 
-		/// </summary>
-		/// <param name="newSize">minimum size of the buffer
-		/// </param>
-		private void  GrowTermBuffer(int newSize)
-		{
-			if (termBuffer == null)
-			{
-				// The buffer is always at least MIN_BUFFER_SIZE
-				termBuffer = new char[ArrayUtil.GetNextSize(newSize < MIN_BUFFER_SIZE?MIN_BUFFER_SIZE:newSize)];
-			}
-			else
-			{
-				if (termBuffer.Length < newSize)
-				{
-					// Not big enough; create a new array with slight
-					// over allocation:
-					termBuffer = new char[ArrayUtil.GetNextSize(newSize)];
-				}
-			}
-		}
-		
-		private void  InitTermBuffer()
-		{
-			if (termBuffer == null)
-			{
-				termBuffer = new char[ArrayUtil.GetNextSize(MIN_BUFFER_SIZE)];
-				termLength = 0;
-			}
-		}
-		
-		/// <summary>Return number of valid characters (length of the term)
-		/// in the termBuffer array. 
-		/// </summary>
-		public virtual int TermLength()
-		{
-			return termLength;
-		}
-		
-		/// <summary>Set number of valid characters (length of the term) in
-		/// the termBuffer array. Use this to truncate the termBuffer
-		/// or to synchronize with external manipulation of the termBuffer.
-		/// Note: to grow the size of the array,
-		/// use <see cref="ResizeTermBuffer(int)" /> first.
-		/// </summary>
-		/// <param name="length">the truncated length
-		/// </param>
-		public virtual void  SetTermLength(int length)
-		{
-			InitTermBuffer();
-			if (length > termBuffer.Length)
-				throw new System.ArgumentException("length " + length + " exceeds the size of the termBuffer (" + termBuffer.Length + ")");
-			termLength = length;
-		}
-		
-		public override int GetHashCode()
-		{
-			InitTermBuffer();
-			int code = termLength;
-			code = code * 31 + ArrayUtil.HashCode(termBuffer, 0, termLength);
-			return code;
-		}
-		
-		public override void  Clear()
-		{
-			termLength = 0;
-		}
-		
-		public override System.Object Clone()
-		{
-			TermAttribute t = (TermAttribute) base.Clone();
-			// Do a deep clone
-			if (termBuffer != null)
-			{
-				t.termBuffer = new char[termBuffer.Length];
-				termBuffer.CopyTo(t.termBuffer, 0);
-			}
-			return t;
-		}
-		
-		public  override bool Equals(System.Object other)
-		{
-			if (other == this)
-			{
-				return true;
-			}
-			
-			if (other is ITermAttribute)
-			{
-				InitTermBuffer();
-				TermAttribute o = ((TermAttribute) other);
-				o.InitTermBuffer();
-				
-				if (termLength != o.termLength)
-					return false;
-				for (int i = 0; i < termLength; i++)
-				{
-					if (termBuffer[i] != o.termBuffer[i])
-					{
-						return false;
-					}
-				}
-				return true;
-			}
-			
-			return false;
-		}
-		
-		public override System.String ToString()
-		{
-			InitTermBuffer();
-			return "term=" + new System.String(termBuffer, 0, termLength);
-		}
-		
-		public override void  CopyTo(Attribute target)
-		{
-			InitTermBuffer();
-			ITermAttribute t = (ITermAttribute) target;
-			t.SetTermBuffer(termBuffer, 0, termLength);
-		}
-	}
+        /// <summary>Copies the contents of buffer, starting at offset for
+        /// length characters, into the termBuffer array.
+        /// </summary>
+        /// <param name="buffer">the buffer to copy
+        /// </param>
+        /// <param name="offset">the index in the buffer of the first character to copy
+        /// </param>
+        /// <param name="length">the number of characters to copy
+        /// </param>
+        public virtual void  SetTermBuffer(char[] buffer, int offset, int length)
+        {
+            GrowTermBuffer(length);
+            Array.Copy(buffer, offset, termBuffer, 0, length);
+            termLength = length;
+        }
+        
+        /// <summary>Copies the contents of buffer into the termBuffer array.</summary>
+        /// <param name="buffer">the buffer to copy
+        /// </param>
+        public virtual void  SetTermBuffer(System.String buffer)
+        {
+            int length = buffer.Length;
+            GrowTermBuffer(length);
+            TextSupport.GetCharsFromString(buffer, 0, length, termBuffer, 0);
+            termLength = length;
+        }
+        
+        /// <summary>Copies the contents of buffer, starting at offset and continuing
+        /// for length characters, into the termBuffer array.
+        /// </summary>
+        /// <param name="buffer">the buffer to copy
+        /// </param>
+        /// <param name="offset">the index in the buffer of the first character to copy
+        /// </param>
+        /// <param name="length">the number of characters to copy
+        /// </param>
+        public virtual void  SetTermBuffer(System.String buffer, int offset, int length)
+        {
+            System.Diagnostics.Debug.Assert(offset <= buffer.Length);
+            System.Diagnostics.Debug.Assert(offset + length <= buffer.Length);
+            GrowTermBuffer(length);
+            TextSupport.GetCharsFromString(buffer, offset, offset + length, termBuffer, 0);
+            termLength = length;
+        }
+        
+        /// <summary>Returns the internal termBuffer character array which
+        /// you can then directly alter.  If the array is too
+        /// small for your token, use <see cref="ResizeTermBuffer(int)" />
+        /// to increase it.  After
+        /// altering the buffer be sure to call <see cref="SetTermLength" />
+        /// to record the number of valid
+        /// characters that were placed into the termBuffer. 
+        /// </summary>
+        public virtual char[] TermBuffer()
+        {
+            InitTermBuffer();
+            return termBuffer;
+        }
+        
+        /// <summary>Grows the termBuffer to at least size newSize, preserving the
+        /// existing content. Note: If the next operation is to change
+        /// the contents of the term buffer use
+        /// <see cref="SetTermBuffer(char[], int, int)" />,
+        /// <see cref="SetTermBuffer(String)" />, or
+        /// <see cref="SetTermBuffer(String, int, int)" />
+        /// to optimally combine the resize with the setting of the termBuffer.
+        /// </summary>
+        /// <param name="newSize">minimum size of the new termBuffer
+        /// </param>
+        /// <returns> newly created termBuffer with length >= newSize
+        /// </returns>
+        public virtual char[] ResizeTermBuffer(int newSize)
+        {
+            if (termBuffer == null)
+            {
+                // The buffer is always at least MIN_BUFFER_SIZE
+                termBuffer = new char[ArrayUtil.GetNextSize(newSize < MIN_BUFFER_SIZE?MIN_BUFFER_SIZE:newSize)];
+            }
+            else
+            {
+                if (termBuffer.Length < newSize)
+                {
+                    // Not big enough; create a new array with slight
+                    // over allocation and preserve content
+                    char[] newCharBuffer = new char[ArrayUtil.GetNextSize(newSize)];
+                    Array.Copy(termBuffer, 0, newCharBuffer, 0, termBuffer.Length);
+                    termBuffer = newCharBuffer;
+                }
+            }
+            return termBuffer;
+        }
+        
+        
+        /// <summary>Allocates a buffer char[] of at least newSize, without preserving the existing content.
+        /// its always used in places that set the content 
+        /// </summary>
+        /// <param name="newSize">minimum size of the buffer
+        /// </param>
+        private void  GrowTermBuffer(int newSize)
+        {
+            if (termBuffer == null)
+            {
+                // The buffer is always at least MIN_BUFFER_SIZE
+                termBuffer = new char[ArrayUtil.GetNextSize(newSize < MIN_BUFFER_SIZE?MIN_BUFFER_SIZE:newSize)];
+            }
+            else
+            {
+                if (termBuffer.Length < newSize)
+                {
+                    // Not big enough; create a new array with slight
+                    // over allocation:
+                    termBuffer = new char[ArrayUtil.GetNextSize(newSize)];
+                }
+            }
+        }
+        
+        private void  InitTermBuffer()
+        {
+            if (termBuffer == null)
+            {
+                termBuffer = new char[ArrayUtil.GetNextSize(MIN_BUFFER_SIZE)];
+                termLength = 0;
+            }
+        }
+        
+        /// <summary>Return number of valid characters (length of the term)
+        /// in the termBuffer array. 
+        /// </summary>
+        public virtual int TermLength()
+        {
+            return termLength;
+        }
+        
+        /// <summary>Set number of valid characters (length of the term) in
+        /// the termBuffer array. Use this to truncate the termBuffer
+        /// or to synchronize with external manipulation of the termBuffer.
+        /// Note: to grow the size of the array,
+        /// use <see cref="ResizeTermBuffer(int)" /> first.
+        /// </summary>
+        /// <param name="length">the truncated length
+        /// </param>
+        public virtual void  SetTermLength(int length)
+        {
+            InitTermBuffer();
+            if (length > termBuffer.Length)
+                throw new System.ArgumentException("length " + length + " exceeds the size of the termBuffer (" + termBuffer.Length + ")");
+            termLength = length;
+        }
+        
+        public override int GetHashCode()
+        {
+            InitTermBuffer();
+            int code = termLength;
+            code = code * 31 + ArrayUtil.HashCode(termBuffer, 0, termLength);
+            return code;
+        }
+        
+        public override void  Clear()
+        {
+            termLength = 0;
+        }
+        
+        public override System.Object Clone()
+        {
+            TermAttribute t = (TermAttribute) base.Clone();
+            // Do a deep clone
+            if (termBuffer != null)
+            {
+                t.termBuffer = new char[termBuffer.Length];
+                termBuffer.CopyTo(t.termBuffer, 0);
+            }
+            return t;
+        }
+        
+        public  override bool Equals(System.Object other)
+        {
+            if (other == this)
+            {
+                return true;
+            }
+            
+            if (other is ITermAttribute)
+            {
+                InitTermBuffer();
+                TermAttribute o = ((TermAttribute) other);
+                o.InitTermBuffer();
+                
+                if (termLength != o.termLength)
+                    return false;
+                for (int i = 0; i < termLength; i++)
+                {
+                    if (termBuffer[i] != o.termBuffer[i])
+                    {
+                        return false;
+                    }
+                }
+                return true;
+            }
+            
+            return false;
+        }
+        
+        public override System.String ToString()
+        {
+            InitTermBuffer();
+            return "term=" + new System.String(termBuffer, 0, termLength);
+        }
+        
+        public override void  CopyTo(Attribute target)
+        {
+            InitTermBuffer();
+            ITermAttribute t = (ITermAttribute) target;
+            t.SetTermBuffer(termBuffer, 0, termLength);
+        }
+    }
 }
\ No newline at end of file

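The resize/grow pair in this hunk is easy to confuse: ResizeTermBuffer(int) preserves
existing characters, the private GrowTermBuffer(int) does not, and SetTermLength(int)
only publishes a length that already fits the buffer. A minimal sketch of the intended
call pattern, assuming the Lucene.Net 3.x TermAttribute surface shown above:

    using System;
    using Lucene.Net.Analysis.Tokenattributes;

    class TermBufferDemo
    {
        static void Main()
        {
            var termAtt = new TermAttribute();
            // Grow (or allocate) the buffer; existing characters are preserved.
            char[] buf = termAtt.ResizeTermBuffer(5);
            "hello".CopyTo(0, buf, 0, 5);    // write directly into the buffer
            termAtt.SetTermLength(5);        // publish 5 valid chars; throws if > buf.Length
            Console.WriteLine(termAtt);      // ToString() prints: term=hello
        }
    }
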
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/TypeAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/TypeAttribute.cs b/src/core/Analysis/Tokenattributes/TypeAttribute.cs
index 1da1c50..cdd5901 100644
--- a/src/core/Analysis/Tokenattributes/TypeAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/TypeAttribute.cs
@@ -20,66 +20,66 @@ using Attribute = Lucene.Net.Util.Attribute;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary> A Token's lexical type. The Default value is "word". </summary>
-	[Serializable]
-	public class TypeAttribute:Attribute, ITypeAttribute, System.ICloneable
-	{
-		private System.String type;
-		public const System.String DEFAULT_TYPE = "word";
-		
-		public TypeAttribute():this(DEFAULT_TYPE)
-		{
-		}
-		
-		public TypeAttribute(System.String type)
-		{
-			this.type = type;
-		}
+    
+    /// <summary> A Token's lexical type. The default value is "word". </summary>
+    [Serializable]
+    public class TypeAttribute:Attribute, ITypeAttribute, System.ICloneable
+    {
+        private System.String type;
+        public const System.String DEFAULT_TYPE = "word";
+        
+        public TypeAttribute():this(DEFAULT_TYPE)
+        {
+        }
+        
+        public TypeAttribute(System.String type)
+        {
+            this.type = type;
+        }
 
-	    /// <summary>Returns this Token's lexical type.  Defaults to "word". </summary>
-	    public virtual string Type
-	    {
-	        get { return type; }
-	        set { this.type = value; }
-	    }
+        /// <summary>Returns this Token's lexical type.  Defaults to "word". </summary>
+        public virtual string Type
+        {
+            get { return type; }
+            set { this.type = value; }
+        }
 
-	    public override void  Clear()
-		{
-			type = DEFAULT_TYPE;
-		}
-		
-		public  override bool Equals(System.Object other)
-		{
-			if (other == this)
-			{
-				return true;
-			}
-			
-			if (other is TypeAttribute)
-			{
-				return type.Equals(((TypeAttribute) other).type);
-			}
-			
-			return false;
-		}
-		
-		public override int GetHashCode()
-		{
-			return type.GetHashCode();
-		}
-		
-		public override void  CopyTo(Attribute target)
-		{
-			ITypeAttribute t = (ITypeAttribute) target;
-			t.Type = type;
-		}
-		
-		override public System.Object Clone()
-		{
+        public override void  Clear()
+        {
+            type = DEFAULT_TYPE;
+        }
+        
+        public  override bool Equals(System.Object other)
+        {
+            if (other == this)
+            {
+                return true;
+            }
+            
+            if (other is TypeAttribute)
+            {
+                return type.Equals(((TypeAttribute) other).type);
+            }
+            
+            return false;
+        }
+        
+        public override int GetHashCode()
+        {
+            return type.GetHashCode();
+        }
+        
+        public override void  CopyTo(Attribute target)
+        {
+            ITypeAttribute t = (ITypeAttribute) target;
+            t.Type = type;
+        }
+        
+        override public System.Object Clone()
+        {
             TypeAttribute impl = new TypeAttribute();
             impl.type = type;
             return impl;
-		}
-	}
+        }
+    }
 }
\ No newline at end of file

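One detail of TypeAttribute worth noting: Clear() resets the type to DEFAULT_TYPE
("word") rather than to null, so a cleared attribute remains usable. A minimal
sketch, using only the surface shown in this hunk:

    using System;
    using Lucene.Net.Analysis.Tokenattributes;

    class TypeAttributeDemo
    {
        static void Main()
        {
            var typeAtt = new TypeAttribute("synonym");
            Console.WriteLine(typeAtt.Type);   // synonym
            typeAtt.Clear();                   // falls back to DEFAULT_TYPE
            Console.WriteLine(typeAtt.Type);   // word
        }
    }
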
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenizer.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenizer.cs b/src/core/Analysis/Tokenizer.cs
index 5ab741e..9860141 100644
--- a/src/core/Analysis/Tokenizer.cs
+++ b/src/core/Analysis/Tokenizer.cs
@@ -19,55 +19,55 @@ using AttributeSource = Lucene.Net.Util.AttributeSource;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> A Tokenizer is a TokenStream whose input is a Reader.
-	/// <p/>
-	/// This is an abstract class; subclasses must override <see cref="TokenStream.IncrementToken()" />
-	/// <p/>
+    
+    /// <summary> A Tokenizer is a TokenStream whose input is a Reader.
+    /// <p/>
+    /// This is an abstract class; subclasses must override <see cref="TokenStream.IncrementToken()" />
+    /// <p/>
     /// NOTE: Subclasses overriding <see cref="TokenStream.IncrementToken()" /> must call
-	/// <see cref="AttributeSource.ClearAttributes()" /> before setting attributes.
-	/// </summary>
-	
-	public abstract class Tokenizer:TokenStream
-	{
-		/// <summary>The text source for this Tokenizer. </summary>
-		protected internal System.IO.TextReader input;
+    /// <see cref="AttributeSource.ClearAttributes()" /> before setting attributes.
+    /// </summary>
+    
+    public abstract class Tokenizer:TokenStream
+    {
+        /// <summary>The text source for this Tokenizer. </summary>
+        protected internal System.IO.TextReader input;
 
-	    private bool isDisposed;
-		
-		/// <summary>Construct a tokenizer with null input. </summary>
-		protected internal Tokenizer()
-		{
-		}
-		
-		/// <summary>Construct a token stream processing the given input. </summary>
-		protected internal Tokenizer(System.IO.TextReader input)
-		{
-			this.input = CharReader.Get(input);
-		}
-		
-		/// <summary>Construct a tokenizer with null input using the given AttributeFactory. </summary>
-		protected internal Tokenizer(AttributeFactory factory):base(factory)
-		{
-		}
-		
-		/// <summary>Construct a token stream processing the given input using the given AttributeFactory. </summary>
-		protected internal Tokenizer(AttributeFactory factory, System.IO.TextReader input):base(factory)
-		{
-			this.input = CharReader.Get(input);
-		}
-		
-		/// <summary>Construct a token stream processing the given input using the given AttributeSource. </summary>
-		protected internal Tokenizer(AttributeSource source):base(source)
-		{
-		}
-		
-		/// <summary>Construct a token stream processing the given input using the given AttributeSource. </summary>
-		protected internal Tokenizer(AttributeSource source, System.IO.TextReader input):base(source)
-		{
-			this.input = CharReader.Get(input);
-		}
-		
+        private bool isDisposed;
+        
+        /// <summary>Construct a tokenizer with null input. </summary>
+        protected internal Tokenizer()
+        {
+        }
+        
+        /// <summary>Construct a token stream processing the given input. </summary>
+        protected internal Tokenizer(System.IO.TextReader input)
+        {
+            this.input = CharReader.Get(input);
+        }
+        
+        /// <summary>Construct a tokenizer with null input using the given AttributeFactory. </summary>
+        protected internal Tokenizer(AttributeFactory factory):base(factory)
+        {
+        }
+        
+        /// <summary>Construct a token stream processing the given input using the given AttributeFactory. </summary>
+        protected internal Tokenizer(AttributeFactory factory, System.IO.TextReader input):base(factory)
+        {
+            this.input = CharReader.Get(input);
+        }
+        
+        /// <summary>Construct a token stream processing the given input using the given AttributeSource. </summary>
+        protected internal Tokenizer(AttributeSource source):base(source)
+        {
+        }
+        
+        /// <summary>Construct a token stream processing the given input using the given AttributeSource. </summary>
+        protected internal Tokenizer(AttributeSource source, System.IO.TextReader input):base(source)
+        {
+            this.input = CharReader.Get(input);
+        }
+        
         protected override void Dispose(bool disposing)
         {
             if (isDisposed) return;
@@ -86,27 +86,27 @@ namespace Lucene.Net.Analysis
             isDisposed = true;
         }
   
-		/// <summary>Return the corrected offset. If <see cref="input" /> is a <see cref="CharStream" /> subclass
-		/// this method calls <see cref="CharStream.CorrectOffset" />, else returns <c>currentOff</c>.
-		/// </summary>
-		/// <param name="currentOff">offset as seen in the output
-		/// </param>
-		/// <returns> corrected offset based on the input
-		/// </returns>
-		/// <seealso cref="CharStream.CorrectOffset">
-		/// </seealso>
-		protected internal int CorrectOffset(int currentOff)
-		{
-			return (input is CharStream)?((CharStream) input).CorrectOffset(currentOff):currentOff;
-		}
-		
-		/// <summary>Expert: Reset the tokenizer to a new reader.  Typically, an
-		/// analyzer (in its reusableTokenStream method) will use
-		/// this to re-use a previously created tokenizer. 
-		/// </summary>
-		public virtual void  Reset(System.IO.TextReader input)
-		{
-			this.input = input;
-		}
-	}
+        /// <summary>Return the corrected offset. If <see cref="input" /> is a <see cref="CharStream" /> subclass
+        /// this method calls <see cref="CharStream.CorrectOffset" />, else returns <c>currentOff</c>.
+        /// </summary>
+        /// <param name="currentOff">offset as seen in the output
+        /// </param>
+        /// <returns> corrected offset based on the input
+        /// </returns>
+        /// <seealso cref="CharStream.CorrectOffset">
+        /// </seealso>
+        protected internal int CorrectOffset(int currentOff)
+        {
+            return (input is CharStream)?((CharStream) input).CorrectOffset(currentOff):currentOff;
+        }
+        
+        /// <summary>Expert: Reset the tokenizer to a new reader.  Typically, an
+        /// analyzer (in its ReusableTokenStream method) will use
+        /// this to reuse a previously created tokenizer. 
+        /// </summary>
+        public virtual void  Reset(System.IO.TextReader input)
+        {
+            this.input = input;
+        }
+    }
 }
\ No newline at end of file

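The summary in this hunk carries the whole subclassing contract: override
IncrementToken(), call ClearAttributes() before setting attributes, and map
output offsets through CorrectOffset() so CharStream-based filters stay
consistent. A minimal sketch of that contract, using a hypothetical
SingleTokenTokenizer (not part of the library) and assuming the usual
AttributeSource surface (AddAttribute<T>):

    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;

    // Emits the entire input as a single token, with corrected offsets.
    class SingleTokenTokenizer : Tokenizer
    {
        private readonly ITermAttribute termAtt;
        private readonly IOffsetAttribute offsetAtt;
        private bool done;

        public SingleTokenTokenizer(System.IO.TextReader input) : base(input)
        {
            termAtt = AddAttribute<ITermAttribute>();
            offsetAtt = AddAttribute<IOffsetAttribute>();
        }

        public override bool IncrementToken()
        {
            if (done) return false;
            ClearAttributes();                 // required before setting attributes
            string text = input.ReadToEnd();
            termAtt.SetTermBuffer(text);
            // No-op unless input is a CharStream that rewrote the text.
            offsetAtt.SetOffset(CorrectOffset(0), CorrectOffset(text.Length));
            done = true;
            return true;
        }

        public override void Reset()
        {
            base.Reset();
            done = false;
        }
    }
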
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/WhitespaceAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/WhitespaceAnalyzer.cs b/src/core/Analysis/WhitespaceAnalyzer.cs
index 77dbaa3..ae94c44 100644
--- a/src/core/Analysis/WhitespaceAnalyzer.cs
+++ b/src/core/Analysis/WhitespaceAnalyzer.cs
@@ -17,27 +17,27 @@
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary>An Analyzer that uses <see cref="WhitespaceTokenizer" />. </summary>
-	
-	public sealed class WhitespaceAnalyzer:Analyzer
-	{
-		public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
-		{
-			return new WhitespaceTokenizer(reader);
-		}
-		
-		public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
-		{
-			var tokenizer = (Tokenizer) PreviousTokenStream;
-			if (tokenizer == null)
-			{
-				tokenizer = new WhitespaceTokenizer(reader);
-				PreviousTokenStream = tokenizer;
-			}
-			else
-				tokenizer.Reset(reader);
-			return tokenizer;
-		}
-	}
+    
+    /// <summary>An Analyzer that uses <see cref="WhitespaceTokenizer" />. </summary>
+    
+    public sealed class WhitespaceAnalyzer:Analyzer
+    {
+        public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
+        {
+            return new WhitespaceTokenizer(reader);
+        }
+        
+        public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
+        {
+            var tokenizer = (Tokenizer) PreviousTokenStream;
+            if (tokenizer == null)
+            {
+                tokenizer = new WhitespaceTokenizer(reader);
+                PreviousTokenStream = tokenizer;
+            }
+            else
+                tokenizer.Reset(reader);
+            return tokenizer;
+        }
+    }
 }
\ No newline at end of file

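ReusableTokenStream above is the reuse pattern in miniature: the analyzer caches
the tokenizer in PreviousTokenStream and calls Reset(reader) on later invocations,
avoiding a per-document allocation. A minimal sketch, assuming the surface shown
in this hunk (same thread, same analyzer instance):

    using System;
    using Lucene.Net.Analysis;

    class ReuseDemo
    {
        static void Main()
        {
            var analyzer = new WhitespaceAnalyzer();
            var ts1 = analyzer.ReusableTokenStream("f", new System.IO.StringReader("one two"));
            var ts2 = analyzer.ReusableTokenStream("f", new System.IO.StringReader("three"));
            // The second call returns the same tokenizer, reset onto the new reader.
            Console.WriteLine(ReferenceEquals(ts1, ts2));   // True
        }
    }
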
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/WhitespaceTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/WhitespaceTokenizer.cs b/src/core/Analysis/WhitespaceTokenizer.cs
index c96ad50..ba19da9 100644
--- a/src/core/Analysis/WhitespaceTokenizer.cs
+++ b/src/core/Analysis/WhitespaceTokenizer.cs
@@ -19,37 +19,37 @@ using AttributeSource = Lucene.Net.Util.AttributeSource;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary>A WhitespaceTokenizer is a tokenizer that divides text at whitespace.
-	/// Adjacent sequences of non-Whitespace characters form tokens. 
-	/// </summary>
-	
-	public class WhitespaceTokenizer:CharTokenizer
-	{
-		/// <summary>Construct a new WhitespaceTokenizer. </summary>
-		public WhitespaceTokenizer(System.IO.TextReader @in)
-			: base(@in)
-		{
-		}
-		
-		/// <summary>Construct a new WhitespaceTokenizer using a given <see cref="AttributeSource" />. </summary>
-		public WhitespaceTokenizer(AttributeSource source, System.IO.TextReader @in)
-			: base(source, @in)
-		{
-		}
-		
-		/// <summary>Construct a new WhitespaceTokenizer using a given <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" />. </summary>
-		public WhitespaceTokenizer(AttributeFactory factory, System.IO.TextReader @in)
-			: base(factory, @in)
-		{
-		}
-		
-		/// <summary>Collects only characters which do not satisfy
+    
+    /// <summary>A WhitespaceTokenizer is a tokenizer that divides text at whitespace.
+    /// Adjacent sequences of non-whitespace characters form tokens. 
+    /// </summary>
+    
+    public class WhitespaceTokenizer:CharTokenizer
+    {
+        /// <summary>Construct a new WhitespaceTokenizer. </summary>
+        public WhitespaceTokenizer(System.IO.TextReader @in)
+            : base(@in)
+        {
+        }
+        
+        /// <summary>Construct a new WhitespaceTokenizer using a given <see cref="AttributeSource" />. </summary>
+        public WhitespaceTokenizer(AttributeSource source, System.IO.TextReader @in)
+            : base(source, @in)
+        {
+        }
+        
+        /// <summary>Construct a new WhitespaceTokenizer using a given <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" />. </summary>
+        public WhitespaceTokenizer(AttributeFactory factory, System.IO.TextReader @in)
+            : base(factory, @in)
+        {
+        }
+        
+        /// <summary>Collects only characters which do not satisfy
         /// <see cref="char.IsWhiteSpace(char)" />.
-		/// </summary>
-		protected internal override bool IsTokenChar(char c)
-		{
-			return !System.Char.IsWhiteSpace(c);
-		}
-	}
+        /// </summary>
+        protected internal override bool IsTokenChar(char c)
+        {
+            return !System.Char.IsWhiteSpace(c);
+        }
+    }
 }
\ No newline at end of file

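WhitespaceTokenizer is the smallest useful CharTokenizer: IsTokenChar decides
which characters belong to a token, and any character failing the test ends the
current token. The same pattern works for any single-character delimiter; a
sketch using a hypothetical CommaTokenizer (not part of the library):

    using Lucene.Net.Analysis;

    // Divides text at commas instead of whitespace.
    class CommaTokenizer : CharTokenizer
    {
        public CommaTokenizer(System.IO.TextReader input)
            : base(input)
        {
        }

        // Declared "protected override" here because a protected internal
        // member is overridden as protected from outside the assembly.
        protected override bool IsTokenChar(char c)
        {
            return c != ',';   // everything except ',' is part of a token
        }
    }
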
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/WordlistLoader.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/WordlistLoader.cs b/src/core/Analysis/WordlistLoader.cs
index bfd1b07..d3abfe6 100644
--- a/src/core/Analysis/WordlistLoader.cs
+++ b/src/core/Analysis/WordlistLoader.cs
@@ -19,128 +19,128 @@ using System.Collections.Generic;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> Loader for text files that represent a list of stopwords.</summary>
-	public class WordlistLoader
-	{
-		
-		/// <summary> Loads a text file and adds every line as an entry to a HashSet (omitting
-		/// leading and trailing whitespace). Every line of the file should contain only
-		/// one word. The words need to be in lowercase if you make use of an
-		/// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
-		/// </summary>
-		/// <param name="wordfile">File containing the wordlist</param>
-		/// <returns> A HashSet with the file's words</returns>
-		public static ISet<string> GetWordSet(System.IO.FileInfo wordfile)
-		{
+    
+    /// <summary> Loader for text files that represent a list of stopwords.</summary>
+    public class WordlistLoader
+    {
+        
+        /// <summary> Loads a text file and adds every line as an entry to a HashSet (omitting
+        /// leading and trailing whitespace). Every line of the file should contain only
+        /// one word. The words need to be in lowercase if you make use of an
+        /// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+        /// </summary>
+        /// <param name="wordfile">File containing the wordlist</param>
+        /// <returns> A HashSet with the file's words</returns>
+        public static ISet<string> GetWordSet(System.IO.FileInfo wordfile)
+        {
             using (var reader = new System.IO.StreamReader(wordfile.FullName, System.Text.Encoding.Default))
             {
                 return GetWordSet(reader);
             }
-		}
-		
-		/// <summary> Loads a text file and adds every non-comment line as an entry to a HashSet (omitting
-		/// leading and trailing whitespace). Every line of the file should contain only
-		/// one word. The words need to be in lowercase if you make use of an
-		/// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
-		/// </summary>
-		/// <param name="wordfile">File containing the wordlist</param>
-		/// <param name="comment">The comment string to ignore</param>
-		/// <returns> A HashSet with the file's words</returns>
-		public static ISet<string> GetWordSet(System.IO.FileInfo wordfile, System.String comment)
-		{
+        }
+        
+        /// <summary> Loads a text file and adds every non-comment line as an entry to a HashSet (omitting
+        /// leading and trailing whitespace). Every line of the file should contain only
+        /// one word. The words need to be in lowercase if you make use of an
+        /// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+        /// </summary>
+        /// <param name="wordfile">File containing the wordlist</param>
+        /// <param name="comment">The comment string to ignore</param>
+        /// <returns> A HashSet with the file's words</returns>
+        public static ISet<string> GetWordSet(System.IO.FileInfo wordfile, System.String comment)
+        {
             using (var reader = new System.IO.StreamReader(wordfile.FullName, System.Text.Encoding.Default))
             {
                 return GetWordSet(reader, comment);
             }
-		}
-		
-		
-		/// <summary> Reads lines from a Reader and adds every line as an entry to a HashSet (omitting
-		/// leading and trailing whitespace). Every line of the Reader should contain only
-		/// one word. The words need to be in lowercase if you make use of an
-		/// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
-		/// </summary>
-		/// <param name="reader">Reader containing the wordlist</param>
-		/// <returns>A HashSet with the reader's words</returns>
-		public static ISet<string> GetWordSet(System.IO.TextReader reader)
-		{
+        }
+        
+        
+        /// <summary> Reads lines from a Reader and adds every line as an entry to a HashSet (omitting
+        /// leading and trailing whitespace). Every line of the Reader should contain only
+        /// one word. The words need to be in lowercase if you make use of an
+        /// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+        /// </summary>
+        /// <param name="reader">Reader containing the wordlist</param>
+        /// <returns>A HashSet with the reader's words</returns>
+        public static ISet<string> GetWordSet(System.IO.TextReader reader)
+        {
             var result = Support.Compatibility.SetFactory.CreateHashSet<string>();
 
-			System.String word;
-			while ((word = reader.ReadLine()) != null)
-			{
-				result.Add(word.Trim());
-			}
+            System.String word;
+            while ((word = reader.ReadLine()) != null)
+            {
+                result.Add(word.Trim());
+            }
 
-			return result;
-		}
+            return result;
+        }
 
-		/// <summary> Reads lines from a Reader and adds every non-comment line as an entry to a HashSet (omitting
-		/// leading and trailing whitespace). Every line of the Reader should contain only
-		/// one word. The words need to be in lowercase if you make use of an
-		/// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
-		/// 
-		/// </summary>
-		/// <param name="reader">Reader containing the wordlist
-		/// </param>
-		/// <param name="comment">The string representing a comment.
-		/// </param>
-		/// <returns> A HashSet with the reader's words
-		/// </returns>
-		public static ISet<string> GetWordSet(System.IO.TextReader reader, System.String comment)
-		{
+        /// <summary> Reads lines from a Reader and adds every non-comment line as an entry to a HashSet (omitting
+        /// leading and trailing whitespace). Every line of the Reader should contain only
+        /// one word. The words need to be in lowercase if you make use of an
+        /// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+        /// 
+        /// </summary>
+        /// <param name="reader">Reader containing the wordlist
+        /// </param>
+        /// <param name="comment">The string representing a comment.
+        /// </param>
+        /// <returns> A HashSet with the reader's words
+        /// </returns>
+        public static ISet<string> GetWordSet(System.IO.TextReader reader, System.String comment)
+        {
             var result = Support.Compatibility.SetFactory.CreateHashSet<string>();
 
             System.String word = null;
-			while ((word = reader.ReadLine()) != null)
-			{
-				if (word.StartsWith(comment) == false)
-				{
-					result.Add(word.Trim());
-				}
-			}
+            while ((word = reader.ReadLine()) != null)
+            {
+                if (word.StartsWith(comment) == false)
+                {
+                    result.Add(word.Trim());
+                }
+            }
 
-			return result;
-		}
+            return result;
+        }
 
 
 
-		/// <summary> Reads a stem dictionary. Each line contains:
-		/// <c>word<b>\t</b>stem</c>
-		/// (i.e. two tab seperated words)
-		/// 
-		/// </summary>
-		/// <returns> stem dictionary that overrules the stemming algorithm
-		/// </returns>
-		/// <throws>  IOException  </throws>
-		public static Dictionary<string, string> GetStemDict(System.IO.FileInfo wordstemfile)
-		{
-			if (wordstemfile == null)
-				throw new System.NullReferenceException("wordstemfile may not be null");
+        /// <summary> Reads a stem dictionary. Each line contains:
+        /// <c>word<b>\t</b>stem</c>
+        /// (i.e. two tab-separated words)
+        /// 
+        /// </summary>
+        /// <returns> stem dictionary that overrules the stemming algorithm
+        /// </returns>
+        /// <throws>  IOException  </throws>
+        public static Dictionary<string, string> GetStemDict(System.IO.FileInfo wordstemfile)
+        {
+            if (wordstemfile == null)
+                throw new System.NullReferenceException("wordstemfile may not be null");
             var result = new Dictionary<string, string>();
-			System.IO.StreamReader br = null;
-			System.IO.StreamReader fr = null;
-			try
-			{
-				fr = new System.IO.StreamReader(wordstemfile.FullName, System.Text.Encoding.Default);
-				br = new System.IO.StreamReader(fr.BaseStream, fr.CurrentEncoding);
-				System.String line;
+            System.IO.StreamReader br = null;
+            System.IO.StreamReader fr = null;
+            try
+            {
+                fr = new System.IO.StreamReader(wordstemfile.FullName, System.Text.Encoding.Default);
+                br = new System.IO.StreamReader(fr.BaseStream, fr.CurrentEncoding);
+                System.String line;
                 char[] tab = {'\t'};
-				while ((line = br.ReadLine()) != null)
-				{
-					System.String[] wordstem = line.Split(tab, 2);
-					result[wordstem[0]] = wordstem[1];
-				}
-			}
-			finally
-			{
-				if (fr != null)
-					fr.Close();
-				if (br != null)
-					br.Close();
-			}
-			return result;
-		}
-	}
+                while ((line = br.ReadLine()) != null)
+                {
+                    System.String[] wordstem = line.Split(tab, 2);
+                    result[wordstem[0]] = wordstem[1];
+                }
+            }
+            finally
+            {
+                if (fr != null)
+                    fr.Close();
+                if (br != null)
+                    br.Close();
+            }
+            return result;
+        }
+    }
 }
\ No newline at end of file

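The loader above is strictly line-oriented: one word per line, trimmed, with an
optional comment-prefix filter. A minimal sketch, using the
GetWordSet(TextReader, string) overload shown in this hunk:

    using System;
    using Lucene.Net.Analysis;

    class WordlistDemo
    {
        static void Main()
        {
            using (var reader = new System.IO.StringReader("# stopwords\nfoo\nbar\n"))
            {
                var words = WordlistLoader.GetWordSet(reader, "#");
                Console.WriteLine(words.Count);             // 2 ("# stopwords" is skipped)
                Console.WriteLine(words.Contains("foo"));   // True
            }
        }
    }
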

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/RomanianStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/RomanianStemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/RomanianStemmer.cs
index 6340627..d764f7f 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/RomanianStemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/RomanianStemmer.cs
@@ -65,248 +65,248 @@ namespace SF.Snowball.Ext
         public RomanianStemmer()
         {
             a_0 = new Among[] {
-				new Among("", -1, 3, "", null),
-				new Among("I", 0, 1, "", null),
-				new Among("U", 0, 2, "", null)
-			};
+                new Among("", -1, 3, "", null),
+                new Among("I", 0, 1, "", null),
+                new Among("U", 0, 2, "", null)
+            };
 
             a_1 = new Among[] {
-				new Among("ea", -1, 3, "", null),
-				new Among("a\u0163ia", -1, 7, "", null),
-				new Among("aua", -1, 2, "", null),
-				new Among("iua", -1, 4, "", null),
-				new Among("a\u0163ie", -1, 7, "", null),
-				new Among("ele", -1, 3, "", null),
-				new Among("ile", -1, 5, "", null),
-				new Among("iile", 6, 4, "", null),
-				new Among("iei", -1, 4, "", null),
-				new Among("atei", -1, 6, "", null),
-				new Among("ii", -1, 4, "", null),
-				new Among("ului", -1, 1, "", null),
-				new Among("ul", -1, 1, "", null),
-				new Among("elor", -1, 3, "", null),
-				new Among("ilor", -1, 4, "", null),
-				new Among("iilor", 14, 4, "", null)
-			};
+                new Among("ea", -1, 3, "", null),
+                new Among("a\u0163ia", -1, 7, "", null),
+                new Among("aua", -1, 2, "", null),
+                new Among("iua", -1, 4, "", null),
+                new Among("a\u0163ie", -1, 7, "", null),
+                new Among("ele", -1, 3, "", null),
+                new Among("ile", -1, 5, "", null),
+                new Among("iile", 6, 4, "", null),
+                new Among("iei", -1, 4, "", null),
+                new Among("atei", -1, 6, "", null),
+                new Among("ii", -1, 4, "", null),
+                new Among("ului", -1, 1, "", null),
+                new Among("ul", -1, 1, "", null),
+                new Among("elor", -1, 3, "", null),
+                new Among("ilor", -1, 4, "", null),
+                new Among("iilor", 14, 4, "", null)
+            };
 
             a_2 = new Among[] {
-				new Among("icala", -1, 4, "", null),
-				new Among("iciva", -1, 4, "", null),
-				new Among("ativa", -1, 5, "", null),
-				new Among("itiva", -1, 6, "", null),
-				new Among("icale", -1, 4, "", null),
-				new Among("a\u0163iune", -1, 5, "", null),
-				new Among("i\u0163iune", -1, 6, "", null),
-				new Among("atoare", -1, 5, "", null),
-				new Among("itoare", -1, 6, "", null),
-				new Among("\u0103toare", -1, 5, "", null),
-				new Among("icitate", -1, 4, "", null),
-				new Among("abilitate", -1, 1, "", null),
-				new Among("ibilitate", -1, 2, "", null),
-				new Among("ivitate", -1, 3, "", null),
-				new Among("icive", -1, 4, "", null),
-				new Among("ative", -1, 5, "", null),
-				new Among("itive", -1, 6, "", null),
-				new Among("icali", -1, 4, "", null),
-				new Among("atori", -1, 5, "", null),
-				new Among("icatori", 18, 4, "", null),
-				new Among("itori", -1, 6, "", null),
-				new Among("\u0103tori", -1, 5, "", null),
-				new Among("icitati", -1, 4, "", null),
-				new Among("abilitati", -1, 1, "", null),
-				new Among("ivitati", -1, 3, "", null),
-				new Among("icivi", -1, 4, "", null),
-				new Among("ativi", -1, 5, "", null),
-				new Among("itivi", -1, 6, "", null),
-				new Among("icit\u0103i", -1, 4, "", null),
-				new Among("abilit\u0103i", -1, 1, "", null),
-				new Among("ivit\u0103i", -1, 3, "", null),
-				new Among("icit\u0103\u0163i", -1, 4, "", null),
-				new Among("abilit\u0103\u0163i", -1, 1, "", null),
-				new Among("ivit\u0103\u0163i", -1, 3, "", null),
-				new Among("ical", -1, 4, "", null),
-				new Among("ator", -1, 5, "", null),
-				new Among("icator", 35, 4, "", null),
-				new Among("itor", -1, 6, "", null),
-				new Among("\u0103tor", -1, 5, "", null),
-				new Among("iciv", -1, 4, "", null),
-				new Among("ativ", -1, 5, "", null),
-				new Among("itiv", -1, 6, "", null),
-				new Among("ical\u0103", -1, 4, "", null),
-				new Among("iciv\u0103", -1, 4, "", null),
-				new Among("ativ\u0103", -1, 5, "", null),
-				new Among("itiv\u0103", -1, 6, "", null)
-			};
+                new Among("icala", -1, 4, "", null),
+                new Among("iciva", -1, 4, "", null),
+                new Among("ativa", -1, 5, "", null),
+                new Among("itiva", -1, 6, "", null),
+                new Among("icale", -1, 4, "", null),
+                new Among("a\u0163iune", -1, 5, "", null),
+                new Among("i\u0163iune", -1, 6, "", null),
+                new Among("atoare", -1, 5, "", null),
+                new Among("itoare", -1, 6, "", null),
+                new Among("\u0103toare", -1, 5, "", null),
+                new Among("icitate", -1, 4, "", null),
+                new Among("abilitate", -1, 1, "", null),
+                new Among("ibilitate", -1, 2, "", null),
+                new Among("ivitate", -1, 3, "", null),
+                new Among("icive", -1, 4, "", null),
+                new Among("ative", -1, 5, "", null),
+                new Among("itive", -1, 6, "", null),
+                new Among("icali", -1, 4, "", null),
+                new Among("atori", -1, 5, "", null),
+                new Among("icatori", 18, 4, "", null),
+                new Among("itori", -1, 6, "", null),
+                new Among("\u0103tori", -1, 5, "", null),
+                new Among("icitati", -1, 4, "", null),
+                new Among("abilitati", -1, 1, "", null),
+                new Among("ivitati", -1, 3, "", null),
+                new Among("icivi", -1, 4, "", null),
+                new Among("ativi", -1, 5, "", null),
+                new Among("itivi", -1, 6, "", null),
+                new Among("icit\u0103i", -1, 4, "", null),
+                new Among("abilit\u0103i", -1, 1, "", null),
+                new Among("ivit\u0103i", -1, 3, "", null),
+                new Among("icit\u0103\u0163i", -1, 4, "", null),
+                new Among("abilit\u0103\u0163i", -1, 1, "", null),
+                new Among("ivit\u0103\u0163i", -1, 3, "", null),
+                new Among("ical", -1, 4, "", null),
+                new Among("ator", -1, 5, "", null),
+                new Among("icator", 35, 4, "", null),
+                new Among("itor", -1, 6, "", null),
+                new Among("\u0103tor", -1, 5, "", null),
+                new Among("iciv", -1, 4, "", null),
+                new Among("ativ", -1, 5, "", null),
+                new Among("itiv", -1, 6, "", null),
+                new Among("ical\u0103", -1, 4, "", null),
+                new Among("iciv\u0103", -1, 4, "", null),
+                new Among("ativ\u0103", -1, 5, "", null),
+                new Among("itiv\u0103", -1, 6, "", null)
+            };
 
             a_3 = new Among[] {
-				new Among("ica", -1, 1, "", null),
-				new Among("abila", -1, 1, "", null),
-				new Among("ibila", -1, 1, "", null),
-				new Among("oasa", -1, 1, "", null),
-				new Among("ata", -1, 1, "", null),
-				new Among("ita", -1, 1, "", null),
-				new Among("anta", -1, 1, "", null),
-				new Among("ista", -1, 3, "", null),
-				new Among("uta", -1, 1, "", null),
-				new Among("iva", -1, 1, "", null),
-				new Among("ic", -1, 1, "", null),
-				new Among("ice", -1, 1, "", null),
-				new Among("abile", -1, 1, "", null),
-				new Among("ibile", -1, 1, "", null),
-				new Among("isme", -1, 3, "", null),
-				new Among("iune", -1, 2, "", null),
-				new Among("oase", -1, 1, "", null),
-				new Among("ate", -1, 1, "", null),
-				new Among("itate", 17, 1, "", null),
-				new Among("ite", -1, 1, "", null),
-				new Among("ante", -1, 1, "", null),
-				new Among("iste", -1, 3, "", null),
-				new Among("ute", -1, 1, "", null),
-				new Among("ive", -1, 1, "", null),
-				new Among("ici", -1, 1, "", null),
-				new Among("abili", -1, 1, "", null),
-				new Among("ibili", -1, 1, "", null),
-				new Among("iuni", -1, 2, "", null),
-				new Among("atori", -1, 1, "", null),
-				new Among("osi", -1, 1, "", null),
-				new Among("ati", -1, 1, "", null),
-				new Among("itati", 30, 1, "", null),
-				new Among("iti", -1, 1, "", null),
-				new Among("anti", -1, 1, "", null),
-				new Among("isti", -1, 3, "", null),
-				new Among("uti", -1, 1, "", null),
-				new Among("i\u015Fti", -1, 3, "", null),
-				new Among("ivi", -1, 1, "", null),
-				new Among("it\u0103i", -1, 1, "", null),
-				new Among("o\u015Fi", -1, 1, "", null),
-				new Among("it\u0103\u0163i", -1, 1, "", null),
-				new Among("abil", -1, 1, "", null),
-				new Among("ibil", -1, 1, "", null),
-				new Among("ism", -1, 3, "", null),
-				new Among("ator", -1, 1, "", null),
-				new Among("os", -1, 1, "", null),
-				new Among("at", -1, 1, "", null),
-				new Among("it", -1, 1, "", null),
-				new Among("ant", -1, 1, "", null),
-				new Among("ist", -1, 3, "", null),
-				new Among("ut", -1, 1, "", null),
-				new Among("iv", -1, 1, "", null),
-				new Among("ic\u0103", -1, 1, "", null),
-				new Among("abil\u0103", -1, 1, "", null),
-				new Among("ibil\u0103", -1, 1, "", null),
-				new Among("oas\u0103", -1, 1, "", null),
-				new Among("at\u0103", -1, 1, "", null),
-				new Among("it\u0103", -1, 1, "", null),
-				new Among("ant\u0103", -1, 1, "", null),
-				new Among("ist\u0103", -1, 3, "", null),
-				new Among("ut\u0103", -1, 1, "", null),
-				new Among("iv\u0103", -1, 1, "", null)
-			};
+                new Among("ica", -1, 1, "", null),
+                new Among("abila", -1, 1, "", null),
+                new Among("ibila", -1, 1, "", null),
+                new Among("oasa", -1, 1, "", null),
+                new Among("ata", -1, 1, "", null),
+                new Among("ita", -1, 1, "", null),
+                new Among("anta", -1, 1, "", null),
+                new Among("ista", -1, 3, "", null),
+                new Among("uta", -1, 1, "", null),
+                new Among("iva", -1, 1, "", null),
+                new Among("ic", -1, 1, "", null),
+                new Among("ice", -1, 1, "", null),
+                new Among("abile", -1, 1, "", null),
+                new Among("ibile", -1, 1, "", null),
+                new Among("isme", -1, 3, "", null),
+                new Among("iune", -1, 2, "", null),
+                new Among("oase", -1, 1, "", null),
+                new Among("ate", -1, 1, "", null),
+                new Among("itate", 17, 1, "", null),
+                new Among("ite", -1, 1, "", null),
+                new Among("ante", -1, 1, "", null),
+                new Among("iste", -1, 3, "", null),
+                new Among("ute", -1, 1, "", null),
+                new Among("ive", -1, 1, "", null),
+                new Among("ici", -1, 1, "", null),
+                new Among("abili", -1, 1, "", null),
+                new Among("ibili", -1, 1, "", null),
+                new Among("iuni", -1, 2, "", null),
+                new Among("atori", -1, 1, "", null),
+                new Among("osi", -1, 1, "", null),
+                new Among("ati", -1, 1, "", null),
+                new Among("itati", 30, 1, "", null),
+                new Among("iti", -1, 1, "", null),
+                new Among("anti", -1, 1, "", null),
+                new Among("isti", -1, 3, "", null),
+                new Among("uti", -1, 1, "", null),
+                new Among("i\u015Fti", -1, 3, "", null),
+                new Among("ivi", -1, 1, "", null),
+                new Among("it\u0103i", -1, 1, "", null),
+                new Among("o\u015Fi", -1, 1, "", null),
+                new Among("it\u0103\u0163i", -1, 1, "", null),
+                new Among("abil", -1, 1, "", null),
+                new Among("ibil", -1, 1, "", null),
+                new Among("ism", -1, 3, "", null),
+                new Among("ator", -1, 1, "", null),
+                new Among("os", -1, 1, "", null),
+                new Among("at", -1, 1, "", null),
+                new Among("it", -1, 1, "", null),
+                new Among("ant", -1, 1, "", null),
+                new Among("ist", -1, 3, "", null),
+                new Among("ut", -1, 1, "", null),
+                new Among("iv", -1, 1, "", null),
+                new Among("ic\u0103", -1, 1, "", null),
+                new Among("abil\u0103", -1, 1, "", null),
+                new Among("ibil\u0103", -1, 1, "", null),
+                new Among("oas\u0103", -1, 1, "", null),
+                new Among("at\u0103", -1, 1, "", null),
+                new Among("it\u0103", -1, 1, "", null),
+                new Among("ant\u0103", -1, 1, "", null),
+                new Among("ist\u0103", -1, 3, "", null),
+                new Among("ut\u0103", -1, 1, "", null),
+                new Among("iv\u0103", -1, 1, "", null)
+            };
 
             a_4 = new Among[] {
-				new Among("ea", -1, 1, "", null),
-				new Among("ia", -1, 1, "", null),
-				new Among("esc", -1, 1, "", null),
-				new Among("\u0103sc", -1, 1, "", null),
-				new Among("ind", -1, 1, "", null),
-				new Among("\u00E2nd", -1, 1, "", null),
-				new Among("are", -1, 1, "", null),
-				new Among("ere", -1, 1, "", null),
-				new Among("ire", -1, 1, "", null),
-				new Among("\u00E2re", -1, 1, "", null),
-				new Among("se", -1, 2, "", null),
-				new Among("ase", 10, 1, "", null),
-				new Among("sese", 10, 2, "", null),
-				new Among("ise", 10, 1, "", null),
-				new Among("use", 10, 1, "", null),
-				new Among("\u00E2se", 10, 1, "", null),
-				new Among("e\u015Fte", -1, 1, "", null),
-				new Among("\u0103\u015Fte", -1, 1, "", null),
-				new Among("eze", -1, 1, "", null),
-				new Among("ai", -1, 1, "", null),
-				new Among("eai", 19, 1, "", null),
-				new Among("iai", 19, 1, "", null),
-				new Among("sei", -1, 2, "", null),
-				new Among("e\u015Fti", -1, 1, "", null),
-				new Among("\u0103\u015Fti", -1, 1, "", null),
-				new Among("ui", -1, 1, "", null),
-				new Among("ezi", -1, 1, "", null),
-				new Among("\u00E2i", -1, 1, "", null),
-				new Among("a\u015Fi", -1, 1, "", null),
-				new Among("se\u015Fi", -1, 2, "", null),
-				new Among("ase\u015Fi", 29, 1, "", null),
-				new Among("sese\u015Fi", 29, 2, "", null),
-				new Among("ise\u015Fi", 29, 1, "", null),
-				new Among("use\u015Fi", 29, 1, "", null),
-				new Among("\u00E2se\u015Fi", 29, 1, "", null),
-				new Among("i\u015Fi", -1, 1, "", null),
-				new Among("u\u015Fi", -1, 1, "", null),
-				new Among("\u00E2\u015Fi", -1, 1, "", null),
-				new Among("a\u0163i", -1, 2, "", null),
-				new Among("ea\u0163i", 38, 1, "", null),
-				new Among("ia\u0163i", 38, 1, "", null),
-				new Among("e\u0163i", -1, 2, "", null),
-				new Among("i\u0163i", -1, 2, "", null),
-				new Among("\u00E2\u0163i", -1, 2, "", null),
-				new Among("ar\u0103\u0163i", -1, 1, "", null),
-				new Among("ser\u0103\u0163i", -1, 2, "", null),
-				new Among("aser\u0103\u0163i", 45, 1, "", null),
-				new Among("seser\u0103\u0163i", 45, 2, "", null),
-				new Among("iser\u0103\u0163i", 45, 1, "", null),
-				new Among("user\u0103\u0163i", 45, 1, "", null),
-				new Among("\u00E2ser\u0103\u0163i", 45, 1, "", null),
-				new Among("ir\u0103\u0163i", -1, 1, "", null),
-				new Among("ur\u0103\u0163i", -1, 1, "", null),
-				new Among("\u00E2r\u0103\u0163i", -1, 1, "", null),
-				new Among("am", -1, 1, "", null),
-				new Among("eam", 54, 1, "", null),
-				new Among("iam", 54, 1, "", null),
-				new Among("em", -1, 2, "", null),
-				new Among("asem", 57, 1, "", null),
-				new Among("sesem", 57, 2, "", null),
-				new Among("isem", 57, 1, "", null),
-				new Among("usem", 57, 1, "", null),
-				new Among("\u00E2sem", 57, 1, "", null),
-				new Among("im", -1, 2, "", null),
-				new Among("\u00E2m", -1, 2, "", null),
-				new Among("\u0103m", -1, 2, "", null),
-				new Among("ar\u0103m", 65, 1, "", null),
-				new Among("ser\u0103m", 65, 2, "", null),
-				new Among("aser\u0103m", 67, 1, "", null),
-				new Among("seser\u0103m", 67, 2, "", null),
-				new Among("iser\u0103m", 67, 1, "", null),
-				new Among("user\u0103m", 67, 1, "", null),
-				new Among("\u00E2ser\u0103m", 67, 1, "", null),
-				new Among("ir\u0103m", 65, 1, "", null),
-				new Among("ur\u0103m", 65, 1, "", null),
-				new Among("\u00E2r\u0103m", 65, 1, "", null),
-				new Among("au", -1, 1, "", null),
-				new Among("eau", 76, 1, "", null),
-				new Among("iau", 76, 1, "", null),
-				new Among("indu", -1, 1, "", null),
-				new Among("\u00E2ndu", -1, 1, "", null),
-				new Among("ez", -1, 1, "", null),
-				new Among("easc\u0103", -1, 1, "", null),
-				new Among("ar\u0103", -1, 1, "", null),
-				new Among("ser\u0103", -1, 2, "", null),
-				new Among("aser\u0103", 84, 1, "", null),
-				new Among("seser\u0103", 84, 2, "", null),
-				new Among("iser\u0103", 84, 1, "", null),
-				new Among("user\u0103", 84, 1, "", null),
-				new Among("\u00E2ser\u0103", 84, 1, "", null),
-				new Among("ir\u0103", -1, 1, "", null),
-				new Among("ur\u0103", -1, 1, "", null),
-				new Among("\u00E2r\u0103", -1, 1, "", null),
-				new Among("eaz\u0103", -1, 1, "", null)
-			};
+                new Among("ea", -1, 1, "", null),
+                new Among("ia", -1, 1, "", null),
+                new Among("esc", -1, 1, "", null),
+                new Among("\u0103sc", -1, 1, "", null),
+                new Among("ind", -1, 1, "", null),
+                new Among("\u00E2nd", -1, 1, "", null),
+                new Among("are", -1, 1, "", null),
+                new Among("ere", -1, 1, "", null),
+                new Among("ire", -1, 1, "", null),
+                new Among("\u00E2re", -1, 1, "", null),
+                new Among("se", -1, 2, "", null),
+                new Among("ase", 10, 1, "", null),
+                new Among("sese", 10, 2, "", null),
+                new Among("ise", 10, 1, "", null),
+                new Among("use", 10, 1, "", null),
+                new Among("\u00E2se", 10, 1, "", null),
+                new Among("e\u015Fte", -1, 1, "", null),
+                new Among("\u0103\u015Fte", -1, 1, "", null),
+                new Among("eze", -1, 1, "", null),
+                new Among("ai", -1, 1, "", null),
+                new Among("eai", 19, 1, "", null),
+                new Among("iai", 19, 1, "", null),
+                new Among("sei", -1, 2, "", null),
+                new Among("e\u015Fti", -1, 1, "", null),
+                new Among("\u0103\u015Fti", -1, 1, "", null),
+                new Among("ui", -1, 1, "", null),
+                new Among("ezi", -1, 1, "", null),
+                new Among("\u00E2i", -1, 1, "", null),
+                new Among("a\u015Fi", -1, 1, "", null),
+                new Among("se\u015Fi", -1, 2, "", null),
+                new Among("ase\u015Fi", 29, 1, "", null),
+                new Among("sese\u015Fi", 29, 2, "", null),
+                new Among("ise\u015Fi", 29, 1, "", null),
+                new Among("use\u015Fi", 29, 1, "", null),
+                new Among("\u00E2se\u015Fi", 29, 1, "", null),
+                new Among("i\u015Fi", -1, 1, "", null),
+                new Among("u\u015Fi", -1, 1, "", null),
+                new Among("\u00E2\u015Fi", -1, 1, "", null),
+                new Among("a\u0163i", -1, 2, "", null),
+                new Among("ea\u0163i", 38, 1, "", null),
+                new Among("ia\u0163i", 38, 1, "", null),
+                new Among("e\u0163i", -1, 2, "", null),
+                new Among("i\u0163i", -1, 2, "", null),
+                new Among("\u00E2\u0163i", -1, 2, "", null),
+                new Among("ar\u0103\u0163i", -1, 1, "", null),
+                new Among("ser\u0103\u0163i", -1, 2, "", null),
+                new Among("aser\u0103\u0163i", 45, 1, "", null),
+                new Among("seser\u0103\u0163i", 45, 2, "", null),
+                new Among("iser\u0103\u0163i", 45, 1, "", null),
+                new Among("user\u0103\u0163i", 45, 1, "", null),
+                new Among("\u00E2ser\u0103\u0163i", 45, 1, "", null),
+                new Among("ir\u0103\u0163i", -1, 1, "", null),
+                new Among("ur\u0103\u0163i", -1, 1, "", null),
+                new Among("\u00E2r\u0103\u0163i", -1, 1, "", null),
+                new Among("am", -1, 1, "", null),
+                new Among("eam", 54, 1, "", null),
+                new Among("iam", 54, 1, "", null),
+                new Among("em", -1, 2, "", null),
+                new Among("asem", 57, 1, "", null),
+                new Among("sesem", 57, 2, "", null),
+                new Among("isem", 57, 1, "", null),
+                new Among("usem", 57, 1, "", null),
+                new Among("\u00E2sem", 57, 1, "", null),
+                new Among("im", -1, 2, "", null),
+                new Among("\u00E2m", -1, 2, "", null),
+                new Among("\u0103m", -1, 2, "", null),
+                new Among("ar\u0103m", 65, 1, "", null),
+                new Among("ser\u0103m", 65, 2, "", null),
+                new Among("aser\u0103m", 67, 1, "", null),
+                new Among("seser\u0103m", 67, 2, "", null),
+                new Among("iser\u0103m", 67, 1, "", null),
+                new Among("user\u0103m", 67, 1, "", null),
+                new Among("\u00E2ser\u0103m", 67, 1, "", null),
+                new Among("ir\u0103m", 65, 1, "", null),
+                new Among("ur\u0103m", 65, 1, "", null),
+                new Among("\u00E2r\u0103m", 65, 1, "", null),
+                new Among("au", -1, 1, "", null),
+                new Among("eau", 76, 1, "", null),
+                new Among("iau", 76, 1, "", null),
+                new Among("indu", -1, 1, "", null),
+                new Among("\u00E2ndu", -1, 1, "", null),
+                new Among("ez", -1, 1, "", null),
+                new Among("easc\u0103", -1, 1, "", null),
+                new Among("ar\u0103", -1, 1, "", null),
+                new Among("ser\u0103", -1, 2, "", null),
+                new Among("aser\u0103", 84, 1, "", null),
+                new Among("seser\u0103", 84, 2, "", null),
+                new Among("iser\u0103", 84, 1, "", null),
+                new Among("user\u0103", 84, 1, "", null),
+                new Among("\u00E2ser\u0103", 84, 1, "", null),
+                new Among("ir\u0103", -1, 1, "", null),
+                new Among("ur\u0103", -1, 1, "", null),
+                new Among("\u00E2r\u0103", -1, 1, "", null),
+                new Among("eaz\u0103", -1, 1, "", null)
+            };
 
             a_5 = new Among[] {
-				new Among("a", -1, 1, "", null),
-				new Among("e", -1, 1, "", null),
-				new Among("ie", 1, 1, "", null),
-				new Among("i", -1, 1, "", null),
-				new Among("\u0103", -1, 1, "", null)
-			};
+                new Among("a", -1, 1, "", null),
+                new Among("e", -1, 1, "", null),
+                new Among("ie", 1, 1, "", null),
+                new Among("i", -1, 1, "", null),
+                new Among("\u0103", -1, 1, "", null)
+            };
 
         }
 

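The Among tables above are machine-generated from the Snowball script: each
entry pairs a suffix with the index of the shorter entry it extends (-1 for
none), a result code consumed by the stemming rules, and an optional bounded
method. Invocation is uniform across the SF.Snowball.Ext stemmers; a minimal
sketch, assuming the usual generated SnowballProgram surface (SetCurrent, Stem,
GetCurrent):

    using System;
    using SF.Snowball.Ext;

    class StemDemo
    {
        static void Main()
        {
            var stemmer = new RomanianStemmer();
            stemmer.SetCurrent("frumoase");              // word to stem
            if (stemmer.Stem())
                Console.WriteLine(stemmer.GetCurrent()); // the stemmed form
        }
    }
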
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/RussianStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/RussianStemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/RussianStemmer.cs
index dad28aa..1322527 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/RussianStemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/RussianStemmer.cs
@@ -24,752 +24,752 @@ namespace SF.Snowball.Ext
 #pragma warning disable 162,164
     
     /// <summary> Generated class implementing code defined by a snowball script.</summary>
-	public class RussianStemmer : SnowballProgram
-	{
-		public RussianStemmer()
-		{
-			InitBlock();
-		}
-		private void  InitBlock()
-		{
-			a_0 = new Among[]{new Among("\u00D7\u00DB\u00C9", - 1, 1, "", this), new Among("\u00C9\u00D7\u00DB\u00C9", 0, 2, "", this), new Among("\u00D9\u00D7\u00DB\u00C9", 0, 2, "", this), new Among("\u00D7", - 1, 1, "", this), new Among("\u00C9\u00D7", 3, 2, "", this), new Among("\u00D9\u00D7", 3, 2, "", this), new Among("\u00D7\u00DB\u00C9\u00D3\u00D8", - 1, 1, "", this), new Among("\u00C9\u00D7\u00DB\u00C9\u00D3\u00D8", 6, 2, "", this), new Among("\u00D9\u00D7\u00DB\u00C9\u00D3\u00D8", 6, 2, "", this)};
-			a_1 = new Among[]{new Among("\u00C0\u00C0", - 1, 1, "", this), new Among("\u00C5\u00C0", - 1, 1, "", this), new Among("\u00CF\u00C0", - 1, 1, "", this), new Among("\u00D5\u00C0", - 1, 1, "", this), new Among("\u00C5\u00C5", - 1, 1, "", this), new Among("\u00C9\u00C5", - 1, 1, "", this), new Among("\u00CF\u00C5", - 1, 1, "", this), new Among("\u00D9\u00C5", - 1, 1, "", this), new Among("\u00C9\u00C8", - 1, 1, "", this), new Among("\u00D9\u00C8", - 1, 1, "", this), new Among("\u00C9\u00CD\u00C9", - 1, 1, "", this), new Among("\u00D9\u00CD\u00C9", - 1, 1, "", this), new Among("\u00C5\u00CA", - 1, 1, "", this), new Among("\u00C9\u00CA", - 1, 1, "", this), new Among("\u00CF\u00CA", - 1, 1, "", this), new Among("\u00D9\u00CA", - 1, 1, "", this), new Among("\u00C5\u00CD", - 1, 1, "", this), new Among("\u00C9\u00CD", - 1, 1, "", this), new Among("\u00CF\u00CD", - 1, 1, "", this), new Among("\u00D9\u00CD", - 1, 1, "", this), new Among("\u00C5\u00C7\u00CF", - 1, 1, "", this), new Among("\u00CF\u00C7\u00CF", - 1, 1, "", this), new Among("\u00C1\u00D1", - 1, 1, "", this), new Among("\u00D1\u00D1", - 1, 1, "", this), new Among("\u00C5\u00CD\u00D5", - 1, 1, "", this), new Among("\u00CF\u00CD\u00D5", - 1, 1, "", this)};
-			a_2 = new Among[]{new Among("\u00C5\u00CD", - 1, 1, "", this), new Among("\u00CE\u00CE", - 1, 1, "", this), new Among("\u00D7\u00DB", - 1, 1, "", this), new Among("\u00C9\u00D7\u00DB", 2, 2, "", this), new Among("\u00D9\u00D7\u00DB", 2, 2, "", this), new Among("\u00DD", - 1, 1, "", this), new Among("\u00C0\u00DD", 5, 1, "", this), new Among("\u00D5\u00C0\u00DD", 6, 2, "", this)};
-			a_3 = new Among[]{new Among("\u00D3\u00D1", - 1, 1, "", this), new Among("\u00D3\u00D8", - 1, 1, "", this)};
-			a_4 = new Among[]{new Among("\u00C0", - 1, 2, "", this), new Among("\u00D5\u00C0", 0, 2, "", this), new Among("\u00CC\u00C1", - 1, 1, "", this), new Among("\u00C9\u00CC\u00C1", 2, 2, "", this), new Among("\u00D9\u00CC\u00C1", 2, 2, "", this), new Among("\u00CE\u00C1", - 1, 1, "", this), new Among("\u00C5\u00CE\u00C1", 5, 2, "", this), new Among("\u00C5\u00D4\u00C5", - 1, 1, "", this), new Among("\u00C9\u00D4\u00C5", - 1, 2, "", this), new Among("\u00CA\u00D4\u00C5", - 1, 1, "", this), new Among("\u00C5\u00CA\u00D4\u00C5", 9, 2, "", this), new Among("\u00D5\u00CA\u00D4\u00C5", 9, 2, "", this), new Among("\u00CC\u00C9", - 1, 1, "", this), new Among("\u00C9\u00CC\u00C9", 12, 2, "", this), new Among("\u00D9\u00CC\u00C9", 12, 2, "", this), new Among("\u00CA", - 1, 1, "", this), new Among("\u00C5\u00CA", 15, 2, "", this), new Among("\u00D5\u00CA", 15, 2, "", this), new Among("\u00CC", - 1, 1, "", this), new Among("\u00C9\u00CC", 18, 2, "", this), new Among("\u00D9\u00CC", 18, 2, "", this), new Among("\u00C5\u00CD", - 1, 1, "", this), new Among("\u00C9\u00CD", - 1, 2, "", this), new Among("\u00D9\u00CD", - 1, 2, "", this), new Among("\u00CE", - 1, 1, "", this), new Among("\u00C5\u00CE", 24, 2, "", this), new Among("\u00CC\u00CF", - 1, 1, "", this), new Among("\u00C9\u00CC\u00CF", 26, 2, "", this), new Among("\u00D9\u00CC\u00CF", 26, 2, "", this), new Among("\u00CE\u00CF", - 1, 1, "", this), new Among("\u00C5\u00CE\u00CF", 29, 2, "", this), new Among("\u00CE\u00CE\u00CF", 29, 1, "", this), new Among("\u00C0\u00D4", - 1, 1, "", this), new Among("\u00D5\u00C0\u00D4", 32, 2, "", this), new Among("\u00C5\u00D4", - 1, 1, "", this), new Among("\u00D5\u00C5\u00D4", 34, 2, "", this), new Among("\u00C9\u00D4", - 1, 2, "", this), new Among("\u00D1\u00D4", - 1, 2, "", this), new Among("\u00D9\u00D4", - 1, 2, "", this), new Among("\u00D4\u00D8", - 1, 1, "", this), new Among("\u00C9\u00D4\u00D8", 39, 2, "", this), new Among("\u00D9\u00D4\u00D8", 39, 2, "", this), new Among("\u00C5\u00DB\u00D8", - 1, 1, "", this), 
-				new Among("\u00C9\u00DB\u00D8", - 1, 2, "", this), new Among("\u00CE\u00D9", - 1, 1, "", this), new Among("\u00C5\u00CE\u00D9", 44, 2, "", this)};
-			a_5 = new Among[]{new Among("\u00C0", - 1, 1, "", this), new Among("\u00C9\u00C0", 0, 1, "", this), new Among("\u00D8\u00C0", 0, 1, "", this), new Among("\u00C1", - 1, 1, "", this), new Among("\u00C5", - 1, 1, "", this), new Among("\u00C9\u00C5", 4, 1, "", this), new Among("\u00D8\u00C5", 4, 1, "", this), new Among("\u00C1\u00C8", - 1, 1, "", this), new Among("\u00D1\u00C8", - 1, 1, "", this), new Among("\u00C9\u00D1\u00C8", 8, 1, "", this), new Among("\u00C9", - 1, 1, "", this), new Among("\u00C5\u00C9", 10, 1, "", this), new Among("\u00C9\u00C9", 10, 1, "", this), new Among("\u00C1\u00CD\u00C9", 10, 1, "", this), new Among("\u00D1\u00CD\u00C9", 10, 1, "", this), new Among("\u00C9\u00D1\u00CD\u00C9", 14, 1, "", this), new Among("\u00CA", - 1, 1, "", this), new Among("\u00C5\u00CA", 16, 1, "", this), new Among("\u00C9\u00C5\u00CA", 17, 1, "", this), new Among("\u00C9\u00CA", 16, 1, "", this), new Among("\u00CF\u00CA", 16, 1, "", this), new Among("\u00C1\u00CD", - 1, 1, "", this), new Among("\u00C5\u00CD", - 1, 1, "", this), new Among("\u00C9\u00C5\u00CD", 22, 1, "", this), new Among("\u00CF\u00CD", - 1, 1, "", this), new Among("\u00D1\u00CD", - 1, 1, "", this), new Among("\u00C9\u00D1\u00CD", 25, 1, "", this), new Among("\u00CF", - 1, 1, "", this), new Among("\u00D1", - 1, 1, "", this), new Among("\u00C9\u00D1", 28, 1, "", this), new Among("\u00D8\u00D1", 28, 1, "", this), new Among("\u00D5", - 1, 1, "", this), new Among("\u00C5\u00D7", - 1, 1, "", this), new Among("\u00CF\u00D7", - 1, 1, "", this), new Among("\u00D8", - 1, 1, "", this), new Among("\u00D9", - 1, 1, "", this)};
-			a_6 = new Among[]{new Among("\u00CF\u00D3\u00D4", - 1, 1, "", this), new Among("\u00CF\u00D3\u00D4\u00D8", - 1, 1, "", this)};
-			a_7 = new Among[]{new Among("\u00C5\u00CA\u00DB\u00C5", - 1, 1, "", this), new Among("\u00CE", - 1, 2, "", this), new Among("\u00D8", - 1, 3, "", this), new Among("\u00C5\u00CA\u00DB", - 1, 1, "", this)};
-		}
-		
-		private Among[] a_0;
-		private Among[] a_1;
-		private Among[] a_2;
-		private Among[] a_3;
-		private Among[] a_4;
-		private Among[] a_5;
-		private Among[] a_6;
-		private Among[] a_7;
-		private static readonly char[] g_v = new char[]{(char) (35), (char) (130), (char) (34), (char) (18)};
-		
-		private int I_p2;
-		private int I_pV;
-		
-		protected internal virtual void  copy_from(RussianStemmer other)
-		{
-			I_p2 = other.I_p2;
-			I_pV = other.I_pV;
-			base.copy_from(other);
-		}
-		
-		private bool r_mark_regions()
-		{
-			int v_1;
-			// (, line 96
-			I_pV = limit;
-			I_p2 = limit;
-			// do, line 100
-			v_1 = cursor;
-			do 
-			{
-				// (, line 100
-				// gopast, line 101
-				while (true)
-				{
-					do 
-					{
-						if (!(in_grouping(g_v, 192, 220)))
-						{
-							goto lab2_brk;
-						}
-						goto golab1_brk;
-					}
-					while (false);
+    public class RussianStemmer : SnowballProgram
+    {
+        public RussianStemmer()
+        {
+            InitBlock();
+        }
+        private void  InitBlock()
+        {
+            a_0 = new Among[]{new Among("\u00D7\u00DB\u00C9", - 1, 1, "", this), new Among("\u00C9\u00D7\u00DB\u00C9", 0, 2, "", this), new Among("\u00D9\u00D7\u00DB\u00C9", 0, 2, "", this), new Among("\u00D7", - 1, 1, "", this), new Among("\u00C9\u00D7", 3, 2, "", this), new Among("\u00D9\u00D7", 3, 2, "", this), new Among("\u00D7\u00DB\u00C9\u00D3\u00D8", - 1, 1, "", this), new Among("\u00C9\u00D7\u00DB\u00C9\u00D3\u00D8", 6, 2, "", this), new Among("\u00D9\u00D7\u00DB\u00C9\u00D3\u00D8", 6, 2, "", this)};
+            a_1 = new Among[]{new Among("\u00C0\u00C0", - 1, 1, "", this), new Among("\u00C5\u00C0", - 1, 1, "", this), new Among("\u00CF\u00C0", - 1, 1, "", this), new Among("\u00D5\u00C0", - 1, 1, "", this), new Among("\u00C5\u00C5", - 1, 1, "", this), new Among("\u00C9\u00C5", - 1, 1, "", this), new Among("\u00CF\u00C5", - 1, 1, "", this), new Among("\u00D9\u00C5", - 1, 1, "", this), new Among("\u00C9\u00C8", - 1, 1, "", this), new Among("\u00D9\u00C8", - 1, 1, "", this), new Among("\u00C9\u00CD\u00C9", - 1, 1, "", this), new Among("\u00D9\u00CD\u00C9", - 1, 1, "", this), new Among("\u00C5\u00CA", - 1, 1, "", this), new Among("\u00C9\u00CA", - 1, 1, "", this), new Among("\u00CF\u00CA", - 1, 1, "", this), new Among("\u00D9\u00CA", - 1, 1, "", this), new Among("\u00C5\u00CD", - 1, 1, "", this), new Among("\u00C9\u00CD", - 1, 1, "", this), new Among("\u00CF\u00CD", - 1, 1, "", this), new Among("\u00D9\u00CD", - 1, 1, "", this), new Among("\u00C5\u00C7\u00CF", - 1, 1, "", this), new Among("\u00CF\u00C7\u00CF", - 1, 1, "", this), new Among("\u00C1\u00D1", - 1, 1, "", this), new Among("\u00D1\u00D1", - 1, 1, "", this), new Among("\u00C5\u00CD\u00D5", - 1, 1, "", this), new Among("\u00CF\u00CD\u00D5", - 1, 1, "", this)};
+            a_2 = new Among[]{new Among("\u00C5\u00CD", - 1, 1, "", this), new Among("\u00CE\u00CE", - 1, 1, "", this), new Among("\u00D7\u00DB", - 1, 1, "", this), new Among("\u00C9\u00D7\u00DB", 2, 2, "", this), new Among("\u00D9\u00D7\u00DB", 2, 2, "", this), new Among("\u00DD", - 1, 1, "", this), new Among("\u00C0\u00DD", 5, 1, "", this), new Among("\u00D5\u00C0\u00DD", 6, 2, "", this)};
+            a_3 = new Among[]{new Among("\u00D3\u00D1", - 1, 1, "", this), new Among("\u00D3\u00D8", - 1, 1, "", this)};
+            a_4 = new Among[]{new Among("\u00C0", - 1, 2, "", this), new Among("\u00D5\u00C0", 0, 2, "", this), new Among("\u00CC\u00C1", - 1, 1, "", this), new Among("\u00C9\u00CC\u00C1", 2, 2, "", this), new Among("\u00D9\u00CC\u00C1", 2, 2, "", this), new Among("\u00CE\u00C1", - 1, 1, "", this), new Among("\u00C5\u00CE\u00C1", 5, 2, "", this), new Among("\u00C5\u00D4\u00C5", - 1, 1, "", this), new Among("\u00C9\u00D4\u00C5", - 1, 2, "", this), new Among("\u00CA\u00D4\u00C5", - 1, 1, "", this), new Among("\u00C5\u00CA\u00D4\u00C5", 9, 2, "", this), new Among("\u00D5\u00CA\u00D4\u00C5", 9, 2, "", this), new Among("\u00CC\u00C9", - 1, 1, "", this), new Among("\u00C9\u00CC\u00C9", 12, 2, "", this), new Among("\u00D9\u00CC\u00C9", 12, 2, "", this), new Among("\u00CA", - 1, 1, "", this), new Among("\u00C5\u00CA", 15, 2, "", this), new Among("\u00D5\u00CA", 15, 2, "", this), new Among("\u00CC", - 1, 1, "", this), new Among("\u00C9\u00CC", 18, 2, "", this), new Among("\u00D9\u00CC", 18, 2, "", this), new Among("\u00C5\u00CD", - 1, 1, "", this), new Among("\u00C9\u00CD", - 1, 2, "", this), new Among("\u00D9\u00CD", - 1, 2, "", this), new Among("\u00CE", - 1, 1, "", this), new Among("\u00C5\u00CE", 24, 2, "", this), new Among("\u00CC\u00CF", - 1, 1, "", this), new Among("\u00C9\u00CC\u00CF", 26, 2, "", this), new Among("\u00D9\u00CC\u00CF", 26, 2, "", this), new Among("\u00CE\u00CF", - 1, 1, "", this), new Among("\u00C5\u00CE\u00CF", 29, 2, "", this), new Among("\u00CE\u00CE\u00CF", 29, 1, "", this), new Among("\u00C0\u00D4", - 1, 1, "", this), new Among("\u00D5\u00C0\u00D4", 32, 2, "", this), new Among("\u00C5\u00D4", - 1, 1, "", this), new Among("\u00D5\u00C5\u00D4", 34, 2, "", this), new Among("\u00C9\u00D4", - 1, 2, "", this), new Among("\u00D1\u00D4", - 1, 2, "", this), new Among("\u00D9\u00D4", - 1, 2, "", this), new Among("\u00D4\u00D8", - 1, 1, "", this), new Among("\u00C9\u00D4\u00D8", 39, 2, "", this), new Among("\u00D9\u00D4\u00D8", 39, 2, "", this), new Among("\u00C5\u00DB\u00D8", - 1, 1, "", this), 
+                new Among("\u00C9\u00DB\u00D8", - 1, 2, "", this), new Among("\u00CE\u00D9", - 1, 1, "", this), new Among("\u00C5\u00CE\u00D9", 44, 2, "", this)};
+            a_5 = new Among[]{new Among("\u00C0", - 1, 1, "", this), new Among("\u00C9\u00C0", 0, 1, "", this), new Among("\u00D8\u00C0", 0, 1, "", this), new Among("\u00C1", - 1, 1, "", this), new Among("\u00C5", - 1, 1, "", this), new Among("\u00C9\u00C5", 4, 1, "", this), new Among("\u00D8\u00C5", 4, 1, "", this), new Among("\u00C1\u00C8", - 1, 1, "", this), new Among("\u00D1\u00C8", - 1, 1, "", this), new Among("\u00C9\u00D1\u00C8", 8, 1, "", this), new Among("\u00C9", - 1, 1, "", this), new Among("\u00C5\u00C9", 10, 1, "", this), new Among("\u00C9\u00C9", 10, 1, "", this), new Among("\u00C1\u00CD\u00C9", 10, 1, "", this), new Among("\u00D1\u00CD\u00C9", 10, 1, "", this), new Among("\u00C9\u00D1\u00CD\u00C9", 14, 1, "", this), new Among("\u00CA", - 1, 1, "", this), new Among("\u00C5\u00CA", 16, 1, "", this), new Among("\u00C9\u00C5\u00CA", 17, 1, "", this), new Among("\u00C9\u00CA", 16, 1, "", this), new Among("\u00CF\u00CA", 16, 1, "", this), new Among("\u00C1\u00CD", - 1, 1, "", this), new Among("\u00C5\u00CD", - 1, 1, "", this), new Among("\u00C9\u00C5\u00CD", 22, 1, "", this), new Among("\u00CF\u00CD", - 1, 1, "", this), new Among("\u00D1\u00CD", - 1, 1, "", this), new Among("\u00C9\u00D1\u00CD", 25, 1, "", this), new Among("\u00CF", - 1, 1, "", this), new Among("\u00D1", - 1, 1, "", this), new Among("\u00C9\u00D1", 28, 1, "", this), new Among("\u00D8\u00D1", 28, 1, "", this), new Among("\u00D5", - 1, 1, "", this), new Among("\u00C5\u00D7", - 1, 1, "", this), new Among("\u00CF\u00D7", - 1, 1, "", this), new Among("\u00D8", - 1, 1, "", this), new Among("\u00D9", - 1, 1, "", this)};
+            a_6 = new Among[]{new Among("\u00CF\u00D3\u00D4", - 1, 1, "", this), new Among("\u00CF\u00D3\u00D4\u00D8", - 1, 1, "", this)};
+            a_7 = new Among[]{new Among("\u00C5\u00CA\u00DB\u00C5", - 1, 1, "", this), new Among("\u00CE", - 1, 2, "", this), new Among("\u00D8", - 1, 3, "", this), new Among("\u00C5\u00CA\u00DB", - 1, 1, "", this)};
+        }
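
A note for readers of the tables above: the \u00C0-\u00DF escapes are not Latin-1 text. This generation of the Snowball Russian stemmer operates on KOI8-R bytes widened to chars, so each escape is one Cyrillic letter, and each Among row follows the standard Snowball runtime shape: suffix string, index of the longest entry that is a proper suffix of it (or -1), result value for the calling rule's switch, optional guard routine name, and the owning program. A small sketch, not part of this commit, showing how to read one literal back; it assumes the koi8-r codepage is available to Encoding.GetEncoding:

    using System;
    using System.Text;

    static class AmongLiteralDemo
    {
        static void Main()
        {
            // Second a_0 entry above; each char carries a KOI8-R byte value,
            // so narrow the chars back to bytes and decode them as KOI8-R.
            string escaped = "\u00C9\u00D7\u00DB\u00C9";
            byte[] koi = Array.ConvertAll(escaped.ToCharArray(), c => (byte) c);
            Console.WriteLine(Encoding.GetEncoding("koi8-r").GetString(koi)); // "ивши"
        }
    }
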
+        
+        private Among[] a_0;
+        private Among[] a_1;
+        private Among[] a_2;
+        private Among[] a_3;
+        private Among[] a_4;
+        private Among[] a_5;
+        private Among[] a_6;
+        private Among[] a_7;
+        private static readonly char[] g_v = new char[]{(char) (35), (char) (130), (char) (34), (char) (18)};
+        
+        private int I_p2;
+        private int I_pV;
+        
+        protected internal virtual void  copy_from(RussianStemmer other)
+        {
+            I_p2 = other.I_p2;
+            I_pV = other.I_pV;
+            base.copy_from(other);
+        }
+        
+        private bool r_mark_regions()
+        {
+            int v_1;
+            // (, line 96
+            I_pV = limit;
+            I_p2 = limit;
+            // do, line 100
+            v_1 = cursor;
+            do 
+            {
+                // (, line 100
+                // gopast, line 101
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(in_grouping(g_v, 192, 220)))
+                        {
+                            goto lab2_brk;
+                        }
+                        goto golab1_brk;
+                    }
+                    while (false);
 
 lab2_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab0_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab0_brk;
+                    }
+                    cursor++;
+                }
 
 golab1_brk: ;
-				
-				// setmark pV, line 101
-				I_pV = cursor;
-				// gopast, line 101
-				while (true)
-				{
-					do 
-					{
-						if (!(out_grouping(g_v, 192, 220)))
-						{
-							goto lab4_brk;
-						}
-						goto golab3_brk;
-					}
-					while (false);
+                
+                // setmark pV, line 101
+                I_pV = cursor;
+                // gopast, line 101
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(out_grouping(g_v, 192, 220)))
+                        {
+                            goto lab4_brk;
+                        }
+                        goto golab3_brk;
+                    }
+                    while (false);
 
 lab4_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab0_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab0_brk;
+                    }
+                    cursor++;
+                }
 
 golab3_brk: ;
-				
-				// gopast, line 102
-				while (true)
-				{
-					do 
-					{
-						if (!(in_grouping(g_v, 192, 220)))
-						{
-							goto lab6_brk;
-						}
-						goto golab5_brk;
-					}
-					while (false);
+                
+                // gopast, line 102
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(in_grouping(g_v, 192, 220)))
+                        {
+                            goto lab6_brk;
+                        }
+                        goto golab5_brk;
+                    }
+                    while (false);
 
 lab6_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab0_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab0_brk;
+                    }
+                    cursor++;
+                }
 
 golab5_brk: ;
-				
-				// gopast, line 102
-				while (true)
-				{
-					do 
-					{
-						if (!(out_grouping(g_v, 192, 220)))
-						{
-							goto lab8_brk;
-						}
-						goto golab7_brk;
-					}
-					while (false);
+                
+                // gopast, line 102
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(out_grouping(g_v, 192, 220)))
+                        {
+                            goto lab8_brk;
+                        }
+                        goto golab7_brk;
+                    }
+                    while (false);
 
 lab8_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab0_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab0_brk;
+                    }
+                    cursor++;
+                }
 
 golab7_brk: ;
-				
-				// setmark p2, line 102
-				I_p2 = cursor;
-			}
-			while (false);
+                
+                // setmark p2, line 102
+                I_p2 = cursor;
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = v_1;
-			return true;
-		}
-		
-		private bool r_R2()
-		{
-			if (!(I_p2 <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_perfective_gerund()
-		{
-			int among_var;
-			int v_1;
-			// (, line 110
-			// [, line 111
-			ket = cursor;
-			// substring, line 111
-			among_var = find_among_b(a_0, 9);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 111
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 115
-					// or, line 115
+            
+            cursor = v_1;
+            return true;
+        }
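
Stripped of the generated goto/label scaffolding, r_mark_regions computes the two standard Snowball regions: pV is the position just past the first vowel, and p2 (the R2 region) lies past the second vowel-then-non-vowel run; g_v is a packed bitset covering the KOI8-R vowel range 192-220. A readable restatement of the same logic, offered as a sketch rather than the generated form, with isVowel standing in for in_grouping(g_v, 192, 220):

    // Requires: using System;
    static void MarkRegions(string s, Func<char, bool> isVowel, out int pV, out int p2)
    {
        int n = s.Length, i = 0;
        pV = n; p2 = n;                          // both regions empty by default
        while (i < n && !isVowel(s[i])) i++;     // gopast vowel...
        if (i == n) return;
        pV = ++i;                                // ...and mark just past it
        while (i < n && isVowel(s[i])) i++;      // gopast non-vowel
        if (i == n) return; i++;
        while (i < n && !isVowel(s[i])) i++;     // gopast vowel
        if (i == n) return; i++;
        while (i < n && isVowel(s[i])) i++;      // gopast non-vowel...
        if (i == n) return;
        p2 = ++i;                                // ...and mark p2 just past it
    }
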
+        
+        private bool r_R2()
+        {
+            if (!(I_p2 <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_perfective_gerund()
+        {
+            int among_var;
+            int v_1;
+            // (, line 110
+            // [, line 111
+            ket = cursor;
+            // substring, line 111
+            among_var = find_among_b(a_0, 9);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 111
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 115
+                    // or, line 115
 lab1: 
-					do 
-					{
-						v_1 = limit - cursor;
-						do 
-						{
-							// literal, line 115
-							if (!(eq_s_b(1, "\u00C1")))
-							{
-								goto lab1_brk;
-							}
-							goto lab1_brk;
-						}
-						while (false);
+                    do 
+                    {
+                        v_1 = limit - cursor;
+                        do 
+                        {
+                            // literal, line 115
+                            if (!(eq_s_b(1, "\u00C1")))
+                            {
+                                goto lab1_brk;
+                            }
+                            goto lab1_brk;
+                        }
+                        while (false);
 
 lab1_brk: ;
-						
-						cursor = limit - v_1;
-						// literal, line 115
-						if (!(eq_s_b(1, "\u00D1")))
-						{
-							return false;
-						}
-					}
-					while (false);
-					// delete, line 115
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 122
-					// delete, line 122
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_adjective()
-		{
-			int among_var;
-			// (, line 126
-			// [, line 127
-			ket = cursor;
-			// substring, line 127
-			among_var = find_among_b(a_1, 26);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 127
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 136
-					// delete, line 136
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_adjectival()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			// (, line 140
-			// call adjective, line 141
-			if (!r_adjective())
-			{
-				return false;
-			}
-			// try, line 148
-			v_1 = limit - cursor;
-			do 
-			{
-				// (, line 148
-				// [, line 149
-				ket = cursor;
-				// substring, line 149
-				among_var = find_among_b(a_2, 8);
-				if (among_var == 0)
-				{
-					cursor = limit - v_1;
-					goto lab0_brk;
-				}
-				// ], line 149
-				bra = cursor;
-				switch (among_var)
-				{
-					
-					case 0: 
-						cursor = limit - v_1;
-						goto lab0_brk;
-					
-					case 1: 
-						// (, line 154
-						// or, line 154
-						do 
-						{
-							v_2 = limit - cursor;
-							do 
-							{
-								// literal, line 154
-								if (!(eq_s_b(1, "\u00C1")))
-								{
-									goto lab2_brk;
-								}
-								goto lab1_brk;
-							}
-							while (false);
+                        
+                        cursor = limit - v_1;
+                        // literal, line 115
+                        if (!(eq_s_b(1, "\u00D1")))
+                        {
+                            return false;
+                        }
+                    }
+                    while (false);
+                    // delete, line 115
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 122
+                    // delete, line 122
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_adjective()
+        {
+            int among_var;
+            // (, line 126
+            // [, line 127
+            ket = cursor;
+            // substring, line 127
+            among_var = find_among_b(a_1, 26);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 127
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 136
+                    // delete, line 136
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_adjectival()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            // (, line 140
+            // call adjective, line 141
+            if (!r_adjective())
+            {
+                return false;
+            }
+            // try, line 148
+            v_1 = limit - cursor;
+            do 
+            {
+                // (, line 148
+                // [, line 149
+                ket = cursor;
+                // substring, line 149
+                among_var = find_among_b(a_2, 8);
+                if (among_var == 0)
+                {
+                    cursor = limit - v_1;
+                    goto lab0_brk;
+                }
+                // ], line 149
+                bra = cursor;
+                switch (among_var)
+                {
+                    
+                    case 0: 
+                        cursor = limit - v_1;
+                        goto lab0_brk;
+                    
+                    case 1: 
+                        // (, line 154
+                        // or, line 154
+                        do 
+                        {
+                            v_2 = limit - cursor;
+                            do 
+                            {
+                                // literal, line 154
+                                if (!(eq_s_b(1, "\u00C1")))
+                                {
+                                    goto lab2_brk;
+                                }
+                                goto lab1_brk;
+                            }
+                            while (false);
 
 lab2_brk: ;
-							
-							cursor = limit - v_2;
-							// literal, line 154
-							if (!(eq_s_b(1, "\u00D1")))
-							{
-								cursor = limit - v_1;
-								goto lab0_brk;
-							}
-						}
-						while (false);
+                            
+                            cursor = limit - v_2;
+                            // literal, line 154
+                            if (!(eq_s_b(1, "\u00D1")))
+                            {
+                                cursor = limit - v_1;
+                                goto lab0_brk;
+                            }
+                        }
+                        while (false);
 
 lab1_brk: ;
-						
-						// delete, line 154
-						slice_del();
-						break;
-					
-					case 2: 
-						// (, line 161
-						// delete, line 161
-						slice_del();
-						break;
-					}
-			}
-			while (false);
+                        
+                        // delete, line 154
+                        slice_del();
+                        break;
+                    
+                    case 2: 
+                        // (, line 161
+                        // delete, line 161
+                        slice_del();
+                        break;
+                    }
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			return true;
-		}
-		
-		private bool r_reflexive()
-		{
-			int among_var;
-			// (, line 167
-			// [, line 168
-			ket = cursor;
-			// substring, line 168
-			among_var = find_among_b(a_3, 2);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 168
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 171
-					// delete, line 171
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_verb()
-		{
-			int among_var;
-			int v_1;
-			// (, line 175
-			// [, line 176
-			ket = cursor;
-			// substring, line 176
-			among_var = find_among_b(a_4, 46);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 176
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 182
-					// or, line 182
+            
+            return true;
+        }
+        
+        private bool r_reflexive()
+        {
+            int among_var;
+            // (, line 167
+            // [, line 168
+            ket = cursor;
+            // substring, line 168
+            among_var = find_among_b(a_3, 2);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 168
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 171
+                    // delete, line 171
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_verb()
+        {
+            int among_var;
+            int v_1;
+            // (, line 175
+            // [, line 176
+            ket = cursor;
+            // substring, line 176
+            among_var = find_among_b(a_4, 46);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 176
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 182
+                    // or, line 182
 lab3: 
-					do 
-					{
-						v_1 = limit - cursor;
-						do 
-						{
-							// literal, line 182
-							if (!(eq_s_b(1, "\u00C1")))
-							{
-								goto lab3_brk;
-							}
-							goto lab3_brk;
-						}
-						while (false);
+                    do 
+                    {
+                        v_1 = limit - cursor;
+                        do 
+                        {
+                            // literal, line 182
+                            if (!(eq_s_b(1, "\u00C1")))
+                            {
+                                goto lab3_brk;
+                            }
+                            goto lab3_brk;
+                        }
+                        while (false);
 
 lab3_brk: ;
-						
-						cursor = limit - v_1;
-						// literal, line 182
-						if (!(eq_s_b(1, "\u00D1")))
-						{
-							return false;
-						}
-					}
-					while (false);
-					// delete, line 182
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 190
-					// delete, line 190
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_noun()
-		{
-			int among_var;
-			// (, line 198
-			// [, line 199
-			ket = cursor;
-			// substring, line 199
-			among_var = find_among_b(a_5, 36);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 199
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 206
-					// delete, line 206
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_derivational()
-		{
-			int among_var;
-			// (, line 214
-			// [, line 215
-			ket = cursor;
-			// substring, line 215
-			among_var = find_among_b(a_6, 2);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 215
-			bra = cursor;
-			// call R2, line 215
-			if (!r_R2())
-			{
-				return false;
-			}
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 218
-					// delete, line 218
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_tidy_up()
-		{
-			int among_var;
-			// (, line 222
-			// [, line 223
-			ket = cursor;
-			// substring, line 223
-			among_var = find_among_b(a_7, 4);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 223
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 227
-					// delete, line 227
-					slice_del();
-					// [, line 228
-					ket = cursor;
-					// literal, line 228
-					if (!(eq_s_b(1, "\u00CE")))
-					{
-						return false;
-					}
-					// ], line 228
-					bra = cursor;
-					// literal, line 228
-					if (!(eq_s_b(1, "\u00CE")))
-					{
-						return false;
-					}
-					// delete, line 228
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 231
-					// literal, line 231
-					if (!(eq_s_b(1, "\u00CE")))
-					{
-						return false;
-					}
-					// delete, line 231
-					slice_del();
-					break;
-				
-				case 3: 
-					// (, line 233
-					// delete, line 233
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		public override bool Stem()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			int v_6;
-			int v_7;
-			int v_8;
-			int v_9;
-			int v_10;
-			// (, line 238
-			// do, line 240
-			v_1 = cursor;
-			do 
-			{
-				// call mark_regions, line 240
-				if (!r_mark_regions())
-				{
-					goto lab0_brk;
-				}
-			}
-			while (false);
+                        
+                        cursor = limit - v_1;
+                        // literal, line 182
+                        if (!(eq_s_b(1, "\u00D1")))
+                        {
+                            return false;
+                        }
+                    }
+                    while (false);
+                    // delete, line 182
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 190
+                    // delete, line 190
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_noun()
+        {
+            int among_var;
+            // (, line 198
+            // [, line 199
+            ket = cursor;
+            // substring, line 199
+            among_var = find_among_b(a_5, 36);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 199
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 206
+                    // delete, line 206
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_derivational()
+        {
+            int among_var;
+            // (, line 214
+            // [, line 215
+            ket = cursor;
+            // substring, line 215
+            among_var = find_among_b(a_6, 2);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 215
+            bra = cursor;
+            // call R2, line 215
+            if (!r_R2())
+            {
+                return false;
+            }
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 218
+                    // delete, line 218
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_tidy_up()
+        {
+            int among_var;
+            // (, line 222
+            // [, line 223
+            ket = cursor;
+            // substring, line 223
+            among_var = find_among_b(a_7, 4);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 223
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 227
+                    // delete, line 227
+                    slice_del();
+                    // [, line 228
+                    ket = cursor;
+                    // literal, line 228
+                    if (!(eq_s_b(1, "\u00CE")))
+                    {
+                        return false;
+                    }
+                    // ], line 228
+                    bra = cursor;
+                    // literal, line 228
+                    if (!(eq_s_b(1, "\u00CE")))
+                    {
+                        return false;
+                    }
+                    // delete, line 228
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 231
+                    // literal, line 231
+                    if (!(eq_s_b(1, "\u00CE")))
+                    {
+                        return false;
+                    }
+                    // delete, line 231
+                    slice_del();
+                    break;
+                
+                case 3: 
+                    // (, line 233
+                    // delete, line 233
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        public override bool Stem()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            int v_6;
+            int v_7;
+            int v_8;
+            int v_9;
+            int v_10;
+            // (, line 238
+            // do, line 240
+            v_1 = cursor;
+            do 
+            {
+                // call mark_regions, line 240
+                if (!r_mark_regions())
+                {
+                    goto lab0_brk;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = v_1;
-			// backwards, line 241
-			limit_backward = cursor; cursor = limit;
-			// setlimit, line 241
-			v_2 = limit - cursor;
-			// tomark, line 241
-			if (cursor < I_pV)
-			{
-				return false;
-			}
-			cursor = I_pV;
-			v_3 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_2;
-			// (, line 241
-			// do, line 242
-			v_4 = limit - cursor;
-			do 
-			{
-				// (, line 242
-				// or, line 243
-				do 
-				{
-					v_5 = limit - cursor;
-					do 
-					{
-						// call perfective_gerund, line 243
-						if (!r_perfective_gerund())
-						{
-							goto lab3_brk;
-						}
-						goto lab3_brk;
-					}
-					while (false);
+            
+            cursor = v_1;
+            // backwards, line 241
+            limit_backward = cursor; cursor = limit;
+            // setlimit, line 241
+            v_2 = limit - cursor;
+            // tomark, line 241
+            if (cursor < I_pV)
+            {
+                return false;
+            }
+            cursor = I_pV;
+            v_3 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_2;
+            // (, line 241
+            // do, line 242
+            v_4 = limit - cursor;
+            do 
+            {
+                // (, line 242
+                // or, line 243
+                do 
+                {
+                    v_5 = limit - cursor;
+                    do 
+                    {
+                        // call perfective_gerund, line 243
+                        if (!r_perfective_gerund())
+                        {
+                            goto lab3_brk;
+                        }
+                        goto lab3_brk;
+                    }
+                    while (false);
 
 lab3_brk: ;
-					
-					cursor = limit - v_5;
-					// (, line 244
-					// try, line 244
-					v_6 = limit - cursor;
-					do 
-					{
-						// call reflexive, line 244
-						if (!r_reflexive())
-						{
-							cursor = limit - v_6;
-							goto lab4_brk;
-						}
-					}
-					while (false);
+                    
+                    cursor = limit - v_5;
+                    // (, line 244
+                    // try, line 244
+                    v_6 = limit - cursor;
+                    do 
+                    {
+                        // call reflexive, line 244
+                        if (!r_reflexive())
+                        {
+                            cursor = limit - v_6;
+                            goto lab4_brk;
+                        }
+                    }
+                    while (false);
 
 lab4_brk: ;
-					
-					// or, line 245
-					do 
-					{
-						v_7 = limit - cursor;
-						do 
-						{
-							// call adjectival, line 245
-							if (!r_adjectival())
-							{
-								goto lab6_brk;
-							}
-							goto lab5_brk;
-						}
-						while (false);
+                    
+                    // or, line 245
+                    do 
+                    {
+                        v_7 = limit - cursor;
+                        do 
+                        {
+                            // call adjectival, line 245
+                            if (!r_adjectival())
+                            {
+                                goto lab6_brk;
+                            }
+                            goto lab5_brk;
+                        }
+                        while (false);
 
 lab6_brk: ;
-						
-						cursor = limit - v_7;
-						do 
-						{
-							// call verb, line 245
-							if (!r_verb())
-							{
-								goto lab7_brk;
-							}
-							goto lab5_brk;
-						}
-						while (false);
+                        
+                        cursor = limit - v_7;
+                        do 
+                        {
+                            // call verb, line 245
+                            if (!r_verb())
+                            {
+                                goto lab7_brk;
+                            }
+                            goto lab5_brk;
+                        }
+                        while (false);
 
 lab7_brk: ;
-						
-						cursor = limit - v_7;
-						// call noun, line 245
-						if (!r_noun())
-						{
-							goto lab1_brk;
-						}
-					}
-					while (false);
+                        
+                        cursor = limit - v_7;
+                        // call noun, line 245
+                        if (!r_noun())
+                        {
+                            goto lab1_brk;
+                        }
+                    }
+                    while (false);
 
 lab5_brk: ;
-					
-				}
-				while (false);
+                    
+                }
+                while (false);
 
 lab2_brk: ;
-				
-			}
-			while (false);
+                
+            }
+            while (false);
 
 lab1_brk: ;
 
-			cursor = limit - v_4;
-			// try, line 248
-			v_8 = limit - cursor;
-			do 
-			{
-				// (, line 248
-				// [, line 248
-				ket = cursor;
-				// literal, line 248
-				if (!(eq_s_b(1, "\u00C9")))
-				{
-					cursor = limit - v_8;
-					goto lab8_brk;
-				}
-				// ], line 248
-				bra = cursor;
-				// delete, line 248
-				slice_del();
-			}
-			while (false);
+            cursor = limit - v_4;
+            // try, line 248
+            v_8 = limit - cursor;
+            do 
+            {
+                // (, line 248
+                // [, line 248
+                ket = cursor;
+                // literal, line 248
+                if (!(eq_s_b(1, "\u00C9")))
+                {
+                    cursor = limit - v_8;
+                    goto lab8_brk;
+                }
+                // ], line 248
+                bra = cursor;
+                // delete, line 248
+                slice_del();
+            }
+            while (false);
 
 lab8_brk: ;
-			
-			// do, line 251
-			v_9 = limit - cursor;
-			do 
-			{
-				// call derivational, line 251
-				if (!r_derivational())
-				{
-					goto lab9_brk;
-				}
-			}
-			while (false);
+            
+            // do, line 251
+            v_9 = limit - cursor;
+            do 
+            {
+                // call derivational, line 251
+                if (!r_derivational())
+                {
+                    goto lab9_brk;
+                }
+            }
+            while (false);
 
 lab9_brk: ;
-			
-			cursor = limit - v_9;
-			// do, line 252
-			v_10 = limit - cursor;
-			do 
-			{
-				// call tidy_up, line 252
-				if (!r_tidy_up())
-				{
-					goto lab10_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_9;
+            // do, line 252
+            v_10 = limit - cursor;
+            do 
+            {
+                // call tidy_up, line 252
+                if (!r_tidy_up())
+                {
+                    goto lab10_brk;
+                }
+            }
+            while (false);
 
 lab10_brk: ;
-			
-			cursor = limit - v_10;
-			limit_backward = v_3;
-			cursor = limit_backward; return true;
-		}
-	}
+            
+            cursor = limit - v_10;
+            limit_backward = v_3;
+            cursor = limit_backward; return true;
+        }
+    }
 }
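
That ends the generated stemmer. For orientation, a minimal driving sketch, not part of this commit, assuming the usual SnowballProgram entry points (SetCurrent/Stem/GetCurrent) and KOI8-R-mapped input as noted earlier; koi8Word is a hypothetical variable:

    var stemmer = new RussianStemmer();
    stemmer.SetCurrent(koi8Word);   // koi8Word: input with KOI8-R bytes widened to chars
    string stem = stemmer.Stem() ? stemmer.GetCurrent() : koi8Word;
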


[15/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocumentsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocumentsWriter.cs b/src/core/Index/DocumentsWriter.cs
index 6545d11..c5d9f40 100644
--- a/src/core/Index/DocumentsWriter.cs
+++ b/src/core/Index/DocumentsWriter.cs
@@ -34,171 +34,171 @@ using Weight = Lucene.Net.Search.Weight;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> This class accepts multiple added documents and directly
-	/// writes a single segment file.  It does this more
-	/// efficiently than creating a single segment per document
-	/// (with DocumentWriter) and doing standard merges on those
-	/// segments.
-	/// 
-	/// Each added document is passed to the <see cref="DocConsumer" />,
-	/// which in turn processes the document and interacts with
-	/// other consumers in the indexing chain.  Certain
-	/// consumers, like <see cref="StoredFieldsWriter" /> and <see cref="TermVectorsTermsWriter" />
-	///, digest a document and
-	/// immediately write bytes to the "doc store" files (ie,
-	/// they do not consume RAM per document, except while they
-	/// are processing the document).
-	/// 
-	/// Other consumers, eg <see cref="FreqProxTermsWriter" /> and
-	/// <see cref="NormsWriter" />, buffer bytes in RAM and flush only
-	/// when a new segment is produced.
-	/// Once we have used our allowed RAM buffer, or the number
-	/// of added docs is large enough (in the case we are
-	/// flushing by doc count instead of RAM usage), we create a
-	/// real segment and flush it to the Directory.
-	/// 
-	/// Threads:
-	/// 
-	/// Multiple threads are allowed into addDocument at once.
-	/// There is an initial synchronized call to getThreadState
-	/// which allocates a ThreadState for this thread.  The same
-	/// thread will get the same ThreadState over time (thread
-	/// affinity) so that if there are consistent patterns (for
-	/// example each thread is indexing a different content
-	/// source) then we make better use of RAM.  Then
-	/// processDocument is called on that ThreadState without
-	/// synchronization (most of the "heavy lifting" is in this
-	/// call).  Finally the synchronized "finishDocument" is
-	/// called to flush changes to the directory.
-	/// 
-	/// When flush is called by IndexWriter we forcefully idle 
-	/// all threads and flush only once they are all idle.  This
-	/// means you can call flush with a given thread even while
-	/// other threads are actively adding/deleting documents.
-	/// 
-	/// 
-	/// Exceptions:
-	/// 
-	/// Because this class directly updates in-memory posting
-	/// lists, and flushes stored fields and term vectors
-	/// directly to files in the directory, there are certain
-	/// limited times when an exception can corrupt this state.
-	/// For example, a disk full while flushing stored fields
-	/// leaves this file in a corrupt state.  Or, an OOM
-	/// exception while appending to the in-memory posting lists
-	/// can corrupt that posting list.  We call such exceptions
-	/// "aborting exceptions".  In these cases we must call
-	/// abort() to discard all docs added since the last flush.
-	/// 
-	/// All other exceptions ("non-aborting exceptions") can
-	/// still partially update the index structures.  These
-	/// updates are consistent, but, they represent only a part
-	/// of the document seen up until the exception was hit.
-	/// When this happens, we immediately mark the document as
-	/// deleted so that the document is always atomically ("all
-	/// or none") added to the index.
-	/// </summary>
-	
-	public sealed class DocumentsWriter : IDisposable
-	{
-		internal class AnonymousClassIndexingChain:IndexingChain
-		{
-			
-			internal override DocConsumer GetChain(DocumentsWriter documentsWriter)
-			{
-				/*
-				This is the current indexing chain:
-				
-				DocConsumer / DocConsumerPerThread
-				--> code: DocFieldProcessor / DocFieldProcessorPerThread
-				--> DocFieldConsumer / DocFieldConsumerPerThread / DocFieldConsumerPerField
-				--> code: DocFieldConsumers / DocFieldConsumersPerThread / DocFieldConsumersPerField
-				--> code: DocInverter / DocInverterPerThread / DocInverterPerField
-				--> InvertedDocConsumer / InvertedDocConsumerPerThread / InvertedDocConsumerPerField
-				--> code: TermsHash / TermsHashPerThread / TermsHashPerField
-				--> TermsHashConsumer / TermsHashConsumerPerThread / TermsHashConsumerPerField
-				--> code: FreqProxTermsWriter / FreqProxTermsWriterPerThread / FreqProxTermsWriterPerField
-				--> code: TermVectorsTermsWriter / TermVectorsTermsWriterPerThread / TermVectorsTermsWriterPerField
-				--> InvertedDocEndConsumer / InvertedDocConsumerPerThread / InvertedDocConsumerPerField
-				--> code: NormsWriter / NormsWriterPerThread / NormsWriterPerField
-				--> code: StoredFieldsWriter / StoredFieldsWriterPerThread / StoredFieldsWriterPerField
-				*/
-				
-				// Build up indexing chain:
-				
-				TermsHashConsumer termVectorsWriter = new TermVectorsTermsWriter(documentsWriter);
-				TermsHashConsumer freqProxWriter = new FreqProxTermsWriter();
-				
-				InvertedDocConsumer termsHash = new TermsHash(documentsWriter, true, freqProxWriter, new TermsHash(documentsWriter, false, termVectorsWriter, null));
-				NormsWriter normsWriter = new NormsWriter();
-				DocInverter docInverter = new DocInverter(termsHash, normsWriter);
-				return new DocFieldProcessor(documentsWriter, docInverter);
-			}
-		}
-		private void  InitBlock()
-		{
-			maxFieldLength = IndexWriter.DEFAULT_MAX_FIELD_LENGTH;
-			maxBufferedDeleteTerms = IndexWriter.DEFAULT_MAX_BUFFERED_DELETE_TERMS;
-			ramBufferSize = (long) (IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB * 1024 * 1024);
-			waitQueuePauseBytes = (long) (ramBufferSize * 0.1);
-			waitQueueResumeBytes = (long) (ramBufferSize * 0.05);
-			freeTrigger = (long) (IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB * 1024 * 1024 * 1.05);
-			freeLevel = (long) (IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB * 1024 * 1024 * 0.95);
-			maxBufferedDocs = IndexWriter.DEFAULT_MAX_BUFFERED_DOCS;
-			skipDocWriter = new SkipDocWriter();
+    
+    /// <summary> This class accepts multiple added documents and directly
+    /// writes a single segment file.  It does this more
+    /// efficiently than creating a single segment per document
+    /// (with DocumentWriter) and doing standard merges on those
+    /// segments.
+    /// 
+    /// Each added document is passed to the <see cref="DocConsumer" />,
+    /// which in turn processes the document and interacts with
+    /// other consumers in the indexing chain.  Certain
+    /// consumers, like <see cref="StoredFieldsWriter" /> and <see cref="TermVectorsTermsWriter" />
+    ///, digest a document and
+    /// immediately write bytes to the "doc store" files (ie,
+    /// they do not consume RAM per document, except while they
+    /// are processing the document).
+    /// 
+    /// Other consumers, eg <see cref="FreqProxTermsWriter" /> and
+    /// <see cref="NormsWriter" />, buffer bytes in RAM and flush only
+    /// when a new segment is produced.
+    /// Once we have used our allowed RAM buffer, or the number
+    /// of added docs is large enough (in the case we are
+    /// flushing by doc count instead of RAM usage), we create a
+    /// real segment and flush it to the Directory.
+    /// 
+    /// Threads:
+    /// 
+    /// Multiple threads are allowed into addDocument at once.
+    /// There is an initial synchronized call to getThreadState
+    /// which allocates a ThreadState for this thread.  The same
+    /// thread will get the same ThreadState over time (thread
+    /// affinity) so that if there are consistent patterns (for
+    /// example each thread is indexing a different content
+    /// source) then we make better use of RAM.  Then
+    /// processDocument is called on that ThreadState without
+    /// synchronization (most of the "heavy lifting" is in this
+    /// call).  Finally the synchronized "finishDocument" is
+    /// called to flush changes to the directory.
+    /// 
+    /// When flush is called by IndexWriter we forcefully idle 
+    /// all threads and flush only once they are all idle.  This
+    /// means you can call flush with a given thread even while
+    /// other threads are actively adding/deleting documents.
+    /// 
+    /// 
+    /// Exceptions:
+    /// 
+    /// Because this class directly updates in-memory posting
+    /// lists, and flushes stored fields and term vectors
+    /// directly to files in the directory, there are certain
+    /// limited times when an exception can corrupt this state.
+    /// For example, a disk full while flushing stored fields
+    /// leaves this file in a corrupt state.  Or, an OOM
+    /// exception while appending to the in-memory posting lists
+    /// can corrupt that posting list.  We call such exceptions
+    /// "aborting exceptions".  In these cases we must call
+    /// abort() to discard all docs added since the last flush.
+    /// 
+    /// All other exceptions ("non-aborting exceptions") can
+    /// still partially update the index structures.  These
+    /// updates are consistent, but, they represent only a part
+    /// of the document seen up until the exception was hit.
+    /// When this happens, we immediately mark the document as
+    /// deleted so that the document is always atomically ("all
+    /// or none") added to the index.
+    /// </summary>
+    
+    public sealed class DocumentsWriter : IDisposable
+    {
+        internal class AnonymousClassIndexingChain:IndexingChain
+        {
+            
+            internal override DocConsumer GetChain(DocumentsWriter documentsWriter)
+            {
+                /*
+                This is the current indexing chain:
+                
+                DocConsumer / DocConsumerPerThread
+                --> code: DocFieldProcessor / DocFieldProcessorPerThread
+                --> DocFieldConsumer / DocFieldConsumerPerThread / DocFieldConsumerPerField
+                --> code: DocFieldConsumers / DocFieldConsumersPerThread / DocFieldConsumersPerField
+                --> code: DocInverter / DocInverterPerThread / DocInverterPerField
+                --> InvertedDocConsumer / InvertedDocConsumerPerThread / InvertedDocConsumerPerField
+                --> code: TermsHash / TermsHashPerThread / TermsHashPerField
+                --> TermsHashConsumer / TermsHashConsumerPerThread / TermsHashConsumerPerField
+                --> code: FreqProxTermsWriter / FreqProxTermsWriterPerThread / FreqProxTermsWriterPerField
+                --> code: TermVectorsTermsWriter / TermVectorsTermsWriterPerThread / TermVectorsTermsWriterPerField
+                --> InvertedDocEndConsumer / InvertedDocConsumerPerThread / InvertedDocConsumerPerField
+                --> code: NormsWriter / NormsWriterPerThread / NormsWriterPerField
+                --> code: StoredFieldsWriter / StoredFieldsWriterPerThread / StoredFieldsWriterPerField
+                */
+                
+                // Build up indexing chain:
+                
+                TermsHashConsumer termVectorsWriter = new TermVectorsTermsWriter(documentsWriter);
+                TermsHashConsumer freqProxWriter = new FreqProxTermsWriter();
+                
+                InvertedDocConsumer termsHash = new TermsHash(documentsWriter, true, freqProxWriter, new TermsHash(documentsWriter, false, termVectorsWriter, null));
+                NormsWriter normsWriter = new NormsWriter();
+                DocInverter docInverter = new DocInverter(termsHash, normsWriter);
+                return new DocFieldProcessor(documentsWriter, docInverter);
+            }
+        }
+        private void  InitBlock()
+        {
+            maxFieldLength = IndexWriter.DEFAULT_MAX_FIELD_LENGTH;
+            maxBufferedDeleteTerms = IndexWriter.DEFAULT_MAX_BUFFERED_DELETE_TERMS;
+            ramBufferSize = (long) (IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB * 1024 * 1024);
+            waitQueuePauseBytes = (long) (ramBufferSize * 0.1);
+            waitQueueResumeBytes = (long) (ramBufferSize * 0.05);
+            freeTrigger = (long) (IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB * 1024 * 1024 * 1.05);
+            freeLevel = (long) (IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB * 1024 * 1024 * 0.95);
+            maxBufferedDocs = IndexWriter.DEFAULT_MAX_BUFFERED_DOCS;
+            skipDocWriter = new SkipDocWriter();
             byteBlockAllocator = new ByteBlockAllocator(this, DocumentsWriter.BYTE_BLOCK_SIZE);
             perDocAllocator = new ByteBlockAllocator(this,DocumentsWriter.PER_DOC_BLOCK_SIZE);
-			waitQueue = new WaitQueue(this);
-		}
-		
-		internal IndexWriter writer;
-		internal Directory directory;
-		
-		internal System.String segment;             // Current segment we are working on
-		private System.String docStoreSegment;      // Current doc-store segment we are writing
-		private int docStoreOffset;                 // Current starting doc-store offset of current segment
-		
-		private int nextDocID;                      // Next docID to be added
-		private int numDocsInRAM;                   // # docs buffered in RAM
-		internal int numDocsInStore;                // # docs written to doc stores
-		
-		// Max # ThreadState instances; if there are more threads
-		// than this they share ThreadStates
-		private const int MAX_THREAD_STATE = 5;
-		private DocumentsWriterThreadState[] threadStates = new DocumentsWriterThreadState[0];
+            waitQueue = new WaitQueue(this);
+        }
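
For concreteness, taking the 16 MB default of this era as an assumption (IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB = 16.0), the thresholds InitBlock derives work out to:

    long ramBufferSize        = (long) (16.0 * 1024 * 1024);        // 16,777,216 bytes
    long waitQueuePauseBytes  = (long) (ramBufferSize * 0.1);       //  1,677,721: stall producer threads
    long waitQueueResumeBytes = (long) (ramBufferSize * 0.05);      //    838,860: let them resume
    long freeTrigger          = (long) (16.0 * 1024 * 1024 * 1.05); // 17,616,076: start freeing buffers
    long freeLevel            = (long) (16.0 * 1024 * 1024 * 0.95); // 15,938,355: stop freeing
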
+        
+        internal IndexWriter writer;
+        internal Directory directory;
+        
+        internal System.String segment;             // Current segment we are working on
+        private System.String docStoreSegment;      // Current doc-store segment we are writing
+        private int docStoreOffset;                 // Current starting doc-store offset of current segment
+        
+        private int nextDocID;                      // Next docID to be added
+        private int numDocsInRAM;                   // # docs buffered in RAM
+        internal int numDocsInStore;                // # docs written to doc stores
+        
+        // Max # ThreadState instances; if there are more threads
+        // than this they share ThreadStates
+        private const int MAX_THREAD_STATE = 5;
+        private DocumentsWriterThreadState[] threadStates = new DocumentsWriterThreadState[0];
         private HashMap<ThreadClass, DocumentsWriterThreadState> threadBindings = new HashMap<ThreadClass, DocumentsWriterThreadState>();
-		
-		private int pauseThreads; // Non-zero when we need all threads to
-		// pause (eg to flush)
-		internal bool flushPending; // True when a thread has decided to flush
-		internal bool bufferIsFull; // True when it's time to write segment
-		private bool aborting; // True if an abort is pending
-		
-		private DocFieldProcessor docFieldProcessor;
-		
-		internal System.IO.StreamWriter infoStream;
-		internal int maxFieldLength;
-		internal Similarity similarity;
-		
-		internal IList<string> newFiles;
-		
-		internal class DocState
-		{
-			internal DocumentsWriter docWriter;
-			internal Analyzer analyzer;
-			internal int maxFieldLength;
-			internal System.IO.StreamWriter infoStream;
-			internal Similarity similarity;
-			internal int docID;
-			internal Document doc;
-			internal System.String maxTermPrefix;
-			
-			// Only called by asserts
-			public bool TestPoint(System.String name)
-			{
-				return docWriter.writer.TestPoint(name);
-			}
+        
+        private int pauseThreads; // Non-zero when we need all threads to
+        // pause (eg to flush)
+        internal bool flushPending; // True when a thread has decided to flush
+        internal bool bufferIsFull; // True when it's time to write segment
+        private bool aborting; // True if an abort is pending
+        
+        private DocFieldProcessor docFieldProcessor;
+        
+        internal System.IO.StreamWriter infoStream;
+        internal int maxFieldLength;
+        internal Similarity similarity;
+        
+        internal IList<string> newFiles;
+        
+        internal class DocState
+        {
+            internal DocumentsWriter docWriter;
+            internal Analyzer analyzer;
+            internal int maxFieldLength;
+            internal System.IO.StreamWriter infoStream;
+            internal Similarity similarity;
+            internal int docID;
+            internal Document doc;
+            internal System.String maxTermPrefix;
+            
+            // Only called by asserts
+            public bool TestPoint(System.String name)
+            {
+                return docWriter.writer.TestPoint(name);
+            }
 
             public void Clear()
             {
@@ -207,26 +207,26 @@ namespace Lucene.Net.Index
                 doc = null;
                 analyzer = null;
             }
-		}
-		
-		/// <summary>Consumer returns this on each doc.  This holds any
-		/// state that must be flushed synchronized "in docID
-		/// order".  We gather these and flush them in order. 
-		/// </summary>
-		internal abstract class DocWriter
-		{
-			internal DocWriter next;
-			internal int docID;
-			public abstract void  Finish();
-			public abstract void  Abort();
-			public abstract long SizeInBytes();
-			
-			internal void  SetNext(DocWriter next)
-			{
-				this.next = next;
-			}
-		}
-		
+        }
+        
+        /// <summary>Consumer returns this on each doc.  This holds any
+        /// state that must be flushed synchronized "in docID
+        /// order".  We gather these and flush them in order. 
+        /// </summary>
+        internal abstract class DocWriter
+        {
+            internal DocWriter next;
+            internal int docID;
+            public abstract void  Finish();
+            public abstract void  Abort();
+            public abstract long SizeInBytes();
+            
+            internal void  SetNext(DocWriter next)
+            {
+                this.next = next;
+            }
+        }
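
A hypothetical minimal subclass, only to make the DocWriter contract concrete; real implementations (the stored-fields and term-vectors writers) buffer per-document bytes and release them in Finish/Abort:

    // Illustrative only; not a class that exists in this commit.
    internal sealed class NoOpDocWriter : DocumentsWriter.DocWriter
    {
        public override void Finish()      { /* write this doc's pending state, in docID order */ }
        public override void Abort()       { /* discard this doc's pending state */ }
        public override long SizeInBytes() { return 0; /* bytes held until Finish/Abort */ }
    }
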
+        
         /*
         * Create and return a new DocWriterBuffer.
         */
@@ -276,595 +276,595 @@ namespace Lucene.Net.Index
             }
         }
 
-		/// <summary> The IndexingChain must define the <see cref="GetChain(DocumentsWriter)" /> method
-		/// which returns the DocConsumer that the DocumentsWriter calls to process the
-		/// documents. 
-		/// </summary>
-		internal abstract class IndexingChain
-		{
-			internal abstract DocConsumer GetChain(DocumentsWriter documentsWriter);
-		}
-		
-		internal static readonly IndexingChain DefaultIndexingChain;
-		
-		internal DocConsumer consumer;
-		
-		// Deletes done after the last flush; these are discarded
-		// on abort
-		private BufferedDeletes deletesInRAM = new BufferedDeletes(false);
-		
-		// Deletes done before the last flush; these are still
-		// kept on abort
-		private BufferedDeletes deletesFlushed = new BufferedDeletes(true);
-		
-		// The max number of delete terms that can be buffered before
-		// they must be flushed to disk.
-		private int maxBufferedDeleteTerms;
-		
-		// How much RAM we can use before flushing.  This is 0 if
-		// we are flushing by doc count instead.
-		private long ramBufferSize;
-		private long waitQueuePauseBytes;
-		private long waitQueueResumeBytes;
-		
-		// If we've allocated 5% over our RAM budget, we then
-		// free down to 95%
-		private long freeTrigger;
-		private long freeLevel;
-		
-		// Flush @ this number of docs.  If ramBufferSize is
-		// non-zero we will flush by RAM usage instead.
-		private int maxBufferedDocs;
-		
-		private int flushedDocCount; // How many docs already flushed to index
-		
-		internal void  UpdateFlushedDocCount(int n)
-		{
-			lock (this)
-			{
-				flushedDocCount += n;
-			}
-		}
-		internal int GetFlushedDocCount()
-		{
-			lock (this)
-			{
-				return flushedDocCount;
-			}
-		}
-		internal void  SetFlushedDocCount(int n)
-		{
-			lock (this)
-			{
-				flushedDocCount = n;
-			}
-		}
-		
-		private bool closed;
-		
-		internal DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain)
-		{
-			InitBlock();
-			this.directory = directory;
-			this.writer = writer;
-			this.similarity = writer.Similarity;
-			flushedDocCount = writer.MaxDoc();
-			
-			consumer = indexingChain.GetChain(this);
-			if (consumer is DocFieldProcessor)
-			{
-				docFieldProcessor = (DocFieldProcessor) consumer;
-			}
-		}
-		
-		/// <summary>Returns true if any of the fields in the current
-		/// buffered docs have omitTermFreqAndPositions==false 
-		/// </summary>
-		internal bool HasProx()
-		{
-			return (docFieldProcessor != null)?docFieldProcessor.fieldInfos.HasProx():true;
-		}
-		
-		/// <summary>If non-null, various details of indexing are printed
-		/// here. 
-		/// </summary>
-		internal void  SetInfoStream(System.IO.StreamWriter infoStream)
-		{
-			lock (this)
-			{
-				this.infoStream = infoStream;
-				for (int i = 0; i < threadStates.Length; i++)
-					threadStates[i].docState.infoStream = infoStream;
-			}
-		}
-		
-		internal void  SetMaxFieldLength(int maxFieldLength)
-		{
-			lock (this)
-			{
-				this.maxFieldLength = maxFieldLength;
-				for (int i = 0; i < threadStates.Length; i++)
-					threadStates[i].docState.maxFieldLength = maxFieldLength;
-			}
-		}
-		
-		internal void  SetSimilarity(Similarity similarity)
-		{
-			lock (this)
-			{
-				this.similarity = similarity;
-				for (int i = 0; i < threadStates.Length; i++)
-					threadStates[i].docState.similarity = similarity;
-			}
-		}
-		
-		/// <summary>Set how much RAM we can use before flushing. </summary>
-		internal void  SetRAMBufferSizeMB(double mb)
-		{
-			lock (this)
-			{
-				if (mb == IndexWriter.DISABLE_AUTO_FLUSH)
-				{
-					ramBufferSize = IndexWriter.DISABLE_AUTO_FLUSH;
-					waitQueuePauseBytes = 4 * 1024 * 1024;
-					waitQueueResumeBytes = 2 * 1024 * 1024;
-				}
-				else
-				{
-					ramBufferSize = (long) (mb * 1024 * 1024);
-					waitQueuePauseBytes = (long) (ramBufferSize * 0.1);
-					waitQueueResumeBytes = (long) (ramBufferSize * 0.05);
-					freeTrigger = (long) (1.05 * ramBufferSize);
-					freeLevel = (long) (0.95 * ramBufferSize);
-				}
-			}
-		}
-		
-		internal double GetRAMBufferSizeMB()
-		{
-			lock (this)
-			{
-				if (ramBufferSize == IndexWriter.DISABLE_AUTO_FLUSH)
-				{
-					return ramBufferSize;
-				}
-				else
-				{
-					return ramBufferSize / 1024.0 / 1024.0;
-				}
-			}
-		}
+        /// <summary> The IndexingChain must define the <see cref="GetChain(DocumentsWriter)" /> method
+        /// which returns the DocConsumer that the DocumentsWriter calls to process the
+        /// documents. 
+        /// </summary>
+        internal abstract class IndexingChain
+        {
+            internal abstract DocConsumer GetChain(DocumentsWriter documentsWriter);
+        }
+        
+        internal static readonly IndexingChain DefaultIndexingChain;
+        
+        internal DocConsumer consumer;
+        
+        // Deletes done after the last flush; these are discarded
+        // on abort
+        private BufferedDeletes deletesInRAM = new BufferedDeletes(false);
+        
+        // Deletes done before the last flush; these are still
+        // kept on abort
+        private BufferedDeletes deletesFlushed = new BufferedDeletes(true);
+        
+        // The max number of delete terms that can be buffered before
+        // they must be flushed to disk.
+        private int maxBufferedDeleteTerms;
+        
+        // How much RAM we can use before flushing.  This is 0 if
+        // we are flushing by doc count instead.
+        private long ramBufferSize;
+        private long waitQueuePauseBytes;
+        private long waitQueueResumeBytes;
+        
+        // If we've allocated 5% over our RAM budget, we then
+        // free down to 95%
+        private long freeTrigger;
+        private long freeLevel;
+        
+        // Flush @ this number of docs.  If ramBufferSize is
+        // non-zero we will flush by RAM usage instead.
+        private int maxBufferedDocs;
+        
+        private int flushedDocCount; // How many docs already flushed to index
+        
+        internal void  UpdateFlushedDocCount(int n)
+        {
+            lock (this)
+            {
+                flushedDocCount += n;
+            }
+        }
+        internal int GetFlushedDocCount()
+        {
+            lock (this)
+            {
+                return flushedDocCount;
+            }
+        }
+        internal void  SetFlushedDocCount(int n)
+        {
+            lock (this)
+            {
+                flushedDocCount = n;
+            }
+        }
+        
+        private bool closed;
+        
+        internal DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain)
+        {
+            InitBlock();
+            this.directory = directory;
+            this.writer = writer;
+            this.similarity = writer.Similarity;
+            flushedDocCount = writer.MaxDoc();
+            
+            consumer = indexingChain.GetChain(this);
+            if (consumer is DocFieldProcessor)
+            {
+                docFieldProcessor = (DocFieldProcessor) consumer;
+            }
+        }
+        
+        /// <summary>Returns true if any of the fields in the current
+        /// buffered docs have omitTermFreqAndPositions==false 
+        /// </summary>
+        internal bool HasProx()
+        {
+            return (docFieldProcessor != null)?docFieldProcessor.fieldInfos.HasProx():true;
+        }
+        
+        /// <summary>If non-null, various details of indexing are printed
+        /// here. 
+        /// </summary>
+        internal void  SetInfoStream(System.IO.StreamWriter infoStream)
+        {
+            lock (this)
+            {
+                this.infoStream = infoStream;
+                for (int i = 0; i < threadStates.Length; i++)
+                    threadStates[i].docState.infoStream = infoStream;
+            }
+        }
+        
+        internal void  SetMaxFieldLength(int maxFieldLength)
+        {
+            lock (this)
+            {
+                this.maxFieldLength = maxFieldLength;
+                for (int i = 0; i < threadStates.Length; i++)
+                    threadStates[i].docState.maxFieldLength = maxFieldLength;
+            }
+        }
+        
+        internal void  SetSimilarity(Similarity similarity)
+        {
+            lock (this)
+            {
+                this.similarity = similarity;
+                for (int i = 0; i < threadStates.Length; i++)
+                    threadStates[i].docState.similarity = similarity;
+            }
+        }
+        
+        /// <summary>Set how much RAM we can use before flushing. </summary>
+        internal void  SetRAMBufferSizeMB(double mb)
+        {
+            lock (this)
+            {
+                if (mb == IndexWriter.DISABLE_AUTO_FLUSH)
+                {
+                    ramBufferSize = IndexWriter.DISABLE_AUTO_FLUSH;
+                    waitQueuePauseBytes = 4 * 1024 * 1024;
+                    waitQueueResumeBytes = 2 * 1024 * 1024;
+                }
+                else
+                {
+                    ramBufferSize = (long) (mb * 1024 * 1024);
+                    waitQueuePauseBytes = (long) (ramBufferSize * 0.1);
+                    waitQueueResumeBytes = (long) (ramBufferSize * 0.05);
+                    freeTrigger = (long) (1.05 * ramBufferSize);
+                    freeLevel = (long) (0.95 * ramBufferSize);
+                }
+            }
+        }
+        
+        internal double GetRAMBufferSizeMB()
+        {
+            lock (this)
+            {
+                if (ramBufferSize == IndexWriter.DISABLE_AUTO_FLUSH)
+                {
+                    return ramBufferSize;
+                }
+                else
+                {
+                    return ramBufferSize / 1024.0 / 1024.0;
+                }
+            }
+        }
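
For reference, the arithmetic in SetRAMBufferSizeMB above works out as follows for a hypothetical 16 MB buffer (a sketch; the values are illustrative, not defaults):

    // Thresholds derived from a 16 MB RAM buffer (values in bytes):
    long ramBufferSize        = (long) (16.0 * 1024 * 1024);   // 16,777,216
    long waitQueuePauseBytes  = (long) (ramBufferSize * 0.1);  //  1,677,721 - pause producers
    long waitQueueResumeBytes = (long) (ramBufferSize * 0.05); //    838,860 - resume them
    long freeTrigger          = (long) (1.05 * ramBufferSize); // 17,616,076 - start freeing
    long freeLevel            = (long) (0.95 * ramBufferSize); // 15,938,355 - free down to here
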
 
-	    /// <summary>Gets or sets max buffered docs, which means we will flush by
-	    /// doc count instead of by RAM usage. 
-	    /// </summary>
-	    internal int MaxBufferedDocs
-	    {
-	        get { return maxBufferedDocs; }
-	        set { maxBufferedDocs = value; }
-	    }
+        /// <summary>Gets or sets max buffered docs, which means we will flush by
+        /// doc count instead of by RAM usage. 
+        /// </summary>
+        internal int MaxBufferedDocs
+        {
+            get { return maxBufferedDocs; }
+            set { maxBufferedDocs = value; }
+        }
 
-	    /// <summary>Get current segment name we are writing. </summary>
-	    internal string Segment
-	    {
-	        get { return segment; }
-	    }
+        /// <summary>Get current segment name we are writing. </summary>
+        internal string Segment
+        {
+            get { return segment; }
+        }
 
-	    /// <summary>Returns how many docs are currently buffered in RAM. </summary>
-	    internal int NumDocsInRAM
-	    {
-	        get { return numDocsInRAM; }
-	    }
+        /// <summary>Returns how many docs are currently buffered in RAM. </summary>
+        internal int NumDocsInRAM
+        {
+            get { return numDocsInRAM; }
+        }
 
-	    /// <summary>Returns the current doc store segment we are writing
-	    /// to. 
-	    /// </summary>
-	    internal string DocStoreSegment
-	    {
-	        get
-	        {
-	            lock (this)
-	            {
-	                return docStoreSegment;
-	            }
-	        }
-	    }
+        /// <summary>Returns the current doc store segment we are writing
+        /// to. 
+        /// </summary>
+        internal string DocStoreSegment
+        {
+            get
+            {
+                lock (this)
+                {
+                    return docStoreSegment;
+                }
+            }
+        }
 
-	    /// <summary>Returns the doc offset into the shared doc store for
-	    /// the current buffered docs. 
-	    /// </summary>
-	    internal int DocStoreOffset
-	    {
-	        get { return docStoreOffset; }
-	    }
+        /// <summary>Returns the doc offset into the shared doc store for
+        /// the current buffered docs. 
+        /// </summary>
+        internal int DocStoreOffset
+        {
+            get { return docStoreOffset; }
+        }
 
-	    /// <summary>Closes the currently open doc stores and returns the doc
-		/// store segment name.  This returns null if there are
-		/// no buffered documents. 
-		/// </summary>
-		internal System.String CloseDocStore()
-		{
-			lock (this)
-			{
-				
-				System.Diagnostics.Debug.Assert(AllThreadsIdle());
-				
-				if (infoStream != null)
-					Message("closeDocStore: " + openFiles.Count + " files to flush to segment " + docStoreSegment + " numDocs=" + numDocsInStore);
-				
-				bool success = false;
-				
-				try
-				{
-					InitFlushState(true);
-					closedFiles.Clear();
-					
-					consumer.CloseDocStore(flushState);
-					System.Diagnostics.Debug.Assert(0 == openFiles.Count);
-					
-					System.String s = docStoreSegment;
-					docStoreSegment = null;
-					docStoreOffset = 0;
-					numDocsInStore = 0;
-					success = true;
-					return s;
-				}
-				finally
-				{
-					if (!success)
-					{
-						Abort();
-					}
-				}
-			}
-		}
-		
-		private ICollection<string> abortedFiles; // List of files that were written before last abort()
-		
-		private SegmentWriteState flushState;
+        /// <summary>Closes the currently open doc stores and returns the doc
+        /// store segment name.  This returns null if there are
+        /// no buffered documents. 
+        /// </summary>
+        internal System.String CloseDocStore()
+        {
+            lock (this)
+            {
+                
+                System.Diagnostics.Debug.Assert(AllThreadsIdle());
+                
+                if (infoStream != null)
+                    Message("closeDocStore: " + openFiles.Count + " files to flush to segment " + docStoreSegment + " numDocs=" + numDocsInStore);
+                
+                bool success = false;
+                
+                try
+                {
+                    InitFlushState(true);
+                    closedFiles.Clear();
+                    
+                    consumer.CloseDocStore(flushState);
+                    System.Diagnostics.Debug.Assert(0 == openFiles.Count);
+                    
+                    System.String s = docStoreSegment;
+                    docStoreSegment = null;
+                    docStoreOffset = 0;
+                    numDocsInStore = 0;
+                    success = true;
+                    return s;
+                }
+                finally
+                {
+                    if (!success)
+                    {
+                        Abort();
+                    }
+                }
+            }
+        }
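
CloseDocStore above (and Flush further down) both rely on the same success-flag idiom: set success only as the last statement of the try block, and treat every other exit as a failure that must abort. A minimal standalone sketch, with DoRiskyWrite() as a hypothetical stand-in for the real work:

    bool success = false;
    try
    {
        DoRiskyWrite(); // hypothetical: any exception skips the next line
        success = true;
    }
    finally
    {
        if (!success)
            Abort();    // discard buffered docs rather than leave torn state
    }
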
+        
+        private ICollection<string> abortedFiles; // List of files that were written before last abort()
+        
+        private SegmentWriteState flushState;
 
         internal ICollection<string> AbortedFiles()
-		{
-			return abortedFiles;
-		}
-		
-		internal void  Message(System.String message)
-		{
-			if (infoStream != null)
-				writer.Message("DW: " + message);
-		}
+        {
+            return abortedFiles;
+        }
+        
+        internal void  Message(System.String message)
+        {
+            if (infoStream != null)
+                writer.Message("DW: " + message);
+        }
 
         internal IList<string> openFiles = new List<string>();
         internal IList<string> closedFiles = new List<string>();
-		
-		/* Returns Collection of files in use by this instance,
-		* including any flushed segments. */
-		internal IList<string> OpenFiles()
-		{
-			lock (this)
-			{
+        
+        /* Returns Collection of files in use by this instance,
+        * including any flushed segments. */
+        internal IList<string> OpenFiles()
+        {
+            lock (this)
+            {
                 // ToArray returns a copy
-			    return openFiles.ToArray();
-			}
-		}
-		
-		internal IList<string> ClosedFiles()
-		{
+                return openFiles.ToArray();
+            }
+        }
+        
+        internal IList<string> ClosedFiles()
+        {
             lock (this)
             {
                 // ToArray returns a copy
                 return closedFiles.ToArray();
             }
-		}
-		
-		internal void  AddOpenFile(System.String name)
-		{
-			lock (this)
-			{
-				System.Diagnostics.Debug.Assert(!openFiles.Contains(name));
-				openFiles.Add(name);
-			}
-		}
-		
-		internal void  RemoveOpenFile(System.String name)
-		{
-			lock (this)
-			{
-				System.Diagnostics.Debug.Assert(openFiles.Contains(name));
-				openFiles.Remove(name);
-				closedFiles.Add(name);
-			}
-		}
-		
-		internal void  SetAborting()
-		{
-			lock (this)
-			{
-				aborting = true;
-			}
-		}
-		
-		/// <summary>Called if we hit an exception at a bad time (when
-		/// updating the index files) and must discard all
-		/// currently buffered docs.  This resets our state,
-		/// discarding any docs added since last flush. 
-		/// </summary>
-		internal void  Abort()
-		{
-			lock (this)
-			{
-				try
-				{
+        }
+        
+        internal void  AddOpenFile(System.String name)
+        {
+            lock (this)
+            {
+                System.Diagnostics.Debug.Assert(!openFiles.Contains(name));
+                openFiles.Add(name);
+            }
+        }
+        
+        internal void  RemoveOpenFile(System.String name)
+        {
+            lock (this)
+            {
+                System.Diagnostics.Debug.Assert(openFiles.Contains(name));
+                openFiles.Remove(name);
+                closedFiles.Add(name);
+            }
+        }
+        
+        internal void  SetAborting()
+        {
+            lock (this)
+            {
+                aborting = true;
+            }
+        }
+        
+        /// <summary>Called if we hit an exception at a bad time (when
+        /// updating the index files) and must discard all
+        /// currently buffered docs.  This resets our state,
+        /// discarding any docs added since last flush. 
+        /// </summary>
+        internal void  Abort()
+        {
+            lock (this)
+            {
+                try
+                {
                     if (infoStream != null)
                     {
                         Message("docWriter: now abort");
                     }
 
-				    // Forcefully remove waiting ThreadStates from line
-					waitQueue.Abort();
-					
-					// Wait for all other threads to finish with
-					// DocumentsWriter:
-					PauseAllThreads();
-					
-					try
-					{
-						
-						System.Diagnostics.Debug.Assert(0 == waitQueue.numWaiting);
-						
-						waitQueue.waitingBytes = 0;
-						
-						try
-						{
-							abortedFiles = OpenFiles();
-						}
-						catch (System.Exception)
-						{
-							abortedFiles = null;
-						}
-						
-						deletesInRAM.Clear();
+                    // Forcefully remove waiting ThreadStates from line
+                    waitQueue.Abort();
+                    
+                    // Wait for all other threads to finish with
+                    // DocumentsWriter:
+                    PauseAllThreads();
+                    
+                    try
+                    {
+                        
+                        System.Diagnostics.Debug.Assert(0 == waitQueue.numWaiting);
+                        
+                        waitQueue.waitingBytes = 0;
+                        
+                        try
+                        {
+                            abortedFiles = OpenFiles();
+                        }
+                        catch (System.Exception)
+                        {
+                            abortedFiles = null;
+                        }
+                        
+                        deletesInRAM.Clear();
                         deletesFlushed.Clear();
-						openFiles.Clear();
-						
-						for (int i = 0; i < threadStates.Length; i++)
-							try
-							{
-								threadStates[i].consumer.Abort();
-							}
-							catch (System.Exception)
-							{
-							}
-						
-						try
-						{
-							consumer.Abort();
-						}
-						catch (System.Exception)
-						{
-						}
-						
-						docStoreSegment = null;
-						numDocsInStore = 0;
-						docStoreOffset = 0;
-						
-						// Reset all postings data
-						DoAfterFlush();
-					}
-					finally
-					{
-						ResumeAllThreads();
-					}
-				}
-				finally
-				{
-					aborting = false;
-					System.Threading.Monitor.PulseAll(this);
+                        openFiles.Clear();
+                        
+                        for (int i = 0; i < threadStates.Length; i++)
+                            try
+                            {
+                                threadStates[i].consumer.Abort();
+                            }
+                            catch (System.Exception)
+                            {
+                            }
+                        
+                        try
+                        {
+                            consumer.Abort();
+                        }
+                        catch (System.Exception)
+                        {
+                        }
+                        
+                        docStoreSegment = null;
+                        numDocsInStore = 0;
+                        docStoreOffset = 0;
+                        
+                        // Reset all postings data
+                        DoAfterFlush();
+                    }
+                    finally
+                    {
+                        ResumeAllThreads();
+                    }
+                }
+                finally
+                {
+                    aborting = false;
+                    System.Threading.Monitor.PulseAll(this);
                     if (infoStream != null)
                     {
                         Message("docWriter: done abort; abortedFiles=" + abortedFiles);
                     }
-				}
-			}
-		}
-		
-		/// <summary>Reset after a flush </summary>
-		private void  DoAfterFlush()
-		{
-			// All ThreadStates should be idle when we are called
-			System.Diagnostics.Debug.Assert(AllThreadsIdle());
-			threadBindings.Clear();
-			waitQueue.Reset();
-			segment = null;
-			numDocsInRAM = 0;
-			nextDocID = 0;
-			bufferIsFull = false;
-			flushPending = false;
-			for (int i = 0; i < threadStates.Length; i++)
-				threadStates[i].DoAfterFlush();
-			numBytesUsed = 0;
-		}
-		
-		// Returns true if an abort is in progress
-		internal bool PauseAllThreads()
-		{
-			lock (this)
-			{
-				pauseThreads++;
-				while (!AllThreadsIdle())
-				{
-					System.Threading.Monitor.Wait(this);
-				}
-				
-				return aborting;
-			}
-		}
-		
-		internal void  ResumeAllThreads()
-		{
-			lock (this)
-			{
-				pauseThreads--;
-				System.Diagnostics.Debug.Assert(pauseThreads >= 0);
-				if (0 == pauseThreads)
-					System.Threading.Monitor.PulseAll(this);
-			}
-		}
-		
-		private bool AllThreadsIdle()
-		{
-			lock (this)
-			{
-				for (int i = 0; i < threadStates.Length; i++)
-					if (!threadStates[i].isIdle)
-						return false;
-				return true;
-			}
-		}
+                }
+            }
+        }
+        
+        /// <summary>Reset after a flush </summary>
+        private void  DoAfterFlush()
+        {
+            // All ThreadStates should be idle when we are called
+            System.Diagnostics.Debug.Assert(AllThreadsIdle());
+            threadBindings.Clear();
+            waitQueue.Reset();
+            segment = null;
+            numDocsInRAM = 0;
+            nextDocID = 0;
+            bufferIsFull = false;
+            flushPending = false;
+            for (int i = 0; i < threadStates.Length; i++)
+                threadStates[i].DoAfterFlush();
+            numBytesUsed = 0;
+        }
+        
+        // Returns true if an abort is in progress
+        internal bool PauseAllThreads()
+        {
+            lock (this)
+            {
+                pauseThreads++;
+                while (!AllThreadsIdle())
+                {
+                    System.Threading.Monitor.Wait(this);
+                }
+                
+                return aborting;
+            }
+        }
+        
+        internal void  ResumeAllThreads()
+        {
+            lock (this)
+            {
+                pauseThreads--;
+                System.Diagnostics.Debug.Assert(pauseThreads >= 0);
+                if (0 == pauseThreads)
+                    System.Threading.Monitor.PulseAll(this);
+            }
+        }
+        
+        private bool AllThreadsIdle()
+        {
+            lock (this)
+            {
+                for (int i = 0; i < threadStates.Length; i++)
+                    if (!threadStates[i].isIdle)
+                        return false;
+                return true;
+            }
+        }
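
PauseAllThreads/ResumeAllThreads above and WaitReady further down form a Monitor-based gate: a pauser increments a counter and waits for workers to go idle; workers park until the counter returns to zero. A hedged, self-contained sketch of that handshake, with PauseGate as a hypothetical standalone class:

    internal class PauseGate
    {
        private int pauseThreads; // > 0 while some thread holds the gate closed

        public void Pause()
        {
            lock (this) { pauseThreads++; }
        }

        public void Resume()
        {
            lock (this)
            {
                if (--pauseThreads == 0)
                    System.Threading.Monitor.PulseAll(this); // wake parked workers
            }
        }

        public void AwaitNotPaused()
        {
            lock (this)
            {
                while (pauseThreads != 0)
                    System.Threading.Monitor.Wait(this); // releases the lock while parked
            }
        }
    }
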
 
-	    internal bool AnyChanges
-	    {
-	        get
-	        {
-	            lock (this)
-	            {
-	                return numDocsInRAM != 0 || deletesInRAM.numTerms != 0 || deletesInRAM.docIDs.Count != 0 ||
-	                       deletesInRAM.queries.Count != 0;
-	            }
-	        }
-	    }
+        internal bool AnyChanges
+        {
+            get
+            {
+                lock (this)
+                {
+                    return numDocsInRAM != 0 || deletesInRAM.numTerms != 0 || deletesInRAM.docIDs.Count != 0 ||
+                           deletesInRAM.queries.Count != 0;
+                }
+            }
+        }
 
-	    private void  InitFlushState(bool onlyDocStore)
-		{
-			lock (this)
-			{
-				InitSegmentName(onlyDocStore);
-				flushState = new SegmentWriteState(this, directory, segment, docStoreSegment, numDocsInRAM, numDocsInStore, writer.TermIndexInterval);
-			}
-		}
-		
-		/// <summary>Flush all pending docs to a new segment </summary>
-		internal int Flush(bool closeDocStore)
-		{
-			lock (this)
-			{
-				
-				System.Diagnostics.Debug.Assert(AllThreadsIdle());
-				
-				System.Diagnostics.Debug.Assert(numDocsInRAM > 0);
-				
-				System.Diagnostics.Debug.Assert(nextDocID == numDocsInRAM);
-				System.Diagnostics.Debug.Assert(waitQueue.numWaiting == 0);
-				System.Diagnostics.Debug.Assert(waitQueue.waitingBytes == 0);
-				
-				InitFlushState(false);
-				
-				docStoreOffset = numDocsInStore;
-				
-				if (infoStream != null)
-					Message("flush postings as segment " + flushState.segmentName + " numDocs=" + numDocsInRAM);
-				
-				bool success = false;
-				
-				try
-				{
-					
-					if (closeDocStore)
-					{
-						System.Diagnostics.Debug.Assert(flushState.docStoreSegmentName != null);
-						System.Diagnostics.Debug.Assert(flushState.docStoreSegmentName.Equals(flushState.segmentName));
-						CloseDocStore();
-						flushState.numDocsInStore = 0;
-					}
-					
-					ICollection<DocConsumerPerThread> threads = new HashSet<DocConsumerPerThread>();
-					for (int i = 0; i < threadStates.Length; i++)
-						threads.Add(threadStates[i].consumer);
-					consumer.Flush(threads, flushState);
-					
-					if (infoStream != null)
-					{
+        private void  InitFlushState(bool onlyDocStore)
+        {
+            lock (this)
+            {
+                InitSegmentName(onlyDocStore);
+                flushState = new SegmentWriteState(this, directory, segment, docStoreSegment, numDocsInRAM, numDocsInStore, writer.TermIndexInterval);
+            }
+        }
+        
+        /// <summary>Flush all pending docs to a new segment </summary>
+        internal int Flush(bool closeDocStore)
+        {
+            lock (this)
+            {
+                
+                System.Diagnostics.Debug.Assert(AllThreadsIdle());
+                
+                System.Diagnostics.Debug.Assert(numDocsInRAM > 0);
+                
+                System.Diagnostics.Debug.Assert(nextDocID == numDocsInRAM);
+                System.Diagnostics.Debug.Assert(waitQueue.numWaiting == 0);
+                System.Diagnostics.Debug.Assert(waitQueue.waitingBytes == 0);
+                
+                InitFlushState(false);
+                
+                docStoreOffset = numDocsInStore;
+                
+                if (infoStream != null)
+                    Message("flush postings as segment " + flushState.segmentName + " numDocs=" + numDocsInRAM);
+                
+                bool success = false;
+                
+                try
+                {
+                    
+                    if (closeDocStore)
+                    {
+                        System.Diagnostics.Debug.Assert(flushState.docStoreSegmentName != null);
+                        System.Diagnostics.Debug.Assert(flushState.docStoreSegmentName.Equals(flushState.segmentName));
+                        CloseDocStore();
+                        flushState.numDocsInStore = 0;
+                    }
+                    
+                    ICollection<DocConsumerPerThread> threads = new HashSet<DocConsumerPerThread>();
+                    for (int i = 0; i < threadStates.Length; i++)
+                        threads.Add(threadStates[i].consumer);
+                    consumer.Flush(threads, flushState);
+                    
+                    if (infoStream != null)
+                    {
                         SegmentInfo si = new SegmentInfo(flushState.segmentName, flushState.numDocs, directory);
                         long newSegmentSize = si.SizeInBytes();
                         System.String message = System.String.Format(nf, "  oldRAMSize={0:d} newFlushedSize={1:d} docs/MB={2:f} new/old={3:%}",
                             new System.Object[] { numBytesUsed, newSegmentSize, (numDocsInRAM / (newSegmentSize / 1024.0 / 1024.0)), (100.0 * newSegmentSize / numBytesUsed) });
-						Message(message);
-					}
-					
-					flushedDocCount += flushState.numDocs;
-					
-					DoAfterFlush();
-					
-					success = true;
-				}
-				finally
-				{
-					if (!success)
-					{
-						Abort();
-					}
-				}
-				
-				System.Diagnostics.Debug.Assert(waitQueue.waitingBytes == 0);
-				
-				return flushState.numDocs;
-			}
-		}
+                        Message(message);
+                    }
+                    
+                    flushedDocCount += flushState.numDocs;
+                    
+                    DoAfterFlush();
+                    
+                    success = true;
+                }
+                finally
+                {
+                    if (!success)
+                    {
+                        Abort();
+                    }
+                }
+                
+                System.Diagnostics.Debug.Assert(waitQueue.waitingBytes == 0);
+                
+                return flushState.numDocs;
+            }
+        }
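
The statistics message in Flush above reduces to two ratios. A sketch with made-up numbers (8 MB of buffered RAM flushed to a 2 MB segment holding 10,000 docs):

    long numBytesUsed   = 8L * 1024 * 1024; // RAM held by the buffered docs
    long newSegmentSize = 2L * 1024 * 1024; // size of the flushed segment on disk
    int  numDocsInRAM   = 10000;

    double docsPerMB = numDocsInRAM / (newSegmentSize / 1024.0 / 1024.0); // 5,000 docs/MB
    double newToOld  = 100.0 * newSegmentSize / numBytesUsed;             // 25: segment is 25% of RAM size
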
 
         internal ICollection<string> GetFlushedFiles()
         {
             return flushState.flushedFiles;
         }
-		
-		/// <summary>Build compound file for the segment we just flushed </summary>
-		internal void  CreateCompoundFile(System.String segment)
-		{
-			
-			CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
-			foreach(string flushedFile in flushState.flushedFiles)
-			{
+        
+        /// <summary>Build compound file for the segment we just flushed </summary>
+        internal void  CreateCompoundFile(System.String segment)
+        {
+            
+            CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
+            foreach(string flushedFile in flushState.flushedFiles)
+            {
                 cfsWriter.AddFile(flushedFile);
-			}
-			
-			// Perform the merge
-			cfsWriter.Close();
-		}
-		
-		/// <summary>Sets flushPending if it is not already set and returns
-		/// whether it was set. This is used by IndexWriter to
-		/// trigger a single flush even when multiple threads are
-		/// trying to do so. 
-		/// </summary>
-		internal bool SetFlushPending()
-		{
-			lock (this)
-			{
-				if (flushPending)
-					return false;
-				else
-				{
-					flushPending = true;
-					return true;
-				}
-			}
-		}
-		
-		internal void  ClearFlushPending()
-		{
-			lock (this)
-			{
-				flushPending = false;
-			}
-		}
-		
-		internal void  PushDeletes()
-		{
-			lock (this)
-			{
-				deletesFlushed.Update(deletesInRAM);
-			}
-		}
-		
+            }
+            
+            // Perform the merge
+            cfsWriter.Close();
+        }
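
A hedged usage sketch mirroring CreateCompoundFile above; the segment name "_1" and the two file extensions are made up for illustration, and only the AddFile/Close calls shown in the method are assumed:

    var cfsWriter = new CompoundFileWriter(directory, "_1." + IndexFileNames.COMPOUND_FILE_EXTENSION);
    cfsWriter.AddFile("_1.frq"); // hypothetical flushed files
    cfsWriter.AddFile("_1.prx");
    cfsWriter.Close();           // Close() performs the actual merge into the compound file
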
+        
+        /// <summary>Sets flushPending if it is not already set and returns
+        /// whether it was set. This is used by IndexWriter to
+        /// trigger a single flush even when multiple threads are
+        /// trying to do so. 
+        /// </summary>
+        internal bool SetFlushPending()
+        {
+            lock (this)
+            {
+                if (flushPending)
+                    return false;
+                else
+                {
+                    flushPending = true;
+                    return true;
+                }
+            }
+        }
+        
+        internal void  ClearFlushPending()
+        {
+            lock (this)
+            {
+                flushPending = false;
+            }
+        }
+        
+        internal void  PushDeletes()
+        {
+            lock (this)
+            {
+                deletesFlushed.Update(deletesInRAM);
+            }
+        }
+        
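
PushDeletes above is the promotion step in the two-stage delete buffer described by the deletesInRAM/deletesFlushed fields earlier: deletes made since the last flush are discardable, flushed ones are not. A minimal sketch of the hand-off (the constructor flag follows the field comments above; Update is assumed to merge the source set in):

    var deletesInRAM   = new BufferedDeletes(false); // discarded on abort
    var deletesFlushed = new BufferedDeletes(true);  // survives an abort

    // After a successful flush, promote the in-RAM deletes so a later
    // abort can no longer lose them:
    deletesFlushed.Update(deletesInRAM);
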
         public void Dispose()
         {
             // Move to protected method if class becomes unsealed
@@ -874,417 +874,417 @@ namespace Lucene.Net.Index
                 System.Threading.Monitor.PulseAll(this);
             }
         }
-		
-		internal void  InitSegmentName(bool onlyDocStore)
-		{
-			lock (this)
-			{
-				if (segment == null && (!onlyDocStore || docStoreSegment == null))
-				{
-					segment = writer.NewSegmentName();
-					System.Diagnostics.Debug.Assert(numDocsInRAM == 0);
-				}
-				if (docStoreSegment == null)
-				{
-					docStoreSegment = segment;
-					System.Diagnostics.Debug.Assert(numDocsInStore == 0);
-				}
-			}
-		}
-		
-		/// <summary>Returns a free (idle) ThreadState that may be used for
-		/// indexing this one document.  This call also pauses if a
-		/// flush is pending.  If delTerm is non-null then we
-		/// buffer this deleted term after the thread state has
-		/// been acquired. 
-		/// </summary>
-		internal DocumentsWriterThreadState GetThreadState(Document doc, Term delTerm)
-		{
-			lock (this)
-			{
-				
-				// First, find a thread state.  If this thread already
-				// has affinity to a specific ThreadState, use that one
-				// again.
-				DocumentsWriterThreadState state = threadBindings[ThreadClass.Current()];
-				if (state == null)
-				{
-					
-					// First time this thread has called us since last
-					// flush.  Find the least loaded thread state:
-					DocumentsWriterThreadState minThreadState = null;
-					for (int i = 0; i < threadStates.Length; i++)
-					{
-						DocumentsWriterThreadState ts = threadStates[i];
-						if (minThreadState == null || ts.numThreads < minThreadState.numThreads)
-							minThreadState = ts;
-					}
-					if (minThreadState != null && (minThreadState.numThreads == 0 || threadStates.Length >= MAX_THREAD_STATE))
-					{
-						state = minThreadState;
-						state.numThreads++;
-					}
-					else
-					{
-						// Just create a new "private" thread state
-						DocumentsWriterThreadState[] newArray = new DocumentsWriterThreadState[1 + threadStates.Length];
-						if (threadStates.Length > 0)
-							Array.Copy(threadStates, 0, newArray, 0, threadStates.Length);
-						state = newArray[threadStates.Length] = new DocumentsWriterThreadState(this);
-						threadStates = newArray;
-					}
-					threadBindings[ThreadClass.Current()] = state;
-				}
-				
-				// Next, wait until my thread state is idle (in case
-				// it's shared with other threads) and for threads to
-				// not be paused nor a flush pending:
-				WaitReady(state);
-				
-				// Allocate segment name if this is the first doc since
-				// last flush:
-				InitSegmentName(false);
-				
-				state.isIdle = false;
-				
-				bool success = false;
-				try
-				{
-					state.docState.docID = nextDocID;
-					
-					System.Diagnostics.Debug.Assert(writer.TestPoint("DocumentsWriter.ThreadState.init start"));
-					
-					if (delTerm != null)
-					{
-						AddDeleteTerm(delTerm, state.docState.docID);
-						state.doFlushAfter = TimeToFlushDeletes();
-					}
-					
-					System.Diagnostics.Debug.Assert(writer.TestPoint("DocumentsWriter.ThreadState.init after delTerm"));
-					
-					nextDocID++;
-					numDocsInRAM++;
-					
-					// We must at this point commit to flushing to ensure we
-					// always get N docs when we flush by doc count, even if
-					// > 1 thread is adding documents:
-					if (!flushPending && maxBufferedDocs != IndexWriter.DISABLE_AUTO_FLUSH && numDocsInRAM >= maxBufferedDocs)
-					{
-						flushPending = true;
-						state.doFlushAfter = true;
-					}
-					
-					success = true;
-				}
-				finally
-				{
-					if (!success)
-					{
-						// Forcefully idle this ThreadState:
-						state.isIdle = true;
-						System.Threading.Monitor.PulseAll(this);
-						if (state.doFlushAfter)
-						{
-							state.doFlushAfter = false;
-							flushPending = false;
-						}
-					}
-				}
-				
-				return state;
-			}
-		}
-		
-		/// <summary>Returns true if the caller (IndexWriter) should now
-		/// flush. 
-		/// </summary>
-		internal bool AddDocument(Document doc, Analyzer analyzer)
-		{
-			return UpdateDocument(doc, analyzer, null);
-		}
-		
-		internal bool UpdateDocument(Term t, Document doc, Analyzer analyzer)
-		{
-			return UpdateDocument(doc, analyzer, t);
-		}
-		
-		internal bool UpdateDocument(Document doc, Analyzer analyzer, Term delTerm)
-		{
-			
-			// This call is synchronized but fast
-			DocumentsWriterThreadState state = GetThreadState(doc, delTerm);
-			
-			DocState docState = state.docState;
-			docState.doc = doc;
-			docState.analyzer = analyzer;
-
-            bool doReturnFalse = false; // {{Aroush-2.9}} to handle return from finally clause
-
-			bool success = false;
-			try
-			{
-				// This call is not synchronized and does all the
-				// work
-				DocWriter perDoc;
-                try
+        
+        internal void  InitSegmentName(bool onlyDocStore)
+        {
+            lock (this)
+            {
+                if (segment == null && (!onlyDocStore || docStoreSegment == null))
                 {
-                    perDoc = state.consumer.ProcessDocument();
+                    segment = writer.NewSegmentName();
+                    System.Diagnostics.Debug.Assert(numDocsInRAM == 0);
                 }
-                finally
+                if (docStoreSegment == null)
                 {
-                    docState.Clear();
+                    docStoreSegment = segment;
+                    System.Diagnostics.Debug.Assert(numDocsInStore == 0);
                 }
-				// This call is synchronized but fast
-				FinishDocument(state, perDoc);
-				success = true;
-			}
-			finally
-			{
-				if (!success)
-				{
-					lock (this)
-					{
-						
-						if (aborting)
-						{
-							state.isIdle = true;
-							System.Threading.Monitor.PulseAll(this);
-							Abort();
-						}
-						else
-						{
-							skipDocWriter.docID = docState.docID;
-							bool success2 = false;
-							try
-							{
-								waitQueue.Add(skipDocWriter);
-								success2 = true;
-							}
-							finally
-							{
-								if (!success2)
-								{
-									state.isIdle = true;
-									System.Threading.Monitor.PulseAll(this);
-									Abort();
-								// return false; // {{Aroush-2.9}} this 'return false' is moved outside the finally
-                                    doReturnFalse = true;
-								}
-							}
-
-                            if (!doReturnFalse)   // {{Aroush-2.9}} added because of the above 'return false' removal
-                            {
-								state.isIdle = true;
-								System.Threading.Monitor.PulseAll(this);
-							
-								// If this thread state had decided to flush, we
-								// must clear it so another thread can flush
-								if (state.doFlushAfter)
-								{
-									state.doFlushAfter = false;
-									flushPending = false;
-									System.Threading.Monitor.PulseAll(this);
-								}
-								
-								// Immediately mark this document as deleted
-								// since likely it was partially added.  This
-								// keeps indexing as "all or none" (atomic) when
-								// adding a document:
-								AddDeleteDocID(state.docState.docID);
-                            }
-						}
-					}
-				}
-			}
-
-            if (doReturnFalse)  // {{Aroush-2.9}} see comment above
-            {
-                return false;
             }
-
-			return state.doFlushAfter || TimeToFlushDeletes();
-		}
-		
-		// for testing
-		internal int GetNumBufferedDeleteTerms()
-		{
-			lock (this)
-			{
-				return deletesInRAM.numTerms; 
-			}
-		}
-		
-		// for testing
-		internal IDictionary<Term, BufferedDeletes.Num> GetBufferedDeleteTerms()
-		{
-			lock (this)
-			{
-				return deletesInRAM.terms;
-			}
-		}
-		
-		/// <summary>Called whenever a merge has completed and the merged segments had deletions </summary>
-		internal void  RemapDeletes(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergeDocCount)
-		{
-			lock (this)
-			{
-				// The merged segments had no deletes, so docIDs did not change and we have nothing to do
-				if (docMaps == null)
-					return;
-				MergeDocIDRemapper mapper = new MergeDocIDRemapper(infos, docMaps, delCounts, merge, mergeDocCount);
-				deletesInRAM.Remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
-				deletesFlushed.Remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
-				flushedDocCount -= mapper.docShift;
-			}
-		}
-		
-		private void  WaitReady(DocumentsWriterThreadState state)
-		{
-			lock (this)
-			{
-				
-				while (!closed && ((state != null && !state.isIdle) || pauseThreads != 0 || flushPending || aborting))
-				{
-					System.Threading.Monitor.Wait(this);
-				}
-				
-				if (closed)
-					throw new AlreadyClosedException("this IndexWriter is closed");
-			}
-		}
-		
-		internal bool BufferDeleteTerms(Term[] terms)
-		{
-			lock (this)
-			{
-				WaitReady(null);
-				for (int i = 0; i < terms.Length; i++)
-					AddDeleteTerm(terms[i], numDocsInRAM);
-				return TimeToFlushDeletes();
-			}
-		}
-		
-		internal bool BufferDeleteTerm(Term term)
-		{
-			lock (this)
-			{
-				WaitReady(null);
-				AddDeleteTerm(term, numDocsInRAM);
-				return TimeToFlushDeletes();
-			}
-		}
-		
-		internal bool BufferDeleteQueries(Query[] queries)
-		{
-			lock (this)
-			{
-				WaitReady(null);
-				for (int i = 0; i < queries.Length; i++)
-					AddDeleteQuery(queries[i], numDocsInRAM);
-				return TimeToFlushDeletes();
-			}
-		}
-		
-		internal bool BufferDeleteQuery(Query query)
-		{
-			lock (this)
-			{
-				WaitReady(null);
-				AddDeleteQuery(query, numDocsInRAM);
-				return TimeToFlushDeletes();
-			}
-		}
-		
-		internal bool DeletesFull()
-		{
-			lock (this)
-			{
-				return (ramBufferSize != IndexWriter.DISABLE_AUTO_FLUSH && (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed + numBytesUsed) >= ramBufferSize) || (maxBufferedDeleteTerms != IndexWriter.DISABLE_AUTO_FLUSH && ((deletesInRAM.Size() + deletesFlushed.Size()) >= maxBufferedDeleteTerms));
-			}
-		}
-		
-		internal bool DoApplyDeletes()
-		{
-			lock (this)
-			{
-				// Very similar to deletesFull(), except we don't count
-				// numBytesAlloc, because we are checking whether
-				// deletes (alone) are consuming too many resources now
-				// and thus should be applied.  We apply deletes if RAM
-				// usage is > 1/2 of our allowed RAM buffer, to prevent
-				// too-frequent flushing of a long tail of tiny segments
-				// when merges (which always apply deletes) are
-				// infrequent.
-				return (ramBufferSize != IndexWriter.DISABLE_AUTO_FLUSH && (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed) >= ramBufferSize / 2) || (maxBufferedDeleteTerms != IndexWriter.DISABLE_AUTO_FLUSH && ((deletesInRAM.Size() + deletesFlushed.Size()) >= maxBufferedDeleteTerms));
-			}
-		}
-		
-		private bool TimeToFlushDeletes()
-		{
-			lock (this)
-			{
-				return (bufferIsFull || DeletesFull()) && SetFlushPending();
-			}
-		}
-
-	    internal int MaxBufferedDeleteTerms
-	    {
-	        set { this.maxBufferedDeleteTerms = value; }
-	        get { return maxBufferedDeleteTerms; }
-	    }
-
-	    internal bool HasDeletes()
-		{
-			lock (this)
-			{
-				return deletesFlushed.Any();
-			}
-		}
-		
-		internal bool ApplyDeletes(SegmentInfos infos)
-		{
-			lock (this)
-			{
-				if (!HasDeletes())
-					return false;
-				
-				if (infoStream != null)
-					Message("apply " + deletesFlushed.numTerms + " buffered deleted terms and " + deletesFlushed.docIDs.Count + " deleted docIDs and " + deletesFlushed.queries.Count + " deleted queries on " + (+ infos.Count) + " segments.");
-				
-				int infosEnd = infos.Count;
-				
-				int docStart = 0;
-				bool any = false;
-				for (int i = 0; i < infosEnd; i++)
-				{
-					
-					// Make sure we never attempt to apply deletes to
-					// segment in external dir
-					System.Diagnostics.Debug.Assert(infos.Info(i).dir == directory);
-					
-					SegmentReader reader = writer.readerPool.Get(infos.Info(i), false);
-					try
-					{
-						any |= ApplyDeletes(reader, docStart);
-						docStart += reader.MaxDoc;
-					}
-					finally
-					{
-						writer.readerPool.Release(reader);
-					}
-				}
-				
-				deletesFlushed.Clear();
-				
-				return any;
-			}
-		}
-
-        // used only by assert
-        private Term lastDeleteTerm;
-
-        // used only by assert
-        private bool CheckDeleteTerm(Term term) 
+        }
+        
+        /// <summary>Returns a free (idle) ThreadState that may be used for
+        /// indexing this one document.  This call also pauses if a
+        /// flush is pending.  If delTerm is non-null then we
+        /// buffer this deleted term after the thread state has
+        /// been acquired. 
+        /// </summary>
+        internal DocumentsWriterThreadState GetThreadState(Document doc, Term delTerm)
+        {
+            lock (this)
+            {
+                
+                // First, find a thread state.  If this thread already
+                // has affinity to a specific ThreadState, use that one
+                // again.
+                DocumentsWriterThreadState state = threadBindings[ThreadClass.Current()];
+                if (state == null)
+                {
+                    
+                    // First time this thread has called us since last
+                    // flush.  Find the least loaded thread state:
+                    DocumentsWriterThreadState minThreadState = null;
+                    for (int i = 0; i < threadStates.Length; i++)
+                    {
+                        DocumentsWriterThreadState ts = threadStates[i];
+                        if (minThreadState == null || ts.numThreads < minThreadState.numThreads)
+                            minThreadState = ts;
+                    }
+                    if (minThreadState != null && (minThreadState.numThreads == 0 || threadStates.Length >= MAX_THREAD_STATE))
+                    {
+                        state = minThreadState;
+                        state.numThreads++;
+                    }
+                    else
+                    {
+                        // Just create a new "private" thread state
+                        DocumentsWriterThreadState[] newArray = new DocumentsWriterThreadState[1 + threadStates.Length];
+                        if (threadStates.Length > 0)
+                            Array.Copy(threadStates, 0, newArray, 0, threadStates.Length);
+                        state = newArray[threadStates.Length] = new DocumentsWriterThreadState(this);
+                        threadStates = newArray;
+                    }
+                    threadBindings[ThreadClass.Current()] = state;
+                }
+                
+                // Next, wait until my thread state is idle (in case
+                // it's shared with other threads) and for threads to
+                // not be paused nor a flush pending:
+                WaitReady(state);
+                
+                // Allocate segment name if this is the first doc since
+                // last flush:
+                InitSegmentName(false);
+                
+                state.isIdle = false;
+                
+                bool success = false;
+                try
+                {
+                    state.docState.docID = nextDocID;
+                    
+                    System.Diagnostics.Debug.Assert(writer.TestPoint("DocumentsWriter.ThreadState.init start"));
+                    
+                    if (delTerm != null)
+                    {
+                        AddDeleteTerm(delTerm, state.docState.docID);
+                        state.doFlushAfter = TimeToFlushDeletes();
+                    }
+                    
+                    System.Diagnostics.Debug.Assert(writer.TestPoint("DocumentsWriter.ThreadState.init after delTerm"));
+                    
+                    nextDocID++;
+                    numDocsInRAM++;
+                    
+                    // We must at this point commit to flushing to ensure we
+                    // always get N docs when we flush by doc count, even if
+                    // > 1 thread is adding documents:
+                    if (!flushPending && maxBufferedDocs != IndexWriter.DISABLE_AUTO_FLUSH && numDocsInRAM >= maxBufferedDocs)
+                    {
+                        flushPending = true;
+                        state.doFlushAfter = true;
+                    }
+                    
+                    success = true;
+                }
+                finally
+                {
+                    if (!success)
+                    {
+                        // Forcefully idle this ThreadState:
+                        state.isIdle = true;
+                        System.Threading.Monitor.PulseAll(this);
+                        if (state.doFlushAfter)
+                        {
+                            state.doFlushAfter = false;
+                            flushPending = false;
+                        }
+                    }
+                }
+                
+                return state;
+            }
+        }
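
The thread-affinity logic in GetThreadState above (pick the least-loaded state, share it only if it is idle or the cap is hit, otherwise grow the array by one) can be read in isolation. A hedged generic sketch, assuming using System; note the real code additionally bumps numThreads on a shared state:

    static T PickOrGrow<T>(ref T[] states, Func<T, int> load, Func<T> create, int maxStates)
        where T : class
    {
        T min = null;
        foreach (T s in states)
            if (min == null || load(s) < load(min))
                min = s;

        // Share the least-loaded state if it is idle, or if we've hit the cap:
        if (min != null && (load(min) == 0 || states.Length >= maxStates))
            return min;

        // Otherwise grow the array by one and bind a fresh private state:
        T[] grown = new T[states.Length + 1];
        Array.Copy(states, grown, states.Length);
        T fresh = create();
        grown[states.Length] = fresh;
        states = grown;
        return fresh;
    }
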
+        
+        /// <summary>Returns true if the caller (IndexWriter) should now
+        /// flush. 
+        /// </summary>
+        internal bool AddDocument(Document doc, Analyzer analyzer)
+        {
+            return UpdateDocument(doc, analyzer, null);
+        }
+        
+        internal bool UpdateDocument(Term t, Document doc, Analyzer analyzer)
+        {
+            return UpdateDocument(doc, analyzer, t);
+        }
+        
+        internal bool UpdateDocument(Document doc, Analyzer analyzer, Term delTerm)
+        {
+            
+            // This call is synchronized but fast
+            DocumentsWriterThreadState state = GetThreadState(doc, delTerm);
+            
+            DocState docState = state.docState;
+            docState.doc = doc;
+            docState.analyzer = analyzer;
+
+            bool doReturnFalse = false; // {{Aroush-2.9}} to handle return from finally clause
+
+            bool success = false;
+            try
+            {
+                // This call is not synchronized and does all the
+                // work
+                DocWriter perDoc;
+                try
+                {
+                    perDoc = state.consumer.ProcessDocument();
+                }
+                finally
+                {
+                    docState.Clear();
+                }
+                // This call is synchronized but fast
+                FinishDocument(state, perDoc);
+                success = true;
+            }
+            finally
+            {
+                if (!success)
+                {
+                    lock (this)
+                    {
+                        
+                        if (aborting)
+                        {
+                            state.isIdle = true;
+                            System.Threading.Monitor.PulseAll(this);
+                            Abort();
+                        }
+                        else
+                        {
+                            skipDocWriter.docID = docState.docID;
+                            bool success2 = false;
+                            try
+                            {
+                                waitQueue.Add(skipDocWriter);
+                                success2 = true;
+                            }
+                            finally
+                            {
+                                if (!success2)
+                                {
+                                    state.isIdle = true;
+                                    System.Threading.Monitor.PulseAll(this);
+                                    Abort();
+                                    // return false; // {{Aroush-2.9}} this 'return false' is moved outside the finally
+                                    doReturnFalse = true;
+                                }
+                            }
+
+                            if (!doReturnFalse)   // {{Aroush-2.9}} added because of the above 'return false' removal
+                            {
+                                state.isIdle = true;
+                                System.Threading.Monitor.PulseAll(this);
+                            
+                                // If this thread state had decided to flush, we
+                                // must clear it so another thread can flush
+                                if (state.doFlushAfter)
+                                {
+                                    state.doFlushAfter = false;
+                                    flushPending = false;
+                                    System.Threading.Monitor.PulseAll(this);
+                                }
+                                
+                                // Immediately mark this document as deleted
+                                // since likely it was partially added.  This
+                                // keeps indexing as "all or none" (atomic) when
+                                // adding a document:
+                                AddDeleteDocID(state.docState.docID);
+                            }
+                        }
+                    }
+                }
+            }
+
+            if (doReturnFalse)  // {{Aroush-2.9}} see comment above
+            {
+                return false;
+            }
+
+            return state.doFlushAfter || TimeToFlushDeletes();
+        }
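
A hedged sketch of how an IndexWriter-style caller might consume the bool returned by AddDocument/UpdateDocument above (the real IndexWriter routes this through its own flush logic; this is illustrative only):

    bool doFlush = docWriter.AddDocument(doc, analyzer); // true => a flush is due
    if (doFlush)
    {
        docWriter.Flush(true); // flush buffered docs, closing the doc store
    }
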
+        
+        // for testing
+        internal int GetNumBufferedDeleteTerms()
+        {
+            lock (this)
+            {
+                return deletesInRAM.numTerms; 
+            }
+        }
+        
+        // for testing
+        internal IDictionary<Term, BufferedDeletes.Num> GetBufferedDeleteTerms()
+        {
+            lock (this)
+            {
+                return deletesInRAM.terms;
+            }
+        }
+        
+        /// <summary>Called whenever a merge has completed and the merged segments had deletions </summary>
+        internal void  RemapDeletes(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergeDocCount)
+        {
+            lock (this)
+            {
+                // The merged segments had no deletes, so docIDs did not change and we have nothing to do
+                if (docMaps == null)
+                    return;
+                MergeDocIDRemapper mapper = new MergeDocIDRemapper(infos, docMaps, delCounts, merge, mergeDocCount);
+                deletesInRAM.Remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
+                deletesFlushed.Remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
+                flushedDocCount -= mapper.docShift;
+            }
+        }
+        
+        private void  WaitReady(DocumentsWriterThreadState state)
+        {
+            lock (this)
+            {
+                
+                while (!closed && ((state != null && !state.isIdle) || pauseThreads != 0 || flushPending || aborting))
+                {
+                    System.Threading.Monitor.Wait(this);
+                }
+                
+                if (closed)
+                    throw new AlreadyClosedException("this IndexWriter is closed");
+            }
+        }
+        
+        internal bool BufferDeleteTerms(Term[] terms)
+        {
+            lock (this)
+            {
+                WaitReady(null);
+                for (int i = 0; i < terms.Length; i++)
+                    AddDeleteTerm(terms[i], numDocsInRAM);
+                return TimeToFlushDeletes();
+            }
+        }
+        
+        internal bool BufferDeleteTerm(Term term)
+        {
+            lock (this)
+            {
+                WaitReady(null);
+                AddDeleteTerm(term, numDocsInRAM);
+                return TimeToFlushDeletes();
+            }
+        }
+        
+        internal bool BufferDeleteQueries(Query[] queries)
+        {
+            lock (this)
+            {
+                WaitReady(null);
+                for (int i = 0; i < queries.Length; i++)
+                    AddDeleteQuery(queries[i], numDocsInRAM);
+                return TimeToFlushDeletes();
+            }
+        }
+        
+        internal bool BufferDeleteQuery(Query query)
+        {
+            lock (this)
+            {
+                WaitReady(null);
+                AddDeleteQuery(query, numDocsInRAM);
+                return TimeToFlushDeletes();
+            }
+        }
+        
+        internal bool DeletesFull()
+        {
+            lock (this)
+            {
+                return (ramBufferSize != IndexWriter.DISABLE_AUTO_FLUSH && (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed + numBytesUsed) >= ramBufferSize) || (maxBufferedDeleteTerms != IndexWriter.DISABLE_AUTO_FLUSH && ((deletesInRAM.Size() + deletesFlushed.Size()) >= maxBufferedDeleteTerms));
+            }
+        }
+        
+        internal bool DoApplyDeletes()
+        {
+            lock (this)
+            {
+                // Very similar to deletesFull(), except we don't count
+                // numBytesAlloc, because we are checking whether
+                // deletes (alone) are consuming too many resources now
+                // and thus should be applied.  We apply deletes if RAM
+                // usage is > 1/2 of our allowed RAM buffer, to prevent
+                // too-frequent flushing of a long tail of tiny segments
+                // when merges (which always apply deletes) are
+                // infrequent.
+                return (ramBufferSize != IndexWriter.DISABLE_AUTO_FLUSH && (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed) >= ramBufferSize / 2) || (maxBufferedDeleteTerms != IndexWriter.DISABLE_AUTO_FLUSH && ((deletesInRAM.Size() + deletesFlushed.Size()) >= maxBufferedDeleteTerms));
+            }
+        }
+        
+        private bool TimeToFlushDeletes()
+        {
+            lock (this)
+            {
+                return (bufferIsFull || DeletesFull()) && SetFlushPending();
+            }
+        }
+
+        internal int MaxBufferedDeleteTerms
+        {
+            set { this.maxBufferedDeleteTerms = value; }
+            get { return maxBufferedDeleteTerms; }
+        }
+
+        internal bool HasDeletes()
+        {
+            lock (this)
+            {
+                return deletesFlushed.Any();
+            }
+        }
+        
+        internal bool ApplyDeletes(SegmentInfos infos)
+        {
+            lock (this)
+            {
+                if (!HasDeletes())
+                    return false;
+                
+                if (infoStream != null)
+                    Message("apply " + deletesFlushed.numTerms + " buffered deleted terms and " + deletesFlushed.docIDs.Count + " deleted docIDs and " + deletesFlushed.queries.Count + " deleted queries on " + infos.Count + " segments.");
+                
+                int infosEnd = infos.Count;
+                
+                int docStart = 0;
+                bool any = false;
+                for (int i = 0; i < infosEnd; i++)
+                {
+                    
+                    // Make sure we never attempt to apply deletes to
+                    // segment in external dir
+                    System.Diagnostics.Debug.Assert(infos.Info(i).dir == directory);
+                    
+                    SegmentReader reader = writer.readerPool.Get(infos.Info(i), false);
+                    try
+                    {
+                        any |= ApplyDeletes(reader, docStart);
+                        docStart += reader.MaxDoc;
+                    }
+                    finally
+                    {
+                        writer.readerPool.Release(reader);
+                    }
+                }
+                
+                deletesFlushed.Clear();
+                
+                return any;
+            }
+        }
+
+        // used only by assert
+        private Term lastDeleteTerm;
+
+        // used only by assert
+        private bool CheckDeleteTerm(Term term) 
         {
             if (term != null) {
                 System.Diagnostics.Debug.Assert(lastDeleteTerm == null || term.CompareTo(lastDeleteTerm) > 0, "lastTerm=" + lastDeleteTerm + " vs term=" + term);
@@ -1292,330 +1292,330 @@ namespace Lucene.Net.Index
             lastDeleteTerm = term;
             return true;
         }
-		
-		// Apply buffered delete terms, queries and docIDs to the
-		// provided reader
-		private bool ApplyDeletes(IndexReader reader, int docIDStart)
-		{
-			lock (this)
-			{
-				int docEnd = docIDStart + reader.MaxDoc;
-				bool any = false;
-				
+        
+        // Apply buffered delete terms, queries and docIDs to the
+        // provided reader
+        private bool ApplyDeletes(IndexReader reader, int docIDStart)
+        {
+            lock (this)
+            {
+                int docEnd = docIDStart + reader.MaxDoc;
+                bool any = false;
+                
                 System.Diagnostics.Debug.Assert(CheckDeleteTerm(null));
 
-				// Delete by term
-				TermDocs docs = reader.TermDocs();
-				try
-				{
-					foreach(KeyValuePair<Term, BufferedDeletes.Num> entry in deletesFlushed.terms)
-					{
-						Term term = entry.Key;
-						// LUCENE-2086: we should be iterating a TreeMap,
+                // Delete by term
+                TermDocs docs = reader.TermDocs();
+                try
+                {
+                    foreach(KeyValuePair<Term, BufferedDeletes.Num> entry in deletesFlushed.terms)
+                    {
+                        Term term = entry.Key;
+                        // LUCENE-2086: we should be iterating a TreeMap,
                         // here, so terms better be in order:
                         System.Diagnostics.Debug.Assert(CheckDeleteTerm(term));
-						docs.Seek(term);
-						int limit = entry.Value.GetNum();
-						while (docs.Next())
-						{
-							int docID = docs.Doc;
-							if (docIDStart + docID >= limit)
-								break;
-							reader.DeleteDocument(docID);
-							any = true;
-						}
-					}
-				}
-				finally
-				{
-					docs.Close();
-				}
-				
-				// Delete by docID
-				foreach(int docIdInt in deletesFlushed.docIDs)
-				{
-				    int docID = docIdInt;
-					if (docID >= docIDStart && docID < docEnd)
-					{
-						reader.DeleteDocument(docID - docIDStart);
-						any = true;
-					}
-				}
-				
-				// Delete by query
-				IndexSearcher searcher = new IndexSearcher(reader);
-				foreach(KeyValuePair<Query, int> entry in deletesFlushed.queries)
-				{
-					Query query = (Query) entry.Key;
-					int limit = (int)entry.Value;
-					Weight weight = query.Weight(searcher);
-					Scorer scorer = weight.Scorer(reader, true, false);
-					if (scorer != null)
-					{
-						while (true)
-						{
-							int doc = scorer.NextDoc();
-							if (((long) docIDStart) + doc >= limit)
-								break;
-							reader.DeleteDocument(doc);
-							any = true;
-						}
-					}
-				}
-				searcher.Close();
-				return any;
-			}
-		}
-		
-		// Buffer a term in bufferedDeleteTerms, which records the
-		// current number of documents buffered in ram so that the
-		// delete term will be applied to those documents as well
-		// as the disk segments.
-		private void  AddDeleteTerm(Term term, int docCount)
-		{
-			lock (this)
-			{
-				BufferedDeletes.Num num = deletesInRAM.terms[term];
-				int docIDUpto = flushedDocCount + docCount;
-				if (num == null)
-					deletesInRAM.terms[term] = new BufferedDeletes.Num(docIDUpto);
-				else
-					num.SetNum(docIDUpto);
-				deletesInRAM.numTerms++;
-				
-				deletesInRAM.AddBytesUsed(BYTES_PER_DEL_TERM + term.Text.Length * CHAR_NUM_BYTE);
-			}
-		}
-		
-		// Buffer a specific docID for deletion.  Currently only
-		// used when we hit an exception when adding a document
-		private void  AddDeleteDocID(int docID)
-		{
-			lock (this)
-			{
-			    deletesInRAM.docIDs.Add(flushedDocCount + docID);
+                        docs.Seek(term);
+                        int limit = entry.Value.GetNum();
+                        while (docs.Next())
+                        {
+                            int docID = docs.Doc;
+                            if (docIDStart + docID >= limit)
+                                break;
+                            reader.DeleteDocument(docID);
+                            any = true;
+                        }
+                    }
+                }
+                finally
+                {
+                    docs.Close();
+                }
+                
+                // Delete by docID
+                foreach(int docIdInt in deletesFlushed.docIDs)
+                {
+                    int docID = docIdInt;
+                    if (docID >= docIDStart && docID < docEnd)
+                    {
+                        reader.DeleteDocument(docID - docIDStart);
+                        any = true;
+                    }
+                }
+                
+                // Delete by query
+                IndexSearcher searcher = new IndexSearcher(reader);
+                foreach(KeyValuePair<Query, int> entry in deletesFlushed.queries)
+                {
+                    Query query = (Query) entry.Key;
+                    int limit = (int)entry.Value;
+                    Weight weight = query.Weight(searcher);
+                    Scorer scorer = weight.Scorer(reader, true, false);
+                    if (scorer != null)
+                    {
+                        while (true)
+                        {
+                            int doc = scorer.NextDoc();
+                            if (((long) docIDStart) + doc >= limit)
+                                break;
+                            reader.DeleteDocument(doc);
+                            any = true;
+                        }
+                    }
+                }
+                searcher.Close();
+                return any;
+            }
+        }
+        
+        // Buffer a term in bufferedDeleteTerms, which records the
+        // current number of documents buffered in ram so that the
+        // delete term will be applied to those documents as well
+        // as the disk segments.
+        private void  AddDeleteTerm(Term term, int docCount)
+        {
+            lock (this)
+            {
+                BufferedDeletes.Num num = deletesInRAM.terms[term];
+                int docIDUpto = flushedDocCount + docCount;
+                if (num == null)
+                    deletesInRAM.terms[term] = new BufferedDeletes.Num(docIDUpto);
+                else
+                    num.SetNum(docIDUpto);
+                deletesInRAM.numTerms++;
+                
+                deletesInRAM.AddBytesUsed(BYTES_PER_DEL_TERM + term.Text.Length * CHAR_NUM_BYTE);
+            }
+        }
+        
+        // Buffer a specific docID for deletion.  Currently only
+        // used when we hit an exception when adding a document
+        private void  AddDeleteDocID(int docID)
+        {
+            lock (this)
+            {
+                deletesInRAM.docIDs.Add(flushedDocCount + docID);
                 deletesInRAM.AddBytesUsed(BYTES_PER_DEL_DOCID);
-			}
-		}
-		
-		private void  AddDeleteQuery(Query query, int docID)
-		{
-			lock (this)
-			{
-				deletesInRAM.queries[query] = flushedDocCount + docID;
-				deletesInRAM.AddBytesUsed(BYTES_PER_DEL_QUERY);
-			}
-		}
-		
-		internal bool DoBalanceRAM()
-		{
-			lock (this)
-			{
-				return ramBufferSize != IndexWriter.DISABLE_AUTO_FLUSH && !bufferIsFull && (numBytesUsed + deletesInRAM.bytesUsed + deletesFlushed.bytesUsed >= ramBufferSize || numBytesAlloc >= freeTrigger);
-			}
-		}
-		
-		/// <summary>Does the synchronized work to finish/flush the
-		/// inverted document. 
-		/// </summary>
-		private void  FinishDocument(DocumentsWriterThreadState perThread, DocWriter docWriter)
-		{
-			
-			if (DoBalanceRAM())
-			// Must call this w/o holding synchronized(this) else
-			// we'll hit deadlock:
-				BalanceRAM();
-			
-			lock (this)
-			{
-				
-				System.Diagnostics.Debug.Assert(docWriter == null || docWriter.docID == perThread.docState.docID);
-				
-				if (aborting)
-				{
-					
-					// We are currently aborting, and another thread is
-					// waiting for me to become idle.  We just forcefully
-					// idle this threadState; it will be fully reset by
-					// abort()
-					if (docWriter != null)
-						try
-						{
-							docWriter.Abort();
-						}
-						catch (System.Exception)
-						{
-						}
-					
-					perThread.isIdle = true;
-					System.Threading.Monitor.PulseAll(this);
-					return ;
-				}
-				
-				bool doPause;
-				
-				if (docWriter != null)
-					doPause = waitQueue.Add(docWriter);
-				else
-				{
-					skipDocWriter.docID = perThread.docState.docID;
-					doPause = waitQueue.Add(skipDocWriter);
-				}
-				
-				if (doPause)
-					WaitForWaitQueue();
-				
-				if (bufferIsFull && !flushPending)
-				{
-					flushPending = true;
-					perThread.doFlushAfter = true;
-				}
-				
-				perThread.isIdle = true;
-				System.Threading.Monitor.PulseAll(this);
-			}
-		}
-		
-		internal void  WaitForWaitQueue()
-		{
-			lock (this)
-			{
-				do 
-				{
-					System.Threading.Monitor.Wait(this);
-				}
-				while (!waitQueue.DoResume());
-			}
-		}
-		
-		internal class S

<TRUNCATED>
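
The delete-by-term logic above is easier to follow in isolation: every buffered term carries a docIDUpto limit (flushedDocCount plus the number of documents buffered in RAM at the time), and applying the buffer to a segment deletes only documents whose global ID falls below that limit. The following is a minimal sketch of that bookkeeping; DeleteBuffer, DocsToDelete, and the plain dictionary are illustrative stand-ins for BufferedDeletes and its Num entries, not the actual classes.

    using System.Collections.Generic;

    // DeleteBuffer is a hypothetical stand-in for BufferedDeletes: per term it
    // records the exclusive upper bound of global docIDs the delete applies to.
    class DeleteBuffer
    {
        private readonly IDictionary<string, int> terms =
            new SortedDictionary<string, int>();

        // Buffer a delete-by-term.  docIDUpto = flushedDocCount + docsInRAM at
        // the moment of the call, so documents added later are unaffected.
        public void AddDeleteTerm(string term, int docIDUpto)
        {
            terms[term] = docIDUpto;
        }

        // For one segment occupying [docIDStart, docIDStart + maxDoc) in the
        // global docID space, yield the segment-local docIDs to delete.
        public IEnumerable<int> DocsToDelete(string term, int docIDStart,
                                             IEnumerable<int> termPostings)
        {
            int limit;
            if (!terms.TryGetValue(term, out limit))
                yield break;
            foreach (int docID in termPostings)
            {
                if (docIDStart + docID >= limit)
                    yield break;   // doc was added after the delete was buffered
                yield return docID;
            }
        }
    }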

[08/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/IntBlockPool.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IntBlockPool.cs b/src/core/Index/IntBlockPool.cs
index 5fbee30..5b589e5 100644
--- a/src/core/Index/IntBlockPool.cs
+++ b/src/core/Index/IntBlockPool.cs
@@ -19,61 +19,61 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	sealed class IntBlockPool
-	{
-		private void  InitBlock()
-		{
-			intUpto = DocumentsWriter.INT_BLOCK_SIZE;
-		}
-		
-		public int[][] buffers = new int[10][];
-		
-		internal int bufferUpto = - 1; // Which buffer we are upto
-		public int intUpto; // Where we are in head buffer
-		
-		public int[] buffer; // Current head buffer
-		public int intOffset = - DocumentsWriter.INT_BLOCK_SIZE; // Current head offset
-		
-		private DocumentsWriter docWriter;
-		internal bool trackAllocations;
-		
-		public IntBlockPool(DocumentsWriter docWriter, bool trackAllocations)
-		{
-			InitBlock();
-			this.docWriter = docWriter;
-			this.trackAllocations = trackAllocations;
-		}
-		
-		public void  Reset()
-		{
-			if (bufferUpto != - 1)
-			{
-				if (bufferUpto > 0)
-				// Recycle all but the first buffer
-					docWriter.RecycleIntBlocks(buffers, 1, 1 + bufferUpto);
-				
-				// Reuse first buffer
-				bufferUpto = 0;
-				intUpto = 0;
-				intOffset = 0;
-				buffer = buffers[0];
-			}
-		}
-		
-		public void  NextBuffer()
-		{
-			if (1 + bufferUpto == buffers.Length)
-			{
-				int[][] newBuffers = new int[(int) (buffers.Length * 1.5)][];
-				Array.Copy(buffers, 0, newBuffers, 0, buffers.Length);
-				buffers = newBuffers;
-			}
-			buffer = buffers[1 + bufferUpto] = docWriter.GetIntBlock(trackAllocations);
-			bufferUpto++;
-			
-			intUpto = 0;
-			intOffset += DocumentsWriter.INT_BLOCK_SIZE;
-		}
-	}
+    
+    sealed class IntBlockPool
+    {
+        private void  InitBlock()
+        {
+            intUpto = DocumentsWriter.INT_BLOCK_SIZE;
+        }
+        
+        public int[][] buffers = new int[10][];
+        
+        internal int bufferUpto = - 1; // Which buffer we are upto
+        public int intUpto; // Where we are in head buffer
+        
+        public int[] buffer; // Current head buffer
+        public int intOffset = - DocumentsWriter.INT_BLOCK_SIZE; // Current head offset
+        
+        private DocumentsWriter docWriter;
+        internal bool trackAllocations;
+        
+        public IntBlockPool(DocumentsWriter docWriter, bool trackAllocations)
+        {
+            InitBlock();
+            this.docWriter = docWriter;
+            this.trackAllocations = trackAllocations;
+        }
+        
+        public void  Reset()
+        {
+            if (bufferUpto != - 1)
+            {
+                if (bufferUpto > 0)
+                // Recycle all but the first buffer
+                    docWriter.RecycleIntBlocks(buffers, 1, 1 + bufferUpto);
+                
+                // Reuse first buffer
+                bufferUpto = 0;
+                intUpto = 0;
+                intOffset = 0;
+                buffer = buffers[0];
+            }
+        }
+        
+        public void  NextBuffer()
+        {
+            if (1 + bufferUpto == buffers.Length)
+            {
+                int[][] newBuffers = new int[(int) (buffers.Length * 1.5)][];
+                Array.Copy(buffers, 0, newBuffers, 0, buffers.Length);
+                buffers = newBuffers;
+            }
+            buffer = buffers[1 + bufferUpto] = docWriter.GetIntBlock(trackAllocations);
+            bufferUpto++;
+            
+            intUpto = 0;
+            intOffset += DocumentsWriter.INT_BLOCK_SIZE;
+        }
+    }
 }
\ No newline at end of file
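
The initial field values in IntBlockPool are chosen so that the very first call to NextBuffer() moves intOffset from -INT_BLOCK_SIZE to 0; from then on, a slot's address in the pool's global space is always intOffset + intUpto. A hedged sketch of the same arithmetic follows, with BlockPoolSketch and its Write helper as invented names; the real pool recycles blocks through DocumentsWriter instead of allocating fresh arrays.

    using System;

    // BlockPoolSketch is an invented, trimmed-down IntBlockPool: the same
    // growth and offset arithmetic, minus the block recycling.
    class BlockPoolSketch
    {
        public const int INT_BLOCK_SIZE = 8192;   // stand-in block size

        private int[][] buffers = new int[10][];
        private int bufferUpto = -1;
        private int intUpto = INT_BLOCK_SIZE;     // forces NextBuffer() on first write
        private int intOffset = -INT_BLOCK_SIZE;  // so the first NextBuffer() lands at 0
        private int[] buffer;

        private void NextBuffer()
        {
            if (1 + bufferUpto == buffers.Length)
            {
                // Grow the jagged array by 1.5x, as in the pool above.
                int[][] newBuffers = new int[(int)(buffers.Length * 1.5)][];
                Array.Copy(buffers, 0, newBuffers, 0, buffers.Length);
                buffers = newBuffers;
            }
            buffer = buffers[1 + bufferUpto] = new int[INT_BLOCK_SIZE];
            bufferUpto++;
            intUpto = 0;
            intOffset += INT_BLOCK_SIZE;          // head buffer starts one block later
        }

        // Write one int; return its global address, i.e. intOffset + intUpto.
        public int Write(int value)
        {
            if (intUpto == INT_BLOCK_SIZE)
                NextBuffer();
            buffer[intUpto] = value;
            return intOffset + intUpto++;
        }
    }

    // Usage: new BlockPoolSketch().Write(42) returns 0 -- the first global slot.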

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/InvertedDocConsumer.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/InvertedDocConsumer.cs b/src/core/Index/InvertedDocConsumer.cs
index bb9b2f8..2be2a70 100644
--- a/src/core/Index/InvertedDocConsumer.cs
+++ b/src/core/Index/InvertedDocConsumer.cs
@@ -20,34 +20,34 @@ using System.Collections.Generic;
 
 namespace Lucene.Net.Index
 {
-	
-	abstract class InvertedDocConsumer
-	{
-		
-		/// <summary>Add a new thread </summary>
-		internal abstract InvertedDocConsumerPerThread AddThread(DocInverterPerThread docInverterPerThread);
-		
-		/// <summary>Abort (called after hitting AbortException) </summary>
-		public abstract void  Abort();
+    
+    abstract class InvertedDocConsumer
+    {
+        
+        /// <summary>Add a new thread </summary>
+        internal abstract InvertedDocConsumerPerThread AddThread(DocInverterPerThread docInverterPerThread);
+        
+        /// <summary>Abort (called after hitting AbortException) </summary>
+        public abstract void  Abort();
 
-	    /// <summary>Flush a new segment </summary>
-	    internal abstract void Flush(
-	        IDictionary<InvertedDocConsumerPerThread, ICollection<InvertedDocConsumerPerField>> threadsAndFields,
-	        SegmentWriteState state);
-		
-		/// <summary>Close doc stores </summary>
-		internal abstract void  CloseDocStore(SegmentWriteState state);
-		
-		/// <summary>Attempt to free RAM, returning true if any RAM was
-		/// freed 
-		/// </summary>
-		public abstract bool FreeRAM();
-		
-		internal FieldInfos fieldInfos;
-		
-		internal virtual void  SetFieldInfos(FieldInfos fieldInfos)
-		{
-			this.fieldInfos = fieldInfos;
-		}
-	}
+        /// <summary>Flush a new segment </summary>
+        internal abstract void Flush(
+            IDictionary<InvertedDocConsumerPerThread, ICollection<InvertedDocConsumerPerField>> threadsAndFields,
+            SegmentWriteState state);
+        
+        /// <summary>Close doc stores </summary>
+        internal abstract void  CloseDocStore(SegmentWriteState state);
+        
+        /// <summary>Attempt to free RAM, returning true if any RAM was
+        /// freed 
+        /// </summary>
+        public abstract bool FreeRAM();
+        
+        internal FieldInfos fieldInfos;
+        
+        internal virtual void  SetFieldInfos(FieldInfos fieldInfos)
+        {
+            this.fieldInfos = fieldInfos;
+        }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/InvertedDocConsumerPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/InvertedDocConsumerPerField.cs b/src/core/Index/InvertedDocConsumerPerField.cs
index 471d9b7..200afed 100644
--- a/src/core/Index/InvertedDocConsumerPerField.cs
+++ b/src/core/Index/InvertedDocConsumerPerField.cs
@@ -20,27 +20,27 @@ using Lucene.Net.Documents;
 
 namespace Lucene.Net.Index
 {
-	
-	abstract class InvertedDocConsumerPerField
-	{
-		
-		// Called once per field, and is given all Fieldable
-		// occurrences for this field in the document.  Return
-		// true if you wish to see inverted tokens for these
-		// fields:
-		internal abstract bool Start(IFieldable[] fields, int count);
-		
-		// Called before a field instance is being processed
-		internal abstract void  Start(IFieldable field);
-		
-		// Called once per inverted token
-		internal abstract void  Add();
-		
-		// Called once per field per document, after all Fieldable
-		// occurrences are inverted
-		internal abstract void  Finish();
-		
-		// Called on hitting an aborting exception
-		public abstract void  Abort();
-	}
+    
+    abstract class InvertedDocConsumerPerField
+    {
+        
+        // Called once per field, and is given all Fieldable
+        // occurrences for this field in the document.  Return
+        // true if you wish to see inverted tokens for these
+        // fields:
+        internal abstract bool Start(IFieldable[] fields, int count);
+        
+        // Called before a field instance is being processed
+        internal abstract void  Start(IFieldable field);
+        
+        // Called once per inverted token
+        internal abstract void  Add();
+        
+        // Called once per field per document, after all Fieldable
+        // occurrences are inverted
+        internal abstract void  Finish();
+        
+        // Called on hitting an aborting exception
+        public abstract void  Abort();
+    }
 }
\ No newline at end of file
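
The four hooks above fire in a fixed order for each document: Start(fields, count) once for the whole field, Start(field) before each instance, Add() once per inverted token, and Finish() after all occurrences are inverted. The sketch below drives that contract by hand; TokenCountingConsumer and its whitespace "tokenizer" are hypothetical stand-ins, not a real InvertedDocConsumerPerField chain.

    using System;

    class InversionContractDemo
    {
        static void Main()
        {
            var consumer = new TokenCountingConsumer();
            string[] values = { "hello world", "hello again" };

            if (consumer.Start(values, values.Length))   // once per field per doc
            {
                foreach (string v in values)
                {
                    consumer.Start(v);                   // before each instance
                    foreach (string token in v.Split(' '))
                        consumer.Add();                  // once per inverted token
                }
            }
            consumer.Finish();                           // prints "tokens: 4"
        }
    }

    // Hypothetical consumer; it only counts how often Add() fires.
    class TokenCountingConsumer
    {
        private int tokenCount;

        public bool Start(string[] fieldValues, int count) { return count > 0; }
        public void Start(string fieldValue) { }
        public void Add() { tokenCount++; }
        public void Finish() { Console.WriteLine("tokens: " + tokenCount); }
    }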

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/InvertedDocConsumerPerThread.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/InvertedDocConsumerPerThread.cs b/src/core/Index/InvertedDocConsumerPerThread.cs
index 49ed8df..5e0b6d1 100644
--- a/src/core/Index/InvertedDocConsumerPerThread.cs
+++ b/src/core/Index/InvertedDocConsumerPerThread.cs
@@ -19,12 +19,12 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	abstract class InvertedDocConsumerPerThread
-	{
-		public abstract void  StartDocument();
-		internal abstract InvertedDocConsumerPerField AddField(DocInverterPerField docInverterPerField, FieldInfo fieldInfo);
-		public abstract DocumentsWriter.DocWriter FinishDocument();
-		public abstract void  Abort();
-	}
+    
+    abstract class InvertedDocConsumerPerThread
+    {
+        public abstract void  StartDocument();
+        internal abstract InvertedDocConsumerPerField AddField(DocInverterPerField docInverterPerField, FieldInfo fieldInfo);
+        public abstract DocumentsWriter.DocWriter FinishDocument();
+        public abstract void  Abort();
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/InvertedDocEndConsumer.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/InvertedDocEndConsumer.cs b/src/core/Index/InvertedDocEndConsumer.cs
index fb0a69e..f9e9548 100644
--- a/src/core/Index/InvertedDocEndConsumer.cs
+++ b/src/core/Index/InvertedDocEndConsumer.cs
@@ -20,13 +20,13 @@ using System.Collections.Generic;
 
 namespace Lucene.Net.Index
 {
-	
-	abstract class InvertedDocEndConsumer
-	{
-		public abstract InvertedDocEndConsumerPerThread AddThread(DocInverterPerThread docInverterPerThread);
+    
+    abstract class InvertedDocEndConsumer
+    {
+        public abstract InvertedDocEndConsumerPerThread AddThread(DocInverterPerThread docInverterPerThread);
         public abstract void Flush(IDictionary<InvertedDocEndConsumerPerThread, ICollection<InvertedDocEndConsumerPerField>> threadsAndFields, SegmentWriteState state);
-		internal abstract void  CloseDocStore(SegmentWriteState state);
-		public abstract void  Abort();
-		internal abstract void  SetFieldInfos(FieldInfos fieldInfos);
-	}
+        internal abstract void  CloseDocStore(SegmentWriteState state);
+        public abstract void  Abort();
+        internal abstract void  SetFieldInfos(FieldInfos fieldInfos);
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/InvertedDocEndConsumerPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/InvertedDocEndConsumerPerField.cs b/src/core/Index/InvertedDocEndConsumerPerField.cs
index dfad1c9..2e82ad4 100644
--- a/src/core/Index/InvertedDocEndConsumerPerField.cs
+++ b/src/core/Index/InvertedDocEndConsumerPerField.cs
@@ -19,10 +19,10 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	abstract class InvertedDocEndConsumerPerField
-	{
-		internal abstract void  Finish();
-		internal abstract void  Abort();
-	}
+    
+    abstract class InvertedDocEndConsumerPerField
+    {
+        internal abstract void  Finish();
+        internal abstract void  Abort();
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/InvertedDocEndConsumerPerThread.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/InvertedDocEndConsumerPerThread.cs b/src/core/Index/InvertedDocEndConsumerPerThread.cs
index 2f4fb5c..4721566 100644
--- a/src/core/Index/InvertedDocEndConsumerPerThread.cs
+++ b/src/core/Index/InvertedDocEndConsumerPerThread.cs
@@ -19,12 +19,12 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	abstract class InvertedDocEndConsumerPerThread
-	{
-		internal abstract void  StartDocument();
-		internal abstract InvertedDocEndConsumerPerField AddField(DocInverterPerField docInverterPerField, FieldInfo fieldInfo);
-		internal abstract void  FinishDocument();
-		internal abstract void  Abort();
-	}
+    
+    abstract class InvertedDocEndConsumerPerThread
+    {
+        internal abstract void  StartDocument();
+        internal abstract InvertedDocEndConsumerPerField AddField(DocInverterPerField docInverterPerField, FieldInfo fieldInfo);
+        internal abstract void  FinishDocument();
+        internal abstract void  Abort();
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/KeepOnlyLastCommitDeletionPolicy.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/KeepOnlyLastCommitDeletionPolicy.cs b/src/core/Index/KeepOnlyLastCommitDeletionPolicy.cs
index 3775de1..7cb928b 100644
--- a/src/core/Index/KeepOnlyLastCommitDeletionPolicy.cs
+++ b/src/core/Index/KeepOnlyLastCommitDeletionPolicy.cs
@@ -19,33 +19,33 @@ using System.Collections.Generic;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> This <see cref="IndexDeletionPolicy" /> implementation
-	/// keeps only the most recent commit and immediately removes
-	/// all prior commits after a new commit is done.  This is
-	/// the default deletion policy.
-	/// </summary>
-	
-	public sealed class KeepOnlyLastCommitDeletionPolicy : IndexDeletionPolicy
-	{
-		
-		/// <summary> Deletes all commits except the most recent one.</summary>
-		public void  OnInit<T>(IList<T> commits) where T : IndexCommit
-		{
-			// Note that commits.size() should normally be 1:
-			OnCommit(commits);
-		}
-		
-		/// <summary> Deletes all commits except the most recent one.</summary>
-		public void  OnCommit<T>(IList<T> commits) where T : IndexCommit
-		{
-			// Note that commits.size() should normally be 2 (if not
-			// called by onInit above):
-			int size = commits.Count;
-			for (int i = 0; i < size - 1; i++)
-			{
-				commits[i].Delete();
-			}
-		}
-	}
+    
+    /// <summary> This <see cref="IndexDeletionPolicy" /> implementation
+    /// keeps only the most recent commit and immediately removes
+    /// all prior commits after a new commit is done.  This is
+    /// the default deletion policy.
+    /// </summary>
+    
+    public sealed class KeepOnlyLastCommitDeletionPolicy : IndexDeletionPolicy
+    {
+        
+        /// <summary> Deletes all commits except the most recent one.</summary>
+        public void  OnInit<T>(IList<T> commits) where T : IndexCommit
+        {
+            // Note that commits.size() should normally be 1:
+            OnCommit(commits);
+        }
+        
+        /// <summary> Deletes all commits except the most recent one.</summary>
+        public void  OnCommit<T>(IList<T> commits) where T : IndexCommit
+        {
+            // Note that commits.size() should normally be 2 (if not
+            // called by onInit above):
+            int size = commits.Count;
+            for (int i = 0; i < size - 1; i++)
+            {
+                commits[i].Delete();
+            }
+        }
+    }
 }
\ No newline at end of file
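
Since this policy is the default, passing it explicitly is a no-op in practice; the sketch below only shows where a custom IndexDeletionPolicy would plug in. The constructor overload and version constant follow the 2.9-era API and should be treated as assumptions, not a verified recipe.

    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Index;
    using Lucene.Net.Store;
    using Version = Lucene.Net.Util.Version;

    class DeletionPolicyDemo
    {
        static void Main()
        {
            // Hypothetical setup; constructor shape per the 2.9-era API.
            var dir = FSDirectory.Open(new System.IO.DirectoryInfo("index"));
            var analyzer = new StandardAnalyzer(Version.LUCENE_29);

            // Equivalent to the default; shown only to mark the extension
            // point where a custom IndexDeletionPolicy would go:
            var writer = new IndexWriter(dir, analyzer,
                                         new KeepOnlyLastCommitDeletionPolicy(),
                                         IndexWriter.MaxFieldLength.UNLIMITED);
            writer.Dispose();
        }
    }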

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/LogByteSizeMergePolicy.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/LogByteSizeMergePolicy.cs b/src/core/Index/LogByteSizeMergePolicy.cs
index 5d5c952..5f1b13d 100644
--- a/src/core/Index/LogByteSizeMergePolicy.cs
+++ b/src/core/Index/LogByteSizeMergePolicy.cs
@@ -19,34 +19,34 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>This is a <see cref="LogMergePolicy" /> that measures size of a
-	/// segment as the total byte size of the segment's files. 
-	/// </summary>
-	public class LogByteSizeMergePolicy : LogMergePolicy
-	{
-		
-		/// <seealso cref="MinMergeMB">
-		/// </seealso>
-		public const double DEFAULT_MIN_MERGE_MB = 1.6;
-		
-		/// <summary>Default maximum segment size.  A segment of this size or larger will never be merged.</summary>
-		/// <seealso cref="MaxMergeMB">
-		/// </seealso>
-		public static readonly long DEFAULT_MAX_MERGE_MB = long.MaxValue;
-		
-		public LogByteSizeMergePolicy(IndexWriter writer)
+    
+    /// <summary>This is a <see cref="LogMergePolicy" /> that measures size of a
+    /// segment as the total byte size of the segment's files. 
+    /// </summary>
+    public class LogByteSizeMergePolicy : LogMergePolicy
+    {
+        
+        /// <seealso cref="MinMergeMB">
+        /// </seealso>
+        public const double DEFAULT_MIN_MERGE_MB = 1.6;
+        
+        /// <summary>Default maximum segment size.  A segment of this size or larger will never be merged.</summary>
+        /// <seealso cref="MaxMergeMB">
+        /// </seealso>
+        public static readonly long DEFAULT_MAX_MERGE_MB = long.MaxValue;
+        
+        public LogByteSizeMergePolicy(IndexWriter writer)
             : base(writer)
-		{
-			minMergeSize = (long) (DEFAULT_MIN_MERGE_MB * 1024 * 1024);
+        {
+            minMergeSize = (long) (DEFAULT_MIN_MERGE_MB * 1024 * 1024);
             //mgarski - the line below causes an overflow in .NET, resulting in a negative number...
-			//maxMergeSize = (long) (DEFAULT_MAX_MERGE_MB * 1024 * 1024);
+            //maxMergeSize = (long) (DEFAULT_MAX_MERGE_MB * 1024 * 1024);
             maxMergeSize = DEFAULT_MAX_MERGE_MB;
-		}
-		protected internal override long Size(SegmentInfo info)
-		{
-			return SizeBytes(info);
-		}
+        }
+        protected internal override long Size(SegmentInfo info)
+        {
+            return SizeBytes(info);
+        }
 
         protected override void Dispose(bool disposing)
         {
@@ -54,46 +54,46 @@ namespace Lucene.Net.Index
         }
 
 
-	    /// <summary><p/>Gets or sets the largest segment (measured by total
-	    /// byte size of the segment's files, in MB) that may be
-	    /// merged with other segments.  Small values (e.g., less
-	    /// than 50 MB) are best for interactive indexing, as this
-	    /// limits the length of pauses while indexing to a few
-	    /// seconds.  Larger values are best for batched indexing
-	    /// and speedier searches.<p/>
-	    /// 
-	    /// <p/>Note that <see cref="IndexWriter.MaxMergeDocs" /> is also
-	    /// used to check whether a segment is too large for
-	    /// merging (it's either or).<p/>
-	    /// </summary>
-	    public virtual double MaxMergeMB
-	    {
-	        get { return maxMergeSize/1024d/1024d; }
-	        set
-	        {
-	            //mgarski: java gracefully overflows to Int64.MaxValue, .NET to MinValue...
-	            maxMergeSize = (long) (value*1024*1024);
-	            if (maxMergeSize < 0)
-	            {
-	                maxMergeSize = DEFAULT_MAX_MERGE_MB;
-	            }
-	        }
-	    }
+        /// <summary><p/>Gets or sets the largest segment (measured by total
+        /// byte size of the segment's files, in MB) that may be
+        /// merged with other segments.  Small values (e.g., less
+        /// than 50 MB) are best for interactive indexing, as this
+        /// limits the length of pauses while indexing to a few
+        /// seconds.  Larger values are best for batched indexing
+        /// and speedier searches.<p/>
+        /// 
+        /// <p/>Note that <see cref="IndexWriter.MaxMergeDocs" /> is also
+        /// used to check whether a segment is too large for
+        /// merging (it's either or).<p/>
+        /// </summary>
+        public virtual double MaxMergeMB
+        {
+            get { return maxMergeSize/1024d/1024d; }
+            set
+            {
+                //mgarski: java gracefully overflows to Int64.MaxValue, .NET to MinValue...
+                maxMergeSize = (long) (value*1024*1024);
+                if (maxMergeSize < 0)
+                {
+                    maxMergeSize = DEFAULT_MAX_MERGE_MB;
+                }
+            }
+        }
 
-	    /// <summary>Gets or sets the minimum size for the lowest level segments.
-	    /// Any segments below this size are considered to be on
-	    /// the same level (even if they vary drastically in size)
-	    /// and will be merged whenever there are mergeFactor of
-	    /// them.  This effectively truncates the "long tail" of
-	    /// small segments that would otherwise be created into a
-	    /// single level.  If you set this too large, it could
-	    /// greatly increase the merging cost during indexing (if
-	    /// you flush many small segments). 
-	    /// </summary>
-	    public virtual double MinMergeMB
-	    {
-	        get { return ((double) minMergeSize)/1024/1024; }
-	        set { minMergeSize = (long) (value*1024*1024); }
-	    }
-	}
+        /// <summary>Gets or sets the minimum size for the lowest level segments.
+        /// Any segments below this size are considered to be on
+        /// the same level (even if they vary drastically in size)
+        /// and will be merged whenever there are mergeFactor of
+        /// them.  This effectively truncates the "long tail" of
+        /// small segments that would otherwise be created into a
+        /// single level.  If you set this too large, it could
+        /// greatly increase the merging cost during indexing (if
+        /// you flush many small segments). 
+        /// </summary>
+        public virtual double MinMergeMB
+        {
+            get { return ((double) minMergeSize)/1024/1024; }
+            set { minMergeSize = (long) (value*1024*1024); }
+        }
+    }
 }
\ No newline at end of file
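
As a rough configuration sketch, the two properties above translate into something like the following, assuming an existing open IndexWriter named writer and the 2.9-era SetMergePolicy method; the 200 MB / 10 MB values are arbitrary illustrations, not recommendations.

    // "writer" is assumed to be an open IndexWriter.
    var policy = new Lucene.Net.Index.LogByteSizeMergePolicy(writer)
    {
        MaxMergeMB = 200.0,   // segments at or above this size are left alone
        MinMergeMB = 10.0     // anything smaller shares the lowest level
    };
    writer.SetMergePolicy(policy);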

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/LogDocMergePolicy.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/LogDocMergePolicy.cs b/src/core/Index/LogDocMergePolicy.cs
index 55ee407..610b890 100644
--- a/src/core/Index/LogDocMergePolicy.cs
+++ b/src/core/Index/LogDocMergePolicy.cs
@@ -19,51 +19,51 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>This is a <see cref="LogMergePolicy" /> that measures size of a
-	/// segment as the number of documents (not taking deletions
-	/// into account). 
-	/// </summary>
-	
-	public class LogDocMergePolicy : LogMergePolicy
-	{
-		
-		/// <seealso cref="MinMergeDocs">
-		/// </seealso>
-		public const int DEFAULT_MIN_MERGE_DOCS = 1000;
-		
-		public LogDocMergePolicy(IndexWriter writer):base(writer)
-		{
-			minMergeSize = DEFAULT_MIN_MERGE_DOCS;
-			
-			// maxMergeSize is never used by LogDocMergePolicy; set
-			// it to Long.MAX_VALUE to disable it
-			maxMergeSize = System.Int64.MaxValue;
-		}
-		protected internal override long Size(SegmentInfo info)
-		{
-			return SizeDocs(info);
-		}
+    
+    /// <summary>This is a <see cref="LogMergePolicy" /> that measures size of a
+    /// segment as the number of documents (not taking deletions
+    /// into account). 
+    /// </summary>
+    
+    public class LogDocMergePolicy : LogMergePolicy
+    {
+        
+        /// <seealso cref="MinMergeDocs">
+        /// </seealso>
+        public const int DEFAULT_MIN_MERGE_DOCS = 1000;
+        
+        public LogDocMergePolicy(IndexWriter writer):base(writer)
+        {
+            minMergeSize = DEFAULT_MIN_MERGE_DOCS;
+            
+            // maxMergeSize is never used by LogDocMergePolicy; set
+            // it to Long.MAX_VALUE to disable it
+            maxMergeSize = System.Int64.MaxValue;
+        }
+        protected internal override long Size(SegmentInfo info)
+        {
+            return SizeDocs(info);
+        }
 
-	    protected override void Dispose(bool disposing)
+        protected override void Dispose(bool disposing)
         {
             // Do nothing.
         }
 
-	    /// <summary>Gets or sets the minimum size for the lowest level segments.
-	    /// Any segments below this size are considered to be on
-	    /// the same level (even if they vary drastically in size)
-	    /// and will be merged whenever there are mergeFactor of
-	    /// them.  This effectively truncates the "long tail" of
-	    /// small segments that would otherwise be created into a
-	    /// single level.  If you set this too large, it could
-	    /// greatly increase the merging cost during indexing (if
-	    /// you flush many small segments). 
-	    /// </summary>
-	    public virtual int MinMergeDocs
-	    {
-	        get { return (int) minMergeSize; }
-	        set { minMergeSize = value; }
-	    }
-	}
+        /// <summary>Gets or sets the minimum size for the lowest level segments.
+        /// Any segments below this size are considered to be on
+        /// the same level (even if they vary drastically in size)
+        /// and will be merged whenever there are mergeFactor of
+        /// them.  This effectively truncates the "long tail" of
+        /// small segments that would otherwise be created into a
+        /// single level.  If you set this too large, it could
+        /// greatly increase the merging cost during indexing (if
+        /// you flush many small segments). 
+        /// </summary>
+        public virtual int MinMergeDocs
+        {
+            get { return (int) minMergeSize; }
+            set { minMergeSize = value; }
+        }
+    }
 }
\ No newline at end of file
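
The doc-count analogue works the same way: with MinMergeDocs at its default of 1000 and a merge factor of 10, ten flushed segments of under 1000 docs each occupy one level and merge into a single segment of up to roughly 10,000 docs. A hedged sketch, again assuming an existing writer and the 2.9-era SetMergePolicy:

    // "writer" is assumed to be an open IndexWriter.
    var docPolicy = new Lucene.Net.Index.LogDocMergePolicy(writer)
    {
        MinMergeDocs = 1000   // segments below this count share the lowest level
    };
    writer.SetMergePolicy(docPolicy);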

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/LogMergePolicy.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/LogMergePolicy.cs b/src/core/Index/LogMergePolicy.cs
index c087835..5c65c92 100644
--- a/src/core/Index/LogMergePolicy.cs
+++ b/src/core/Index/LogMergePolicy.cs
@@ -20,508 +20,508 @@ using System.Collections.Generic;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary><p/>This class implements a <see cref="MergePolicy" /> that tries
-	/// to merge segments into levels of exponentially
-	/// increasing size, where each level has fewer segments than
-	/// the value of the merge factor. Whenever extra segments
-	/// (beyond the merge factor upper bound) are encountered,
-	/// all segments within the level are merged. You can get or
-	/// set the merge factor using the <see cref="MergeFactor" />
-	/// property.<p/>
-	/// 
-	/// <p/>This class is abstract and requires a subclass to
-	/// define the <see cref="Size" /> method which specifies how a
-	/// segment's size is determined.  <see cref="LogDocMergePolicy" />
-	/// is one subclass that measures size by document count in
-	/// the segment.  <see cref="LogByteSizeMergePolicy" /> is another
-	/// subclass that measures size as the total byte size of the
-	/// file(s) for the segment.<p/>
-	/// </summary>
-	
-	public abstract class LogMergePolicy : MergePolicy
-	{
-		
-		/// <summary>Defines the allowed range of log(size) for each
-		/// level.  A level is computed by taking the max segment
-		/// log size, minus LEVEL_LOG_SPAN, and finding all
-		/// segments falling within that range. 
-		/// </summary>
-		public const double LEVEL_LOG_SPAN = 0.75;
-		
-		/// <summary>Default merge factor, which is how many segments are
-		/// merged at a time 
-		/// </summary>
-		public const int DEFAULT_MERGE_FACTOR = 10;
-		
-		/// <summary>Default maximum segment size.  A segment of this size or larger will never be merged.</summary>
-		/// <seealso cref="MaxMergeDocs">
-		/// </seealso>
-		public static readonly int DEFAULT_MAX_MERGE_DOCS = System.Int32.MaxValue;
+    
+    /// <summary><p/>This class implements a <see cref="MergePolicy" /> that tries
+    /// to merge segments into levels of exponentially
+    /// increasing size, where each level has fewer segments than
+    /// the value of the merge factor. Whenever extra segments
+    /// (beyond the merge factor upper bound) are encountered,
+    /// all segments within the level are merged. You can get or
+    /// set the merge factor using the <see cref="MergeFactor" />
+    /// property.<p/>
+    /// 
+    /// <p/>This class is abstract and requires a subclass to
+    /// define the <see cref="Size" /> method which specifies how a
+    /// segment's size is determined.  <see cref="LogDocMergePolicy" />
+    /// is one subclass that measures size by document count in
+    /// the segment.  <see cref="LogByteSizeMergePolicy" /> is another
+    /// subclass that measures size as the total byte size of the
+    /// file(s) for the segment.<p/>
+    /// </summary>
+    
+    public abstract class LogMergePolicy : MergePolicy
+    {
+        
+        /// <summary>Defines the allowed range of log(size) for each
+        /// level.  A level is computed by taking the max segment
+        /// log size, minus LEVEL_LOG_SPAN, and finding all
+        /// segments falling within that range. 
+        /// </summary>
+        public const double LEVEL_LOG_SPAN = 0.75;
+        
+        /// <summary>Default merge factor, which is how many segments are
+        /// merged at a time 
+        /// </summary>
+        public const int DEFAULT_MERGE_FACTOR = 10;
+        
+        /// <summary>Default maximum segment size.  A segment of this size or larger will never be merged.</summary>
+        /// <seealso cref="MaxMergeDocs">
+        /// </seealso>
+        public static readonly int DEFAULT_MAX_MERGE_DOCS = System.Int32.MaxValue;
 
         /// <summary> Default noCFSRatio.  If a merge's size is >= 10% of
         ///  the index, then we disable compound file for it.
         ///  See <see cref="NoCFSRatio"/>
         ///  </summary>
         public static double DEFAULT_NO_CFS_RATIO = 0.1;
-		
-		private int mergeFactor = DEFAULT_MERGE_FACTOR;
-		
-		internal long minMergeSize;
-		internal long maxMergeSize;
-		internal int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;
+        
+        private int mergeFactor = DEFAULT_MERGE_FACTOR;
+        
+        internal long minMergeSize;
+        internal long maxMergeSize;
+        internal int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;
 
         protected double internalNoCFSRatio = DEFAULT_NO_CFS_RATIO;
-		
-		/* TODO 3.0: change this default to true */
-		protected internal bool internalCalibrateSizeByDeletes = true;
-		
-		private bool useCompoundFile = true;
-		private bool useCompoundDocStore = true;
+        
+        /* TODO 3.0: change this default to true */
+        protected internal bool internalCalibrateSizeByDeletes = true;
+        
+        private bool useCompoundFile = true;
+        private bool useCompoundDocStore = true;
 
-	    protected LogMergePolicy(IndexWriter writer):base(writer)
-		{
-		}
-		
-		protected internal virtual bool Verbose()
-		{
-			return writer != null && writer.Verbose;
-		}
+        protected LogMergePolicy(IndexWriter writer):base(writer)
+        {
+        }
+        
+        protected internal virtual bool Verbose()
+        {
+            return writer != null && writer.Verbose;
+        }
 
-	    public double NoCFSRatio
-	    {
-	        get { return internalNoCFSRatio; }
-	        set
-	        {
-	            if (value < 0.0 || value > 1.0)
-	            {
-	                throw new ArgumentException("noCFSRatio must be 0.0 to 1.0 inclusive; got " + value);
-	            }
-	            this.internalNoCFSRatio = value;
-	        }
-	    }
+        public double NoCFSRatio
+        {
+            get { return internalNoCFSRatio; }
+            set
+            {
+                if (value < 0.0 || value > 1.0)
+                {
+                    throw new ArgumentException("noCFSRatio must be 0.0 to 1.0 inclusive; got " + value);
+                }
+                this.internalNoCFSRatio = value;
+            }
+        }
 
-	    /* If a merged segment will be more than this percentage
+        /* If a merged segment will be more than this percentage
          *  of the total size of the index, leave the segment as
          *  non-compound file even if compound file is enabled.
          *  Set to 1.0 to always use CFS regardless of merge
          *  size. */
-	    private void  Message(System.String message)
-		{
-			if (Verbose())
-				writer.Message("LMP: " + message);
-		}
+        private void  Message(System.String message)
+        {
+            if (Verbose())
+                writer.Message("LMP: " + message);
+        }
 
 
-	    /// <summary>Gets or sets how often segment indices are merged by
-	    /// addDocument().  With smaller values, less RAM is used
-	    /// while indexing, and searches on unoptimized indices are
-	    /// faster, but indexing speed is slower.  With larger
-	    /// values, more RAM is used during indexing, and while
-	    /// searches on unoptimized indices are slower, indexing is
-	    /// faster.  Thus larger values (&gt; 10) are best for batch
-	    /// index creation, and smaller values (&lt; 10) for indices
-	    /// that are interactively maintained. 
-	    /// </summary>
-	    public virtual int MergeFactor
-	    {
-	        get { return mergeFactor; }
-	        set
-	        {
-	            if (value < 2)
-	                throw new System.ArgumentException("mergeFactor cannot be less than 2");
-	            this.mergeFactor = value;
-	        }
-	    }
+        /// <summary>Gets or sets how often segment indices are merged by
+        /// addDocument().  With smaller values, less RAM is used
+        /// while indexing, and searches on unoptimized indices are
+        /// faster, but indexing speed is slower.  With larger
+        /// values, more RAM is used during indexing, and while
+        /// searches on unoptimized indices are slower, indexing is
+        /// faster.  Thus larger values (&gt; 10) are best for batch
+        /// index creation, and smaller values (&lt; 10) for indices
+        /// that are interactively maintained. 
+        /// </summary>
+        public virtual int MergeFactor
+        {
+            get { return mergeFactor; }
+            set
+            {
+                if (value < 2)
+                    throw new System.ArgumentException("mergeFactor cannot be less than 2");
+                this.mergeFactor = value;
+            }
+        }
 
-		public override bool UseCompoundFile(SegmentInfos infos, SegmentInfo info)
-		{
-			return useCompoundFile;
-		}
-		
-		/// <summary>Gets or sets whether compound file format should be used for
-		/// newly flushed and newly merged segments. 
-		/// </summary>
-		public virtual void  SetUseCompoundFile(bool useCompoundFile)
-		{
-			this.useCompoundFile = useCompoundFile;
-		}
+        public override bool UseCompoundFile(SegmentInfos infos, SegmentInfo info)
+        {
+            return useCompoundFile;
+        }
+        
+        /// <summary>Gets or sets whether compound file format should be used for
+        /// newly flushed and newly merged segments. 
+        /// </summary>
+        public virtual void  SetUseCompoundFile(bool useCompoundFile)
+        {
+            this.useCompoundFile = useCompoundFile;
+        }
 
         [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1024:UsePropertiesWhereAppropriate")]
         public virtual bool GetUseCompoundFile()
-		{
-			return useCompoundFile;
-		}
-		
-		// Javadoc inherited
-		public override bool UseCompoundDocStore(SegmentInfos infos)
-		{
-			return useCompoundDocStore;
-		}
-		
-		/// <summary>Sets whether compound file format should be used for
-		/// newly flushed and newly merged doc store
-		/// segment files (term vectors and stored fields). 
-		/// </summary>
-		public virtual void  SetUseCompoundDocStore(bool useCompoundDocStore)
-		{
-			this.useCompoundDocStore = useCompoundDocStore;
-		}
-		
-		/// <summary>Returns true if newly flushed and newly merged doc
-		/// store segment files (term vectors and stored fields) are
-		/// written in compound file format.</summary>
+        {
+            return useCompoundFile;
+        }
+        
+        // Javadoc inherited
+        public override bool UseCompoundDocStore(SegmentInfos infos)
+        {
+            return useCompoundDocStore;
+        }
+        
+        /// <summary>Sets whether compound file format should be used for
+        /// newly flushed and newly merged doc store
+        /// segment files (term vectors and stored fields). 
+        /// </summary>
+        public virtual void  SetUseCompoundDocStore(bool useCompoundDocStore)
+        {
+            this.useCompoundDocStore = useCompoundDocStore;
+        }
+        
+        /// <summary>Returns true if newly flushed and newly merged doc
+        /// store segment files (term vectors and stored fields) are
+        /// written in compound file format.</summary>
         /// <seealso cref="SetUseCompoundDocStore ">
-		/// </seealso>
+        /// </seealso>
         [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1024:UsePropertiesWhereAppropriate")]
         public virtual bool GetUseCompoundDocStore()
-		{
-			return useCompoundDocStore;
-		}
+        {
+            return useCompoundDocStore;
+        }
 
-	    /// <summary>Gets or sets whether the segment size should be calibrated by
-	    /// the number of deletes when choosing segments for merge. 
-	    /// </summary>
-	    public virtual bool CalibrateSizeByDeletes
-	    {
-	        set { this.internalCalibrateSizeByDeletes = value; }
-	        get { return internalCalibrateSizeByDeletes; }
-	    }
+        /// <summary>Gets or sets whether the segment size should be calibrated by
+        /// the number of deletes when choosing segments for merge. 
+        /// </summary>
+        public virtual bool CalibrateSizeByDeletes
+        {
+            set { this.internalCalibrateSizeByDeletes = value; }
+            get { return internalCalibrateSizeByDeletes; }
+        }
 
-	    abstract protected internal long Size(SegmentInfo info);
-		
-		protected internal virtual long SizeDocs(SegmentInfo info)
-		{
-			if (internalCalibrateSizeByDeletes)
-			{
-				int delCount = writer.NumDeletedDocs(info);
-				return (info.docCount - (long) delCount);
-			}
-			else
-			{
-				return info.docCount;
-			}
-		}
-		
-		protected internal virtual long SizeBytes(SegmentInfo info)
-		{
-			long byteSize = info.SizeInBytes();
-			if (internalCalibrateSizeByDeletes)
-			{
-				int delCount = writer.NumDeletedDocs(info);
-				float delRatio = (info.docCount <= 0?0.0f:((float) delCount / (float) info.docCount));
-				return (info.docCount <= 0?byteSize:(long) (byteSize * (1.0f - delRatio)));
-			}
-			else
-			{
-				return byteSize;
-			}
-		}
-		
-		private bool IsOptimized(SegmentInfos infos, int maxNumSegments, ISet<SegmentInfo> segmentsToOptimize)
-		{
-			int numSegments = infos.Count;
-			int numToOptimize = 0;
-			SegmentInfo optimizeInfo = null;
-			for (int i = 0; i < numSegments && numToOptimize <= maxNumSegments; i++)
-			{
-				SegmentInfo info = infos.Info(i);
-				if (segmentsToOptimize.Contains(info))
-				{
-					numToOptimize++;
-					optimizeInfo = info;
-				}
-			}
-			
-			return numToOptimize <= maxNumSegments && (numToOptimize != 1 || IsOptimized(optimizeInfo));
-		}
-		
-		/// <summary>Returns true if this single info is optimized (has no
-		/// pending norms or deletes, is in the same dir as the
-		/// writer, and matches the current compound file setting 
-		/// </summary>
-		private bool IsOptimized(SegmentInfo info)
-		{
-			bool hasDeletions = writer.NumDeletedDocs(info) > 0;
-			return !hasDeletions && !info.HasSeparateNorms() && info.dir == writer.Directory &&
+        abstract protected internal long Size(SegmentInfo info);
+        
+        protected internal virtual long SizeDocs(SegmentInfo info)
+        {
+            if (internalCalibrateSizeByDeletes)
+            {
+                int delCount = writer.NumDeletedDocs(info);
+                return (info.docCount - (long) delCount);
+            }
+            else
+            {
+                return info.docCount;
+            }
+        }
+        
+        protected internal virtual long SizeBytes(SegmentInfo info)
+        {
+            long byteSize = info.SizeInBytes();
+            if (internalCalibrateSizeByDeletes)
+            {
+                int delCount = writer.NumDeletedDocs(info);
+                float delRatio = (info.docCount <= 0?0.0f:((float) delCount / (float) info.docCount));
+                return (info.docCount <= 0?byteSize:(long) (byteSize * (1.0f - delRatio)));
+            }
+            else
+            {
+                return byteSize;
+            }
+        }
+        
+        private bool IsOptimized(SegmentInfos infos, int maxNumSegments, ISet<SegmentInfo> segmentsToOptimize)
+        {
+            int numSegments = infos.Count;
+            int numToOptimize = 0;
+            SegmentInfo optimizeInfo = null;
+            for (int i = 0; i < numSegments && numToOptimize <= maxNumSegments; i++)
+            {
+                SegmentInfo info = infos.Info(i);
+                if (segmentsToOptimize.Contains(info))
+                {
+                    numToOptimize++;
+                    optimizeInfo = info;
+                }
+            }
+            
+            return numToOptimize <= maxNumSegments && (numToOptimize != 1 || IsOptimized(optimizeInfo));
+        }
+        
+        /// <summary>Returns true if this single info is optimized (has no
+        /// pending norms or deletes, is in the same dir as the
+        /// writer, and matches the current compound file setting 
+        /// </summary>
+        private bool IsOptimized(SegmentInfo info)
+        {
+            bool hasDeletions = writer.NumDeletedDocs(info) > 0;
+            return !hasDeletions && !info.HasSeparateNorms() && info.dir == writer.Directory &&
                 (info.GetUseCompoundFile() == useCompoundFile || internalNoCFSRatio < 1.0);
-		}
-		
-		/// <summary>Returns the merges necessary to optimize the index.
-		/// This merge policy defines "optimized" to mean only one
-		/// segment in the index, where that segment has no
-		/// deletions pending nor separate norms, and it is in
-		/// compound file format if the current useCompoundFile
-		/// setting is true.  This method returns multiple merges
-		/// (mergeFactor at a time) so the <see cref="MergeScheduler" />
-		/// in use may make use of concurrency. 
-		/// </summary>
-		public override MergeSpecification FindMergesForOptimize(SegmentInfos infos, int maxNumSegments, ISet<SegmentInfo> segmentsToOptimize)
-		{
-			MergeSpecification spec;
-			
-			System.Diagnostics.Debug.Assert(maxNumSegments > 0);
-			
-			if (!IsOptimized(infos, maxNumSegments, segmentsToOptimize))
-			{
-				
-				// Find the newest (rightmost) segment that needs to
-				// be optimized (other segments may have been flushed
-				// since optimize started):
-				int last = infos.Count;
-				while (last > 0)
-				{
-					SegmentInfo info = infos.Info(--last);
-					if (segmentsToOptimize.Contains(info))
-					{
-						last++;
-						break;
-					}
-				}
-				
-				if (last > 0)
-				{
-					
-					spec = new MergeSpecification();
-					
-					// First, enroll all "full" merges (size
-					// mergeFactor) to potentially be run concurrently:
-					while (last - maxNumSegments + 1 >= mergeFactor)
-					{
+        }
+        
+        /// <summary>Returns the merges necessary to optimize the index.
+        /// This merge policy defines "optimized" to mean only one
+        /// segment in the index, where that segment has no
+        /// deletions pending nor separate norms, and it is in
+        /// compound file format if the current useCompoundFile
+        /// setting is true.  This method returns multiple merges
+        /// (mergeFactor at a time) so the <see cref="MergeScheduler" />
+        /// in use may make use of concurrency. 
+        /// </summary>
+        public override MergeSpecification FindMergesForOptimize(SegmentInfos infos, int maxNumSegments, ISet<SegmentInfo> segmentsToOptimize)
+        {
+            MergeSpecification spec;
+            
+            System.Diagnostics.Debug.Assert(maxNumSegments > 0);
+            
+            if (!IsOptimized(infos, maxNumSegments, segmentsToOptimize))
+            {
+                
+                // Find the newest (rightmost) segment that needs to
+                // be optimized (other segments may have been flushed
+                // since optimize started):
+                int last = infos.Count;
+                while (last > 0)
+                {
+                    SegmentInfo info = infos.Info(--last);
+                    if (segmentsToOptimize.Contains(info))
+                    {
+                        last++;
+                        break;
+                    }
+                }
+                
+                if (last > 0)
+                {
+                    
+                    spec = new MergeSpecification();
+                    
+                    // First, enroll all "full" merges (size
+                    // mergeFactor) to potentially be run concurrently:
+                    while (last - maxNumSegments + 1 >= mergeFactor)
+                    {
                         spec.Add(MakeOneMerge(infos, infos.Range(last - mergeFactor, last)));
-						last -= mergeFactor;
-					}
-					
-					// Only if there are no full merges pending do we
-					// add a final partial (< mergeFactor segments) merge:
-					if (0 == spec.merges.Count)
-					{
-						if (maxNumSegments == 1)
-						{
-							
-							// Since we must optimize down to 1 segment, the
-							// choice is simple:
-							if (last > 1 || !IsOptimized(infos.Info(0)))
+                        last -= mergeFactor;
+                    }
+                    
+                    // Only if there are no full merges pending do we
+                    // add a final partial (< mergeFactor segments) merge:
+                    if (0 == spec.merges.Count)
+                    {
+                        if (maxNumSegments == 1)
+                        {
+                            
+                            // Since we must optimize down to 1 segment, the
+                            // choice is simple:
+                            if (last > 1 || !IsOptimized(infos.Info(0)))
                                 spec.Add(MakeOneMerge(infos, infos.Range(0, last)));
-						}
-						else if (last > maxNumSegments)
-						{
-							
-							// Take care to pick a partial merge that is
-							// least cost, but does not make the index too
-							// lopsided.  If we always just picked the
-							// partial tail then we could produce a highly
-							// lopsided index over time:
-							
-							// We must merge this many segments to leave
-							// maxNumSegments in the index (from when
-							// optimize was first kicked off):
-							int finalMergeSize = last - maxNumSegments + 1;
-							
-							// Consider all possible starting points:
-							long bestSize = 0;
-							int bestStart = 0;
-							
-							for (int i = 0; i < last - finalMergeSize + 1; i++)
-							{
-								long sumSize = 0;
-								for (int j = 0; j < finalMergeSize; j++)
-									sumSize += Size(infos.Info(j + i));
-								if (i == 0 || (sumSize < 2 * Size(infos.Info(i - 1)) && sumSize < bestSize))
-								{
-									bestStart = i;
-									bestSize = sumSize;
-								}
-							}
+                        }
+                        else if (last > maxNumSegments)
+                        {
+                            
+                            // Take care to pick a partial merge that is
+                            // least cost, but does not make the index too
+                            // lopsided.  If we always just picked the
+                            // partial tail then we could produce a highly
+                            // lopsided index over time:
+                            
+                            // We must merge this many segments to leave
+                            // maxNumSegments in the index (from when
+                            // optimize was first kicked off):
+                            int finalMergeSize = last - maxNumSegments + 1;
+                            
+                            // Consider all possible starting points:
+                            long bestSize = 0;
+                            int bestStart = 0;
+                            
+                            for (int i = 0; i < last - finalMergeSize + 1; i++)
+                            {
+                                long sumSize = 0;
+                                for (int j = 0; j < finalMergeSize; j++)
+                                    sumSize += Size(infos.Info(j + i));
+                                if (i == 0 || (sumSize < 2 * Size(infos.Info(i - 1)) && sumSize < bestSize))
+                                {
+                                    bestStart = i;
+                                    bestSize = sumSize;
+                                }
+                            }
 
                             spec.Add(MakeOneMerge(infos, infos.Range(bestStart, bestStart + finalMergeSize)));
-						}
-					}
-				}
-				else
-					spec = null;
-			}
-			else
-				spec = null;
-			
-			return spec;
-		}
-		
-		/// <summary> Finds merges necessary to expunge all deletes from the
-		/// index.  We simply merge adjacent segments that have
-		/// deletes, up to mergeFactor at a time.
-		/// </summary>
-		public override MergeSpecification FindMergesToExpungeDeletes(SegmentInfos segmentInfos)
-		{
-			int numSegments = segmentInfos.Count;
-			
-			if (Verbose())
-				Message("findMergesToExpungeDeletes: " + numSegments + " segments");
-			
-			MergeSpecification spec = new MergeSpecification();
-			int firstSegmentWithDeletions = - 1;
-			for (int i = 0; i < numSegments; i++)
-			{
-				SegmentInfo info = segmentInfos.Info(i);
-				int delCount = writer.NumDeletedDocs(info);
-				if (delCount > 0)
-				{
-					if (Verbose())
-						Message("  segment " + info.name + " has deletions");
-					if (firstSegmentWithDeletions == - 1)
-						firstSegmentWithDeletions = i;
-					else if (i - firstSegmentWithDeletions == mergeFactor)
-					{
-						// We've seen mergeFactor segments in a row with
-						// deletions, so force a merge now:
-						if (Verbose())
-							Message("  add merge " + firstSegmentWithDeletions + " to " + (i - 1) + " inclusive");
+                        }
+                    }
+                }
+                else
+                    spec = null;
+            }
+            else
+                spec = null;
+            
+            return spec;
+        }
+        
+        /// <summary> Finds merges necessary to expunge all deletes from the
+        /// index.  We simply merge adjacent segments that have
+        /// deletes, up to mergeFactor at a time.
+        /// </summary>
+        public override MergeSpecification FindMergesToExpungeDeletes(SegmentInfos segmentInfos)
+        {
+            int numSegments = segmentInfos.Count;
+            
+            if (Verbose())
+                Message("findMergesToExpungeDeletes: " + numSegments + " segments");
+            
+            MergeSpecification spec = new MergeSpecification();
+            int firstSegmentWithDeletions = - 1;
+            for (int i = 0; i < numSegments; i++)
+            {
+                SegmentInfo info = segmentInfos.Info(i);
+                int delCount = writer.NumDeletedDocs(info);
+                if (delCount > 0)
+                {
+                    if (Verbose())
+                        Message("  segment " + info.name + " has deletions");
+                    if (firstSegmentWithDeletions == - 1)
+                        firstSegmentWithDeletions = i;
+                    else if (i - firstSegmentWithDeletions == mergeFactor)
+                    {
+                        // We've seen mergeFactor segments in a row with
+                        // deletions, so force a merge now:
+                        if (Verbose())
+                            Message("  add merge " + firstSegmentWithDeletions + " to " + (i - 1) + " inclusive");
                         spec.Add(MakeOneMerge(segmentInfos, segmentInfos.Range(firstSegmentWithDeletions, i)));
-						firstSegmentWithDeletions = i;
-					}
-				}
-				else if (firstSegmentWithDeletions != - 1)
-				{
-					// End of a sequence of segments with deletions, so,
-					// merge those past segments even if it's fewer than
-					// mergeFactor segments
-					if (Verbose())
-						Message("  add merge " + firstSegmentWithDeletions + " to " + (i - 1) + " inclusive");
+                        firstSegmentWithDeletions = i;
+                    }
+                }
+                else if (firstSegmentWithDeletions != - 1)
+                {
+                    // End of a sequence of segments with deletions, so,
+                    // merge those past segments even if it's fewer than
+                    // mergeFactor segments
+                    if (Verbose())
+                        Message("  add merge " + firstSegmentWithDeletions + " to " + (i - 1) + " inclusive");
                     spec.Add(MakeOneMerge(segmentInfos, segmentInfos.Range(firstSegmentWithDeletions, i)));
-					firstSegmentWithDeletions = - 1;
-				}
-			}
-			
-			if (firstSegmentWithDeletions != - 1)
-			{
-				if (Verbose())
-					Message("  add merge " + firstSegmentWithDeletions + " to " + (numSegments - 1) + " inclusive");
+                    firstSegmentWithDeletions = - 1;
+                }
+            }
+            
+            if (firstSegmentWithDeletions != - 1)
+            {
+                if (Verbose())
+                    Message("  add merge " + firstSegmentWithDeletions + " to " + (numSegments - 1) + " inclusive");
                 spec.Add(MakeOneMerge(segmentInfos, segmentInfos.Range(firstSegmentWithDeletions, numSegments)));
-			}
-			
-			return spec;
-		}
-		
-		/// <summary>Checks if any merges are now necessary and returns a
-		/// <see cref="MergePolicy.MergeSpecification" /> if so.  A merge
-		/// is necessary when there are more than <see cref="MergeFactor" />
-		/// segments at a given level.  When
-		/// multiple levels have too many segments, this method
-		/// will return multiple merges, allowing the <see cref="MergeScheduler" />
-		/// to use concurrency. 
-		/// </summary>
-		public override MergeSpecification FindMerges(SegmentInfos infos)
-		{
-			
-			int numSegments = infos.Count;
-			if (Verbose())
-				Message("findMerges: " + numSegments + " segments");
-			
-			// Compute levels, which is just log (base mergeFactor)
-			// of the size of each segment
-			float[] levels = new float[numSegments];
-			float norm = (float) System.Math.Log(mergeFactor);
-			
-			for (int i = 0; i < numSegments; i++)
-			{
-				SegmentInfo info = infos.Info(i);
-				long size = Size(info);
-				
-				// Floor tiny segments
-				if (size < 1)
-					size = 1;
-				levels[i] = (float) System.Math.Log(size) / norm;
-			}
-			
-			float levelFloor;
-			if (minMergeSize <= 0)
-				levelFloor = (float) 0.0;
-			else
-			{
-				levelFloor = (float) (System.Math.Log(minMergeSize) / norm);
-			}
-			
-			// Now, we quantize the log values into levels.  The
-			// first level is any segment whose log size is within
-			// LEVEL_LOG_SPAN of the max size, or, who has such as
-			// segment "to the right".  Then, we find the max of all
-			// other segments and use that to define the next level
-			// segment, etc.
-			
-			MergeSpecification spec = null;
-			
-			int start = 0;
-			while (start < numSegments)
-			{
-				
-				// Find max level of all segments not already
-				// quantized.
-				float maxLevel = levels[start];
-				for (int i = 1 + start; i < numSegments; i++)
-				{
-					float level = levels[i];
-					if (level > maxLevel)
-						maxLevel = level;
-				}
-				
-				// Now search backwards for the rightmost segment that
-				// falls into this level:
-				float levelBottom;
-				if (maxLevel < levelFloor)
-				// All remaining segments fall into the min level
-					levelBottom = - 1.0F;
-				else
-				{
-					levelBottom = (float) (maxLevel - LEVEL_LOG_SPAN);
-					
-					// Force a boundary at the level floor
-					if (levelBottom < levelFloor && maxLevel >= levelFloor)
-						levelBottom = levelFloor;
-				}
-				
-				int upto = numSegments - 1;
-				while (upto >= start)
-				{
-					if (levels[upto] >= levelBottom)
-					{
-						break;
-					}
-					upto--;
-				}
-				if (Verbose())
-					Message("  level " + levelBottom + " to " + maxLevel + ": " + (1 + upto - start) + " segments");
-				
-				// Finally, record all merges that are viable at this level:
-				int end = start + mergeFactor;
-				while (end <= 1 + upto)
-				{
-					bool anyTooLarge = false;
-					for (int i = start; i < end; i++)
-					{
-						SegmentInfo info = infos.Info(i);
-						anyTooLarge |= (Size(info) >= maxMergeSize || SizeDocs(info) >= maxMergeDocs);
-					}
-					
-					if (!anyTooLarge)
-					{
-						if (spec == null)
-							spec = new MergeSpecification();
-						if (Verbose())
-							Message("    " + start + " to " + end + ": add this merge");
+            }
+            
+            return spec;
+        }
+        
+        /// <summary>Checks if any merges are now necessary and returns a
+        /// <see cref="MergePolicy.MergeSpecification" /> if so.  A merge
+        /// is necessary when there are more than <see cref="MergeFactor" />
+        /// segments at a given level.  When
+        /// multiple levels have too many segments, this method
+        /// will return multiple merges, allowing the <see cref="MergeScheduler" />
+        /// to use concurrency. 
+        /// </summary>
+        public override MergeSpecification FindMerges(SegmentInfos infos)
+        {
+            
+            int numSegments = infos.Count;
+            if (Verbose())
+                Message("findMerges: " + numSegments + " segments");
+            
+            // Compute levels, which is just log (base mergeFactor)
+            // of the size of each segment
+            float[] levels = new float[numSegments];
+            float norm = (float) System.Math.Log(mergeFactor);
+            
+            for (int i = 0; i < numSegments; i++)
+            {
+                SegmentInfo info = infos.Info(i);
+                long size = Size(info);
+                
+                // Floor tiny segments
+                if (size < 1)
+                    size = 1;
+                levels[i] = (float) System.Math.Log(size) / norm;
+            }
+            
+            float levelFloor;
+            if (minMergeSize <= 0)
+                levelFloor = (float) 0.0;
+            else
+            {
+                levelFloor = (float) (System.Math.Log(minMergeSize) / norm);
+            }
+            
+            // Now, we quantize the log values into levels.  The
+            // first level is any segment whose log size is within
+            // LEVEL_LOG_SPAN of the max size, or that has such a
+            // segment "to the right".  Then, we find the max of all
+            // other segments and use that to define the next level
+            // segment, etc.
+            
+            MergeSpecification spec = null;
+            
+            int start = 0;
+            while (start < numSegments)
+            {
+                
+                // Find max level of all segments not already
+                // quantized.
+                float maxLevel = levels[start];
+                for (int i = 1 + start; i < numSegments; i++)
+                {
+                    float level = levels[i];
+                    if (level > maxLevel)
+                        maxLevel = level;
+                }
+                
+                // Now search backwards for the rightmost segment that
+                // falls into this level:
+                float levelBottom;
+                if (maxLevel < levelFloor)
+                // All remaining segments fall into the min level
+                    levelBottom = - 1.0F;
+                else
+                {
+                    levelBottom = (float) (maxLevel - LEVEL_LOG_SPAN);
+                    
+                    // Force a boundary at the level floor
+                    if (levelBottom < levelFloor && maxLevel >= levelFloor)
+                        levelBottom = levelFloor;
+                }
+                
+                int upto = numSegments - 1;
+                while (upto >= start)
+                {
+                    if (levels[upto] >= levelBottom)
+                    {
+                        break;
+                    }
+                    upto--;
+                }
+                if (Verbose())
+                    Message("  level " + levelBottom + " to " + maxLevel + ": " + (1 + upto - start) + " segments");
+                
+                // Finally, record all merges that are viable at this level:
+                int end = start + mergeFactor;
+                while (end <= 1 + upto)
+                {
+                    bool anyTooLarge = false;
+                    for (int i = start; i < end; i++)
+                    {
+                        SegmentInfo info = infos.Info(i);
+                        anyTooLarge |= (Size(info) >= maxMergeSize || SizeDocs(info) >= maxMergeDocs);
+                    }
+                    
+                    if (!anyTooLarge)
+                    {
+                        if (spec == null)
+                            spec = new MergeSpecification();
+                        if (Verbose())
+                            Message("    " + start + " to " + end + ": add this merge");
                         spec.Add(MakeOneMerge(infos, infos.Range(start, end)));
-					}
-					else if (Verbose())
-						Message("    " + start + " to " + end + ": contains segment over maxMergeSize or maxMergeDocs; skipping");
-					
-					start = end;
-					end = start + mergeFactor;
-				}
-				
-				start = 1 + upto;
-			}
-			
-			return spec;
-		}
+                    }
+                    else if (Verbose())
+                        Message("    " + start + " to " + end + ": contains segment over maxMergeSize or maxMergeDocs; skipping");
+                    
+                    start = end;
+                    end = start + mergeFactor;
+                }
+                
+                start = 1 + upto;
+            }
+            
+            return spec;
+        }
         
         protected OneMerge MakeOneMerge(SegmentInfos infos, SegmentInfos infosToMerge)
         {
@@ -553,28 +553,28 @@ namespace Lucene.Net.Index
             return new OneMerge(infosToMerge, doCFS);
         }
 
-	    /// <summary>
-	    /// Gets or sets the largest segment (measured by document
-	    /// count) that may be merged with other segments.
-	    /// <p/>Determines the largest segment (measured by
-	    /// document count) that may be merged with other segments.
-	    /// Small values (e.g., less than 10,000) are best for
-	    /// interactive indexing, as this limits the length of
-	    /// pauses while indexing to a few seconds.  Larger values
-	    /// are best for batched indexing and speedier
-	    /// searches.<p/>
-	    /// 
-	    /// <p/>The default value is <see cref="int.MaxValue" />.<p/>
-	    /// 
-	    /// <p/>The default merge policy (<see cref="LogByteSizeMergePolicy" />)
-	    /// also allows you to set this
-	    /// limit by net size (in MB) of the segment, using 
-	    /// <see cref="LogByteSizeMergePolicy.MaxMergeMB" />.<p/>
-	    /// </summary>
-	    public virtual int MaxMergeDocs
-	    {
-	        set { this.maxMergeDocs = value; }
-	        get { return maxMergeDocs; }
-	    }
-	}
+        /// <summary>
+        /// Gets or sets the largest segment (measured by document
+        /// count) that may be merged with other segments.
+        /// <p/>Determines the largest segment (measured by
+        /// document count) that may be merged with other segments.
+        /// Small values (e.g., less than 10,000) are best for
+        /// interactive indexing, as this limits the length of
+        /// pauses while indexing to a few seconds.  Larger values
+        /// are best for batched indexing and speedier
+        /// searches.<p/>
+        /// 
+        /// <p/>The default value is <see cref="int.MaxValue" />.<p/>
+        /// 
+        /// <p/>The default merge policy (<see cref="LogByteSizeMergePolicy" />)
+        /// also allows you to set this
+        /// limit by net size (in MB) of the segment, using 
+        /// <see cref="LogByteSizeMergePolicy.MaxMergeMB" />.<p/>
+        /// </summary>
+        public virtual int MaxMergeDocs
+        {
+            set { this.maxMergeDocs = value; }
+            get { return maxMergeDocs; }
+        }
+    }
 }
\ No newline at end of file
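
A note for readers tracing the FindMerges math above: each segment is
placed on a log scale, levels[i] = ln(size) / ln(mergeFactor), and runs of
segments whose level falls within LEVEL_LOG_SPAN of the current maximum are
merged together, so roughly equal-sized segments end up in the same merge.
Below is a minimal standalone sketch of that computation; the segment sizes
are invented for illustration, and only the 0.75 constant matches the real
LEVEL_LOG_SPAN in LogMergePolicy.

    using System;

    class LevelSketch
    {
        const double LEVEL_LOG_SPAN = 0.75; // same value LogMergePolicy uses

        static void Main()
        {
            int mergeFactor = 10;
            long[] sizes = { 120000, 95000, 2000, 1800, 1500, 90, 75, 60 };

            double norm = Math.Log(mergeFactor);
            var levels = new double[sizes.Length];
            for (int i = 0; i < sizes.Length; i++)
                levels[i] = Math.Log(Math.Max(1L, sizes[i])) / norm; // floor tiny segments

            double maxLevel = levels[0];
            foreach (double l in levels)
                if (l > maxLevel) maxLevel = l;
            double levelBottom = maxLevel - LEVEL_LOG_SPAN;

            for (int i = 0; i < levels.Length; i++)
                Console.WriteLine("segment {0}: size {1,6}  level {2:F2}{3}",
                    i, sizes[i], levels[i],
                    levels[i] >= levelBottom ? "  <- top level" : "");
        }
    }

With mergeFactor = 10 the two large segments land on the top level while
the mid-sized and tiny ones form their own levels, which is exactly the
grouping the quantization loop above searches for.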

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/MergeDocIDRemapper.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/MergeDocIDRemapper.cs b/src/core/Index/MergeDocIDRemapper.cs
index 2771b53..5c06721 100644
--- a/src/core/Index/MergeDocIDRemapper.cs
+++ b/src/core/Index/MergeDocIDRemapper.cs
@@ -20,108 +20,108 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>Remaps docIDs after a merge has completed, where the
-	/// merged segments had at least one deletion.  This is used
-	/// to renumber the buffered deletes in IndexWriter when a
-	/// merge of segments with deletions commits. 
-	/// </summary>
-	
-	sealed class MergeDocIDRemapper
-	{
-		internal int[] starts; // used for binary search of mapped docID
-		internal int[] newStarts; // starts, minus the deletes
-		internal int[][] docMaps; // maps docIDs in the merged set
-		internal int minDocID; // minimum docID that needs renumbering
-		internal int maxDocID; // 1+ the max docID that needs renumbering
-		internal int docShift; // total # deleted docs that were compacted by this merge
-		
-		public MergeDocIDRemapper(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergedDocCount)
-		{
-			this.docMaps = docMaps;
-			SegmentInfo firstSegment = merge.segments.Info(0);
-			int i = 0;
-			while (true)
-			{
-				SegmentInfo info = infos.Info(i);
-				if (info.Equals(firstSegment))
-					break;
-				minDocID += info.docCount;
-				i++;
-			}
-			
-			int numDocs = 0;
-			for (int j = 0; j < docMaps.Length; i++, j++)
-			{
-				numDocs += infos.Info(i).docCount;
-				System.Diagnostics.Debug.Assert(infos.Info(i).Equals(merge.segments.Info(j)));
-			}
-			maxDocID = minDocID + numDocs;
-			
-			starts = new int[docMaps.Length];
-			newStarts = new int[docMaps.Length];
-			
-			starts[0] = minDocID;
-			newStarts[0] = minDocID;
-			for (i = 1; i < docMaps.Length; i++)
-			{
-				int lastDocCount = merge.segments.Info(i - 1).docCount;
-				starts[i] = starts[i - 1] + lastDocCount;
-				newStarts[i] = newStarts[i - 1] + lastDocCount - delCounts[i - 1];
-			}
-			docShift = numDocs - mergedDocCount;
-			
-			// There are rare cases when docShift is 0.  It happens
-			// if you try to delete a docID that's out of bounds,
-			// because the SegmentReader still allocates deletedDocs
-			// and pretends it has deletions ... so we can't make
-			// this assert here
-			// assert docShift > 0;
-			
-			// Make sure it all adds up:
-			System.Diagnostics.Debug.Assert(docShift == maxDocID -(newStarts [docMaps.Length - 1] + merge.segments.Info(docMaps.Length - 1).docCount - delCounts [docMaps.Length - 1]));
-		}
-		
-		public int Remap(int oldDocID)
-		{
-			if (oldDocID < minDocID)
-			// Unaffected by merge
-				return oldDocID;
-			else if (oldDocID >= maxDocID)
-			// This doc was "after" the merge, so simple shift
-				return oldDocID - docShift;
-			else
-			{
-				// Binary search to locate this document & find its new docID
-				int lo = 0; // search starts array
-				int hi = docMaps.Length - 1; // for first element less
-				
-				while (hi >= lo)
-				{
-					int mid = Number.URShift((lo + hi), 1);
-					int midValue = starts[mid];
-					if (oldDocID < midValue)
-						hi = mid - 1;
-					else if (oldDocID > midValue)
-						lo = mid + 1;
-					else
-					{
-						// found a match
-						while (mid + 1 < docMaps.Length && starts[mid + 1] == midValue)
-						{
-							mid++; // scan to last match
-						}
-						if (docMaps[mid] != null)
-							return newStarts[mid] + docMaps[mid][oldDocID - starts[mid]];
-						else
-							return newStarts[mid] + oldDocID - starts[mid];
-					}
-				}
-				if (docMaps[hi] != null)
-					return newStarts[hi] + docMaps[hi][oldDocID - starts[hi]];
-				else
-					return newStarts[hi] + oldDocID - starts[hi];
-			}
-		}
-	}
+    
+    /// <summary>Remaps docIDs after a merge has completed, where the
+    /// merged segments had at least one deletion.  This is used
+    /// to renumber the buffered deletes in IndexWriter when a
+    /// merge of segments with deletions commits. 
+    /// </summary>
+    
+    sealed class MergeDocIDRemapper
+    {
+        internal int[] starts; // used for binary search of mapped docID
+        internal int[] newStarts; // starts, minus the deletes
+        internal int[][] docMaps; // maps docIDs in the merged set
+        internal int minDocID; // minimum docID that needs renumbering
+        internal int maxDocID; // 1+ the max docID that needs renumbering
+        internal int docShift; // total # deleted docs that were compacted by this merge
+        
+        public MergeDocIDRemapper(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergedDocCount)
+        {
+            this.docMaps = docMaps;
+            SegmentInfo firstSegment = merge.segments.Info(0);
+            int i = 0;
+            while (true)
+            {
+                SegmentInfo info = infos.Info(i);
+                if (info.Equals(firstSegment))
+                    break;
+                minDocID += info.docCount;
+                i++;
+            }
+            
+            int numDocs = 0;
+            for (int j = 0; j < docMaps.Length; i++, j++)
+            {
+                numDocs += infos.Info(i).docCount;
+                System.Diagnostics.Debug.Assert(infos.Info(i).Equals(merge.segments.Info(j)));
+            }
+            maxDocID = minDocID + numDocs;
+            
+            starts = new int[docMaps.Length];
+            newStarts = new int[docMaps.Length];
+            
+            starts[0] = minDocID;
+            newStarts[0] = minDocID;
+            for (i = 1; i < docMaps.Length; i++)
+            {
+                int lastDocCount = merge.segments.Info(i - 1).docCount;
+                starts[i] = starts[i - 1] + lastDocCount;
+                newStarts[i] = newStarts[i - 1] + lastDocCount - delCounts[i - 1];
+            }
+            docShift = numDocs - mergedDocCount;
+            
+            // There are rare cases when docShift is 0.  It happens
+            // if you try to delete a docID that's out of bounds,
+            // because the SegmentReader still allocates deletedDocs
+            // and pretends it has deletions ... so we can't make
+            // this assert here
+            // assert docShift > 0;
+            
+            // Make sure it all adds up:
+            System.Diagnostics.Debug.Assert(docShift == maxDocID -(newStarts [docMaps.Length - 1] + merge.segments.Info(docMaps.Length - 1).docCount - delCounts [docMaps.Length - 1]));
+        }
+        
+        public int Remap(int oldDocID)
+        {
+            if (oldDocID < minDocID)
+            // Unaffected by merge
+                return oldDocID;
+            else if (oldDocID >= maxDocID)
+            // This doc was "after" the merge, so simple shift
+                return oldDocID - docShift;
+            else
+            {
+                // Binary search to locate this document & find its new docID
+                int lo = 0; // search starts array
+                int hi = docMaps.Length - 1; // for first element less
+                
+                while (hi >= lo)
+                {
+                    int mid = Number.URShift((lo + hi), 1);
+                    int midValue = starts[mid];
+                    if (oldDocID < midValue)
+                        hi = mid - 1;
+                    else if (oldDocID > midValue)
+                        lo = mid + 1;
+                    else
+                    {
+                        // found a match
+                        while (mid + 1 < docMaps.Length && starts[mid + 1] == midValue)
+                        {
+                            mid++; // scan to last match
+                        }
+                        if (docMaps[mid] != null)
+                            return newStarts[mid] + docMaps[mid][oldDocID - starts[mid]];
+                        else
+                            return newStarts[mid] + oldDocID - starts[mid];
+                    }
+                }
+                if (docMaps[hi] != null)
+                    return newStarts[hi] + docMaps[hi][oldDocID - starts[hi]];
+                else
+                    return newStarts[hi] + oldDocID - starts[hi];
+            }
+        }
+    }
 }
\ No newline at end of file
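
To make the Remap arithmetic above concrete, here is a toy, self-contained
sketch with invented numbers: two merged segments of 10 docs each, starting
at docID 100, with 2 and 3 deletions respectively. The real class
binary-searches the starts[] array where this sketch hard-codes a two-way
branch, and only live docIDs are ever remapped in practice.

    using System;

    class RemapSketch
    {
        // Hypothetical two-segment merge; all numbers are invented.
        static readonly int[] starts    = { 100, 110 }; // old segment starts
        static readonly int[] newStarts = { 100, 108 }; // starts, minus deletes
        static readonly int minDocID = 100, maxDocID = 120, docShift = 5;

        // docMaps[i][j] = position of live doc j within segment i after
        // compaction (-1 marks a deleted doc; the real code uses null maps
        // for segments without deletions).
        static readonly int[][] docMaps =
        {
            new[] { 0, 1, -1, 2, 3, 4, -1, 5, 6, 7 },
            new[] { 0, -1, 1, 2, 3, -1, -1, 4, 5, 6 },
        };

        static int Remap(int oldDocID)
        {
            if (oldDocID < minDocID)
                return oldDocID;               // before the merge: unaffected
            if (oldDocID >= maxDocID)
                return oldDocID - docShift;    // after the merge: simple shift
            int seg = oldDocID < starts[1] ? 0 : 1; // binary search in the real class
            return newStarts[seg] + docMaps[seg][oldDocID - starts[seg]];
        }

        static void Main()
        {
            foreach (int id in new[] { 42, 103, 113, 125 })
                Console.WriteLine("{0} -> {1}", id, Remap(id));
            // 42 -> 42, 103 -> 102, 113 -> 110, 125 -> 120
        }
    }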


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/SpellChecker/Spell/SuggestWord.cs
----------------------------------------------------------------------
diff --git a/src/contrib/SpellChecker/Spell/SuggestWord.cs b/src/contrib/SpellChecker/Spell/SuggestWord.cs
index c8bec15..54840b2 100644
--- a/src/contrib/SpellChecker/Spell/SuggestWord.cs
+++ b/src/contrib/SpellChecker/Spell/SuggestWord.cs
@@ -19,7 +19,7 @@ using System;
 
 namespace SpellChecker.Net.Search.Spell
 {
-	
+    
     /// <summary>  SuggestWord Class, used in suggestSimilar method in SpellChecker class.
     /// 
     /// </summary>
@@ -29,13 +29,13 @@ namespace SpellChecker.Net.Search.Spell
     {
         /// <summary> the score of the word</summary>
         public float score;
-		
+        
         /// <summary> The freq of the word</summary>
         public int freq;
-		
+        
         /// <summary> the suggested word</summary>
         public System.String termString;
-		
+        
         public int CompareTo(SuggestWord a)
         {
             //first criteria: the edit distance
@@ -47,18 +47,18 @@ namespace SpellChecker.Net.Search.Spell
             {
                 return - 1;
             }
-			
+            
             //second criteria (if first criteria is equal): the popularity
             if (freq > a.freq)
             {
                 return 1;
             }
-			
+            
             if (freq < a.freq)
             {
                 return - 1;
             }
-			
+            
             return 0;
         }
     }
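
The CompareTo above orders suggestions by edit-distance score first, with
corpus frequency as the tie-breaker. A small self-contained sketch of the
same ordering, sorted best-first, with invented data:

    using System;
    using System.Collections.Generic;

    class SuggestSortSketch
    {
        class Word { public string Term; public float Score; public int Freq; }

        static void Main()
        {
            var words = new List<Word>
            {
                new Word { Term = "lucerne", Score = 0.9f, Freq =   3 },
                new Word { Term = "lucene",  Score = 0.9f, Freq = 120 },
                new Word { Term = "lumen",   Score = 0.7f, Freq = 500 },
            };

            // Descending sort, i.e. the best suggestion comes first:
            words.Sort((a, b) =>
            {
                int byScore = b.Score.CompareTo(a.Score); // first criterion
                return byScore != 0 ? byScore : b.Freq.CompareTo(a.Freq);
            });

            foreach (var w in words)
                Console.WriteLine("{0} (score={1}, freq={2})", w.Term, w.Score, w.Freq);
            // lucene, lucerne, lumen
        }
    }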

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/SpellChecker/Spell/SuggestWordQueue.cs
----------------------------------------------------------------------
diff --git a/src/contrib/SpellChecker/Spell/SuggestWordQueue.cs b/src/contrib/SpellChecker/Spell/SuggestWordQueue.cs
index de4dc09..7ae17ec 100644
--- a/src/contrib/SpellChecker/Spell/SuggestWordQueue.cs
+++ b/src/contrib/SpellChecker/Spell/SuggestWordQueue.cs
@@ -22,12 +22,12 @@ namespace SpellChecker.Net.Search.Spell
 
     sealed class SuggestWordQueue : PriorityQueue
     {
-		
+        
         internal SuggestWordQueue(int size)
         {
             Initialize(size);
         }
-		
+        
         override public bool LessThan(SuggestWord a, SuggestWord b)
         {
             var val = a.CompareTo(b);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/SpellChecker/Spell/TRStringDistance.cs
----------------------------------------------------------------------
diff --git a/src/contrib/SpellChecker/Spell/TRStringDistance.cs b/src/contrib/SpellChecker/Spell/TRStringDistance.cs
index f797f59..79b2314 100644
--- a/src/contrib/SpellChecker/Spell/TRStringDistance.cs
+++ b/src/contrib/SpellChecker/Spell/TRStringDistance.cs
@@ -18,16 +18,16 @@
 
 namespace SpellChecker.Net.Search.Spell
 {
-	
+    
     /// <summary> Edit distance  class</summary>
     public class TRStringDistance
     {
-		
+        
         internal char[] sa;
         internal int n;
         internal int[][][] cache = new int[30][][];
-		
-		
+        
+        
         /// <summary> Optimized to run a bit faster than the static getDistance().
         /// In one benchmark, times were 5.3 sec using the constructor vs 8.5 sec with the static method, thus 37% faster.
         /// </summary>
@@ -36,8 +36,8 @@ namespace SpellChecker.Net.Search.Spell
             sa = target.ToCharArray();
             n = sa.Length;
         }
-		
-		
+        
+        
         //***************************
         // Compute Levenshtein distance
         //***************************
@@ -56,7 +56,7 @@ namespace SpellChecker.Net.Search.Spell
             {
                 return n;
             }
-			
+            
             if (m >= cache.Length)
             {
                 d = Form(n, m);
@@ -68,31 +68,31 @@ namespace SpellChecker.Net.Search.Spell
             else
             {
                 d = cache[m] = Form(n, m);
-				
+                
                 // Step 3
             }
             for (int i = 1; i <= n; i++)
             {
                 char s_i = sa[i - 1];
-				
+                
                 // Step 4
-				
+                
                 for (int j = 1; j <= m; j++)
                 {
                     char t_j = ta[j - 1];
-					
+                    
                     // Step 5
 
                     int cost = s_i == t_j ? 0 : 1;
                     d[i][j] = Min3(d[i - 1][j] + 1, d[i][j - 1] + 1, d[i - 1][j - 1] + cost);
                 }
             }
-			
+            
             // Step 7
             return d[n][m];
         }
-		
-		
+        
+        
         /// <summary> </summary>
         private static int[][] Form(int n, int m)
         {
@@ -102,7 +102,7 @@ namespace SpellChecker.Net.Search.Spell
                 d[i] = new int[m + 1];
             }
             // Step 2
-			
+            
             for (int i = 0; i <= n; i++)
             {
                 d[i][0] = i;
@@ -113,8 +113,8 @@ namespace SpellChecker.Net.Search.Spell
             }
             return d;
         }
-		
-		
+        
+        
         //**************************
         // Get minimum of three values
         //**************************
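
TRStringDistance above is the classic dynamic-programming Levenshtein
distance; its numbered steps fill an (n+1) x (m+1) table where each cell is
the cheapest of a deletion, an insertion, or a substitution. The same
recurrence without the per-length row cache, as a standalone sketch:

    using System;

    class LevenshteinSketch
    {
        static int Distance(string s, string t)
        {
            int n = s.Length, m = t.Length;
            var d = new int[n + 1, m + 1];
            for (int i = 0; i <= n; i++) d[i, 0] = i; // i deletions
            for (int j = 0; j <= m; j++) d[0, j] = j; // j insertions

            for (int i = 1; i <= n; i++)
                for (int j = 1; j <= m; j++)
                {
                    int cost = s[i - 1] == t[j - 1] ? 0 : 1;
                    d[i, j] = Math.Min(Math.Min(
                        d[i - 1, j] + 1,          // delete
                        d[i, j - 1] + 1),         // insert
                        d[i - 1, j - 1] + cost);  // substitute (or match)
                }
            return d[n, m];
        }

        static void Main()
        {
            Console.WriteLine(Distance("kitten", "sitting")); // 3
        }
    }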

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/WordNet/SynExpand/SynExpand.cs
----------------------------------------------------------------------
diff --git a/src/contrib/WordNet/SynExpand/SynExpand.cs b/src/contrib/WordNet/SynExpand/SynExpand.cs
index a830f6f..79498c0 100644
--- a/src/contrib/WordNet/SynExpand/SynExpand.cs
+++ b/src/contrib/WordNet/SynExpand/SynExpand.cs
@@ -87,48 +87,48 @@ namespace WorldNet.Net
             Analyzer a,
             String field,
             float boost)
-		{
-			already = new List<String>(); // avoid dups 
-			var top = new List<String>(); // needs to be separately listed..
-			if (field == null)
-				field = "contents";
-			
+        {
+            already = new List<String>(); // avoid dups 
+            var top = new List<String>(); // needs to be separately listed..
+            if (field == null)
+                field = "contents";
+            
             if (a == null)
-				a = new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_CURRENT);
-			
-			// [1] Parse query into separate words so that when we expand we can avoid dups
-			var ts = a.TokenStream(field, new StringReader(query));
+                a = new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_CURRENT);
+            
+            // [1] Parse query into separate words so that when we expand we can avoid dups
+            var ts = a.TokenStream(field, new StringReader(query));
             var termAtt = ts.AddAttribute<TermAttribute>();
-		    
+            
             while (ts.IncrementToken())
-			{
-				var word = termAtt.Term;
-				
+            {
+                var word = termAtt.Term;
+                
                 if (!already.Contains(word))
-				{
-					already.Add(word);
-					top.Add(word);
-				}
-			}
-
-			tmp = new BooleanQuery();
-			
-			// [2] form query
-			System.Collections.IEnumerator it = top.GetEnumerator();
-			while (it.MoveNext())
-			{
-				// [2a] add to level words in
-				var word = (String) it.Current;
-				var tq = new TermQuery(new Term(field, word));
-				tmp.Add(tq, Occur.SHOULD);
-
-			    var c = new CollectorImpl(field, boost);
+                {
+                    already.Add(word);
+                    top.Add(word);
+                }
+            }
+
+            tmp = new BooleanQuery();
+            
+            // [2] form query
+            System.Collections.IEnumerator it = top.GetEnumerator();
+            while (it.MoveNext())
+            {
+                // [2a] add to level words in
+                var word = (String) it.Current;
+                var tq = new TermQuery(new Term(field, word));
+                tmp.Add(tq, Occur.SHOULD);
+
+                var c = new CollectorImpl(field, boost);
                 syns.Search(new TermQuery(new Term(Syns2Index.F_WORD, word)), c);
-			}
-			
-			return tmp;
-		}
-	
+            }
+            
+            return tmp;
+        }
+    
 
         /// <summary>
         /// From project WordNet.Net.Syns2Index
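
The Expand method above builds a BooleanQuery in two passes: it tokenizes
the input with the analyzer (deduplicating words), then SHOULD-adds a
TermQuery per word and searches the synonym index for boosted additions.
A hedged usage sketch follows; the index path is an assumption, and the
synonym index is one produced by Syns2Index. Passing null for the analyzer
and field falls back to StandardAnalyzer and "contents", per the defaults
visible in Expand's body.

    using System.IO;
    using Lucene.Net.Search;
    using Lucene.Net.Store;
    using WorldNet.Net;

    class ExpandUsageSketch
    {
        static void Main()
        {
            using (var synDir = FSDirectory.Open(new DirectoryInfo("/tmp/wn-index"))) // hypothetical path
            using (var syns = new IndexSearcher(synDir, true)) // read-only searcher
            {
                Query expanded = SynExpand.Expand("big dog", syns, null, null, 0.9f);
                System.Console.WriteLine(expanded); // the words plus boosted synonyms
            }
        }
    }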

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/WordNet/SynLookup/SynLookup.cs
----------------------------------------------------------------------
diff --git a/src/contrib/WordNet/SynLookup/SynLookup.cs b/src/contrib/WordNet/SynLookup/SynLookup.cs
index 62c436d..024dcc9 100644
--- a/src/contrib/WordNet/SynLookup/SynLookup.cs
+++ b/src/contrib/WordNet/SynLookup/SynLookup.cs
@@ -27,100 +27,100 @@ using Lucene.Net.Store;
 
 namespace WorldNet.Net
 {
-	/// <summary> Test program to look up synonyms.</summary>
-	public class SynLookup
-	{
-		static List<String> already;
-		private static BooleanQuery tmp;
-
-		[STAThread]
-		public static void  Main(System.String[] args)
-		{
-			if (args.Length != 2)
-			{
-				System.Console.Out.WriteLine(typeof(SynLookup) + " <index path> <word>");
-				return;
-			}
-			
-			using (var directory = FSDirectory.Open(new DirectoryInfo(args[0])))
-			{
-				using (var searcher = new IndexSearcher(directory, true))
-				{
-
-					String word = args[1];
-					Query query = new TermQuery(new Term(Syns2Index.F_WORD, word));
-					var countingCollector = new CountingCollector();
-					searcher.Search(query, countingCollector);
-
-					if (countingCollector.numHits == 0)
-					{
-						Console.Out.WriteLine("No synonyms found for " + word);
-					}
-					else
-					{
-						Console.Out.WriteLine("Synonyms found for \"" + word + "\":");
-					}
-
-					var hits = searcher.Search(query, countingCollector.numHits).ScoreDocs;
-
-					foreach (var v in
-						hits.Select(t => searcher.Doc(t.Doc)).Select(doc => doc.GetValues(Syns2Index.F_SYN)).SelectMany(values => values))
-					{
-						Console.Out.WriteLine(v);
-					}
-
-				}
-			}
-		}
-		
-		/// <summary> 
-		/// Perform synonym expansion on a query.
-		/// </summary>
-		/// <param name="query">query</param>
-		/// <param name="syns">syns</param>
-		/// <param name="a">a</param>
-		/// <param name="field">field</param>
-		/// <param name="boost">boost</param>
-		public static Query Expand(String query, 
-			Searcher syns, 
-			Analyzer a, 
-			String field, 
-			float boost)
-		{
-			already = new List<String>(); // avoid dups		
-			var top = new List<String>(); // needs to be separately listed..
-
-			var ts = a.TokenStream(field, new StringReader(query));
-			var termAtt = ts.AddAttribute<TermAttribute>();
-
-			while (ts.IncrementToken())
-			{
-				var word = termAtt.Term;
-
-				if (!already.Contains(word))
-				{
-					already.Add(word);
-					top.Add(word);
-				}
-			}
-
-			tmp = new BooleanQuery();
-
-			// [2] form query
-			System.Collections.IEnumerator it = top.GetEnumerator();
-			while (it.MoveNext())
-			{
-				// [2a] add to level words in
-				var word = (String)it.Current;
-				var tq = new TermQuery(new Term(field, word));
-				tmp.Add(tq, Occur.SHOULD);
-
-				var c = new CollectorImpl(field, boost);
-				syns.Search(new TermQuery(new Term(Syns2Index.F_WORD, word)), c);
-			}
-
-			return tmp;
-		}
+    /// <summary> Test program to look up synonyms.</summary>
+    public class SynLookup
+    {
+        static List<String> already;
+        private static BooleanQuery tmp;
+
+        [STAThread]
+        public static void  Main(System.String[] args)
+        {
+            if (args.Length != 2)
+            {
+                System.Console.Out.WriteLine(typeof(SynLookup) + " <index path> <word>");
+                return;
+            }
+            
+            using (var directory = FSDirectory.Open(new DirectoryInfo(args[0])))
+            {
+                using (var searcher = new IndexSearcher(directory, true))
+                {
+
+                    String word = args[1];
+                    Query query = new TermQuery(new Term(Syns2Index.F_WORD, word));
+                    var countingCollector = new CountingCollector();
+                    searcher.Search(query, countingCollector);
+
+                    if (countingCollector.numHits == 0)
+                    {
+                        Console.Out.WriteLine("No synonyms found for " + word);
+                    }
+                    else
+                    {
+                        Console.Out.WriteLine("Synonyms found for \"" + word + "\":");
+                    }
+
+                    var hits = searcher.Search(query, countingCollector.numHits).ScoreDocs;
+
+                    foreach (var v in
+                        hits.Select(t => searcher.Doc(t.Doc)).Select(doc => doc.GetValues(Syns2Index.F_SYN)).SelectMany(values => values))
+                    {
+                        Console.Out.WriteLine(v);
+                    }
+
+                }
+            }
+        }
+        
+        /// <summary> 
+        /// Perform synonym expansion on a query.
+        /// </summary>
+        /// <param name="query">query</param>
+        /// <param name="syns">syns</param>
+        /// <param name="a">a</param>
+        /// <param name="field">field</param>
+        /// <param name="boost">boost</param>
+        public static Query Expand(String query, 
+            Searcher syns, 
+            Analyzer a, 
+            String field, 
+            float boost)
+        {
+            already = new List<String>(); // avoid dups        
+            var top = new List<String>(); // needs to be separately listed..
+
+            var ts = a.TokenStream(field, new StringReader(query));
+            var termAtt = ts.AddAttribute<TermAttribute>();
+
+            while (ts.IncrementToken())
+            {
+                var word = termAtt.Term;
+
+                if (!already.Contains(word))
+                {
+                    already.Add(word);
+                    top.Add(word);
+                }
+            }
+
+            tmp = new BooleanQuery();
+
+            // [2] form query
+            System.Collections.IEnumerator it = top.GetEnumerator();
+            while (it.MoveNext())
+            {
+                // [2a] add to level words in
+                var word = (String)it.Current;
+                var tq = new TermQuery(new Term(field, word));
+                tmp.Add(tq, Occur.SHOULD);
+
+                var c = new CollectorImpl(field, boost);
+                syns.Search(new TermQuery(new Term(Syns2Index.F_WORD, word)), c);
+            }
+
+            return tmp;
+        }
 
         internal sealed class CountingCollector : Collector
         {
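
The body of CountingCollector falls outside this hunk; for orientation, a
minimal hit-counting collector in the shape Lucene.Net 3.x expects would
look roughly like the following sketch (not the committed code):

    using Lucene.Net.Index;
    using Lucene.Net.Search;

    internal sealed class CountingCollectorSketch : Collector
    {
        public int numHits = 0;

        public override void SetScorer(Scorer scorer) { }  // scores are not used
        public override void Collect(int doc) { numHits++; }
        public override void SetNextReader(IndexReader reader, int docBase) { }
        public override bool AcceptsDocsOutOfOrder { get { return true; } }
    }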

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/WordNet/Syns2Index/Syns2Index.cs
----------------------------------------------------------------------
diff --git a/src/contrib/WordNet/Syns2Index/Syns2Index.cs b/src/contrib/WordNet/Syns2Index/Syns2Index.cs
index ac5bea6..da96a8a 100644
--- a/src/contrib/WordNet/Syns2Index/Syns2Index.cs
+++ b/src/contrib/WordNet/Syns2Index/Syns2Index.cs
@@ -29,264 +29,264 @@ using IndexWriter = Lucene.Net.Index.IndexWriter;
 
 namespace WorldNet.Net
 {
-	
-	/// <summary> Convert the prolog file wn_s.pl from the <a href="http://www.cogsci.princeton.edu/2.0/WNprolog-2.0.tar.gz">WordNet prolog download</a>
-	/// into a Lucene index suitable for looking up synonyms and performing query expansion (<see cref="SynExpand.Expand"/>).
-	/// 
-	/// This has been tested with WordNet 2.0.
-	/// 
-	/// The index has fields named "word" (<see cref="F_WORD"/>)
-	/// and "syn" (<see cref="F_SYN"/>).
-	/// <p>
-	/// The source word (such as 'big') can be looked up in the
-	/// "word" field, and if present there will be fields named "syn"
-	/// for every synonym. What's tricky here is that there could be <b>multiple</b>
-	/// fields with the same name, in the general case for words that have multiple synonyms.
-	/// That's not a problem with Lucene, you just use <see cref="Document.GetValues"/>
-	/// </p>
-	/// <p>
-	/// While the WordNet file distinguishes groups of synonyms with
-	/// related meanings we don't do that here.
-	/// </p>
-	/// This can take 4 minutes to execute and build an index on a "fast" system and the index takes up almost 3 MB.
-	/// </summary>
-	/// 
-	/// <seealso cref="http://www.cogsci.princeton.edu/~wn/"></seealso>
-	/// <seealso cref="http://www.cogsci.princeton.edu/~wn/man/prologdb.5WN.html"></seealso>
-	/// <seealso cref="http://www.hostmon.com/rfc/advanced.jsp"> </seealso>
-	public class Syns2Index
-	{
-		/// <summary> </summary>
-		private static readonly System.IO.StreamWriter o;
-		
-		/// <summary> </summary>
-		private static readonly System.IO.StreamWriter err;
-		
-		/// <summary> </summary>
-		public const System.String F_SYN = "syn";
-		
-		/// <summary> </summary>
-		public const System.String F_WORD = "word";
-		
-		/// <summary> </summary>
-		private static readonly Analyzer ana = new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_CURRENT);
-		
-		/// <summary> 
-		/// Takes arg of prolog file name and index directory.
-		/// </summary>
-		[STAThread]
-		public static void  Main(System.String[] args)
-		{
-			// get command line arguments
-			String prologFilename = null; // name of file "wn_s.pl"
-			String indexDir = null;
-			if (args.Length == 2)
-			{
-				prologFilename = args[0];
-				indexDir = args[1];
-			}
-			else
-			{
-				Usage();
-				Environment.Exit(1);
-			}
-			
-			// ensure that the prolog file is readable
-			if (!(new FileInfo(prologFilename)).Exists)
-			{
-				err.WriteLine("Error: cannot read Prolog file: " + prologFilename);
-				Environment.Exit(1);
-			}
-			// exit if the target index directory already exists
-			if (Directory.Exists((new FileInfo(indexDir)).FullName))
-			{
-				err.WriteLine("Error: index directory already exists: " + indexDir);
-				err.WriteLine("Please specify a name of a non-existent directory");
-				Environment.Exit(1);
-			}
-			
-			o.WriteLine("Opening Prolog file " + prologFilename);
-			var fis = new FileStream(prologFilename, FileMode.Open, FileAccess.Read);
-			var br = new StreamReader(new StreamReader(fis, System.Text.Encoding.Default).BaseStream, new StreamReader(fis, System.Text.Encoding.Default).CurrentEncoding);
-			String line;
-			
-			// maps a word to all the "groups" it's in
-			System.Collections.IDictionary word2Nums = new System.Collections.SortedList();
-			// maps a group to all the words in it
-			System.Collections.IDictionary num2Words = new System.Collections.SortedList();
-			// number of rejected words
-			var ndecent = 0;
-			
-			// status output
-			var mod = 1;
-			var row = 1;
-			// parse prolog file
-			o.WriteLine("[1/2] Parsing " + prologFilename);
-			while ((line = br.ReadLine()) != null)
-			{
-				// occasional progress
-				if ((++row) % mod == 0) // periodically print out line we read in
-				{
-					mod *= 2;
-					o.WriteLine("\t" + row + " " + line + " " + word2Nums.Count + " " + num2Words.Count + " ndecent=" + ndecent);
-				}
-				
-				// syntax check
-				if (!line.StartsWith("s("))
-				{
-					err.WriteLine("OUCH: " + line);
-					Environment.Exit(1);
-				}
-				
-				// parse line
-				line = line.Substring(2);
-				var comma = line.IndexOf(',');
-				var num = line.Substring(0, comma);
-				var q1 = line.IndexOf('\'');
-				line = line.Substring(q1 + 1);
-				var q2 = line.IndexOf('\'');
-				var word = line.Substring(0, q2).ToLower().Replace("''", "'");
-				
-				// make sure is a normal word
-				if (!IsDecent(word))
-				{
-					ndecent++;
-					continue; // don't store words w/ spaces
-				}
-				
-				// 1/2: word2Nums map
-				// append to entry or add new one
-				var lis = (System.Collections.IList) word2Nums[word];
-				if (lis == null)
-				{
-					lis = new List<String> {num};
-					word2Nums[word] = lis;
-				}
-				else
-					lis.Add(num);
-				
-				// 2/2: num2Words map
-				lis = (System.Collections.IList) num2Words[num];
-				if (lis == null)
-				{
-					lis = new List<String> { word };
-					num2Words[num] = lis;
-				}
-				else
-					lis.Add(word);
-			}
-			
-			// close the streams
-			fis.Close();
-			br.Close();
-			
-			// create the index
-			o.WriteLine("[2/2] Building index to store synonyms, " + " map sizes are " + word2Nums.Count + " and " + num2Words.Count);
-			Index(indexDir, word2Nums, num2Words);
-		}
-		
-		/// <summary> 
-		/// Checks to see if a word contains only alphabetic characters by
-		/// checking it one character at a time.
-		/// </summary>
-		/// <param name="s">string to check </param>
-		/// <returns> <c>true</c> if the string is decent</returns>
-		private static bool IsDecent(String s)
-		{
-			var len = s.Length;
-			for (var i = 0; i < len; i++)
-			{
-				if (!Char.IsLetter(s[i]))
-				{
-					return false;
-				}
-			}
-			return true;
-		}
-		
-		/// <summary> 
-		/// Forms a Lucene index based on the 2 maps.
-		/// </summary>
-		/// <param name="indexDir">the direcotry where the index should be created</param>
-		/// <param name="word2Nums">word2Nums</param>
-		/// <param name="num2Words">num2Words</param>
-		private static void  Index(String indexDir, System.Collections.IDictionary word2Nums, System.Collections.IDictionary num2Words)
-		{
-			var row = 0;
-			var mod = 1;
-			
-			using (var dir = FSDirectory.Open(new DirectoryInfo(indexDir)))
-			{
-				var writer = new IndexWriter(dir, ana, true, IndexWriter.MaxFieldLength.LIMITED);
-				writer.UseCompoundFile = true; // why?
+    
+    /// <summary> Convert the prolog file wn_s.pl from the <a href="http://www.cogsci.princeton.edu/2.0/WNprolog-2.0.tar.gz">WordNet prolog download</a>
+    /// into a Lucene index suitable for looking up synonyms and performing query expansion (<see cref="SynExpand.Expand"/>).
+    /// 
+    /// This has been tested with WordNet 2.0.
+    /// 
+    /// The index has fields named "word" (<see cref="F_WORD"/>)
+    /// and "syn" (<see cref="F_SYN"/>).
+    /// <p>
+    /// The source word (such as 'big') can be looked up in the
+    /// "word" field, and if present there will be fields named "syn"
+    /// for every synonym. What's tricky here is that there could be <b>multiple</b>
+    /// fields with the same name, in the general case for words that have multiple synonyms.
+    /// That's not a problem with Lucene; you just use <see cref="Document.GetValues"/>.
+    /// </p>
+    /// <p>
+    /// While the WordNet file distinguishes groups of synonyms with
+    /// related meanings we don't do that here.
+    /// </p>
+    /// This can take 4 minutes to execute and build an index on a "fast" system, and the index takes up almost 3 MB.
+    /// </summary>
+    /// 
+    /// <seealso cref="http://www.cogsci.princeton.edu/~wn/"></seealso>
+    /// <seealso cref="http://www.cogsci.princeton.edu/~wn/man/prologdb.5WN.html"></seealso>
+    /// <seealso cref="http://www.hostmon.com/rfc/advanced.jsp"> </seealso>
+    public class Syns2Index
+    {
+        /// <summary> </summary>
+        private static readonly System.IO.StreamWriter o;
+        
+        /// <summary> </summary>
+        private static readonly System.IO.StreamWriter err;
+        
+        /// <summary> </summary>
+        public const System.String F_SYN = "syn";
+        
+        /// <summary> </summary>
+        public const System.String F_WORD = "word";
+        
+        /// <summary> </summary>
+        private static readonly Analyzer ana = new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_CURRENT);
+        
+        /// <summary> 
+        /// Takes arg of prolog file name and index directory.
+        /// </summary>
+        [STAThread]
+        public static void  Main(System.String[] args)
+        {
+            // get command line arguments
+            String prologFilename = null; // name of file "wn_s.pl"
+            String indexDir = null;
+            if (args.Length == 2)
+            {
+                prologFilename = args[0];
+                indexDir = args[1];
+            }
+            else
+            {
+                Usage();
+                Environment.Exit(1);
+            }
+            
+            // ensure that the prolog file is readable
+            if (!(new FileInfo(prologFilename)).Exists)
+            {
+                err.WriteLine("Error: cannot read Prolog file: " + prologFilename);
+                Environment.Exit(1);
+            }
+            // exit if the target index directory already exists
+            if (Directory.Exists((new FileInfo(indexDir)).FullName))
+            {
+                err.WriteLine("Error: index directory already exists: " + indexDir);
+                err.WriteLine("Please specify a name of a non-existent directory");
+                Environment.Exit(1);
+            }
+            
+            o.WriteLine("Opening Prolog file " + prologFilename);
+            var fis = new FileStream(prologFilename, FileMode.Open, FileAccess.Read);
+            var br = new StreamReader(new StreamReader(fis, System.Text.Encoding.Default).BaseStream, new StreamReader(fis, System.Text.Encoding.Default).CurrentEncoding);
+            String line;
+            
+            // maps a word to all the "groups" it's in
+            System.Collections.IDictionary word2Nums = new System.Collections.SortedList();
+            // maps a group to all the words in it
+            System.Collections.IDictionary num2Words = new System.Collections.SortedList();
+            // number of rejected words
+            var ndecent = 0;
+            
+            // status output
+            var mod = 1;
+            var row = 1;
+            // parse prolog file
+            o.WriteLine("[1/2] Parsing " + prologFilename);
+            while ((line = br.ReadLine()) != null)
+            {
+                // occasional progress
+                if ((++row) % mod == 0) // periodically print out line we read in
+                {
+                    mod *= 2;
+                    o.WriteLine("\t" + row + " " + line + " " + word2Nums.Count + " " + num2Words.Count + " ndecent=" + ndecent);
+                }
+                
+                // syntax check
+                if (!line.StartsWith("s("))
+                {
+                    err.WriteLine("OUCH: " + line);
+                    Environment.Exit(1);
+                }
+                
+                // parse line
+                line = line.Substring(2);
+                var comma = line.IndexOf(',');
+                var num = line.Substring(0, comma);
+                var q1 = line.IndexOf('\'');
+                line = line.Substring(q1 + 1);
+                var q2 = line.IndexOf('\'');
+                var word = line.Substring(0, q2).ToLower().Replace("''", "'");
+                
+                // make sure is a normal word
+                if (!IsDecent(word))
+                {
+                    ndecent++;
+                    continue; // don't store words with spaces or other non-letter characters
+                }
+                
+                // 1/2: word2Nums map
+                // append to entry or add new one
+                var lis = (System.Collections.IList) word2Nums[word];
+                if (lis == null)
+                {
+                    lis = new List<String> {num};
+                    word2Nums[word] = lis;
+                }
+                else
+                    lis.Add(num);
+                
+                // 2/2: num2Words map
+                lis = (System.Collections.IList) num2Words[num];
+                if (lis == null)
+                {
+                    lis = new List<String> { word };
+                    num2Words[num] = lis;
+                }
+                else
+                    lis.Add(word);
+            }
+            
+            // close the streams
+            fis.Close();
+            br.Close();
+            
+            // create the index
+            o.WriteLine("[2/2] Building index to store synonyms, " + " map sizes are " + word2Nums.Count + " and " + num2Words.Count);
+            Index(indexDir, word2Nums, num2Words);
+        }
+        
+        /// <summary> 
+        /// Checks whether a word contains only alphabetic characters,
+        /// examining it one character at a time.
+        /// </summary>
+        /// <param name="s">string to check </param>
+        /// <returns> <c>true</c> if the string is decent</returns>
+        private static bool IsDecent(String s)
+        {
+            var len = s.Length;
+            for (var i = 0; i < len; i++)
+            {
+                if (!Char.IsLetter(s[i]))
+                {
+                    return false;
+                }
+            }
+            return true;
+        }
+        
+        /// <summary> 
+        /// Forms a Lucene index based on the 2 maps.
+        /// </summary>
+        /// <param name="indexDir">the direcotry where the index should be created</param>
+        /// <param name="word2Nums">word2Nums</param>
+        /// <param name="num2Words">num2Words</param>
+        private static void  Index(String indexDir, System.Collections.IDictionary word2Nums, System.Collections.IDictionary num2Words)
+        {
+            var row = 0;
+            var mod = 1;
+            
+            using (var dir = FSDirectory.Open(new DirectoryInfo(indexDir)))
+            {
+                var writer = new IndexWriter(dir, ana, true, IndexWriter.MaxFieldLength.LIMITED);
+                writer.UseCompoundFile = true; // why?
 
-				var i1 = word2Nums.Keys.GetEnumerator();
-				while (i1.MoveNext())
-				{
-					var g = (String)i1.Current;
-					var doc = new Document();
+                var i1 = word2Nums.Keys.GetEnumerator();
+                while (i1.MoveNext())
+                {
+                    var g = (String)i1.Current;
+                    var doc = new Document();
 
-					var n = Index(word2Nums, num2Words, g, doc);
-					if (n > 0)
-					{
-						doc.Add(new Field(F_WORD, g, Field.Store.YES, Field.Index.NOT_ANALYZED));
-						if ((++row % mod) == 0)
-						{
-							o.WriteLine("\trow=" + row + "/" + word2Nums.Count + " doc= " + doc);
-							mod *= 2;
-						}
-						writer.AddDocument(doc);
-					}
-				}
-				o.WriteLine("Optimizing..");
-				writer.Optimize();
-				writer.Close();
-			}
-			
-		}
+                    var n = Index(word2Nums, num2Words, g, doc);
+                    if (n > 0)
+                    {
+                        doc.Add(new Field(F_WORD, g, Field.Store.YES, Field.Index.NOT_ANALYZED));
+                        if ((++row % mod) == 0)
+                        {
+                            o.WriteLine("\trow=" + row + "/" + word2Nums.Count + " doc= " + doc);
+                            mod *= 2;
+                        }
+                        writer.AddDocument(doc);
+                    }
+                }
+                o.WriteLine("Optimizing..");
+                writer.Optimize();
+                writer.Close();
+            }
+            
+        }
 
-		/// <summary> 
-		/// Given the two maps, fills a document for one word.
-		/// </summary>
-		private static int Index(System.Collections.IDictionary word2Nums, System.Collections.IDictionary num2Words, System.String g, Document doc)
-		{
-			var keys = (System.Collections.IList) word2Nums[g]; // get list of key#'s
-			var i2 = keys.GetEnumerator();
-			
-			var already = new System.Collections.SortedList(); // keep them sorted
-			
-			// pass 1: fill up 'already' with all words
-			while (i2.MoveNext()) // for each key#
-			{
-				foreach (var item in
-					((System.Collections.IList) num2Words[i2.Current]).Cast<object>().Where(item => already.Contains(item) == false))
-				{
-					already.Add(item, item);
-				}
-			}
+        /// <summary> 
+        /// Given the two maps, fills a document for one word.
+        /// </summary>
+        private static int Index(System.Collections.IDictionary word2Nums, System.Collections.IDictionary num2Words, System.String g, Document doc)
+        {
+            var keys = (System.Collections.IList) word2Nums[g]; // get list of key#'s
+            var i2 = keys.GetEnumerator();
+            
+            var already = new System.Collections.SortedList(); // keep them sorted
+            
+            // pass 1: fill up 'already' with all words
+            while (i2.MoveNext()) // for each key#
+            {
+                foreach (var item in
+                    ((System.Collections.IList) num2Words[i2.Current]).Cast<object>().Where(item => already.Contains(item) == false))
+                {
+                    already.Add(item, item);
+                }
+            }
 
-			var num = 0;
-			already.Remove(g); // of course a word is its own synonym
-			var it = already.GetEnumerator();
-			while (it.MoveNext())
-			{
-				var cur = (String) it.Key;
-				// don't store things like 'pit bull' -> 'american pit bull'
-				if (!IsDecent(cur))
-				{
-					continue;
-				}
-				num++;
-				doc.Add(new Field(F_SYN, cur, Field.Store.YES, Field.Index.NO));
-			}
-			return num;
-		}
-		
-		/// <summary>Prints command-line usage.</summary>
-		private static void  Usage()
-		{
-			o.WriteLine("\n\n" + typeof(Syns2Index) + " <prolog file> <index dir>\n\n");
-		}
+            var num = 0;
+            already.Remove(g); // of course a word is its own synonym
+            var it = already.GetEnumerator();
+            while (it.MoveNext())
+            {
+                var cur = (String) it.Key;
+                // don't store things like 'pit bull' -> 'american pit bull'
+                if (!IsDecent(cur))
+                {
+                    continue;
+                }
+                num++;
+                doc.Add(new Field(F_SYN, cur, Field.Store.YES, Field.Index.NO));
+            }
+            return num;
+        }
+        
+        /// <summary>Prints command-line usage.</summary>
+        private static void  Usage()
+        {
+            o.WriteLine("\n\n" + typeof(Syns2Index) + " <prolog file> <index dir>\n\n");
+        }
 
-	}
+    }
 }
\ No newline at end of file

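For reference, a minimal sketch of how an index built by Syns2Index might be queried back for synonyms. This is an illustration only: the class name SynLookupSketch is hypothetical, the index path and lookup word are taken from the command line, and the Lucene.Net 3.x API names (FSDirectory.Open, IndexSearcher, TopDocs) are assumed to match the rest of this tree; the WordNet contrib's SynLookup tool covers the same ground more completely.

    using System;
    using Lucene.Net.Index;
    using Lucene.Net.Search;
    using Lucene.Net.Store;

    public static class SynLookupSketch
    {
        // args[0] = index directory created by Syns2Index, args[1] = word to look up
        public static void Main(string[] args)
        {
            using (var dir = FSDirectory.Open(new System.IO.DirectoryInfo(args[0])))
            {
                var searcher = new IndexSearcher(dir, true); // open read-only
                // F_WORD ("word") is indexed NOT_ANALYZED, so an exact TermQuery matches it
                var hits = searcher.Search(new TermQuery(new Term("word", args[1])), 1);
                if (hits.TotalHits == 0)
                {
                    Console.WriteLine("no synonyms found for " + args[1]);
                }
                else
                {
                    // F_SYN ("syn") is stored but not indexed, so read it back from the document
                    var doc = searcher.Doc(hits.ScoreDocs[0].Doc);
                    foreach (var syn in doc.GetValues("syn"))
                        Console.WriteLine(syn);
                }
                searcher.Close();
            }
        }
    }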

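The next part in this series converts the generated Spanish Snowball stemmer. For orientation, a minimal sketch of how such a stemmer is usually exercised, going through the contrib SnowballAnalyzer rather than driving SpanishStemmer directly. This assumes the Lucene.Net 3.x attribute-based TokenStream API (AddAttribute<ITermAttribute>); the sample text and the expected stem are illustrative only.

    using System;
    using System.IO;
    using Lucene.Net.Analysis.Snowball;
    using Lucene.Net.Analysis.Tokenattributes;

    public static class SpanishStemDemo
    {
        public static void Main()
        {
            // the name "Spanish" resolves to SF.Snowball.Ext.SpanishStemmer internally
            var analyzer = new SnowballAnalyzer(Lucene.Net.Util.Version.LUCENE_30, "Spanish");
            var stream = analyzer.TokenStream("f", new StringReader("caminando por las calles"));
            var term = stream.AddAttribute<ITermAttribute>();
            while (stream.IncrementToken())
                Console.WriteLine(term.Term); // e.g. "caminando" comes out as "camin"
            stream.Close();
        }
    }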
[35/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/SpanishStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/SpanishStemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/SpanishStemmer.cs
index 4355da6..eaf9904 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/SpanishStemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/SpanishStemmer.cs
@@ -24,1213 +24,1213 @@ namespace SF.Snowball.Ext
 #pragma warning disable 162
 
     /// <summary> Generated class implementing code defined by a snowball script.</summary>
-	public class SpanishStemmer : SnowballProgram
-	{
-		public SpanishStemmer()
-		{
-			InitBlock();
-		}
-		private void  InitBlock()
-		{
-			a_0 = new Among[]{new Among("", - 1, 6, "", this), new Among("\u00E1", 0, 1, "", this), new Among("\u00E9", 0, 2, "", this), new Among("\u00ED", 0, 3, "", this), new Among("\u00F3", 0, 4, "", this), new Among("\u00FA", 0, 5, "", this)};
-			a_1 = new Among[]{new Among("la", - 1, - 1, "", this), new Among("sela", 0, - 1, "", this), new Among("le", - 1, - 1, "", this), new Among("me", - 1, - 1, "", this), new Among("se", - 1, - 1, "", this), new Among("lo", - 1, - 1, "", this), new Among("selo", 5, - 1, "", this), new Among("las", - 1, - 1, "", this), new Among("selas", 7, - 1, "", this), new Among("les", - 1, - 1, "", this), new Among("los", - 1, - 1, "", this), new Among("selos", 10, - 1, "", this), new Among("nos", - 1, - 1, "", this)};
-			a_2 = new Among[]{new Among("ando", - 1, 6, "", this), new Among("iendo", - 1, 6, "", this), new Among("yendo", - 1, 7, "", this), new Among("\u00E1ndo", - 1, 2, "", this), new Among("i\u00E9ndo", - 1, 1, "", this), new Among("ar", - 1, 6, "", this), new Among("er", - 1, 6, "", this), new Among("ir", - 1, 6, "", this), new Among("\u00E1r", - 1, 3, "", this), new Among("\u00E9r", - 1, 4, "", this), new Among("\u00EDr", - 1, 5, "", this)};
-			a_3 = new Among[]{new Among("ic", - 1, - 1, "", this), new Among("ad", - 1, - 1, "", this), new Among("os", - 1, - 1, "", this), new Among("iv", - 1, 1, "", this)};
-			a_4 = new Among[]{new Among("able", - 1, 1, "", this), new Among("ible", - 1, 1, "", this)};
-			a_5 = new Among[]{new Among("ic", - 1, 1, "", this), new Among("abil", - 1, 1, "", this), new Among("iv", - 1, 1, "", this)};
-			a_6 = new Among[]{new Among("ica", - 1, 1, "", this), new Among("encia", - 1, 5, "", this), new Among("adora", - 1, 2, "", this), new Among("osa", - 1, 1, "", this), new Among("ista", - 1, 1, "", this), new Among("iva", - 1, 9, "", this), new Among("anza", - 1, 1, "", this), new Among("log\u00EDa", - 1, 3, "", this), new Among("idad", - 1, 8, "", this), new Among("able", - 1, 1, "", this), new Among("ible", - 1, 1, "", this), new Among("mente", - 1, 7, "", this), new Among("amente", 11, 6, "", this), new Among("aci\u00F3n", - 1, 2, "", this), new Among("uci\u00F3n", - 1, 4, "", this), new Among("ico", - 1, 1, "", this), new Among("ismo", - 1, 1, "", this), new Among("oso", - 1, 1, "", this), new Among("amiento", - 1, 1, "", this), new Among("imiento", - 1, 1, "", this), new Among("ivo", - 1, 9, "", this), new Among("ador", - 1, 2, "", this), new Among("icas", - 1, 1, "", this), new Among("encias", - 1, 5, "", this), new Among("adoras", - 1, 2, "", this), new Among("osas", - 1, 1,
  "", this), new Among("istas", - 1, 1, "", this), new Among("ivas", - 1, 9, "", this), new Among("anzas", - 1, 1, "", this), new Among("log\u00EDas", - 1, 3, "", this), new Among("idades", - 1, 8, "", this), new Among("ables", - 1, 1, "", this), new Among("ibles", - 1, 1, "", this), new Among("aciones", - 1, 2, "", this), new Among("uciones", - 1, 4, "", this), new Among("adores", - 1, 2, "", this), new Among("icos", - 1, 1, "", this), new Among("ismos", - 1, 1, "", this), new Among("osos", - 1, 1, "", this), new Among("amientos", - 1, 1, "", this), new Among("imientos", - 1, 1, "", this), new Among("ivos", - 1, 9, "", this)};
-			a_7 = new Among[]{new Among("ya", - 1, 1, "", this), new Among("ye", - 1, 1, "", this), new Among("yan", - 1, 1, "", this), new Among("yen", - 1, 1, "", this), new Among("yeron", - 1, 1, "", this), new Among("yendo", - 1, 1, "", this), new Among("yo", - 1, 1, "", this), new Among("yas", - 1, 1, "", this), new Among("yes", - 1, 1, "", this), new Among("yais", - 1, 1, "", this), new Among("yamos", - 1, 1, "", this), new Among("y\u00F3", - 1, 1, "", this)};
-			a_8 = new Among[]{new Among("aba", - 1, 2, "", this), new Among("ada", - 1, 2, "", this), new Among("ida", - 1, 2, "", this), new Among("ara", - 1, 2, "", this), new Among("iera", - 1, 2, "", this), new Among("\u00EDa", - 1, 2, "", this), new Among("ar\u00EDa", 5, 2, "", this), new Among("er\u00EDa", 5, 2, "", this), new Among("ir\u00EDa", 5, 2, "", this), new Among("ad", - 1, 2, "", this), new Among("ed", - 1, 2, "", this), new Among("id", - 1, 2, "", this), new Among("ase", - 1, 2, "", this), new Among("iese", - 1, 2, "", this), new Among("aste", - 1, 2, "", this), new Among("iste", - 1, 2, "", this), new Among("an", - 1, 2, "", this), new Among("aban", 16, 2, "", this), new Among("aran", 16, 2, "", this), new Among("ieran", 16, 2, "", this), new Among("\u00EDan", 16, 2, "", this), new Among("ar\u00EDan", 20, 2, "", this), new Among("er\u00EDan", 20, 2, "", this), new Among("ir\u00EDan", 20, 2, "", this), new Among("en", - 1, 1, "", this), new Among("asen", 24, 2, "", this), ne
 w Among("iesen", 24, 2, "", this), new Among("aron", - 1, 2, "", this), new Among("ieron", - 1, 2, "", this), new Among("ar\u00E1n", - 1, 2, "", this), new Among("er\u00E1n", - 1, 2, "", this), new Among("ir\u00E1n", - 1, 2, "", this), new Among("ado", - 1, 2, "", this), new Among("ido", - 1, 2, "", this), new Among("ando", - 1, 2, "", this), new Among("iendo", - 1, 2, "", this), new Among("ar", - 1, 2, "", this), new Among("er", - 1, 2, "", this), new Among("ir", - 1, 2, "", this), new Among("as", - 1, 2, "", this), new Among("abas", 39, 2, "", this), new Among("adas", 39, 2, "", this), new Among("idas", 39, 2, "", this), new Among("aras", 39, 2, "", this), new Among("ieras", 39, 2, "", this), new Among("\u00EDas", 39, 2, "", this), new Among("ar\u00EDas", 45, 2, "", this), new Among("er\u00EDas", 45, 2, "", this), new Among("ir\u00EDas", 45, 2, "", this), new Among("es", - 1, 1, "", this), new Among("ases", 49, 2, "", this), new Among("ieses", 49, 2, "", this), new Among("abais", 
 - 1, 2, "", this), new Among("arais", - 
-				1, 2, "", this), new Among("ierais", - 1, 2, "", this), new Among("\u00EDais", - 1, 2, "", this), new Among("ar\u00EDais", 55, 2, "", this), new Among("er\u00EDais", 55, 2, "", this), new Among("ir\u00EDais", 55, 2, "", this), new Among("aseis", - 1, 2, "", this), new Among("ieseis", - 1, 2, "", this), new Among("asteis", - 1, 2, "", this), new Among("isteis", - 1, 2, "", this), new Among("\u00E1is", - 1, 2, "", this), new Among("\u00E9is", - 1, 1, "", this), new Among("ar\u00E9is", 64, 2, "", this), new Among("er\u00E9is", 64, 2, "", this), new Among("ir\u00E9is", 64, 2, "", this), new Among("ados", - 1, 2, "", this), new Among("idos", - 1, 2, "", this), new Among("amos", - 1, 2, "", this), new Among("\u00E1bamos", 70, 2, "", this), new Among("\u00E1ramos", 70, 2, "", this), new Among("i\u00E9ramos", 70, 2, "", this), new Among("\u00EDamos", 70, 2, "", this), new Among("ar\u00EDamos", 74, 2, "", this), new Among("er\u00EDamos", 74, 2, "", this), new Among("ir\u00EDamos", 74, 2,
  "", this), new Among("emos", - 1, 1, "", this), new Among("aremos", 78, 2, "", this), new Among("eremos", 78, 2, "", this), new Among("iremos", 78, 2, "", this), new Among("\u00E1semos", 78, 2, "", this), new Among("i\u00E9semos", 78, 2, "", this), new Among("imos", - 1, 2, "", this), new Among("ar\u00E1s", - 1, 2, "", this), new Among("er\u00E1s", - 1, 2, "", this), new Among("ir\u00E1s", - 1, 2, "", this), new Among("\u00EDs", - 1, 2, "", this), new Among("ar\u00E1", - 1, 2, "", this), new Among("er\u00E1", - 1, 2, "", this), new Among("ir\u00E1", - 1, 2, "", this), new Among("ar\u00E9", - 1, 2, "", this), new Among("er\u00E9", - 1, 2, "", this), new Among("ir\u00E9", - 1, 2, "", this), new Among("i\u00F3", - 1, 2, "", this)};
-			a_9 = new Among[]{new Among("a", - 1, 1, "", this), new Among("e", - 1, 2, "", this), new Among("o", - 1, 1, "", this), new Among("os", - 1, 1, "", this), new Among("\u00E1", - 1, 1, "", this), new Among("\u00E9", - 1, 2, "", this), new Among("\u00ED", - 1, 1, "", this), new Among("\u00F3", - 1, 1, "", this)};
-		}
-		
-		private Among[] a_0;
-		private Among[] a_1;
-		private Among[] a_2;
-		private Among[] a_3;
-		private Among[] a_4;
-		private Among[] a_5;
-		private Among[] a_6;
-		private Among[] a_7;
-		private Among[] a_8;
-		private Among[] a_9;
-		private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (1), (char) (17), (char) (4), (char) (10)};
-		
-		private int I_p2;
-		private int I_p1;
-		private int I_pV;
-		
-		protected internal virtual void  copy_from(SpanishStemmer other)
-		{
-			I_p2 = other.I_p2;
-			I_p1 = other.I_p1;
-			I_pV = other.I_pV;
-			base.copy_from(other);
-		}
-		
-		private bool r_mark_regions()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_6;
-			int v_8;
-			// (, line 31
-			I_pV = limit;
-			I_p1 = limit;
-			I_p2 = limit;
-			// do, line 37
-			v_1 = cursor;
-			do 
-			{
-				// (, line 37
-				// or, line 39
-				do 
-				{
-					v_2 = cursor;
-					do 
-					{
-						// (, line 38
-						if (!(in_grouping(g_v, 97, 252)))
-						{
-							goto lab2_brk;
-						}
-						// or, line 38
-						do 
-						{
-							v_3 = cursor;
-							do 
-							{
-								// (, line 38
-								if (!(out_grouping(g_v, 97, 252)))
-								{
-									goto lab4_brk;
-								}
-								// gopast, line 38
-								while (true)
-								{
-									do 
-									{
-										if (!(in_grouping(g_v, 97, 252)))
-										{
-											goto lab6_brk;
-										}
-										goto golab5_brk;
-									}
-									while (false);
+    public class SpanishStemmer : SnowballProgram
+    {
+        public SpanishStemmer()
+        {
+            InitBlock();
+        }
+        private void  InitBlock()
+        {
+            a_0 = new Among[]{new Among("", - 1, 6, "", this), new Among("\u00E1", 0, 1, "", this), new Among("\u00E9", 0, 2, "", this), new Among("\u00ED", 0, 3, "", this), new Among("\u00F3", 0, 4, "", this), new Among("\u00FA", 0, 5, "", this)};
+            a_1 = new Among[]{new Among("la", - 1, - 1, "", this), new Among("sela", 0, - 1, "", this), new Among("le", - 1, - 1, "", this), new Among("me", - 1, - 1, "", this), new Among("se", - 1, - 1, "", this), new Among("lo", - 1, - 1, "", this), new Among("selo", 5, - 1, "", this), new Among("las", - 1, - 1, "", this), new Among("selas", 7, - 1, "", this), new Among("les", - 1, - 1, "", this), new Among("los", - 1, - 1, "", this), new Among("selos", 10, - 1, "", this), new Among("nos", - 1, - 1, "", this)};
+            a_2 = new Among[]{new Among("ando", - 1, 6, "", this), new Among("iendo", - 1, 6, "", this), new Among("yendo", - 1, 7, "", this), new Among("\u00E1ndo", - 1, 2, "", this), new Among("i\u00E9ndo", - 1, 1, "", this), new Among("ar", - 1, 6, "", this), new Among("er", - 1, 6, "", this), new Among("ir", - 1, 6, "", this), new Among("\u00E1r", - 1, 3, "", this), new Among("\u00E9r", - 1, 4, "", this), new Among("\u00EDr", - 1, 5, "", this)};
+            a_3 = new Among[]{new Among("ic", - 1, - 1, "", this), new Among("ad", - 1, - 1, "", this), new Among("os", - 1, - 1, "", this), new Among("iv", - 1, 1, "", this)};
+            a_4 = new Among[]{new Among("able", - 1, 1, "", this), new Among("ible", - 1, 1, "", this)};
+            a_5 = new Among[]{new Among("ic", - 1, 1, "", this), new Among("abil", - 1, 1, "", this), new Among("iv", - 1, 1, "", this)};
+            a_6 = new Among[]{new Among("ica", - 1, 1, "", this), new Among("encia", - 1, 5, "", this), new Among("adora", - 1, 2, "", this), new Among("osa", - 1, 1, "", this), new Among("ista", - 1, 1, "", this), new Among("iva", - 1, 9, "", this), new Among("anza", - 1, 1, "", this), new Among("log\u00EDa", - 1, 3, "", this), new Among("idad", - 1, 8, "", this), new Among("able", - 1, 1, "", this), new Among("ible", - 1, 1, "", this), new Among("mente", - 1, 7, "", this), new Among("amente", 11, 6, "", this), new Among("aci\u00F3n", - 1, 2, "", this), new Among("uci\u00F3n", - 1, 4, "", this), new Among("ico", - 1, 1, "", this), new Among("ismo", - 1, 1, "", this), new Among("oso", - 1, 1, "", this), new Among("amiento", - 1, 1, "", this), new Among("imiento", - 1, 1, "", this), new Among("ivo", - 1, 9, "", this), new Among("ador", - 1, 2, "", this), new Among("icas", - 1, 1, "", this), new Among("encias", - 1, 5, "", this), new Among("adoras", - 1, 2, "", this), new Among("osas"
 , - 1, 1, "", this), new Among("istas", - 1, 1, "", this), new Among("ivas", - 1, 9, "", this), new Among("anzas", - 1, 1, "", this), new Among("log\u00EDas", - 1, 3, "", this), new Among("idades", - 1, 8, "", this), new Among("ables", - 1, 1, "", this), new Among("ibles", - 1, 1, "", this), new Among("aciones", - 1, 2, "", this), new Among("uciones", - 1, 4, "", this), new Among("adores", - 1, 2, "", this), new Among("icos", - 1, 1, "", this), new Among("ismos", - 1, 1, "", this), new Among("osos", - 1, 1, "", this), new Among("amientos", - 1, 1, "", this), new Among("imientos", - 1, 1, "", this), new Among("ivos", - 1, 9, "", this)};
+            a_7 = new Among[]{new Among("ya", - 1, 1, "", this), new Among("ye", - 1, 1, "", this), new Among("yan", - 1, 1, "", this), new Among("yen", - 1, 1, "", this), new Among("yeron", - 1, 1, "", this), new Among("yendo", - 1, 1, "", this), new Among("yo", - 1, 1, "", this), new Among("yas", - 1, 1, "", this), new Among("yes", - 1, 1, "", this), new Among("yais", - 1, 1, "", this), new Among("yamos", - 1, 1, "", this), new Among("y\u00F3", - 1, 1, "", this)};
+            a_8 = new Among[]{new Among("aba", - 1, 2, "", this), new Among("ada", - 1, 2, "", this), new Among("ida", - 1, 2, "", this), new Among("ara", - 1, 2, "", this), new Among("iera", - 1, 2, "", this), new Among("\u00EDa", - 1, 2, "", this), new Among("ar\u00EDa", 5, 2, "", this), new Among("er\u00EDa", 5, 2, "", this), new Among("ir\u00EDa", 5, 2, "", this), new Among("ad", - 1, 2, "", this), new Among("ed", - 1, 2, "", this), new Among("id", - 1, 2, "", this), new Among("ase", - 1, 2, "", this), new Among("iese", - 1, 2, "", this), new Among("aste", - 1, 2, "", this), new Among("iste", - 1, 2, "", this), new Among("an", - 1, 2, "", this), new Among("aban", 16, 2, "", this), new Among("aran", 16, 2, "", this), new Among("ieran", 16, 2, "", this), new Among("\u00EDan", 16, 2, "", this), new Among("ar\u00EDan", 20, 2, "", this), new Among("er\u00EDan", 20, 2, "", this), new Among("ir\u00EDan", 20, 2, "", this), new Among("en", - 1, 1, "", this), new Among("asen", 24, 2, "", 
 this), new Among("iesen", 24, 2, "", this), new Among("aron", - 1, 2, "", this), new Among("ieron", - 1, 2, "", this), new Among("ar\u00E1n", - 1, 2, "", this), new Among("er\u00E1n", - 1, 2, "", this), new Among("ir\u00E1n", - 1, 2, "", this), new Among("ado", - 1, 2, "", this), new Among("ido", - 1, 2, "", this), new Among("ando", - 1, 2, "", this), new Among("iendo", - 1, 2, "", this), new Among("ar", - 1, 2, "", this), new Among("er", - 1, 2, "", this), new Among("ir", - 1, 2, "", this), new Among("as", - 1, 2, "", this), new Among("abas", 39, 2, "", this), new Among("adas", 39, 2, "", this), new Among("idas", 39, 2, "", this), new Among("aras", 39, 2, "", this), new Among("ieras", 39, 2, "", this), new Among("\u00EDas", 39, 2, "", this), new Among("ar\u00EDas", 45, 2, "", this), new Among("er\u00EDas", 45, 2, "", this), new Among("ir\u00EDas", 45, 2, "", this), new Among("es", - 1, 1, "", this), new Among("ases", 49, 2, "", this), new Among("ieses", 49, 2, "", this), new Among(
 "abais", - 1, 2, "", this), new Among("arais", - 
+                1, 2, "", this), new Among("ierais", - 1, 2, "", this), new Among("\u00EDais", - 1, 2, "", this), new Among("ar\u00EDais", 55, 2, "", this), new Among("er\u00EDais", 55, 2, "", this), new Among("ir\u00EDais", 55, 2, "", this), new Among("aseis", - 1, 2, "", this), new Among("ieseis", - 1, 2, "", this), new Among("asteis", - 1, 2, "", this), new Among("isteis", - 1, 2, "", this), new Among("\u00E1is", - 1, 2, "", this), new Among("\u00E9is", - 1, 1, "", this), new Among("ar\u00E9is", 64, 2, "", this), new Among("er\u00E9is", 64, 2, "", this), new Among("ir\u00E9is", 64, 2, "", this), new Among("ados", - 1, 2, "", this), new Among("idos", - 1, 2, "", this), new Among("amos", - 1, 2, "", this), new Among("\u00E1bamos", 70, 2, "", this), new Among("\u00E1ramos", 70, 2, "", this), new Among("i\u00E9ramos", 70, 2, "", this), new Among("\u00EDamos", 70, 2, "", this), new Among("ar\u00EDamos", 74, 2, "", this), new Among("er\u00EDamos", 74, 2, "", this), new Among("ir\u00EDa
 mos", 74, 2, "", this), new Among("emos", - 1, 1, "", this), new Among("aremos", 78, 2, "", this), new Among("eremos", 78, 2, "", this), new Among("iremos", 78, 2, "", this), new Among("\u00E1semos", 78, 2, "", this), new Among("i\u00E9semos", 78, 2, "", this), new Among("imos", - 1, 2, "", this), new Among("ar\u00E1s", - 1, 2, "", this), new Among("er\u00E1s", - 1, 2, "", this), new Among("ir\u00E1s", - 1, 2, "", this), new Among("\u00EDs", - 1, 2, "", this), new Among("ar\u00E1", - 1, 2, "", this), new Among("er\u00E1", - 1, 2, "", this), new Among("ir\u00E1", - 1, 2, "", this), new Among("ar\u00E9", - 1, 2, "", this), new Among("er\u00E9", - 1, 2, "", this), new Among("ir\u00E9", - 1, 2, "", this), new Among("i\u00F3", - 1, 2, "", this)};
+            a_9 = new Among[]{new Among("a", - 1, 1, "", this), new Among("e", - 1, 2, "", this), new Among("o", - 1, 1, "", this), new Among("os", - 1, 1, "", this), new Among("\u00E1", - 1, 1, "", this), new Among("\u00E9", - 1, 2, "", this), new Among("\u00ED", - 1, 1, "", this), new Among("\u00F3", - 1, 1, "", this)};
+        }
+        
+        private Among[] a_0;
+        private Among[] a_1;
+        private Among[] a_2;
+        private Among[] a_3;
+        private Among[] a_4;
+        private Among[] a_5;
+        private Among[] a_6;
+        private Among[] a_7;
+        private Among[] a_8;
+        private Among[] a_9;
+        private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (1), (char) (17), (char) (4), (char) (10)};
+        
+        private int I_p2;
+        private int I_p1;
+        private int I_pV;
+        
+        protected internal virtual void  copy_from(SpanishStemmer other)
+        {
+            I_p2 = other.I_p2;
+            I_p1 = other.I_p1;
+            I_pV = other.I_pV;
+            base.copy_from(other);
+        }
+        
+        private bool r_mark_regions()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_6;
+            int v_8;
+            // (, line 31
+            I_pV = limit;
+            I_p1 = limit;
+            I_p2 = limit;
+            // do, line 37
+            v_1 = cursor;
+            do 
+            {
+                // (, line 37
+                // or, line 39
+                do 
+                {
+                    v_2 = cursor;
+                    do 
+                    {
+                        // (, line 38
+                        if (!(in_grouping(g_v, 97, 252)))
+                        {
+                            goto lab2_brk;
+                        }
+                        // or, line 38
+                        do 
+                        {
+                            v_3 = cursor;
+                            do 
+                            {
+                                // (, line 38
+                                if (!(out_grouping(g_v, 97, 252)))
+                                {
+                                    goto lab4_brk;
+                                }
+                                // gopast, line 38
+                                while (true)
+                                {
+                                    do 
+                                    {
+                                        if (!(in_grouping(g_v, 97, 252)))
+                                        {
+                                            goto lab6_brk;
+                                        }
+                                        goto golab5_brk;
+                                    }
+                                    while (false);
 
 lab6_brk: ;
-									
-									if (cursor >= limit)
-									{
-										goto lab4_brk;
-									}
-									cursor++;
-								}
+                                    
+                                    if (cursor >= limit)
+                                    {
+                                        goto lab4_brk;
+                                    }
+                                    cursor++;
+                                }
 
 golab5_brk: ;
-								
-								goto lab3_brk;
-							}
-							while (false);
+                                
+                                goto lab3_brk;
+                            }
+                            while (false);
 
 lab4_brk: ;
-							
-							cursor = v_3;
-							// (, line 38
-							if (!(in_grouping(g_v, 97, 252)))
-							{
-								goto lab2_brk;
-							}
-							// gopast, line 38
-							while (true)
-							{
-								do 
-								{
-									if (!(out_grouping(g_v, 97, 252)))
-									{
-										goto lab8_brk;
-									}
-									goto golab7_brk;
-								}
-								while (false);
+                            
+                            cursor = v_3;
+                            // (, line 38
+                            if (!(in_grouping(g_v, 97, 252)))
+                            {
+                                goto lab2_brk;
+                            }
+                            // gopast, line 38
+                            while (true)
+                            {
+                                do 
+                                {
+                                    if (!(out_grouping(g_v, 97, 252)))
+                                    {
+                                        goto lab8_brk;
+                                    }
+                                    goto golab7_brk;
+                                }
+                                while (false);
 
 lab8_brk: ;
-								
-								if (cursor >= limit)
-								{
-									goto lab2_brk;
-								}
-								cursor++;
-							}
+                                
+                                if (cursor >= limit)
+                                {
+                                    goto lab2_brk;
+                                }
+                                cursor++;
+                            }
 
 golab7_brk: ;
-							
-						}
-						while (false);
+                            
+                        }
+                        while (false);
 
 lab3_brk: ;
-						
-						goto lab1_brk;
-					}
-					while (false);
+                        
+                        goto lab1_brk;
+                    }
+                    while (false);
 
 lab2_brk: ;
-					
-					cursor = v_2;
-					// (, line 40
-					if (!(out_grouping(g_v, 97, 252)))
-					{
-						goto lab0_brk;
-					}
-					// or, line 40
-					do 
-					{
-						v_6 = cursor;
-						do 
-						{
-							// (, line 40
-							if (!(out_grouping(g_v, 97, 252)))
-							{
-								goto lab10_brk;
-							}
-							// gopast, line 40
-							while (true)
-							{
-								do 
-								{
-									if (!(in_grouping(g_v, 97, 252)))
-									{
-										goto lab12_brk;
-									}
-									goto golab11_brk;
-								}
-								while (false);
+                    
+                    cursor = v_2;
+                    // (, line 40
+                    if (!(out_grouping(g_v, 97, 252)))
+                    {
+                        goto lab0_brk;
+                    }
+                    // or, line 40
+                    do 
+                    {
+                        v_6 = cursor;
+                        do 
+                        {
+                            // (, line 40
+                            if (!(out_grouping(g_v, 97, 252)))
+                            {
+                                goto lab10_brk;
+                            }
+                            // gopast, line 40
+                            while (true)
+                            {
+                                do 
+                                {
+                                    if (!(in_grouping(g_v, 97, 252)))
+                                    {
+                                        goto lab12_brk;
+                                    }
+                                    goto golab11_brk;
+                                }
+                                while (false);
 
 lab12_brk: ;
-								
-								if (cursor >= limit)
-								{
-									goto lab10_brk;
-								}
-								cursor++;
-							}
+                                
+                                if (cursor >= limit)
+                                {
+                                    goto lab10_brk;
+                                }
+                                cursor++;
+                            }
 
 golab11_brk: ;
-							
-							goto lab9_brk;
-						}
-						while (false);
+                            
+                            goto lab9_brk;
+                        }
+                        while (false);
 
 lab10_brk: ;
-						
-						cursor = v_6;
-						// (, line 40
-						if (!(in_grouping(g_v, 97, 252)))
-						{
-							goto lab0_brk;
-						}
-						// next, line 40
-						if (cursor >= limit)
-						{
-							goto lab0_brk;
-						}
-						cursor++;
-					}
-					while (false);
+                        
+                        cursor = v_6;
+                        // (, line 40
+                        if (!(in_grouping(g_v, 97, 252)))
+                        {
+                            goto lab0_brk;
+                        }
+                        // next, line 40
+                        if (cursor >= limit)
+                        {
+                            goto lab0_brk;
+                        }
+                        cursor++;
+                    }
+                    while (false);
 
 lab9_brk: ;
-					
-				}
-				while (false);
+                    
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				// setmark pV, line 41
-				I_pV = cursor;
-			}
-			while (false);
+                
+                // setmark pV, line 41
+                I_pV = cursor;
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = v_1;
-			// do, line 43
-			v_8 = cursor;
-			do 
-			{
-				// (, line 43
-				// gopast, line 44
-				while (true)
-				{
-					do 
-					{
-						if (!(in_grouping(g_v, 97, 252)))
-						{
-							goto lab15_brk;
-						}
-						goto golab14_brk;
-					}
-					while (false);
+            
+            cursor = v_1;
+            // do, line 43
+            v_8 = cursor;
+            do 
+            {
+                // (, line 43
+                // gopast, line 44
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(in_grouping(g_v, 97, 252)))
+                        {
+                            goto lab15_brk;
+                        }
+                        goto golab14_brk;
+                    }
+                    while (false);
 
 lab15_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab13_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab13_brk;
+                    }
+                    cursor++;
+                }
 
 golab14_brk: ;
-				
-				// gopast, line 44
-				while (true)
-				{
-					do 
-					{
-						if (!(out_grouping(g_v, 97, 252)))
-						{
-							goto lab17_brk;
-						}
-						goto golab16_brk;
-					}
-					while (false);
+                
+                // gopast, line 44
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(out_grouping(g_v, 97, 252)))
+                        {
+                            goto lab17_brk;
+                        }
+                        goto golab16_brk;
+                    }
+                    while (false);
 
 lab17_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab13_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab13_brk;
+                    }
+                    cursor++;
+                }
 
 golab16_brk: ;
-				
-				// setmark p1, line 44
-				I_p1 = cursor;
-				// gopast, line 45
-				while (true)
-				{
-					do 
-					{
-						if (!(in_grouping(g_v, 97, 252)))
-						{
-							goto lab19_brk;
-						}
-						goto golab18_brk;
-					}
-					while (false);
+                
+                // setmark p1, line 44
+                I_p1 = cursor;
+                // gopast, line 45
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(in_grouping(g_v, 97, 252)))
+                        {
+                            goto lab19_brk;
+                        }
+                        goto golab18_brk;
+                    }
+                    while (false);
 
 lab19_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab13_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab13_brk;
+                    }
+                    cursor++;
+                }
 
 golab18_brk: ;
-				
-				// gopast, line 45
-				while (true)
-				{
-					do 
-					{
-						if (!(out_grouping(g_v, 97, 252)))
-						{
-							goto lab21_brk;
-						}
-						goto golab20_brk;
-					}
-					while (false);
+                
+                // gopast, line 45
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(out_grouping(g_v, 97, 252)))
+                        {
+                            goto lab21_brk;
+                        }
+                        goto golab20_brk;
+                    }
+                    while (false);
 
 lab21_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab13_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab13_brk;
+                    }
+                    cursor++;
+                }
 
 golab20_brk: ;
-				
-				// setmark p2, line 45
-				I_p2 = cursor;
-			}
-			while (false);
+                
+                // setmark p2, line 45
+                I_p2 = cursor;
+            }
+            while (false);
 
 lab13_brk: ;
-			
-			cursor = v_8;
-			return true;
-		}
-		
-		private bool r_postlude()
-		{
-			int among_var;
-			int v_1;
-			// repeat, line 49
-			while (true)
-			{
-				v_1 = cursor;
-				do 
-				{
-					// (, line 49
-					// [, line 50
-					bra = cursor;
-					// substring, line 50
-					among_var = find_among(a_0, 6);
-					if (among_var == 0)
-					{
-						goto lab5_brk;
-					}
-					// ], line 50
-					ket = cursor;
-					switch (among_var)
-					{
-						
-						case 0: 
-							goto lab5_brk;
-						
-						case 1: 
-							// (, line 51
-							// <-, line 51
-							slice_from("a");
-							break;
-						
-						case 2: 
-							// (, line 52
-							// <-, line 52
-							slice_from("e");
-							break;
-						
-						case 3: 
-							// (, line 53
-							// <-, line 53
-							slice_from("i");
-							break;
-						
-						case 4: 
-							// (, line 54
-							// <-, line 54
-							slice_from("o");
-							break;
-						
-						case 5: 
-							// (, line 55
-							// <-, line 55
-							slice_from("u");
-							break;
-						
-						case 6: 
-							// (, line 57
-							// next, line 57
-							if (cursor >= limit)
-							{
-								goto lab5_brk;
-							}
-							cursor++;
-							break;
-						}
-					goto replab0;
-				}
-				while (false);
+            
+            cursor = v_8;
+            return true;
+        }
+        
+        private bool r_postlude()
+        {
+            int among_var;
+            int v_1;
+            // repeat, line 49
+            while (true)
+            {
+                v_1 = cursor;
+                do 
+                {
+                    // (, line 49
+                    // [, line 50
+                    bra = cursor;
+                    // substring, line 50
+                    among_var = find_among(a_0, 6);
+                    if (among_var == 0)
+                    {
+                        goto lab5_brk;
+                    }
+                    // ], line 50
+                    ket = cursor;
+                    switch (among_var)
+                    {
+                        
+                        case 0: 
+                            goto lab5_brk;
+                        
+                        case 1: 
+                            // (, line 51
+                            // <-, line 51
+                            slice_from("a");
+                            break;
+                        
+                        case 2: 
+                            // (, line 52
+                            // <-, line 52
+                            slice_from("e");
+                            break;
+                        
+                        case 3: 
+                            // (, line 53
+                            // <-, line 53
+                            slice_from("i");
+                            break;
+                        
+                        case 4: 
+                            // (, line 54
+                            // <-, line 54
+                            slice_from("o");
+                            break;
+                        
+                        case 5: 
+                            // (, line 55
+                            // <-, line 55
+                            slice_from("u");
+                            break;
+                        
+                        case 6: 
+                            // (, line 57
+                            // next, line 57
+                            if (cursor >= limit)
+                            {
+                                goto lab5_brk;
+                            }
+                            cursor++;
+                            break;
+                        }
+                    goto replab0;
+                }
+                while (false);
 
 lab5_brk: ;
-				
-				cursor = v_1;
-				goto replab0_brk;
+                
+                cursor = v_1;
+                goto replab0_brk;
 
 replab0: ;
-			}
+            }
 
 replab0_brk: ;
-			
-			return true;
-		}
-		
-		private bool r_RV()
-		{
-			if (!(I_pV <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_R1()
-		{
-			if (!(I_p1 <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_R2()
-		{
-			if (!(I_p2 <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_attached_pronoun()
-		{
-			int among_var;
-			// (, line 67
-			// [, line 68
-			ket = cursor;
-			// substring, line 68
-			if (find_among_b(a_1, 13) == 0)
-			{
-				return false;
-			}
-			// ], line 68
-			bra = cursor;
-			// substring, line 72
-			among_var = find_among_b(a_2, 11);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// call RV, line 72
-			if (!r_RV())
-			{
-				return false;
-			}
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 73
-					// ], line 73
-					bra = cursor;
-					// <-, line 73
-					slice_from("iendo");
-					break;
-				
-				case 2: 
-					// (, line 74
-					// ], line 74
-					bra = cursor;
-					// <-, line 74
-					slice_from("ando");
-					break;
-				
-				case 3: 
-					// (, line 75
-					// ], line 75
-					bra = cursor;
-					// <-, line 75
-					slice_from("ar");
-					break;
-				
-				case 4: 
-					// (, line 76
-					// ], line 76
-					bra = cursor;
-					// <-, line 76
-					slice_from("er");
-					break;
-				
-				case 5: 
-					// (, line 77
-					// ], line 77
-					bra = cursor;
-					// <-, line 77
-					slice_from("ir");
-					break;
-				
-				case 6: 
-					// (, line 81
-					// delete, line 81
-					slice_del();
-					break;
-				
-				case 7: 
-					// (, line 82
-					// literal, line 82
-					if (!(eq_s_b(1, "u")))
-					{
-						return false;
-					}
-					// delete, line 82
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_standard_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			// (, line 86
-			// [, line 87
-			ket = cursor;
-			// substring, line 87
-			among_var = find_among_b(a_6, 42);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 87
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 98
-					// call R2, line 99
-					if (!r_R2())
-					{
-						return false;
-					}
-					// delete, line 99
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 103
-					// call R2, line 104
-					if (!r_R2())
-					{
-						return false;
-					}
-					// delete, line 104
-					slice_del();
-					// try, line 105
-					v_1 = limit - cursor;
-					do 
-					{
-						// (, line 105
-						// [, line 105
-						ket = cursor;
-						// literal, line 105
-						if (!(eq_s_b(2, "ic")))
-						{
-							cursor = limit - v_1;
-							goto lab0_brk;
-						}
-						// ], line 105
-						bra = cursor;
-						// call R2, line 105
-						if (!r_R2())
-						{
-							cursor = limit - v_1;
-							goto lab0_brk;
-						}
-						// delete, line 105
-						slice_del();
-					}
-					while (false);
+            
+            return true;
+        }
+        
+        private bool r_RV()
+        {
+            if (!(I_pV <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_R1()
+        {
+            if (!(I_p1 <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_R2()
+        {
+            if (!(I_p2 <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_attached_pronoun()
+        {
+            int among_var;
+            // (, line 67
+            // [, line 68
+            ket = cursor;
+            // substring, line 68
+            if (find_among_b(a_1, 13) == 0)
+            {
+                return false;
+            }
+            // ], line 68
+            bra = cursor;
+            // substring, line 72
+            among_var = find_among_b(a_2, 11);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // call RV, line 72
+            if (!r_RV())
+            {
+                return false;
+            }
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 73
+                    // ], line 73
+                    bra = cursor;
+                    // <-, line 73
+                    slice_from("iendo");
+                    break;
+                
+                case 2: 
+                    // (, line 74
+                    // ], line 74
+                    bra = cursor;
+                    // <-, line 74
+                    slice_from("ando");
+                    break;
+                
+                case 3: 
+                    // (, line 75
+                    // ], line 75
+                    bra = cursor;
+                    // <-, line 75
+                    slice_from("ar");
+                    break;
+                
+                case 4: 
+                    // (, line 76
+                    // ], line 76
+                    bra = cursor;
+                    // <-, line 76
+                    slice_from("er");
+                    break;
+                
+                case 5: 
+                    // (, line 77
+                    // ], line 77
+                    bra = cursor;
+                    // <-, line 77
+                    slice_from("ir");
+                    break;
+                
+                case 6: 
+                    // (, line 81
+                    // delete, line 81
+                    slice_del();
+                    break;
+                
+                case 7: 
+                    // (, line 82
+                    // literal, line 82
+                    if (!(eq_s_b(1, "u")))
+                    {
+                        return false;
+                    }
+                    // delete, line 82
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_standard_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            // (, line 86
+            // [, line 87
+            ket = cursor;
+            // substring, line 87
+            among_var = find_among_b(a_6, 42);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 87
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 98
+                    // call R2, line 99
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // delete, line 99
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 103
+                    // call R2, line 104
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // delete, line 104
+                    slice_del();
+                    // try, line 105
+                    v_1 = limit - cursor;
+                    do 
+                    {
+                        // (, line 105
+                        // [, line 105
+                        ket = cursor;
+                        // literal, line 105
+                        if (!(eq_s_b(2, "ic")))
+                        {
+                            cursor = limit - v_1;
+                            goto lab0_brk;
+                        }
+                        // ], line 105
+                        bra = cursor;
+                        // call R2, line 105
+                        if (!r_R2())
+                        {
+                            cursor = limit - v_1;
+                            goto lab0_brk;
+                        }
+                        // delete, line 105
+                        slice_del();
+                    }
+                    while (false);
 
 lab0_brk: ;
-					
-					break;
-				
-				case 3: 
-					// (, line 109
-					// call R2, line 110
-					if (!r_R2())
-					{
-						return false;
-					}
-					// <-, line 110
-					slice_from("log");
-					break;
-				
-				case 4: 
-					// (, line 113
-					// call R2, line 114
-					if (!r_R2())
-					{
-						return false;
-					}
-					// <-, line 114
-					slice_from("u");
-					break;
-				
-				case 5: 
-					// (, line 117
-					// call R2, line 118
-					if (!r_R2())
-					{
-						return false;
-					}
-					// <-, line 118
-					slice_from("ente");
-					break;
-				
-				case 6: 
-					// (, line 121
-					// call R1, line 122
-					if (!r_R1())
-					{
-						return false;
-					}
-					// delete, line 122
-					slice_del();
-					// try, line 123
-					v_2 = limit - cursor;
-					do 
-					{
-						// (, line 123
-						// [, line 124
-						ket = cursor;
-						// substring, line 124
-						among_var = find_among_b(a_3, 4);
-						if (among_var == 0)
-						{
-							cursor = limit - v_2;
-							goto lab1_brk;
-						}
-						// ], line 124
-						bra = cursor;
-						// call R2, line 124
-						if (!r_R2())
-						{
-							cursor = limit - v_2;
-							goto lab1_brk;
-						}
-						// delete, line 124
-						slice_del();
-						switch (among_var)
-						{
-							
-							case 0: 
-								cursor = limit - v_2;
-								goto lab1_brk;
-							
-							case 1: 
-								// (, line 125
-								// [, line 125
-								ket = cursor;
-								// literal, line 125
-								if (!(eq_s_b(2, "at")))
-								{
-									cursor = limit - v_2;
-									goto lab1_brk;
-								}
-								// ], line 125
-								bra = cursor;
-								// call R2, line 125
-								if (!r_R2())
-								{
-									cursor = limit - v_2;
-									goto lab1_brk;
-								}
-								// delete, line 125
-								slice_del();
-								break;
-							}
-					}
-					while (false);
+                    
+                    break;
+                
+                case 3: 
+                    // (, line 109
+                    // call R2, line 110
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // <-, line 110
+                    slice_from("log");
+                    break;
+                
+                case 4: 
+                    // (, line 113
+                    // call R2, line 114
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // <-, line 114
+                    slice_from("u");
+                    break;
+                
+                case 5: 
+                    // (, line 117
+                    // call R2, line 118
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // <-, line 118
+                    slice_from("ente");
+                    break;
+                
+                case 6: 
+                    // (, line 121
+                    // call R1, line 122
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // delete, line 122
+                    slice_del();
+                    // try, line 123
+                    v_2 = limit - cursor;
+                    do 
+                    {
+                        // (, line 123
+                        // [, line 124
+                        ket = cursor;
+                        // substring, line 124
+                        among_var = find_among_b(a_3, 4);
+                        if (among_var == 0)
+                        {
+                            cursor = limit - v_2;
+                            goto lab1_brk;
+                        }
+                        // ], line 124
+                        bra = cursor;
+                        // call R2, line 124
+                        if (!r_R2())
+                        {
+                            cursor = limit - v_2;
+                            goto lab1_brk;
+                        }
+                        // delete, line 124
+                        slice_del();
+                        switch (among_var)
+                        {
+                            
+                            case 0: 
+                                cursor = limit - v_2;
+                                goto lab1_brk;
+                            
+                            case 1: 
+                                // (, line 125
+                                // [, line 125
+                                ket = cursor;
+                                // literal, line 125
+                                if (!(eq_s_b(2, "at")))
+                                {
+                                    cursor = limit - v_2;
+                                    goto lab1_brk;
+                                }
+                                // ], line 125
+                                bra = cursor;
+                                // call R2, line 125
+                                if (!r_R2())
+                                {
+                                    cursor = limit - v_2;
+                                    goto lab1_brk;
+                                }
+                                // delete, line 125
+                                slice_del();
+                                break;
+                            }
+                    }
+                    while (false);
 
 lab1_brk: ;
-					
-					break;
-				
-				case 7: 
-					// (, line 133
-					// call R2, line 134
-					if (!r_R2())
-					{
-						return false;
-					}
-					// delete, line 134
-					slice_del();
-					// try, line 135
-					v_3 = limit - cursor;
-					do 
-					{
-						// (, line 135
-						// [, line 136
-						ket = cursor;
-						// substring, line 136
-						among_var = find_among_b(a_4, 2);
-						if (among_var == 0)
-						{
-							cursor = limit - v_3;
-							goto lab2_brk;
-						}
-						// ], line 136
-						bra = cursor;
-						switch (among_var)
-						{
-							
-							case 0: 
-								cursor = limit - v_3;
-								goto lab2_brk;
-							
-							case 1: 
-								// (, line 138
-								// call R2, line 138
-								if (!r_R2())
-								{
-									cursor = limit - v_3;
-									goto lab2_brk;
-								}
-								// delete, line 138
-								slice_del();
-								break;
-							}
-					}
-					while (false);
+                    
+                    break;
+                
+                case 7: 
+                    // (, line 133
+                    // call R2, line 134
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // delete, line 134
+                    slice_del();
+                    // try, line 135
+                    v_3 = limit - cursor;
+                    do 
+                    {
+                        // (, line 135
+                        // [, line 136
+                        ket = cursor;
+                        // substring, line 136
+                        among_var = find_among_b(a_4, 2);
+                        if (among_var == 0)
+                        {
+                            cursor = limit - v_3;
+                            goto lab2_brk;
+                        }
+                        // ], line 136
+                        bra = cursor;
+                        switch (among_var)
+                        {
+                            
+                            case 0: 
+                                cursor = limit - v_3;
+                                goto lab2_brk;
+                            
+                            case 1: 
+                                // (, line 138
+                                // call R2, line 138
+                                if (!r_R2())
+                                {
+                                    cursor = limit - v_3;
+                                    goto lab2_brk;
+                                }
+                                // delete, line 138
+                                slice_del();
+                                break;
+                            }
+                    }
+                    while (false);
 
 lab2_brk: ;
-					
-					break;
-				
-				case 8: 
-					// (, line 144
-					// call R2, line 145
-					if (!r_R2())
-					{
-						return false;
-					}
-					// delete, line 145
-					slice_del();
-					// try, line 146
-					v_4 = limit - cursor;
-					do 
-					{
-						// (, line 146
-						// [, line 147
-						ket = cursor;
-						// substring, line 147
-						among_var = find_among_b(a_5, 3);
-						if (among_var == 0)
-						{
-							cursor = limit - v_4;
-							goto lab3_brk;
-						}
-						// ], line 147
-						bra = cursor;
-						switch (among_var)
-						{
-							
-							case 0: 
-								cursor = limit - v_4;
-								goto lab3_brk;
-							
-							case 1: 
-								// (, line 150
-								// call R2, line 150
-								if (!r_R2())
-								{
-									cursor = limit - v_4;
-									goto lab3_brk;
-								}
-								// delete, line 150
-								slice_del();
-								break;
-							}
-					}
-					while (false);
+                    
+                    break;
+                
+                case 8: 
+                    // (, line 144
+                    // call R2, line 145
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // delete, line 145
+                    slice_del();
+                    // try, line 146
+                    v_4 = limit - cursor;
+                    do 
+                    {
+                        // (, line 146
+                        // [, line 147
+                        ket = cursor;
+                        // substring, line 147
+                        among_var = find_among_b(a_5, 3);
+                        if (among_var == 0)
+                        {
+                            cursor = limit - v_4;
+                            goto lab3_brk;
+                        }
+                        // ], line 147
+                        bra = cursor;
+                        switch (among_var)
+                        {
+                            
+                            case 0: 
+                                cursor = limit - v_4;
+                                goto lab3_brk;
+                            
+                            case 1: 
+                                // (, line 150
+                                // call R2, line 150
+                                if (!r_R2())
+                                {
+                                    cursor = limit - v_4;
+                                    goto lab3_brk;
+                                }
+                                // delete, line 150
+                                slice_del();
+                                break;
+                            }
+                    }
+                    while (false);
 
 lab3_brk: ;
-					
-					break;
-				
-				case 9: 
-					// (, line 156
-					// call R2, line 157
-					if (!r_R2())
-					{
-						return false;
-					}
-					// delete, line 157
-					slice_del();
-					// try, line 158
-					v_5 = limit - cursor;
-					do 
-					{
-						// (, line 158
-						// [, line 159
-						ket = cursor;
-						// literal, line 159
-						if (!(eq_s_b(2, "at")))
-						{
-							cursor = limit - v_5;
-							goto lab4_brk;
-						}
-						// ], line 159
-						bra = cursor;
-						// call R2, line 159
-						if (!r_R2())
-						{
-							cursor = limit - v_5;
-							goto lab4_brk;
-						}
-						// delete, line 159
-						slice_del();
-					}
-					while (false);
+                    
+                    break;
+                
+                case 9: 
+                    // (, line 156
+                    // call R2, line 157
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // delete, line 157
+                    slice_del();
+                    // try, line 158
+                    v_5 = limit - cursor;
+                    do 
+                    {
+                        // (, line 158
+                        // [, line 159
+                        ket = cursor;
+                        // literal, line 159
+                        if (!(eq_s_b(2, "at")))
+                        {
+                            cursor = limit - v_5;
+                            goto lab4_brk;
+                        }
+                        // ], line 159
+                        bra = cursor;
+                        // call R2, line 159
+                        if (!r_R2())
+                        {
+                            cursor = limit - v_5;
+                            goto lab4_brk;
+                        }
+                        // delete, line 159
+                        slice_del();
+                    }
+                    while (false);
 
 lab4_brk: ;
-					
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_y_verb_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			// (, line 165
-			// setlimit, line 166
-			v_1 = limit - cursor;
-			// tomark, line 166
-			if (cursor < I_pV)
-			{
-				return false;
-			}
-			cursor = I_pV;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 166
-			// [, line 166
-			ket = cursor;
-			// substring, line 166
-			among_var = find_among_b(a_7, 12);
-			if (among_var == 0)
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			// ], line 166
-			bra = cursor;
-			limit_backward = v_2;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 169
-					// literal, line 169
-					if (!(eq_s_b(1, "u")))
-					{
-						return false;
-					}
-					// delete, line 169
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_verb_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			// (, line 173
-			// setlimit, line 174
-			v_1 = limit - cursor;
-			// tomark, line 174
-			if (cursor < I_pV)
-			{
-				return false;
-			}
-			cursor = I_pV;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 174
-			// [, line 174
-			ket = cursor;
-			// substring, line 174
-			among_var = find_among_b(a_8, 96);
-			if (among_var == 0)
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			// ], line 174
-			bra = cursor;
-			limit_backward = v_2;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 177
-					// try, line 177
-					v_3 = limit - cursor;
-					do 
-					{
-						// (, line 177
-						// literal, line 177
-						if (!(eq_s_b(1, "u")))
-						{
-							cursor = limit - v_3;
-							goto lab5_brk;
-						}
-						// test, line 177
-						v_4 = limit - cursor;
-						// literal, line 177
-						if (!(eq_s_b(1, "g")))
-						{
-							cursor = limit - v_3;
-							goto lab5_brk;
-						}
-						cursor = limit - v_4;
-					}
-					while (false);
+                    
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_y_verb_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            // (, line 165
+            // setlimit, line 166
+            v_1 = limit - cursor;
+            // tomark, line 166
+            if (cursor < I_pV)
+            {
+                return false;
+            }
+            cursor = I_pV;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 166
+            // [, line 166
+            ket = cursor;
+            // substring, line 166
+            among_var = find_among_b(a_7, 12);
+            if (among_var == 0)
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            // ], line 166
+            bra = cursor;
+            limit_backward = v_2;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 169
+                    // literal, line 169
+                    if (!(eq_s_b(1, "u")))
+                    {
+                        return false;
+                    }
+                    // delete, line 169
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_verb_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            // (, line 173
+            // setlimit, line 174
+            v_1 = limit - cursor;
+            // tomark, line 174
+            if (cursor < I_pV)
+            {
+                return false;
+            }
+            cursor = I_pV;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 174
+            // [, line 174
+            ket = cursor;
+            // substring, line 174
+            among_var = find_among_b(a_8, 96);
+            if (among_var == 0)
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            // ], line 174
+            bra = cursor;
+            limit_backward = v_2;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 177
+                    // try, line 177
+                    v_3 = limit - cursor;
+                    do 
+                    {
+                        // (, line 177
+                        // literal, line 177
+                        if (!(eq_s_b(1, "u")))
+                        {
+                            cursor = limit - v_3;
+                            goto lab5_brk;
+                        }
+                        // test, line 177
+                        v_4 = limit - cursor;
+                        // literal, line 177
+                        if (!(eq_s_b(1, "g")))
+                        {
+                            cursor = limit - v_3;
+                            goto lab5_brk;
+                        }
+                        cursor = limit - v_4;
+                    }
+                    while (false);
 
 lab5_brk: ;
-					
-					// ], line 177
-					bra = cursor;
-					// delete, line 177
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 198
-					// delete, line 198
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_residual_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			// (, line 202
-			// [, line 203
-			ket = cursor;
-			// substring, line 203
-			among_var = find_among_b(a_9, 8);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 203
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 206
-					// call RV, line 206
-					if (!r_RV())
-					{
-						return false;
-					}
-					// delete, line 206
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 208
-					// call RV, line 208
-					if (!r_RV())
-					{
-						return false;
-					}
-					// delete, line 208
-					slice_del();
-					// try, line 208
-					v_1 = limit - cursor;
-					do 
-					{
-						// (, line 208
-						// [, line 208
-						ket = cursor;
-						// literal, line 208
-						if (!(eq_s_b(1, "u")))
-						{
-							cursor = limit - v_1;
-						goto lab5_brk;
-						}
-						// ], line 208
-						bra = cursor;
-						// test, line 208
-						v_2 = limit - cursor;
-						// literal, line 208
-						if (!(eq_s_b(1, "g")))
-						{
-							cursor = limit - v_1;
-							goto lab5_brk;
-						}
-						cursor = limit - v_2;
-						// call RV, line 208
-						if (!r_RV())
-						{
-							cursor = limit - v_1;
-							goto lab5_brk;
-						}
-						// delete, line 208
-						slice_del();
-					}
-					while (false);
+                    
+                    // ], line 177
+                    bra = cursor;
+                    // delete, line 177
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 198
+                    // delete, line 198
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_residual_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            // (, line 202
+            // [, line 203
+            ket = cursor;
+            // substring, line 203
+            among_var = find_among_b(a_9, 8);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 203
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 206
+                    // call RV, line 206
+                    if (!r_RV())
+                    {
+                        return false;
+                    }
+                    // delete, line 206
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 208
+                    // call RV, line 208
+                    if (!r_RV())
+                    {
+                        return false;
+                    }
+                    // delete, line 208
+                    slice_del();
+                    // try, line 208
+                    v_1 = limit - cursor;
+                    do 
+                    {
+                        // (, line 208
+                        // [, line 208
+                        ket = cursor;
+                        // literal, line 208
+                        if (!(eq_s_b(1, "u")))
+                        {
+                            cursor = limit - v_1;
+                            goto lab5_brk;
+                        }
+                        // ], line 208
+                        bra = cursor;
+                        // test, line 208
+                        v_2 = limit - cursor;
+                        // literal, line 208
+                        if (!(eq_s_b(1, "g")))
+                        {
+                            cursor = limit - v_1;
+                            goto lab5_brk;
+                        }
+                        cursor = limit - v_2;
+                        // call RV, line 208
+                        if (!r_RV())
+                        {
+                            cursor = limit - v_1;
+                            goto lab5_brk;
+                        }
+                        // delete, line 208
+                        slice_del();
+                    }
+                    while (false);
 
 lab5_brk: ;
-					
-					break;
-				}
-			return true;
-		}
-		
-		public override bool Stem()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			int v_6;
-			// (, line 213
-			// do, line 214
-			v_1 = cursor;
-			do 
-			{
-				// call mark_regions, line 214
-				if (!r_mark_regions())
-				{
-					goto lab0_brk;
-				}
-			}
-			while (false);
+                    
+                    break;
+                }
+            return true;
+        }
+        
+        public override bool Stem()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            int v_6;
+            // (, line 213
+            // do, line 214
+            v_1 = cursor;
+            do 
+            {
+                // call mark_regions, line 214
+                if (!r_mark_regions())
+                {
+                    goto lab0_brk;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = v_1;
-			// backwards, line 215
-			limit_backward = cursor; cursor = limit;
-			// (, line 215
-			// do, line 216
-			v_2 = limit - cursor;
-			do 
-			{
-				// call attached_pronoun, line 216
-				if (!r_attached_pronoun())
-				{
-					goto lab1_brk;
-				}
-			}
-			while (false);
+            
+            cursor = v_1;
+            // backwards, line 215
+            limit_backward = cursor; cursor = limit;
+            // (, line 215
+            // do, line 216
+            v_2 = limit - cursor;
+            do 
+            {
+                // call attached_pronoun, line 216
+                if (!r_attached_pronoun())
+                {
+                    goto lab1_brk;
+                }
+            }
+            while (false);
 
 lab1_brk: ;
-			
-			cursor = limit - v_2;
-			// do, line 217
-			v_3 = limit - cursor;
-			do 
-			{
-				// (, line 217
-				// or, line 217
-				do 
-				{
-					v_4 = limit - cursor;
-					do 
-					{
-						// call standard_suffix, line 217
-						if (!r_standard_suffix())
-						{
-							goto lab4_brk;
-						}
-						goto lab3_brk;
-					}
-					while (false);
+            
+            cursor = limit - v_2;
+            // do, line 217
+            v_3 = limit - cursor;
+            do 
+            {
+                // (, line 217
+                // or, line 217
+                do 
+                {
+                    v_4 = limit - cursor;
+                    do 
+                    {
+                        // call standard_suffix, line 217
+                        if (!r_standard_suffix())
+                        {
+                            goto lab4_brk;
+                        }
+                        goto lab3_brk;
+                    }
+                    while (false);
 
 lab4_brk: ;
-					
-					cursor = limit - v_4;
-					do 
-					{
-						// call y_verb_suffix, line 218
-						if (!r_y_verb_suffix())
-						{
-							goto lab5_brk;
-						}
-						goto lab3_brk;
-					}
-					while (false);
+                    
+                    cursor = limit - v_4;
+                    do 
+                    {
+                        // call y_verb_suffix, line 218
+                        if (!r_y_verb_suffix())
+                        {
+                            goto lab5_brk;
+                        }
+                        goto lab3_brk;
+                    }
+                    while (false);
 
 lab5_brk: ;
-					
-					cursor = limit - v_4;
-					// call verb_suffix, line 219
-					if (!r_verb_suffix())
-					{
-						goto lab2_brk;
-					}
-				}
-				while (false);
+                    
+                    cursor = limit - v_4;
+                    // call verb_suffix, line 219
+                    if (!r_verb_suffix())
+                    {
+                        goto lab2_brk;
+                    }
+                }
+                while (false);
 
 lab3_brk: ;
-				
-			}
-			while (false);
+                
+            }
+            while (false);
 
 lab2_brk: ;
 
-			cursor = limit - v_3;
-			// do, line 221
-			v_5 = limit - cursor;
-			do 
-			{
-				// call residual_suffix, line 221
-				if (!r_residual_suffix())
-				{
-					goto lab6_brk;
-				}
-			}
-			while (false);
+            cursor = limit - v_3;
+            // do, line 221
+            v_5 = limit - cursor;
+            do 
+            {
+                // call residual_suffix, line 221
+                if (!r_residual_suffix())
+                {
+                    goto lab6_brk;
+                }
+            }
+            while (false);
 
 lab6_brk: ;
-			
-			cursor = limit - v_5;
-			cursor = limit_backward; // do, line 223
-			v_6 = cursor;
-			do 
-			{
-				// call postlude, line 223
-				if (!r_postlude())
-				{
-					goto lab7_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_5;
+            cursor = limit_backward; // do, line 223
+            v_6 = cursor;
+            do 
+            {
+                // call postlude, line 223
+                if (!r_postlude())
+                {
+                    goto lab7_brk;
+                }
+            }
+            while (false);
 
 lab7_brk: ;
-			
-			cursor = v_6;
-			return true;
-		}
-	}
+            
+            cursor = v_6;
+            return true;
+        }
+    }
 }


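For readers skimming these converted hunks: the class above appears to be the Spanish stemmer (its attached_pronoun / y_verb_suffix / verb_suffix rules match the Snowball Spanish algorithm), and like the other SF.Snowball.Ext ports it is driven through the buffer accessors inherited from SnowballProgram. The sketch below is a hypothetical usage example, not part of this commit; the class name SpanishStemmer and the SetCurrent/GetCurrent members are assumptions carried over from the Java original, so verify them against your checkout.

    // Hypothetical driver for one of the generated stemmers converted in this
    // commit. Assumes SpanishStemmer is the class shown above and that
    // SnowballProgram exposes SetCurrent/GetCurrent as in the Java original.
    using System;
    using SF.Snowball.Ext;

    public static class StemmerDemo
    {
        public static string Stem(string token)
        {
            var stemmer = new SpanishStemmer();
            stemmer.SetCurrent(token);   // load the term into the program buffer
            stemmer.Stem();              // runs mark_regions, suffix rules, postlude (the Stem() above)
            return stemmer.GetCurrent(); // read back the stemmed form
        }

        public static void Main()
        {
            // Stems a Spanish gerund; the "ando" verb suffix should be removed
            // if the rules behave as in the reference Snowball implementation.
            Console.WriteLine(Stem("cantando"));
        }
    }

Each token is stemmed in place in the program's string buffer, which is why the generated code manipulates cursor/limit fields rather than returning strings.
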
[45/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/EnglishStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/EnglishStemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/EnglishStemmer.cs
index 9d54d08..43dded7 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/EnglishStemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/EnglishStemmer.cs
@@ -27,1455 +27,1455 @@ namespace SF.Snowball.Ext
     public class EnglishStemmer : SnowballProgram
     {
 
-		public EnglishStemmer()
-		{
-			InitBlock();
-		}
-		private void  InitBlock()
-		{
-			a_0 = new Among[]{new Among("gener", - 1, - 1, "", this)};
-			a_1 = new Among[]{new Among("ied", - 1, 2, "", this), new Among("s", - 1, 3, "", this), new Among("ies", 1, 2, "", this), new Among("sses", 1, 1, "", this), new Among("ss", 1, - 1, "", this), new Among("us", 1, - 1, "", this)};
-			a_2 = new Among[]{new Among("", - 1, 3, "", this), new Among("bb", 0, 2, "", this), new Among("dd", 0, 2, "", this), new Among("ff", 0, 2, "", this), new Among("gg", 0, 2, "", this), new Among("bl", 0, 1, "", this), new Among("mm", 0, 2, "", this), new Among("nn", 0, 2, "", this), new Among("pp", 0, 2, "", this), new Among("rr", 0, 2, "", this), new Among("at", 0, 1, "", this), new Among("tt", 0, 2, "", this), new Among("iz", 0, 1, "", this)};
-			a_3 = new Among[]{new Among("ed", - 1, 2, "", this), new Among("eed", 0, 1, "", this), new Among("ing", - 1, 2, "", this), new Among("edly", - 1, 2, "", this), new Among("eedly", 3, 1, "", this), new Among("ingly", - 1, 2, "", this)};
-			a_4 = new Among[]{new Among("anci", - 1, 3, "", this), new Among("enci", - 1, 2, "", this), new Among("ogi", - 1, 13, "", this), new Among("li", - 1, 16, "", this), new Among("bli", 3, 12, "", this), new Among("abli", 4, 4, "", this), new Among("alli", 3, 8, "", this), new Among("fulli", 3, 14, "", this), new Among("lessli", 3, 15, "", this), new Among("ousli", 3, 10, "", this), new Among("entli", 3, 5, "", this), new Among("aliti", - 1, 8, "", this), new Among("biliti", - 1, 12, "", this), new Among("iviti", - 1, 11, "", this), new Among("tional", - 1, 1, "", this), new Among("ational", 14, 7, "", this), new Among("alism", - 1, 8, "", this), new Among("ation", - 1, 7, "", this), new Among("ization", 17, 6, "", this), new Among("izer", - 1, 6, "", this), new Among("ator", - 1, 7, "", this), new Among("iveness", - 1, 11, "", this), new Among("fulness", - 1, 9, "", this), new Among("ousness", - 1, 10, "", this)};
-			a_5 = new Among[]{new Among("icate", - 1, 4, "", this), new Among("ative", - 1, 6, "", this), new Among("alize", - 1, 3, "", this), new Among("iciti", - 1, 4, "", this), new Among("ical", - 1, 4, "", this), new Among("tional", - 1, 1, "", this), new Among("ational", 5, 2, "", this), new Among("ful", - 1, 5, "", this), new Among("ness", - 1, 5, "", this)};
-			a_6 = new Among[]{new Among("ic", - 1, 1, "", this), new Among("ance", - 1, 1, "", this), new Among("ence", - 1, 1, "", this), new Among("able", - 1, 1, "", this), new Among("ible", - 1, 1, "", this), new Among("ate", - 1, 1, "", this), new Among("ive", - 1, 1, "", this), new Among("ize", - 1, 1, "", this), new Among("iti", - 1, 1, "", this), new Among("al", - 1, 1, "", this), new Among("ism", - 1, 1, "", this), new Among("ion", - 1, 2, "", this), new Among("er", - 1, 1, "", this), new Among("ous", - 1, 1, "", this), new Among("ant", - 1, 1, "", this), new Among("ent", - 1, 1, "", this), new Among("ment", 15, 1, "", this), new Among("ement", 16, 1, "", this)};
-			a_7 = new Among[]{new Among("e", - 1, 1, "", this), new Among("l", - 1, 2, "", this)};
-			a_8 = new Among[]{new Among("succeed", - 1, - 1, "", this), new Among("proceed", - 1, - 1, "", this), new Among("exceed", - 1, - 1, "", this), new Among("canning", - 1, - 1, "", this), new Among("inning", - 1, - 1, "", this), new Among("earring", - 1, - 1, "", this), new Among("herring", - 1, - 1, "", this), new Among("outing", - 1, - 1, "", this)};
-			a_9 = new Among[]{new Among("andes", - 1, - 1, "", this), new Among("atlas", - 1, - 1, "", this), new Among("bias", - 1, - 1, "", this), new Among("cosmos", - 1, - 1, "", this), new Among("dying", - 1, 3, "", this), new Among("early", - 1, 9, "", this), new Among("gently", - 1, 7, "", this), new Among("howe", - 1, - 1, "", this), new Among("idly", - 1, 6, "", this), new Among("lying", - 1, 4, "", this), new Among("news", - 1, - 1, "", this), new Among("only", - 1, 10, "", this), new Among("singly", - 1, 11, "", this), new Among("skies", - 1, 2, "", this), new Among("skis", - 1, 1, "", this), new Among("sky", - 1, - 1, "", this), new Among("tying", - 1, 5, "", this), new Among("ugly", - 1, 8, "", this)};
-		}
-		
-		private Among[] a_0;
-		private Among[] a_1;
-		private Among[] a_2;
-		private Among[] a_3;
-		private Among[] a_4;
-		private Among[] a_5;
-		private Among[] a_6;
-		private Among[] a_7;
-		private Among[] a_8;
-		private Among[] a_9;
+        public EnglishStemmer()
+        {
+            InitBlock();
+        }
+        private void  InitBlock()
+        {
+            a_0 = new Among[]{new Among("gener", - 1, - 1, "", this)};
+            a_1 = new Among[]{new Among("ied", - 1, 2, "", this), new Among("s", - 1, 3, "", this), new Among("ies", 1, 2, "", this), new Among("sses", 1, 1, "", this), new Among("ss", 1, - 1, "", this), new Among("us", 1, - 1, "", this)};
+            a_2 = new Among[]{new Among("", - 1, 3, "", this), new Among("bb", 0, 2, "", this), new Among("dd", 0, 2, "", this), new Among("ff", 0, 2, "", this), new Among("gg", 0, 2, "", this), new Among("bl", 0, 1, "", this), new Among("mm", 0, 2, "", this), new Among("nn", 0, 2, "", this), new Among("pp", 0, 2, "", this), new Among("rr", 0, 2, "", this), new Among("at", 0, 1, "", this), new Among("tt", 0, 2, "", this), new Among("iz", 0, 1, "", this)};
+            a_3 = new Among[]{new Among("ed", - 1, 2, "", this), new Among("eed", 0, 1, "", this), new Among("ing", - 1, 2, "", this), new Among("edly", - 1, 2, "", this), new Among("eedly", 3, 1, "", this), new Among("ingly", - 1, 2, "", this)};
+            a_4 = new Among[]{new Among("anci", - 1, 3, "", this), new Among("enci", - 1, 2, "", this), new Among("ogi", - 1, 13, "", this), new Among("li", - 1, 16, "", this), new Among("bli", 3, 12, "", this), new Among("abli", 4, 4, "", this), new Among("alli", 3, 8, "", this), new Among("fulli", 3, 14, "", this), new Among("lessli", 3, 15, "", this), new Among("ousli", 3, 10, "", this), new Among("entli", 3, 5, "", this), new Among("aliti", - 1, 8, "", this), new Among("biliti", - 1, 12, "", this), new Among("iviti", - 1, 11, "", this), new Among("tional", - 1, 1, "", this), new Among("ational", 14, 7, "", this), new Among("alism", - 1, 8, "", this), new Among("ation", - 1, 7, "", this), new Among("ization", 17, 6, "", this), new Among("izer", - 1, 6, "", this), new Among("ator", - 1, 7, "", this), new Among("iveness", - 1, 11, "", this), new Among("fulness", - 1, 9, "", this), new Among("ousness", - 1, 10, "", this)};
+            a_5 = new Among[]{new Among("icate", - 1, 4, "", this), new Among("ative", - 1, 6, "", this), new Among("alize", - 1, 3, "", this), new Among("iciti", - 1, 4, "", this), new Among("ical", - 1, 4, "", this), new Among("tional", - 1, 1, "", this), new Among("ational", 5, 2, "", this), new Among("ful", - 1, 5, "", this), new Among("ness", - 1, 5, "", this)};
+            a_6 = new Among[]{new Among("ic", - 1, 1, "", this), new Among("ance", - 1, 1, "", this), new Among("ence", - 1, 1, "", this), new Among("able", - 1, 1, "", this), new Among("ible", - 1, 1, "", this), new Among("ate", - 1, 1, "", this), new Among("ive", - 1, 1, "", this), new Among("ize", - 1, 1, "", this), new Among("iti", - 1, 1, "", this), new Among("al", - 1, 1, "", this), new Among("ism", - 1, 1, "", this), new Among("ion", - 1, 2, "", this), new Among("er", - 1, 1, "", this), new Among("ous", - 1, 1, "", this), new Among("ant", - 1, 1, "", this), new Among("ent", - 1, 1, "", this), new Among("ment", 15, 1, "", this), new Among("ement", 16, 1, "", this)};
+            a_7 = new Among[]{new Among("e", - 1, 1, "", this), new Among("l", - 1, 2, "", this)};
+            a_8 = new Among[]{new Among("succeed", - 1, - 1, "", this), new Among("proceed", - 1, - 1, "", this), new Among("exceed", - 1, - 1, "", this), new Among("canning", - 1, - 1, "", this), new Among("inning", - 1, - 1, "", this), new Among("earring", - 1, - 1, "", this), new Among("herring", - 1, - 1, "", this), new Among("outing", - 1, - 1, "", this)};
+            a_9 = new Among[]{new Among("andes", - 1, - 1, "", this), new Among("atlas", - 1, - 1, "", this), new Among("bias", - 1, - 1, "", this), new Among("cosmos", - 1, - 1, "", this), new Among("dying", - 1, 3, "", this), new Among("early", - 1, 9, "", this), new Among("gently", - 1, 7, "", this), new Among("howe", - 1, - 1, "", this), new Among("idly", - 1, 6, "", this), new Among("lying", - 1, 4, "", this), new Among("news", - 1, - 1, "", this), new Among("only", - 1, 10, "", this), new Among("singly", - 1, 11, "", this), new Among("skies", - 1, 2, "", this), new Among("skis", - 1, 1, "", this), new Among("sky", - 1, - 1, "", this), new Among("tying", - 1, 5, "", this), new Among("ugly", - 1, 8, "", this)};
+        }
+        
+        private Among[] a_0;
+        private Among[] a_1;
+        private Among[] a_2;
+        private Among[] a_3;
+        private Among[] a_4;
+        private Among[] a_5;
+        private Among[] a_6;
+        private Among[] a_7;
+        private Among[] a_8;
+        private Among[] a_9;
 
         private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1)};
-		private static readonly char[] g_v_WXY = new char[]{(char) (1), (char) (17), (char) (65), (char) (208), (char) (1)};
-		private static readonly char[] g_valid_LI = new char[]{(char) (55), (char) (141), (char) (2)};
-		
-		private bool B_Y_found;
-		private int I_p2;
-		private int I_p1;
-		
-		protected internal virtual void  copy_from(EnglishStemmer other)
-		{
-			B_Y_found = other.B_Y_found;
-			I_p2 = other.I_p2;
-			I_p1 = other.I_p1;
-			base.copy_from(other);
-		}
-		
-		private bool r_prelude()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			// (, line 23
-			// unset Y_found, line 24
-			B_Y_found = false;
-			// do, line 25
-			v_1 = cursor;
-			do 
-			{
-				// (, line 25
-				// [, line 25
-				bra = cursor;
-				// literal, line 25
-				if (!(eq_s(1, "y")))
-				{
-					goto lab0_brk;
-				}
-				// ], line 25
-				ket = cursor;
-				if (!(in_grouping(g_v, 97, 121)))
-				{
-					goto lab0_brk;
-				}
-				// <-, line 25
-				slice_from("Y");
-				// set Y_found, line 25
-				B_Y_found = true;
-			}
-			while (false);
+        private static readonly char[] g_v_WXY = new char[]{(char) (1), (char) (17), (char) (65), (char) (208), (char) (1)};
+        private static readonly char[] g_valid_LI = new char[]{(char) (55), (char) (141), (char) (2)};
+        
+        private bool B_Y_found;
+        private int I_p2;
+        private int I_p1;
+        
+        protected internal virtual void  copy_from(EnglishStemmer other)
+        {
+            B_Y_found = other.B_Y_found;
+            I_p2 = other.I_p2;
+            I_p1 = other.I_p1;
+            base.copy_from(other);
+        }
+        
+        private bool r_prelude()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            // (, line 23
+            // unset Y_found, line 24
+            B_Y_found = false;
+            // do, line 25
+            v_1 = cursor;
+            do 
+            {
+                // (, line 25
+                // [, line 25
+                bra = cursor;
+                // literal, line 25
+                if (!(eq_s(1, "y")))
+                {
+                    goto lab0_brk;
+                }
+                // ], line 25
+                ket = cursor;
+                if (!(in_grouping(g_v, 97, 121)))
+                {
+                    goto lab0_brk;
+                }
+                // <-, line 25
+                slice_from("Y");
+                // set Y_found, line 25
+                B_Y_found = true;
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = v_1;
-			// do, line 26
-			v_2 = cursor;
-			do 
-			{
-				// repeat, line 26
-				while (true)
-				{
-					v_3 = cursor;
-					do 
-					{
-						// (, line 26
-						// goto, line 26
-						while (true)
-						{
-							v_4 = cursor;
-							do 
-							{
-								// (, line 26
-								if (!(in_grouping(g_v, 97, 121)))
-								{
-									goto lab5_brk;
-								}
-								// [, line 26
-								bra = cursor;
-								// literal, line 26
-								if (!(eq_s(1, "y")))
-								{
-									goto lab5_brk;
-								}
-								// ], line 26
-								ket = cursor;
-								cursor = v_4;
-								goto golab4_brk;
-							}
-							while (false);
+            
+            cursor = v_1;
+            // do, line 26
+            v_2 = cursor;
+            do 
+            {
+                // repeat, line 26
+                while (true)
+                {
+                    v_3 = cursor;
+                    do 
+                    {
+                        // (, line 26
+                        // goto, line 26
+                        while (true)
+                        {
+                            v_4 = cursor;
+                            do 
+                            {
+                                // (, line 26
+                                if (!(in_grouping(g_v, 97, 121)))
+                                {
+                                    goto lab5_brk;
+                                }
+                                // [, line 26
+                                bra = cursor;
+                                // literal, line 26
+                                if (!(eq_s(1, "y")))
+                                {
+                                    goto lab5_brk;
+                                }
+                                // ], line 26
+                                ket = cursor;
+                                cursor = v_4;
+                                goto golab4_brk;
+                            }
+                            while (false);
 
 lab5_brk: ;
-							
-							cursor = v_4;
-							if (cursor >= limit)
-							{
-								goto lab3_brk;
-							}
-							cursor++;
-						}
+                            
+                            cursor = v_4;
+                            if (cursor >= limit)
+                            {
+                                goto lab3_brk;
+                            }
+                            cursor++;
+                        }
 
 golab4_brk: ;
 
-						// <-, line 26
-						slice_from("Y");
-						// set Y_found, line 26
-						B_Y_found = true;
-						goto replab2;
-					}
-					while (false);
+                        // <-, line 26
+                        slice_from("Y");
+                        // set Y_found, line 26
+                        B_Y_found = true;
+                        goto replab2;
+                    }
+                    while (false);
 
 lab3_brk: ;
 
-					cursor = v_3;
-					goto replab2_brk;
+                    cursor = v_3;
+                    goto replab2_brk;
 
 replab2: ;
-				}
+                }
 
 replab2_brk: ;
-				
-			}
-			while (false);
+                
+            }
+            while (false);
 
 lab1_brk: ;
 
-			cursor = v_2;
-			return true;
-		}
-		
-		private bool r_mark_regions()
-		{
-			int v_1;
-			int v_2;
-			// (, line 29
-			I_p1 = limit;
-			I_p2 = limit;
-			// do, line 32
-			v_1 = cursor;
-			do 
-			{
-				// (, line 32
-				// or, line 36
-				do 
-				{
-					v_2 = cursor;
-					do 
-					{
-						// among, line 33
-						if (find_among(a_0, 1) == 0)
-						{
-							goto lab2_brk;
-						}
-						goto lab1_brk;
-					}
-					while (false);
+            cursor = v_2;
+            return true;
+        }
+        
+        private bool r_mark_regions()
+        {
+            int v_1;
+            int v_2;
+            // (, line 29
+            I_p1 = limit;
+            I_p2 = limit;
+            // do, line 32
+            v_1 = cursor;
+            do 
+            {
+                // (, line 32
+                // or, line 36
+                do 
+                {
+                    v_2 = cursor;
+                    do 
+                    {
+                        // among, line 33
+                        if (find_among(a_0, 1) == 0)
+                        {
+                            goto lab2_brk;
+                        }
+                        goto lab1_brk;
+                    }
+                    while (false);
 
 lab2_brk: ;
-					
-					cursor = v_2;
-					// (, line 36
-					// gopast, line 36
-					while (true)
-					{
-						do 
-						{
-							if (!(in_grouping(g_v, 97, 121)))
-							{
-								goto lab4_brk;
-							}
-							goto golab3_brk;
-						}
-						while (false);
+                    
+                    cursor = v_2;
+                    // (, line 36
+                    // gopast, line 36
+                    while (true)
+                    {
+                        do 
+                        {
+                            if (!(in_grouping(g_v, 97, 121)))
+                            {
+                                goto lab4_brk;
+                            }
+                            goto golab3_brk;
+                        }
+                        while (false);
 
 lab4_brk: ;
 
-						if (cursor >= limit)
-						{
-							goto lab0_brk;
-						}
-						cursor++;
-					}
+                        if (cursor >= limit)
+                        {
+                            goto lab0_brk;
+                        }
+                        cursor++;
+                    }
 
 golab3_brk: ;
-					
-					// gopast, line 36
-					while (true)
-					{
-						do 
-						{
-							if (!(out_grouping(g_v, 97, 121)))
-							{
-								goto lab6_brk;
-							}
-							goto golab5_brk;
-						}
-						while (false);
+                    
+                    // gopast, line 36
+                    while (true)
+                    {
+                        do 
+                        {
+                            if (!(out_grouping(g_v, 97, 121)))
+                            {
+                                goto lab6_brk;
+                            }
+                            goto golab5_brk;
+                        }
+                        while (false);
 
 lab6_brk: ;
-						
-						if (cursor >= limit)
-						{
-							goto lab0_brk;
-						}
-						cursor++;
-					}
+                        
+                        if (cursor >= limit)
+                        {
+                            goto lab0_brk;
+                        }
+                        cursor++;
+                    }
 
 golab5_brk: ;
-					
-				}
-				while (false);
+                    
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				// setmark p1, line 37
-				I_p1 = cursor;
-				// gopast, line 38
-				while (true)
-				{
-					do 
-					{
-						if (!(in_grouping(g_v, 97, 121)))
-						{
-							goto lab8_brk;
-						}
-						goto golab7_brk;
-					}
-					while (false);
+                
+                // setmark p1, line 37
+                I_p1 = cursor;
+                // gopast, line 38
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(in_grouping(g_v, 97, 121)))
+                        {
+                            goto lab8_brk;
+                        }
+                        goto golab7_brk;
+                    }
+                    while (false);
 
 lab8_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab0_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab0_brk;
+                    }
+                    cursor++;
+                }
 
 golab7_brk: ;
-				
-				// gopast, line 38
-				while (true)
-				{
-					do 
-					{
-						if (!(out_grouping(g_v, 97, 121)))
-						{
-							goto lab10_brk;
-						}
-						goto golab9_brk;
-					}
-					while (false);
+                
+                // gopast, line 38
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(out_grouping(g_v, 97, 121)))
+                        {
+                            goto lab10_brk;
+                        }
+                        goto golab9_brk;
+                    }
+                    while (false);
 
 lab10_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab0_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab0_brk;
+                    }
+                    cursor++;
+                }
 
 golab9_brk: ;
-				
-				// setmark p2, line 38
-				I_p2 = cursor;
-			}
-			while (false);
+                
+                // setmark p2, line 38
+                I_p2 = cursor;
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = v_1;
-			return true;
-		}
-		
-		private bool r_shortv()
-		{
-			int v_1;
-			// (, line 44
-			// or, line 46
-			do 
-			{
-				v_1 = limit - cursor;
-				do 
-				{
-					// (, line 45
-					if (!(out_grouping_b(g_v_WXY, 89, 121)))
-					{
-						goto lab1_brk;
-					}
-					if (!(in_grouping_b(g_v, 97, 121)))
-					{
-						goto lab1_brk;
-					}
-					if (!(out_grouping_b(g_v, 97, 121)))
-					{
-						goto lab1_brk;
-					}
-					goto lab0_brk;
-				}
-				while (false);
+            
+            cursor = v_1;
+            return true;
+        }
+        
+        private bool r_shortv()
+        {
+            int v_1;
+            // (, line 44
+            // or, line 46
+            do 
+            {
+                v_1 = limit - cursor;
+                do 
+                {
+                    // (, line 45
+                    if (!(out_grouping_b(g_v_WXY, 89, 121)))
+                    {
+                        goto lab1_brk;
+                    }
+                    if (!(in_grouping_b(g_v, 97, 121)))
+                    {
+                        goto lab1_brk;
+                    }
+                    if (!(out_grouping_b(g_v, 97, 121)))
+                    {
+                        goto lab1_brk;
+                    }
+                    goto lab0_brk;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = limit - v_1;
-				// (, line 47
-				if (!(out_grouping_b(g_v, 97, 121)))
-				{
-					return false;
-				}
-				if (!(in_grouping_b(g_v, 97, 121)))
-				{
-					return false;
-				}
-				// atlimit, line 47
-				if (cursor > limit_backward)
-				{
-					return false;
-				}
-			}
-			while (false);
+                
+                cursor = limit - v_1;
+                // (, line 47
+                if (!(out_grouping_b(g_v, 97, 121)))
+                {
+                    return false;
+                }
+                if (!(in_grouping_b(g_v, 97, 121)))
+                {
+                    return false;
+                }
+                // atlimit, line 47
+                if (cursor > limit_backward)
+                {
+                    return false;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
 
-			return true;
-		}
-		
-		private bool r_R1()
-		{
-			if (!(I_p1 <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_R2()
-		{
-			if (!(I_p2 <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_Step_1a()
-		{
-			int among_var;
-			int v_1;
-			// (, line 53
-			// [, line 54
-			ket = cursor;
-			// substring, line 54
-			among_var = find_among_b(a_1, 6);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 54
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 55
-					// <-, line 55
-					slice_from("ss");
-					break;
-				
-				case 2: 
-					// (, line 57
-					// or, line 57
-					do 
-					{
-						v_1 = limit - cursor;
-						do 
-						{
-							// (, line 57
-							// next, line 57
-							if (cursor <= limit_backward)
-							{
-								goto lab1_brk;
-							}
-							cursor--;
-							// atlimit, line 57
-							if (cursor > limit_backward)
-							{
-								goto lab1_brk;
-							}
-							// <-, line 57
-							slice_from("ie");
-							goto lab0_brk;
-						}
-						while (false);
+            return true;
+        }
+        
+        private bool r_R1()
+        {
+            if (!(I_p1 <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_R2()
+        {
+            if (!(I_p2 <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_Step_1a()
+        {
+            int among_var;
+            int v_1;
+            // (, line 53
+            // [, line 54
+            ket = cursor;
+            // substring, line 54
+            among_var = find_among_b(a_1, 6);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 54
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 55
+                    // <-, line 55
+                    slice_from("ss");
+                    break;
+                
+                case 2: 
+                    // (, line 57
+                    // or, line 57
+                    do 
+                    {
+                        v_1 = limit - cursor;
+                        do 
+                        {
+                            // (, line 57
+                            // next, line 57
+                            if (cursor <= limit_backward)
+                            {
+                                goto lab1_brk;
+                            }
+                            cursor--;
+                            // atlimit, line 57
+                            if (cursor > limit_backward)
+                            {
+                                goto lab1_brk;
+                            }
+                            // <-, line 57
+                            slice_from("ie");
+                            goto lab0_brk;
+                        }
+                        while (false);
 
 lab1_brk: ;
 
-						cursor = limit - v_1;
-						// <-, line 57
-						slice_from("i");
-					}
-					while (false);
+                        cursor = limit - v_1;
+                        // <-, line 57
+                        slice_from("i");
+                    }
+                    while (false);
 
 lab0_brk: ;
 
-					break;
-				
-				case 3: 
-					// (, line 58
-					// next, line 58
-					if (cursor <= limit_backward)
-					{
-						return false;
-					}
-					cursor--;
-					// gopast, line 58
-					while (true)
-					{
-						do 
-						{
-							if (!(in_grouping_b(g_v, 97, 121)))
-							{
-								goto lab3_brk;
-							}
-							goto golab2_brk;
-						}
-						while (false);
+                    break;
+                
+                case 3: 
+                    // (, line 58
+                    // next, line 58
+                    if (cursor <= limit_backward)
+                    {
+                        return false;
+                    }
+                    cursor--;
+                    // gopast, line 58
+                    while (true)
+                    {
+                        do 
+                        {
+                            if (!(in_grouping_b(g_v, 97, 121)))
+                            {
+                                goto lab3_brk;
+                            }
+                            goto golab2_brk;
+                        }
+                        while (false);
 
 lab3_brk: ;
 
-						if (cursor <= limit_backward)
-						{
-							return false;
-						}
-						cursor--;
-					}
+                        if (cursor <= limit_backward)
+                        {
+                            return false;
+                        }
+                        cursor--;
+                    }
 
 golab2_brk: ;
 
-					// delete, line 58
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_Step_1b()
-		{
-			int among_var;
-			int v_1;
-			int v_3;
-			int v_4;
-			// (, line 63
-			// [, line 64
-			ket = cursor;
-			// substring, line 64
-			among_var = find_among_b(a_3, 6);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 64
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 66
-					// call R1, line 66
-					if (!r_R1())
-					{
-						return false;
-					}
-					// <-, line 66
-					slice_from("ee");
-					break;
-				
-				case 2: 
-					// (, line 68
-					// test, line 69
-					v_1 = limit - cursor;
-					// gopast, line 69
-					while (true)
-					{
-						do 
-						{
-							if (!(in_grouping_b(g_v, 97, 121)))
-							{
-								goto lab1_brk;
-							}
-							goto golab0_brk;
-						}
-						while (false);
+                    // delete, line 58
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_Step_1b()
+        {
+            int among_var;
+            int v_1;
+            int v_3;
+            int v_4;
+            // (, line 63
+            // [, line 64
+            ket = cursor;
+            // substring, line 64
+            among_var = find_among_b(a_3, 6);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 64
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 66
+                    // call R1, line 66
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // <-, line 66
+                    slice_from("ee");
+                    break;
+                
+                case 2: 
+                    // (, line 68
+                    // test, line 69
+                    v_1 = limit - cursor;
+                    // gopast, line 69
+                    while (true)
+                    {
+                        do 
+                        {
+                            if (!(in_grouping_b(g_v, 97, 121)))
+                            {
+                                goto lab1_brk;
+                            }
+                            goto golab0_brk;
+                        }
+                        while (false);
 
 lab1_brk: ;
 
-						if (cursor <= limit_backward)
-						{
-							return false;
-						}
-						cursor--;
-					}
+                        if (cursor <= limit_backward)
+                        {
+                            return false;
+                        }
+                        cursor--;
+                    }
 
 golab0_brk: ;
-					
-					cursor = limit - v_1;
-					// delete, line 69
-					slice_del();
-					// test, line 70
-					v_3 = limit - cursor;
-					// substring, line 70
-					among_var = find_among_b(a_2, 13);
-					if (among_var == 0)
-					{
-						return false;
-					}
-					cursor = limit - v_3;
-					switch (among_var)
-					{
-						
-						case 0: 
-							return false;
-						
-						case 1: 
-							// (, line 72
-							// <+, line 72
-							{
-								int c = cursor;
-								insert(cursor, cursor, "e");
-								cursor = c;
-							}
-							break;
-						
-						case 2: 
-							// (, line 75
-							// [, line 75
-							ket = cursor;
-							// next, line 75
-							if (cursor <= limit_backward)
-							{
-								return false;
-							}
-							cursor--;
-							// ], line 75
-							bra = cursor;
-							// delete, line 75
-							slice_del();
-							break;
-						
-						case 3: 
-							// (, line 76
-							// atmark, line 76
-							if (cursor != I_p1)
-							{
-								return false;
-							}
-							// test, line 76
-							v_4 = limit - cursor;
-							// call shortv, line 76
-							if (!r_shortv())
-							{
-								return false;
-							}
-							cursor = limit - v_4;
-							// <+, line 76
-							{
-								int c = cursor;
-								insert(cursor, cursor, "e");
-								cursor = c;
-							}
-							break;
-						}
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_Step_1c()
-		{
-			int v_1;
-			int v_2;
-			// (, line 82
-			// [, line 83
-			ket = cursor;
-			// or, line 83
-			do 
-			{
-				v_1 = limit - cursor;
-				do 
-				{
-					// literal, line 83
-					if (!(eq_s_b(1, "y")))
-					{
-						goto lab1_brk;
-					}
-					goto lab0_brk;
-				}
-				while (false);
+                    
+                    cursor = limit - v_1;
+                    // delete, line 69
+                    slice_del();
+                    // test, line 70
+                    v_3 = limit - cursor;
+                    // substring, line 70
+                    among_var = find_among_b(a_2, 13);
+                    if (among_var == 0)
+                    {
+                        return false;
+                    }
+                    cursor = limit - v_3;
+                    switch (among_var)
+                    {
+                        
+                        case 0: 
+                            return false;
+                        
+                        case 1: 
+                            // (, line 72
+                            // <+, line 72
+                            {
+                                int c = cursor;
+                                insert(cursor, cursor, "e");
+                                cursor = c;
+                            }
+                            break;
+                        
+                        case 2: 
+                            // (, line 75
+                            // [, line 75
+                            ket = cursor;
+                            // next, line 75
+                            if (cursor <= limit_backward)
+                            {
+                                return false;
+                            }
+                            cursor--;
+                            // ], line 75
+                            bra = cursor;
+                            // delete, line 75
+                            slice_del();
+                            break;
+                        
+                        case 3: 
+                            // (, line 76
+                            // atmark, line 76
+                            if (cursor != I_p1)
+                            {
+                                return false;
+                            }
+                            // test, line 76
+                            v_4 = limit - cursor;
+                            // call shortv, line 76
+                            if (!r_shortv())
+                            {
+                                return false;
+                            }
+                            cursor = limit - v_4;
+                            // <+, line 76
+                            {
+                                int c = cursor;
+                                insert(cursor, cursor, "e");
+                                cursor = c;
+                            }
+                            break;
+                        }
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_Step_1c()
+        {
+            int v_1;
+            int v_2;
+            // (, line 82
+            // [, line 83
+            ket = cursor;
+            // or, line 83
+            do 
+            {
+                v_1 = limit - cursor;
+                do 
+                {
+                    // literal, line 83
+                    if (!(eq_s_b(1, "y")))
+                    {
+                        goto lab1_brk;
+                    }
+                    goto lab0_brk;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = limit - v_1;
-				// literal, line 83
-				if (!(eq_s_b(1, "Y")))
-				{
-					return false;
-				}
-			}
-			while (false);
+                
+                cursor = limit - v_1;
+                // literal, line 83
+                if (!(eq_s_b(1, "Y")))
+                {
+                    return false;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
 
-			// ], line 83
-			bra = cursor;
-			if (!(out_grouping_b(g_v, 97, 121)))
-			{
-				return false;
-			}
-			// not, line 84
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// atlimit, line 84
-					if (cursor > limit_backward)
-					{
-						goto lab2_brk;
-					}
-					return false;
-				}
-				while (false);
+            // ], line 83
+            bra = cursor;
+            if (!(out_grouping_b(g_v, 97, 121)))
+            {
+                return false;
+            }
+            // not, line 84
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // atlimit, line 84
+                    if (cursor > limit_backward)
+                    {
+                        goto lab2_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab2_brk: ;
 
-				cursor = limit - v_2;
-			}
-			// <-, line 85
-			slice_from("i");
-			return true;
-		}
-		
-		private bool r_Step_2()
-		{
-			int among_var;
-			// (, line 88
-			// [, line 89
-			ket = cursor;
-			// substring, line 89
-			among_var = find_among_b(a_4, 24);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 89
-			bra = cursor;
-			// call R1, line 89
-			if (!r_R1())
-			{
-				return false;
-			}
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 90
-					// <-, line 90
-					slice_from("tion");
-					break;
-				
-				case 2: 
-					// (, line 91
-					// <-, line 91
-					slice_from("ence");
-					break;
-				
-				case 3: 
-					// (, line 92
-					// <-, line 92
-					slice_from("ance");
-					break;
-				
-				case 4: 
-					// (, line 93
-					// <-, line 93
-					slice_from("able");
-					break;
-				
-				case 5: 
-					// (, line 94
-					// <-, line 94
-					slice_from("ent");
-					break;
-				
-				case 6: 
-					// (, line 96
-					// <-, line 96
-					slice_from("ize");
-					break;
-				
-				case 7: 
-					// (, line 98
-					// <-, line 98
-					slice_from("ate");
-					break;
-				
-				case 8: 
-					// (, line 100
-					// <-, line 100
-					slice_from("al");
-					break;
-				
-				case 9: 
-					// (, line 101
-					// <-, line 101
-					slice_from("ful");
-					break;
-				
-				case 10: 
-					// (, line 103
-					// <-, line 103
-					slice_from("ous");
-					break;
-				
-				case 11: 
-					// (, line 105
-					// <-, line 105
-					slice_from("ive");
-					break;
-				
-				case 12: 
-					// (, line 107
-					// <-, line 107
-					slice_from("ble");
-					break;
-				
-				case 13: 
-					// (, line 108
-					// literal, line 108
-					if (!(eq_s_b(1, "l")))
-					{
-						return false;
-					}
-					// <-, line 108
-					slice_from("og");
-					break;
-				
-				case 14: 
-					// (, line 109
-					// <-, line 109
-					slice_from("ful");
-					break;
-				
-				case 15: 
-					// (, line 110
-					// <-, line 110
-					slice_from("less");
-					break;
-				
-				case 16: 
-					// (, line 111
-					if (!(in_grouping_b(g_valid_LI, 99, 116)))
-					{
-						return false;
-					}
-					// delete, line 111
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_Step_3()
-		{
-			int among_var;
-			// (, line 115
-			// [, line 116
-			ket = cursor;
-			// substring, line 116
-			among_var = find_among_b(a_5, 9);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 116
-			bra = cursor;
-			// call R1, line 116
-			if (!r_R1())
-			{
-				return false;
-			}
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 117
-					// <-, line 117
-					slice_from("tion");
-					break;
-				
-				case 2: 
-					// (, line 118
-					// <-, line 118
-					slice_from("ate");
-					break;
-				
-				case 3: 
-					// (, line 119
-					// <-, line 119
-					slice_from("al");
-					break;
-				
-				case 4: 
-					// (, line 121
-					// <-, line 121
-					slice_from("ic");
-					break;
-				
-				case 5: 
-					// (, line 123
-					// delete, line 123
-					slice_del();
-					break;
-				
-				case 6: 
-					// (, line 125
-					// call R2, line 125
-					if (!r_R2())
-					{
-						return false;
-					}
-					// delete, line 125
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_Step_4()
-		{
-			int among_var;
-			int v_1;
-			// (, line 129
-			// [, line 130
-			ket = cursor;
-			// substring, line 130
-			among_var = find_among_b(a_6, 18);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 130
-			bra = cursor;
-			// call R2, line 130
-			if (!r_R2())
-			{
-				return false;
-			}
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 133
-					// delete, line 133
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 134
-					// or, line 134
-					do 
-					{
-						v_1 = limit - cursor;
-						do 
-						{
-							// literal, line 134
-							if (!(eq_s_b(1, "s")))
-							{
-								goto lab1_brk;
-							}
-							goto lab0_brk;
-						}
-						while (false);
+                cursor = limit - v_2;
+            }
+            // <-, line 85
+            slice_from("i");
+            return true;
+        }
+        
+        private bool r_Step_2()
+        {
+            int among_var;
+            // (, line 88
+            // [, line 89
+            ket = cursor;
+            // substring, line 89
+            among_var = find_among_b(a_4, 24);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 89
+            bra = cursor;
+            // call R1, line 89
+            if (!r_R1())
+            {
+                return false;
+            }
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 90
+                    // <-, line 90
+                    slice_from("tion");
+                    break;
+                
+                case 2: 
+                    // (, line 91
+                    // <-, line 91
+                    slice_from("ence");
+                    break;
+                
+                case 3: 
+                    // (, line 92
+                    // <-, line 92
+                    slice_from("ance");
+                    break;
+                
+                case 4: 
+                    // (, line 93
+                    // <-, line 93
+                    slice_from("able");
+                    break;
+                
+                case 5: 
+                    // (, line 94
+                    // <-, line 94
+                    slice_from("ent");
+                    break;
+                
+                case 6: 
+                    // (, line 96
+                    // <-, line 96
+                    slice_from("ize");
+                    break;
+                
+                case 7: 
+                    // (, line 98
+                    // <-, line 98
+                    slice_from("ate");
+                    break;
+                
+                case 8: 
+                    // (, line 100
+                    // <-, line 100
+                    slice_from("al");
+                    break;
+                
+                case 9: 
+                    // (, line 101
+                    // <-, line 101
+                    slice_from("ful");
+                    break;
+                
+                case 10: 
+                    // (, line 103
+                    // <-, line 103
+                    slice_from("ous");
+                    break;
+                
+                case 11: 
+                    // (, line 105
+                    // <-, line 105
+                    slice_from("ive");
+                    break;
+                
+                case 12: 
+                    // (, line 107
+                    // <-, line 107
+                    slice_from("ble");
+                    break;
+                
+                case 13: 
+                    // (, line 108
+                    // literal, line 108
+                    if (!(eq_s_b(1, "l")))
+                    {
+                        return false;
+                    }
+                    // <-, line 108
+                    slice_from("og");
+                    break;
+                
+                case 14: 
+                    // (, line 109
+                    // <-, line 109
+                    slice_from("ful");
+                    break;
+                
+                case 15: 
+                    // (, line 110
+                    // <-, line 110
+                    slice_from("less");
+                    break;
+                
+                case 16: 
+                    // (, line 111
+                    if (!(in_grouping_b(g_valid_LI, 99, 116)))
+                    {
+                        return false;
+                    }
+                    // delete, line 111
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_Step_3()
+        {
+            int among_var;
+            // (, line 115
+            // [, line 116
+            ket = cursor;
+            // substring, line 116
+            among_var = find_among_b(a_5, 9);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 116
+            bra = cursor;
+            // call R1, line 116
+            if (!r_R1())
+            {
+                return false;
+            }
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 117
+                    // <-, line 117
+                    slice_from("tion");
+                    break;
+                
+                case 2: 
+                    // (, line 118
+                    // <-, line 118
+                    slice_from("ate");
+                    break;
+                
+                case 3: 
+                    // (, line 119
+                    // <-, line 119
+                    slice_from("al");
+                    break;
+                
+                case 4: 
+                    // (, line 121
+                    // <-, line 121
+                    slice_from("ic");
+                    break;
+                
+                case 5: 
+                    // (, line 123
+                    // delete, line 123
+                    slice_del();
+                    break;
+                
+                case 6: 
+                    // (, line 125
+                    // call R2, line 125
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // delete, line 125
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_Step_4()
+        {
+            int among_var;
+            int v_1;
+            // (, line 129
+            // [, line 130
+            ket = cursor;
+            // substring, line 130
+            among_var = find_among_b(a_6, 18);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 130
+            bra = cursor;
+            // call R2, line 130
+            if (!r_R2())
+            {
+                return false;
+            }
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 133
+                    // delete, line 133
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 134
+                    // or, line 134
+                    do 
+                    {
+                        v_1 = limit - cursor;
+                        do 
+                        {
+                            // literal, line 134
+                            if (!(eq_s_b(1, "s")))
+                            {
+                                goto lab1_brk;
+                            }
+                            goto lab0_brk;
+                        }
+                        while (false);
 
 lab1_brk: ;
 
-						cursor = limit - v_1;
-						// literal, line 134
-						if (!(eq_s_b(1, "t")))
-						{
-							return false;
-						}
-					}
-					while (false);
+                        cursor = limit - v_1;
+                        // literal, line 134
+                        if (!(eq_s_b(1, "t")))
+                        {
+                            return false;
+                        }
+                    }
+                    while (false);
 
 lab0_brk: ;
 
-					// delete, line 134
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_Step_5()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			// (, line 138
-			// [, line 139
-			ket = cursor;
-			// substring, line 139
-			among_var = find_among_b(a_7, 2);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 139
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 140
-					// or, line 140
-					do 
-					{
-						v_1 = limit - cursor;
-						do 
-						{
-							// call R2, line 140
-							if (!r_R2())
-							{
-								goto lab1_brk;
-							}
-							goto lab0_brk;
-						}
-						while (false);
+                    // delete, line 134
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_Step_5()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            // (, line 138
+            // [, line 139
+            ket = cursor;
+            // substring, line 139
+            among_var = find_among_b(a_7, 2);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 139
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 140
+                    // or, line 140
+                    do 
+                    {
+                        v_1 = limit - cursor;
+                        do 
+                        {
+                            // call R2, line 140
+                            if (!r_R2())
+                            {
+                                goto lab1_brk;
+                            }
+                            goto lab0_brk;
+                        }
+                        while (false);
 
 lab1_brk: ;
-						
-						cursor = limit - v_1;
-						// (, line 140
-						// call R1, line 140
-						if (!r_R1())
-						{
-							return false;
-						}
-						// not, line 140
-						{
-							v_2 = limit - cursor;
-							do 
-							{
-								// call shortv, line 140
-								if (!r_shortv())
-								{
-									goto lab2_brk;
-								}
-								return false;
-							}
-							while (false);
+                        
+                        cursor = limit - v_1;
+                        // (, line 140
+                        // call R1, line 140
+                        if (!r_R1())
+                        {
+                            return false;
+                        }
+                        // not, line 140
+                        {
+                            v_2 = limit - cursor;
+                            do 
+                            {
+                                // call shortv, line 140
+                                if (!r_shortv())
+                                {
+                                    goto lab2_brk;
+                                }
+                                return false;
+                            }
+                            while (false);
 
 lab2_brk: ;
-							
-							cursor = limit - v_2;
-						}
-					}
-					while (false);
+                            
+                            cursor = limit - v_2;
+                        }
+                    }
+                    while (false);
 lab0_brk: ;
-					// delete, line 140
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 141
-					// call R2, line 141
-					if (!r_R2())
-					{
-						return false;
-					}
-					// literal, line 141
-					if (!(eq_s_b(1, "l")))
-					{
-						return false;
-					}
-					// delete, line 141
-					slice_del();
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_exception2()
-		{
-			// (, line 145
-			// [, line 147
-			ket = cursor;
-			// substring, line 147
-			if (find_among_b(a_8, 8) == 0)
-			{
-				return false;
-			}
-			// ], line 147
-			bra = cursor;
-			// atlimit, line 147
-			if (cursor > limit_backward)
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_exception1()
-		{
-			int among_var;
-			// (, line 157
-			// [, line 159
-			bra = cursor;
-			// substring, line 159
-			among_var = find_among(a_9, 18);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 159
-			ket = cursor;
-			// atlimit, line 159
-			if (cursor < limit)
-			{
-				return false;
-			}
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 163
-					// <-, line 163
-					slice_from("ski");
-					break;
-				
-				case 2: 
-					// (, line 164
-					// <-, line 164
-					slice_from("sky");
-					break;
-				
-				case 3: 
-					// (, line 165
-					// <-, line 165
-					slice_from("die");
-					break;
-				
-				case 4: 
-					// (, line 166
-					// <-, line 166
-					slice_from("lie");
-					break;
-				
-				case 5: 
-					// (, line 167
-					// <-, line 167
-					slice_from("tie");
-					break;
-				
-				case 6: 
-					// (, line 171
-					// <-, line 171
-					slice_from("idl");
-					break;
-				
-				case 7: 
-					// (, line 172
-					// <-, line 172
-					slice_from("gentl");
-					break;
-				
-				case 8: 
-					// (, line 173
-					// <-, line 173
-					slice_from("ugli");
-					break;
-				
-				case 9: 
-					// (, line 174
-					// <-, line 174
-					slice_from("earli");
-					break;
-				
-				case 10: 
-					// (, line 175
-					// <-, line 175
-					slice_from("onli");
-					break;
-				
-				case 11: 
-					// (, line 176
-					// <-, line 176
-					slice_from("singl");
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_postlude()
-		{
-			int v_1;
-			int v_2;
-			// (, line 192
-			// Boolean test Y_found, line 192
-			if (!(B_Y_found))
-			{
-				return false;
-			}
-			// repeat, line 192
-			while (true)
-			{
-				v_1 = cursor;
-				do 
-				{
-					// (, line 192
-					// goto, line 192
-					while (true)
-					{
-						v_2 = cursor;
-						do 
-						{
-							// (, line 192
-							// [, line 192
-							bra = cursor;
-							// literal, line 192
-							if (!(eq_s(1, "Y")))
-							{
-								goto lab3_brk;
-							}
-							// ], line 192
-							ket = cursor;
-							cursor = v_2;
-							goto golab2_brk;
-						}
-						while (false);
+                    // delete, line 140
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 141
+                    // call R2, line 141
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // literal, line 141
+                    if (!(eq_s_b(1, "l")))
+                    {
+                        return false;
+                    }
+                    // delete, line 141
+                    slice_del();
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_exception2()
+        {
+            // (, line 145
+            // [, line 147
+            ket = cursor;
+            // substring, line 147
+            if (find_among_b(a_8, 8) == 0)
+            {
+                return false;
+            }
+            // ], line 147
+            bra = cursor;
+            // atlimit, line 147
+            if (cursor > limit_backward)
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_exception1()
+        {
+            int among_var;
+            // (, line 157
+            // [, line 159
+            bra = cursor;
+            // substring, line 159
+            among_var = find_among(a_9, 18);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 159
+            ket = cursor;
+            // atlimit, line 159
+            if (cursor < limit)
+            {
+                return false;
+            }
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 163
+                    // <-, line 163
+                    slice_from("ski");
+                    break;
+                
+                case 2: 
+                    // (, line 164
+                    // <-, line 164
+                    slice_from("sky");
+                    break;
+                
+                case 3: 
+                    // (, line 165
+                    // <-, line 165
+                    slice_from("die");
+                    break;
+                
+                case 4: 
+                    // (, line 166
+                    // <-, line 166
+                    slice_from("lie");
+                    break;
+                
+                case 5: 
+                    // (, line 167
+                    // <-, line 167
+                    slice_from("tie");
+                    break;
+                
+                case 6: 
+                    // (, line 171
+                    // <-, line 171
+                    slice_from("idl");
+                    break;
+                
+                case 7: 
+                    // (, line 172
+                    // <-, line 172
+                    slice_from("gentl");
+                    break;
+                
+                case 8: 
+                    // (, line 173
+                    // <-, line 173
+                    slice_from("ugli");
+                    break;
+                
+                case 9: 
+                    // (, line 174
+                    // <-, line 174
+                    slice_from("earli");
+                    break;
+                
+                case 10: 
+                    // (, line 175
+                    // <-, line 175
+                    slice_from("onli");
+                    break;
+                
+                case 11: 
+                    // (, line 176
+                    // <-, line 176
+                    slice_from("singl");
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_postlude()
+        {
+            int v_1;
+            int v_2;
+            // (, line 192
+            // Boolean test Y_found, line 192
+            if (!(B_Y_found))
+            {
+                return false;
+            }
+            // repeat, line 192
+            while (true)
+            {
+                v_1 = cursor;
+                do 
+                {
+                    // (, line 192
+                    // goto, line 192
+                    while (true)
+                    {
+                        v_2 = cursor;
+                        do 
+                        {
+                            // (, line 192
+                            // [, line 192
+                            bra = cursor;
+                            // literal, line 192
+                            if (!(eq_s(1, "Y")))
+                            {
+                                goto lab3_brk;
+                            }
+                            // ], line 192
+                            ket = cursor;
+                            cursor = v_2;
+                            goto golab2_brk;
+                        }
+                        while (false);
 
 lab3_brk: ;
-						
-						cursor = v_2;
-						if (cursor >= limit)
-						{
-							goto lab1_brk;
-						}
-						cursor++;
-					}
+                        
+                        cursor = v_2;
+                        if (cursor >= limit)
+                        {
+                            goto lab1_brk;
+                        }
+                        cursor++;
+                    }
 golab2_brk: ;
-					
-					// <-, line 192
-					slice_from("y");
-					goto replab0;
-				}
-				while (false);
+                    
+                    // <-, line 192
+                    slice_from("y");
+                    goto replab0;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = v_1;
-				goto replab0_brk;
+                
+                cursor = v_1;
+                goto replab0_brk;
 
 replab0: ;
-			}
+            }
 
 replab0_brk: ;
-			
-			return true;
-		}
-		
-		public override bool Stem()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			int v_6;
-			int v_7;
-			int v_8;
-			int v_9;
-			int v_10;
-			int v_11;
-			int v_12;
-			int v_13;
-			// (, line 194
-			// or, line 196
-			do 
-			{
-				v_1 = cursor;
-				do 
-				{
-					// call exception1, line 196
-					if (!r_exception1())
-					{
-						goto lab1_brk;
-					}
-					goto lab0_brk;
-				}
-				while (false);
+            
+            return true;
+        }
+        
+        public override bool Stem()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            int v_6;
+            int v_7;
+            int v_8;
+            int v_9;
+            int v_10;
+            int v_11;
+            int v_12;
+            int v_13;
+            // (, line 194
+            // or, line 196
+            do 
+            {
+                v_1 = cursor;
+                do 
+                {
+                    // call exception1, line 196
+                    if (!r_exception1())
+                    {
+                        goto lab1_brk;
+                    }
+                    goto lab0_brk;
+                }
+                while (false);
 
 lab1_brk: ;
 
-				cursor = v_1;
-				// (, line 196
-				// test, line 198
-				v_2 = cursor;
-				// hop, line 198
-				{
-					int c = cursor + 3;
-					if (0 > c || c > limit)
-					{
-						return false;
-					}
-					cursor = c;
-				}
-				cursor = v_2;
-				// do, line 199
-				v_3 = cursor;
-				do 
-				{
-					// call prelude, line 199
-					if (!r_prelude())
-					{
-						goto lab2_brk;
-					}
-				}
-				while (false);
+                cursor = v_1;
+                // (, line 196
+                // test, line 198
+                v_2 = cursor;
+                // hop, line 198
+                {
+                    int c = cursor + 3;
+                    if (0 > c || c > limit)
+                    {
+                        return false;
+                    }
+                    cursor = c;
+                }
+                cursor = v_2;
+                // do, line 199
+                v_3 = cursor;
+                do 
+                {
+                    // call prelude, line 199
+                    if (!r_prelude())
+                    {
+                        goto lab2_brk;
+                    }
+                }
+                while (false);
 
 lab2_brk: ;
-				
-				cursor = v_3;
-				// do, line 200
-				v_4 = cursor;
-				do 
-				{
-					// call mark_regions, line 200
-					if (!r_mark_regions())
-					{
-						goto lab3_brk;
-					}
-				}
-				while (false);
+                
+                cursor = v_3;
+                // do, line 200
+                v_4 = cursor;
+                do 
+                {
+                    // call mark_regions, line 200
+                    if (!r_mark_regions())
+                    {
+                        goto lab3_brk;
+                    }
+                }
+                while (false);
 
 lab3_brk: ;
 
-				cursor = v_4;
-				// backwards, line 201
-				limit_backward = cursor; cursor = limit;
-				// (, line 201
-				// do, line 203
-				v_5 = limit - cursor;
-				do 
-				{
-					// call Step_1a, line 203
-					if (!r_Step_1a())
-					{
-						goto lab4_brk;
-					}
-				}
-				while (false);
+                cursor = v_4;
+                // backwards, line 201
+                limit_backward = cursor; cursor = limit;
+                // (, line 201
+                // do, line 203
+                v_5 = limit - cursor;
+                do 
+                {
+                    // call Step_1a, line 203
+                    if (!r_Step_1a())
+                    {
+                        goto lab4_brk;
+                    }
+                }
+                while (false);
 
 lab4_brk: ;
 
-				cursor = limit - v_5;
-				// or, line 205
-				do 
-				{
-					v_6 = limit - cursor;
-					do 
-					{
-						// call exception2, line 205
-						if (!r_exception2())
-						{
-							goto lab6_brk;
-						}
-						goto lab5_brk;
-					}
-					while (false);
+                cursor = limit - v_5;
+                // or, line 205
+                do 
+                {
+                    v_6 = limit - cursor;
+                    do 
+                    {
+                        // call exception2, line 205
+                        if (!r_exception2())
+                        {
+                            goto lab6_brk;
+                        }
+                        goto lab5_brk;
+                    }
+                    while (false);
 
 lab6_brk: ;
 
-					cursor = limit - v_6;
-					// (, line 205
-					// do, line 207
-					v_7 = limit - cursor;
-					do 
-					{
-						// call Step_1b, line 207
-						if (!r_Step_1b())
-						{
-							goto lab7_brk;
-						}
-					}
-					while (false);
+                    cursor = limit - v_6;
+                    // (, line 205
+                    // do, line 207
+                    v_7 = limit - cursor;
+                    do 
+                    {
+                        // call Step_1b, line 207
+                        if (!r_Step_1b())
+                        {
+                            goto lab7_brk;
+                        }
+                    }
+                    while (false);
 
 lab7_brk: ;
-					
-					cursor = limit - v_7;
-					// do, line 208
-					v_8 = limit - cursor;
-					do 
-					{
-						// call Step_1c, line 208
-						if (!r_Step_1c())
-						{
-							goto lab8_brk;
-						}
-					}
-					while (false);
+                    
+                    cursor = limit - v_7;
+                    // do, line 208
+                    v_8 = limit - cursor;
+                    do 
+                    {
+                        // call Step_1c, line 208
+                        if (!r_Step_1c())
+                        {
+                            goto lab8_brk;
+                        }
+                    }
+                    while (false);
 
 lab8_brk: ;
 
-					cursor = limit - v_8;
-					// do, line 210
-					v_9 = limit - cursor;
-					do 
-					{
-						// call Step_2, line 210
-						if (!r_Step_2())
-						{
-							goto lab9_brk;
-						}
-					}
-					while (false);
+                    cursor = limit - v_8;
+                    // do, line 210
+                    v_9 = limit - cursor;
+                    do 
+                    {
+                        // call Step_2, line 210
+                        if (!r_Step_2())
+                        {
+                            goto lab9_brk;
+                        }
+                    }
+                    while (false);
 
 lab9_brk: ;
-					
-					cursor = limit - v_9;
-					// do, line 211
-					v_10 = limit - cursor;
-					do 
-					{
-						// call Step_3, line 211
-						if (!r_Step_3())
-						{
-							goto lab10_brk;
-						}
-					}
-					while (false);
+                    
+                    cursor = limit - v_9;
+                    // do, line 211
+                    v_10 = limit - cursor;
+                    do 
+                    {
+                        // call Step_3, line 211
+                        if (!r_Step_3())
+                        {
+                            goto lab10_brk;
+                        }
+                    }
+                    while (false);
 
 lab10_brk: ;
-					
-					cursor = limit - v_10;
-					// do, line 212
-					v_11 = limit - cursor;
-					do 
-					{
-						// call Step_4, line 212
-						if (!r_Step_4())
-						{
-							goto lab11_brk;
-						}
-					}
-					while (false);
+                    
+                    cursor = limit - v_10;
+                    // do, line 212
+                    v_11 = limit - cursor;
+                    do 
+                    {
+                        // call Step_4, line 212
+                        if (!r_Step_4())
+                        {
+                            goto lab11_brk;
+                        }
+                    }
+                    while (false);
 
 lab11_brk: ;
-					
-					cursor = limit - v_11;
-					// do, line 214
-					v_12 = limit - cursor;
-					do 
-					{
-						// call Step_5, line 214
-						if (!r_Step_5())
-						{
-							goto lab12_brk;
-						}
-					}
-					while (false);
+                    
+                    cursor = limit - v_11;
+                    // do, line 214
+                    v_12 = limit - cursor;
+                    do 
+                    {
+                        // call Step_5, line 214
+                        if (!r_Step_5())
+                        {
+                            goto lab12_brk;
+                        }
+                    }
+                    while (false);
 
 lab12_brk: ;
-					
-					cursor = limit - v_12;
-				}
-				while (false);
+                    
+                    cursor = limit - v_12;
+                }
+                while (false);
 
 lab5_brk: ;
 
-				cursor = limit_backward; // do, line 217
-				v_13 = cursor;
-				do 
-				{
-					// call postlude, line 217
-					if (!r_postlude())
-					{
-						goto lab13_brk;
-					}
-				}
-				while (false);
+                cursor = limit_backward; // do, line 217
+                v_13 = cursor;
+                do 
+                {
+                    // call postlude, line 217
+                    if (!r_postlude())
+                    {
+                        goto lab13_brk;
+                    }
+                }
+                while (false);
 
 lab13_brk: ;
-				
-				cursor = v_13;
-			}
-			while (false);
+                
+                cursor = v_13;
+            }
+            while (false);
 
 lab0_brk: ;
 
-			return true;
-		}
-	}
+            return true;
+        }
+    }
 }
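
A note on the file above: this is the Snowball-generated English (Porter2) stemmer, so the odd idioms, such as the "// (, line 63" breadcrumbs that point back at the Snowball grammar and the do { ... } while (false) blocks paired with goto labN_brk labels that stand in for Java's labeled breaks, come from the code generator rather than from the port. Stem() chains r_prelude, r_mark_regions, the backward-mode suffix steps r_Step_1a through r_Step_5, and r_postlude, with r_exception1/r_exception2 short-circuiting irregular whole words. Below is a minimal usage sketch; the SF.Snowball.Ext namespace and the SetCurrent/GetCurrent accessors are assumptions carried over from the Java SnowballProgram rather than members visible in this diff.

    // Usage sketch only, not part of this commit. SF.Snowball.Ext and the
    // SetCurrent/GetCurrent accessors are assumed from the Java original.
    using System;
    using SF.Snowball.Ext;

    class StemDemo
    {
        static void Main()
        {
            EnglishStemmer stemmer = new EnglishStemmer();
            string[] words = new string[] { "relational", "conditional", "singly", "dying" };
            foreach (string word in words)
            {
                stemmer.SetCurrent(word);
                stemmer.Stem();                 // prelude, Step_1a..Step_5, postlude
                Console.WriteLine(word + " -> " + stemmer.GetCurrent());
            }
            // Per the Porter2 definition this should print:
            //   relational -> relat, conditional -> condit,
            //   singly -> singl and dying -> die (both via exception1)
        }
    }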


[26/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Standard/StandardTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Standard/StandardTokenizer.cs b/src/core/Analysis/Standard/StandardTokenizer.cs
index dca409d..8f25c7c 100644
--- a/src/core/Analysis/Standard/StandardTokenizer.cs
+++ b/src/core/Analysis/Standard/StandardTokenizer.cs
@@ -26,207 +26,207 @@ using Version = Lucene.Net.Util.Version;
 
 namespace Lucene.Net.Analysis.Standard
 {
-	
-	/// <summary>A grammar-based tokenizer constructed with JFlex
-	/// 
-	/// <p/> This should be a good tokenizer for most European-language documents:
-	/// 
-	/// <list type="bullet">
-	/// <item>Splits words at punctuation characters, removing punctuation. However, a 
-	/// dot that's not followed by whitespace is considered part of a token.</item>
-	/// <item>Splits words at hyphens, unless there's a number in the token, in which case
-	/// the whole token is interpreted as a product number and is not split.</item>
-	/// <item>Recognizes email addresses and internet hostnames as one token.</item>
-	/// </list>
-	/// 
-	/// <p/>Many applications have specific tokenizer needs.  If this tokenizer does
-	/// not suit your application, please consider copying this source code
-	/// directory to your project and maintaining your own grammar-based tokenizer.
-	/// 
-	/// <a name="version"/>
-	/// <p/>
-	/// You must specify the required <see cref="Version" /> compatibility when creating
-	/// StandardAnalyzer:
-	/// <list type="bullet">
-	/// <item>As of 2.4, Tokens incorrectly identified as acronyms are corrected (see
-	/// <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1608</a></item>
-	/// </list>
-	/// </summary>
-	
-	public sealed class StandardTokenizer:Tokenizer
-	{
-		private void  InitBlock()
-		{
-			maxTokenLength = StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH;
-		}
-		/// <summary>A private instance of the JFlex-constructed scanner </summary>
-		private StandardTokenizerImpl scanner;
-		
-		public const int ALPHANUM   = 0;
-		public const int APOSTROPHE = 1;
-		public const int ACRONYM    = 2;
-		public const int COMPANY    = 3;
-		public const int EMAIL      = 4;
-		public const int HOST       = 5;
-		public const int NUM        = 6;
-		public const int CJ         = 7;
-		
-		/// <deprecated> this solves a bug where HOSTs that end with '.' are identified
-		/// as ACRONYMs.
-		/// </deprecated>
+    
+    /// <summary>A grammar-based tokenizer constructed with JFlex
+    /// 
+    /// <p/> This should be a good tokenizer for most European-language documents:
+    /// 
+    /// <list type="bullet">
+    /// <item>Splits words at punctuation characters, removing punctuation. However, a 
+    /// dot that's not followed by whitespace is considered part of a token.</item>
+    /// <item>Splits words at hyphens, unless there's a number in the token, in which case
+    /// the whole token is interpreted as a product number and is not split.</item>
+    /// <item>Recognizes email addresses and internet hostnames as one token.</item>
+    /// </list>
+    /// 
+    /// <p/>Many applications have specific tokenizer needs.  If this tokenizer does
+    /// not suit your application, please consider copying this source code
+    /// directory to your project and maintaining your own grammar-based tokenizer.
+    /// 
+    /// <a name="version"/>
+    /// <p/>
+    /// You must specify the required <see cref="Version" /> compatibility when creating
+    /// StandardAnalyzer:
+    /// <list type="bullet">
+    /// <item>As of 2.4, Tokens incorrectly identified as acronyms are corrected (see
+    /// <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1608</a></item>
+    /// </list>
+    /// </summary>
+    
+    public sealed class StandardTokenizer:Tokenizer
+    {
+        private void  InitBlock()
+        {
+            maxTokenLength = StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH;
+        }
+        /// <summary>A private instance of the JFlex-constructed scanner </summary>
+        private StandardTokenizerImpl scanner;
+        
+        public const int ALPHANUM   = 0;
+        public const int APOSTROPHE = 1;
+        public const int ACRONYM    = 2;
+        public const int COMPANY    = 3;
+        public const int EMAIL      = 4;
+        public const int HOST       = 5;
+        public const int NUM        = 6;
+        public const int CJ         = 7;
+        
+        /// <deprecated> this solves a bug where HOSTs that end with '.' are identified
+        /// as ACRONYMs.
+        /// </deprecated>
         [Obsolete("this solves a bug where HOSTs that end with '.' are identified as ACRONYMs.")]
-		public const int ACRONYM_DEP = 8;
-		
-		/// <summary>String token types that correspond to token type int constants </summary>
-		public static readonly System.String[] TOKEN_TYPES = new System.String[]{"<ALPHANUM>", "<APOSTROPHE>", "<ACRONYM>", "<COMPANY>", "<EMAIL>", "<HOST>", "<NUM>", "<CJ>", "<ACRONYM_DEP>"};
-		
-		private bool replaceInvalidAcronym;
-		
-		private int maxTokenLength;
+        public const int ACRONYM_DEP = 8;
+        
+        /// <summary>String token types that correspond to token type int constants </summary>
+        public static readonly System.String[] TOKEN_TYPES = new System.String[]{"<ALPHANUM>", "<APOSTROPHE>", "<ACRONYM>", "<COMPANY>", "<EMAIL>", "<HOST>", "<NUM>", "<CJ>", "<ACRONYM_DEP>"};
+        
+        private bool replaceInvalidAcronym;
+        
+        private int maxTokenLength;
 
-	    /// <summary>Set the max allowed token length.  Any token longer
-	    /// than this is skipped. 
-	    /// </summary>
-	    public int MaxTokenLength
-	    {
-	        get { return maxTokenLength; }
-	        set { this.maxTokenLength = value; }
-	    }
+        /// <summary>Set the max allowed token length.  Any token longer
+        /// than this is skipped. 
+        /// </summary>
+        public int MaxTokenLength
+        {
+            get { return maxTokenLength; }
+            set { this.maxTokenLength = value; }
+        }
 
-	    /// <summary> Creates a new instance of the
-	    /// <see cref="Lucene.Net.Analysis.Standard.StandardTokenizer" />. Attaches
-	    /// the <c>input</c> to the newly created JFlex scanner.
-	    /// 
-	    /// </summary>
-	    /// <param name="matchVersion"></param>
-	    /// <param name="input">The input reader
-	    /// 
-	    /// See http://issues.apache.org/jira/browse/LUCENE-1068
-	    /// </param>
-	    public StandardTokenizer(Version matchVersion, System.IO.TextReader input):base()
-		{
-			InitBlock();
-			this.scanner = new StandardTokenizerImpl(input);
-			Init(input, matchVersion);
-		}
+        /// <summary> Creates a new instance of the
+        /// <see cref="Lucene.Net.Analysis.Standard.StandardTokenizer" />. Attaches
+        /// the <c>input</c> to the newly created JFlex scanner.
+        /// 
+        /// </summary>
+        /// <param name="matchVersion"></param>
+        /// <param name="input">The input reader
+        /// 
+        /// See http://issues.apache.org/jira/browse/LUCENE-1068
+        /// </param>
+        public StandardTokenizer(Version matchVersion, System.IO.TextReader input):base()
+        {
+            InitBlock();
+            this.scanner = new StandardTokenizerImpl(input);
+            Init(input, matchVersion);
+        }
 
-		/// <summary> Creates a new StandardTokenizer with a given <see cref="AttributeSource" />.</summary>
-		public StandardTokenizer(Version matchVersion, AttributeSource source, System.IO.TextReader input):base(source)
-		{
-			InitBlock();
-			this.scanner = new StandardTokenizerImpl(input);
-			Init(input, matchVersion);
-		}
-		
-		/// <summary> Creates a new StandardTokenizer with a given
-		/// <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" />
-		/// </summary>
-		public StandardTokenizer(Version matchVersion, AttributeFactory factory, System.IO.TextReader input):base(factory)
-		{
-			InitBlock();
-			this.scanner = new StandardTokenizerImpl(input);
-			Init(input, matchVersion);
-		}
-		
-		private void  Init(System.IO.TextReader input, Version matchVersion)
-		{
-			if (matchVersion.OnOrAfter(Version.LUCENE_24))
-			{
-			    replaceInvalidAcronym = true;
-			}
-			else
-			{
-			    replaceInvalidAcronym = false;
-			}
-		    this.input = input;
-		    termAtt = AddAttribute<ITermAttribute>();
-		    offsetAtt = AddAttribute<IOffsetAttribute>();
-		    posIncrAtt = AddAttribute<IPositionIncrementAttribute>();
-		    typeAtt = AddAttribute<ITypeAttribute>();
-		}
-		
-		// this tokenizer generates three attributes:
-		// offset, positionIncrement and type
-		private ITermAttribute termAtt;
-		private IOffsetAttribute offsetAtt;
-		private IPositionIncrementAttribute posIncrAtt;
-		private ITypeAttribute typeAtt;
-		
-		///<summary>
-		/// (non-Javadoc)
-		/// <see cref="Lucene.Net.Analysis.TokenStream.IncrementToken()" />
+        /// <summary> Creates a new StandardTokenizer with a given <see cref="AttributeSource" />.</summary>
+        public StandardTokenizer(Version matchVersion, AttributeSource source, System.IO.TextReader input):base(source)
+        {
+            InitBlock();
+            this.scanner = new StandardTokenizerImpl(input);
+            Init(input, matchVersion);
+        }
+        
+        /// <summary> Creates a new StandardTokenizer with a given
+        /// <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" />
+        /// </summary>
+        public StandardTokenizer(Version matchVersion, AttributeFactory factory, System.IO.TextReader input):base(factory)
+        {
+            InitBlock();
+            this.scanner = new StandardTokenizerImpl(input);
+            Init(input, matchVersion);
+        }
+        
+        private void  Init(System.IO.TextReader input, Version matchVersion)
+        {
+            if (matchVersion.OnOrAfter(Version.LUCENE_24))
+            {
+                replaceInvalidAcronym = true;
+            }
+            else
+            {
+                replaceInvalidAcronym = false;
+            }
+            this.input = input;
+            termAtt = AddAttribute<ITermAttribute>();
+            offsetAtt = AddAttribute<IOffsetAttribute>();
+            posIncrAtt = AddAttribute<IPositionIncrementAttribute>();
+            typeAtt = AddAttribute<ITypeAttribute>();
+        }
+        
+        // this tokenizer generates three attributes:
+        // offset, positionIncrement and type
+        private ITermAttribute termAtt;
+        private IOffsetAttribute offsetAtt;
+        private IPositionIncrementAttribute posIncrAtt;
+        private ITypeAttribute typeAtt;
+        
+        ///<summary>
+        /// (non-Javadoc)
+        /// <see cref="Lucene.Net.Analysis.TokenStream.IncrementToken()" />
         ///</summary>
-		public override bool IncrementToken()
-		{
-			ClearAttributes();
-			int posIncr = 1;
-			
-			while (true)
-			{
-				int tokenType = scanner.GetNextToken();
-				
-				if (tokenType == StandardTokenizerImpl.YYEOF)
-				{
-					return false;
-				}
-				
-				if (scanner.Yylength() <= maxTokenLength)
-				{
-					posIncrAtt.PositionIncrement = posIncr;
-					scanner.GetText(termAtt);
-					int start = scanner.Yychar();
-					offsetAtt.SetOffset(CorrectOffset(start), CorrectOffset(start + termAtt.TermLength()));
-					// This 'if' should be removed in the next release. For now, it converts
-					// invalid acronyms to HOST. When removed, only the 'else' part should
-					// remain.
-					if (tokenType == StandardTokenizerImpl.ACRONYM_DEP)
-					{
-						if (replaceInvalidAcronym)
-						{
-							typeAtt.Type = StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.HOST];
-							termAtt.SetTermLength(termAtt.TermLength() - 1); // remove extra '.'
-						}
-						else
-						{
-							typeAtt.Type = StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.ACRONYM];
-						}
-					}
-					else
-					{
-						typeAtt.Type = StandardTokenizerImpl.TOKEN_TYPES[tokenType];
-					}
-					return true;
-				}
-				// When we skip a too-long term, we still increment the
-				// position increment
-				else
-					posIncr++;
-			}
-		}
-		
-		public override void  End()
-		{
-			// set final offset
-			int finalOffset = CorrectOffset(scanner.Yychar() + scanner.Yylength());
-			offsetAtt.SetOffset(finalOffset, finalOffset);
-		}
-		
-		public override void  Reset(System.IO.TextReader reader)
-		{
-			base.Reset(reader);
-			scanner.Reset(reader);
-		}
-		
-		/// <summary>
-		/// Remove in 3.X and make true the only valid value
-		/// See https://issues.apache.org/jira/browse/LUCENE-1068
+        public override bool IncrementToken()
+        {
+            ClearAttributes();
+            int posIncr = 1;
+            
+            while (true)
+            {
+                int tokenType = scanner.GetNextToken();
+                
+                if (tokenType == StandardTokenizerImpl.YYEOF)
+                {
+                    return false;
+                }
+                
+                if (scanner.Yylength() <= maxTokenLength)
+                {
+                    posIncrAtt.PositionIncrement = posIncr;
+                    scanner.GetText(termAtt);
+                    int start = scanner.Yychar();
+                    offsetAtt.SetOffset(CorrectOffset(start), CorrectOffset(start + termAtt.TermLength()));
+                    // This 'if' should be removed in the next release. For now, it converts
+                    // invalid acronyms to HOST. When removed, only the 'else' part should
+                    // remain.
+                    if (tokenType == StandardTokenizerImpl.ACRONYM_DEP)
+                    {
+                        if (replaceInvalidAcronym)
+                        {
+                            typeAtt.Type = StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.HOST];
+                            termAtt.SetTermLength(termAtt.TermLength() - 1); // remove extra '.'
+                        }
+                        else
+                        {
+                            typeAtt.Type = StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.ACRONYM];
+                        }
+                    }
+                    else
+                    {
+                        typeAtt.Type = StandardTokenizerImpl.TOKEN_TYPES[tokenType];
+                    }
+                    return true;
+                }
+                // When we skip a too-long term, we still increment the
+                // position increment
+                else
+                    posIncr++;
+            }
+        }
+        
+        public override void  End()
+        {
+            // set final offset
+            int finalOffset = CorrectOffset(scanner.Yychar() + scanner.Yylength());
+            offsetAtt.SetOffset(finalOffset, finalOffset);
+        }
+        
+        public override void  Reset(System.IO.TextReader reader)
+        {
+            base.Reset(reader);
+            scanner.Reset(reader);
+        }
+        
+        /// <summary>
+        /// Remove in 3.X and make true the only valid value
+        /// See https://issues.apache.org/jira/browse/LUCENE-1068
         /// </summary>
         /// <param name="replaceInvalidAcronym">Set to true to replace mischaracterized acronyms as HOST.
         /// </param>
         [Obsolete("Remove in 3.X and make true the only valid value. See https://issues.apache.org/jira/browse/LUCENE-1068")]
-		public void  SetReplaceInvalidAcronym(bool replaceInvalidAcronym)
-		{
-			this.replaceInvalidAcronym = replaceInvalidAcronym;
-		}
-	}
+        public void  SetReplaceInvalidAcronym(bool replaceInvalidAcronym)
+        {
+            this.replaceInvalidAcronym = replaceInvalidAcronym;
+        }
+    }
 }
\ No newline at end of file
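
For context on the tokenizer above: IncrementToken() is the entire consumer contract. It pulls the next match from the JFlex scanner, fills the term, offset, type, and position-increment attributes, and returns false at YYEOF, silently skipping (but still counting, via posIncr) any token longer than MaxTokenLength. A minimal consumer-side sketch, assuming only the members visible in this diff plus a Term property on ITermAttribute for reading the token text (standard in the Lucene.Net 3.x attribute API, but not shown here):

    // Consumer sketch only, not part of this commit. ITermAttribute.Term is
    // assumed from the Lucene.Net 3.x attribute API; everything else appears
    // in the diff above.
    using System;
    using System.IO;
    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Analysis.Tokenattributes;
    using Version = Lucene.Net.Util.Version;

    class TokenizeDemo
    {
        static void Main()
        {
            TextReader reader = new StringReader("XY&Z Corporation - xyz@example.com");
            StandardTokenizer tokenizer = new StandardTokenizer(Version.LUCENE_24, reader);
            ITermAttribute termAtt = tokenizer.AddAttribute<ITermAttribute>();
            ITypeAttribute typeAtt = tokenizer.AddAttribute<ITypeAttribute>();
            while (tokenizer.IncrementToken())
            {
                // Expected: XY&Z tagged <COMPANY>, Corporation <ALPHANUM>,
                // xyz@example.com <EMAIL>; the lone '-' is dropped as punctuation.
                Console.WriteLine(termAtt.Term + "\t" + typeAtt.Type);
            }
            tokenizer.End();    // records the final offset
        }
    }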

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Standard/StandardTokenizerImpl.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Standard/StandardTokenizerImpl.cs b/src/core/Analysis/Standard/StandardTokenizerImpl.cs
index cb4bf5f..cf2a81e 100644
--- a/src/core/Analysis/Standard/StandardTokenizerImpl.cs
+++ b/src/core/Analysis/Standard/StandardTokenizerImpl.cs
@@ -1,4 +1,4 @@
-/* The following code was generated by JFlex 1.4.1 on 9/4/08 6:49 PM */
+/* The following code was generated by JFlex 1.4.1 on 9/4/08 6:49 PM */
 /* 
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -33,230 +33,230 @@ using Token = Lucene.Net.Analysis.Token;
 
 namespace Lucene.Net.Analysis.Standard
 {
-	
-	
-	/// <summary> This class is a scanner generated by 
-	/// <a href="http://www.jflex.de/">JFlex</a> 1.4.1
-	/// on 9/4/08 6:49 PM from the specification file
-	/// <tt>/tango/mike/src/lucene.standarddigit/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex</tt>
-	/// </summary>
-	class StandardTokenizerImpl
-	{
-		
-		/// <summary>This character denotes the end of file </summary>
-		public const int YYEOF = - 1;
-		
-		/// <summary>initial size of the lookahead buffer </summary>
-		private const int ZZ_BUFFERSIZE = 16384;
-		
-		/// <summary>lexical states </summary>
-		public const int YYINITIAL = 0;
-		
-		/// <summary> Translates characters to character classes</summary>
-		private const System.String ZZ_CMAP_PACKED = "\x0009\x0000\x0001\x0000\x0001\x000D\x0001\x0000\x0001\x0000\x0001\x000C\x0012\x0000\x0001\x0000\x0005\x0000\x0001\x0005" + "\x0001\x0003\x0004\x0000\x0001\x0009\x0001\x0007\x0001\x0004\x0001\x0009\x000A\x0002\x0006\x0000\x0001\x0006\x001A\x000A" + "\x0004\x0000\x0001\x0008\x0001\x0000\x001A\x000A\x002F\x0000\x0001\x000A\x000A\x0000\x0001\x000A\x0004\x0000\x0001\x000A" + "\x0005\x0000\x0017\x000A\x0001\x0000\x001F\x000A\x0001\x0000\u0128\x000A\x0002\x0000\x0012\x000A\x001C\x0000\x005E\x000A" + "\x0002\x0000\x0009\x000A\x0002\x0000\x0007\x000A\x000E\x0000\x0002\x000A\x000E\x0000\x0005\x000A\x0009\x0000\x0001\x000A" + "\x008B\x0000\x0001\x000A\x000B\x0000\x0001\x000A\x0001\x0000\x0003\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0014\x000A" + "\x0001\x0000\x002C\x000A\x0001\x0000\x0008\x000A\x0002\x0000\x001A\x000A\x000C\x0000\x0082\x000A\x000A\x0000\x0039\x000A" + "\x0002\x0000\x0002\x000A\x0002\x0000\x0002\x000A\x0003\x0000\x0026\x000A\x
 0002\x0000\x0002\x000A\x0037\x0000\x0026\x000A" + "\x0002\x0000\x0001\x000A\x0007\x0000\x0027\x000A\x0048\x0000\x001B\x000A\x0005\x0000\x0003\x000A\x002E\x0000\x001A\x000A" + "\x0005\x0000\x000B\x000A\x0015\x0000\x000A\x0002\x0007\x0000\x0063\x000A\x0001\x0000\x0001\x000A\x000F\x0000\x0002\x000A" + "\x0009\x0000\x000A\x0002\x0003\x000A\x0013\x0000\x0001\x000A\x0001\x0000\x001B\x000A\x0053\x0000\x0026\x000A\u015f\x0000" + "\x0035\x000A\x0003\x0000\x0001\x000A\x0012\x0000\x0001\x000A\x0007\x0000\x000A\x000A\x0004\x0000\x000A\x0002\x0015\x0000" + "\x0008\x000A\x0002\x0000\x0002\x000A\x0002\x0000\x0016\x000A\x0001\x0000\x0007\x000A\x0001\x0000\x0001\x000A\x0003\x0000" + "\x0004\x000A\x0022\x0000\x0002\x000A\x0001\x0000\x0003\x000A\x0004\x0000\x000A\x0002\x0002\x000A\x0013\x0000\x0006\x000A" + "\x0004\x0000\x0002\x000A\x0002\x0000\x0016\x000A\x0001\x0000\x0007\x000A\x0001\x0000\x0002\x000A\x0001\x0000\x0002\x000A" + 
-			"\x0001\x0000\x0002\x000A\x001F\x0000\x0004\x000A\x0001\x0000\x0001\x000A\x0007\x0000\x000A\x0002\x0002\x0000\x0003\x000A" + "\x0010\x0000\x0007\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0003\x000A\x0001\x0000\x0016\x000A\x0001\x0000\x0007\x000A" + "\x0001\x0000\x0002\x000A\x0001\x0000\x0005\x000A\x0003\x0000\x0001\x000A\x0012\x0000\x0001\x000A\x000F\x0000\x0001\x000A" + "\x0005\x0000\x000A\x0002\x0015\x0000\x0008\x000A\x0002\x0000\x0002\x000A\x0002\x0000\x0016\x000A\x0001\x0000\x0007\x000A" + "\x0001\x0000\x0002\x000A\x0002\x0000\x0004\x000A\x0003\x0000\x0001\x000A\x001E\x0000\x0002\x000A\x0001\x0000\x0003\x000A" + "\x0004\x0000\x000A\x0002\x0015\x0000\x0006\x000A\x0003\x0000\x0003\x000A\x0001\x0000\x0004\x000A\x0003\x0000\x0002\x000A" + "\x0001\x0000\x0001\x000A\x0001\x0000\x0002\x000A\x0003\x0000\x0002\x000A\x0003\x0000\x0003\x000A\x0003\x0000\x0008\x000A" + "\x0001\x0000\x0003\x000A\x002D\x0000\x0009\x0002\x0015\x0000\x0008\x000A\x0001\x0000\x0003\x000A\x0001\x0000\x0017\x00
 0A" + "\x0001\x0000\x000A\x000A\x0001\x0000\x0005\x000A\x0026\x0000\x0002\x000A\x0004\x0000\x000A\x0002\x0015\x0000\x0008\x000A" + "\x0001\x0000\x0003\x000A\x0001\x0000\x0017\x000A\x0001\x0000\x000A\x000A\x0001\x0000\x0005\x000A\x0024\x0000\x0001\x000A" + "\x0001\x0000\x0002\x000A\x0004\x0000\x000A\x0002\x0015\x0000\x0008\x000A\x0001\x0000\x0003\x000A\x0001\x0000\x0017\x000A" + "\x0001\x0000\x0010\x000A\x0026\x0000\x0002\x000A\x0004\x0000\x000A\x0002\x0015\x0000\x0012\x000A\x0003\x0000\x0018\x000A" + "\x0001\x0000\x0009\x000A\x0001\x0000\x0001\x000A\x0002\x0000\x0007\x000A\x0039\x0000\x0001\x0001\x0030\x000A\x0001\x0001" + "\x0002\x000A\x000C\x0001\x0007\x000A\x0009\x0001\x000A\x0002\x0027\x0000\x0002\x000A\x0001\x0000\x0001\x000A\x0002\x0000" + "\x0002\x000A\x0001\x0000\x0001\x000A\x0002\x0000\x0001\x000A\x0006\x0000\x0004\x000A\x0001\x0000\x0007\x000A\x0001\x0000" + "\x0003\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0001\x000A\x0002\x0000\x0002\x000A\x0001\x0000\x0004\x000A\x0001\
 x0000" + 
-			"\x0002\x000A\x0009\x0000\x0001\x000A\x0002\x0000\x0005\x000A\x0001\x0000\x0001\x000A\x0009\x0000\x000A\x0002\x0002\x0000" + "\x0002\x000A\x0022\x0000\x0001\x000A\x001F\x0000\x000A\x0002\x0016\x0000\x0008\x000A\x0001\x0000\x0022\x000A\x001D\x0000" + "\x0004\x000A\x0074\x0000\x0022\x000A\x0001\x0000\x0005\x000A\x0001\x0000\x0002\x000A\x0015\x0000\x000A\x0002\x0006\x0000" + "\x0006\x000A\x004A\x0000\x0026\x000A\x000A\x0000\x0027\x000A\x0009\x0000\x005A\x000A\x0005\x0000\x0044\x000A\x0005\x0000" + "\x0052\x000A\x0006\x0000\x0007\x000A\x0001\x0000\x003F\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0004\x000A\x0002\x0000" + "\x0007\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0004\x000A\x0002\x0000\x0027\x000A\x0001\x0000\x0001\x000A\x0001\x0000" + "\x0004\x000A\x0002\x0000\x001F\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0004\x000A\x0002\x0000\x0007\x000A\x0001\x0000" + "\x0001\x000A\x0001\x0000\x0004\x000A\x0002\x0000\x0007\x000A\x0001\x0000\x0007\x000A\x0001\x0000\x0017\x000A\x0001\x00
 00" + "\x001F\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0004\x000A\x0002\x0000\x0007\x000A\x0001\x0000\x0027\x000A\x0001\x0000" + "\x0013\x000A\x000E\x0000\x0009\x0002\x002E\x0000\x0055\x000A\x000C\x0000\u026c\x000A\x0002\x0000\x0008\x000A\x000A\x0000" + "\x001A\x000A\x0005\x0000\x004B\x000A\x0095\x0000\x0034\x000A\x002C\x0000\x000A\x0002\x0026\x0000\x000A\x0002\x0006\x0000" + "\x0058\x000A\x0008\x0000\x0029\x000A\u0557\x0000\x009C\x000A\x0004\x0000\x005A\x000A\x0006\x0000\x0016\x000A\x0002\x0000" + "\x0006\x000A\x0002\x0000\x0026\x000A\x0002\x0000\x0006\x000A\x0002\x0000\x0008\x000A\x0001\x0000\x0001\x000A\x0001\x0000" + "\x0001\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x001F\x000A\x0002\x0000\x0035\x000A\x0001\x0000\x0007\x000A\x0001\x0000" + "\x0001\x000A\x0003\x0000\x0003\x000A\x0001\x0000\x0007\x000A\x0003\x0000\x0004\x000A\x0002\x0000\x0006\x000A\x0004\x0000" + "\x000D\x000A\x0005\x0000\x0003\x000A\x0001\x0000\x0007\x000A\x0082\x0000\x0001\x000A\x0082\x0000\x0001\x000A\x0004\
 x0000" + 
-			"\x0001\x000A\x0002\x0000\x000A\x000A\x0001\x0000\x0001\x000A\x0003\x0000\x0005\x000A\x0006\x0000\x0001\x000A\x0001\x0000" + "\x0001\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0004\x000A\x0001\x0000\x0003\x000A\x0001\x0000\x0007\x000A\u0ecb\x0000" + "\x0002\x000A\x002A\x0000\x0005\x000A\x000A\x0000\x0001\x000B\x0054\x000B\x0008\x000B\x0002\x000B\x0002\x000B\x005A\x000B" + "\x0001\x000B\x0003\x000B\x0006\x000B\x0028\x000B\x0003\x000B\x0001\x0000\x005E\x000A\x0011\x0000\x0018\x000A\x0038\x0000" + "\x0010\x000B\u0100\x0000\x0080\x000B\x0080\x0000\u19b6\x000B\x000A\x000B\x0040\x0000\u51a6\x000B\x005A\x000B\u048d\x000A" + "\u0773\x0000\u2ba4\x000A\u215c\x0000\u012e\x000B\x00D2\x000B\x0007\x000A\x000C\x0000\x0005\x000A\x0005\x0000\x0001\x000A" + "\x0001\x0000\x000A\x000A\x0001\x0000\x000D\x000A\x0001\x0000\x0005\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0002\x000A" + "\x0001\x0000\x0002\x000A\x0001\x0000\x006C\x000A\x0021\x0000\u016b\x000A\x0012\x0000\x0040\x000A\x0002\x0000\x0036\x00
 0A" + "\x0028\x0000\x000C\x000A\x0074\x0000\x0003\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0087\x000A\x0013\x0000\x000A\x0002" + "\x0007\x0000\x001A\x000A\x0006\x0000\x001A\x000A\x000A\x0000\x0001\x000B\x003A\x000B\x001F\x000A\x0003\x0000\x0006\x000A" + "\x0002\x0000\x0006\x000A\x0002\x0000\x0006\x000A\x0002\x0000\x0003\x000A\x0023\x0000";
-		
-		/// <summary> Translates characters to character classes</summary>
-		private static readonly char[] ZZ_CMAP = ZzUnpackCMap(ZZ_CMAP_PACKED);
-		
-		/// <summary> Translates DFA states to action switch labels.</summary>
-		private static readonly int[] ZZ_ACTION = ZzUnpackAction();
-		
-		private const System.String ZZ_ACTION_PACKED_0 = "\x0001\x0000\x0001\x0001\x0003\x0002\x0001\x0003\x0001\x0001\x000B\x0000\x0001\x0002\x0003\x0004" + "\x0002\x0000\x0001\x0005\x0001\x0000\x0001\x0005\x0003\x0004\x0006\x0005\x0001\x0006\x0001\x0004" + "\x0002\x0007\x0001\x0008\x0001\x0000\x0001\x0008\x0003\x0000\x0002\x0008\x0001\x0009\x0001\x000A" + "\x0001\x0004";
-		
-		private static int[] ZzUnpackAction()
-		{
-			int[] result = new int[51];
-			int offset = 0;
-			offset = ZzUnpackAction(ZZ_ACTION_PACKED_0, offset, result);
-			return result;
-		}
-		
-		private static int ZzUnpackAction(System.String packed, int offset, int[] result)
-		{
-			int i = 0; /* index in packed string  */
-			int j = offset; /* index in unpacked array */
-			int l = packed.Length;
-			while (i < l)
-			{
-				int count = packed[i++];
-				int value_Renamed = packed[i++];
-				do 
-					result[j++] = value_Renamed;
-				while (--count > 0);
-			}
-			return j;
-		}
-		
-		
-		/// <summary> Translates a state to a row index in the transition table</summary>
-		private static readonly int[] ZZ_ROWMAP = ZzUnpackRowMap();
-		
-		private const System.String ZZ_ROWMAP_PACKED_0 = "\x0000\x0000\x0000\x000E\x0000\x001C\x0000\x002A\x0000\x0038\x0000\x000E\x0000\x0046\x0000\x0054" + "\x0000\x0062\x0000\x0070\x0000\x007E\x0000\x008C\x0000\x009A\x0000\x00A8\x0000\x00B6\x0000\x00C4" + "\x0000\x00D2\x0000\x00E0\x0000\x00EE\x0000\x00FC\x0000\u010a\x0000\u0118\x0000\u0126\x0000\u0134" + "\x0000\u0142\x0000\u0150\x0000\u015e\x0000\u016c\x0000\u017a\x0000\u0188\x0000\u0196\x0000\u01a4" + "\x0000\u01b2\x0000\u01c0\x0000\u01ce\x0000\u01dc\x0000\u01ea\x0000\u01f8\x0000\x00D2\x0000\u0206" + "\x0000\u0214\x0000\u0222\x0000\u0230\x0000\u023e\x0000\u024c\x0000\u025a\x0000\x0054\x0000\x008C" + "\x0000\u0268\x0000\u0276\x0000\u0284";
-		
-		private static int[] ZzUnpackRowMap()
-		{
-			int[] result = new int[51];
-			int offset = 0;
-			offset = ZzUnpackRowMap(ZZ_ROWMAP_PACKED_0, offset, result);
-			return result;
-		}
-		
-		private static int ZzUnpackRowMap(System.String packed, int offset, int[] result)
-		{
-			int i = 0; /* index in packed string  */
-			int j = offset; /* index in unpacked array */
-			int l = packed.Length;
-			while (i < l)
-			{
-				int high = packed[i++] << 16;
-				result[j++] = high | packed[i++];
-			}
-			return j;
-		}
-		
-		/// <summary> The transition table of the DFA</summary>
-		private static readonly int[] ZZ_TRANS = ZzUnpackTrans();
-		
-		private const System.String ZZ_TRANS_PACKED_0 = "\x0001\x0002\x0001\x0003\x0001\x0004\x0007\x0002\x0001\x0005\x0001\x0006\x0001\x0007\x0001\x0002" + "\x000F\x0000\x0002\x0003\x0001\x0000\x0001\x0008\x0001\x0000\x0001\x0009\x0002\x000A\x0001\x000B" + "\x0001\x0003\x0004\x0000\x0001\x0003\x0001\x0004\x0001\x0000\x0001\x000C\x0001\x0000\x0001\x0009" + "\x0002\x000D\x0001\x000E\x0001\x0004\x0004\x0000\x0001\x0003\x0001\x0004\x0001\x000F\x0001\x0010" + "\x0001\x0011\x0001\x0012\x0002\x000A\x0001\x000B\x0001\x0013\x0010\x0000\x0001\x0002\x0001\x0000" + "\x0001\x0014\x0001\x0015\x0007\x0000\x0001\x0016\x0004\x0000\x0002\x0017\x0007\x0000\x0001\x0017" + "\x0004\x0000\x0001\x0018\x0001\x0019\x0007\x0000\x0001\x001A\x0005\x0000\x0001\x001B\x0007\x0000" + "\x0001\x000B\x0004\x0000\x0001\x001C\x0001\x001D\x0007\x0000\x0001\x001E\x0004\x0000\x0001\x001F" + "\x0001\x0020\x0007\x0000\x0001\x0021\x0004\x0000\x0001\x0022\x0001\x0023\x0007\x0000\x0001\x0024" + "\x000D\x0000\x0001\x0025\x0004\x0000\
 x0001\x0014\x0001\x0015\x0007\x0000\x0001\x0026\x000D\x0000" + "\x0001\x0027\x0004\x0000\x0002\x0017\x0007\x0000\x0001\x0028\x0004\x0000\x0001\x0003\x0001\x0004" + "\x0001\x000F\x0001\x0008\x0001\x0011\x0001\x0012\x0002\x000A\x0001\x000B\x0001\x0013\x0004\x0000" + "\x0002\x0014\x0001\x0000\x0001\x0029\x0001\x0000\x0001\x0009\x0002\x002A\x0001\x0000\x0001\x0014" + "\x0004\x0000\x0001\x0014\x0001\x0015\x0001\x0000\x0001\x002B\x0001\x0000\x0001\x0009\x0002\x002C" + "\x0001\x002D\x0001\x0015\x0004\x0000\x0001\x0014\x0001\x0015\x0001\x0000\x0001\x0029\x0001\x0000" + "\x0001\x0009\x0002\x002A\x0001\x0000\x0001\x0016\x0004\x0000\x0002\x0017\x0001\x0000\x0001\x002E" + "\x0002\x0000\x0001\x002E\x0002\x0000\x0001\x0017\x0004\x0000\x0002\x0018\x0001\x0000\x0001\x002A" + "\x0001\x0000\x0001\x0009\x0002\x002A\x0001\x0000\x0001\x0018\x0004\x0000\x0001\x0018\x0001\x0019" + "\x0001\x0000\x0001\x002C\x0001\x0000\x0001\x0009\x0002\x002C\x0001\x002D\x0001\x0019\x0004\x0000" + 
-			"\x0001\x0018\x0001\x0019\x0001\x0000\x0001\x002A\x0001\x0000\x0001\x0009\x0002\x002A\x0001\x0000" + "\x0001\x001A\x0005\x0000\x0001\x001B\x0001\x0000\x0001\x002D\x0002\x0000\x0003\x002D\x0001\x001B" + "\x0004\x0000\x0002\x001C\x0001\x0000\x0001\x002F\x0001\x0000\x0001\x0009\x0002\x000A\x0001\x000B" + "\x0001\x001C\x0004\x0000\x0001\x001C\x0001\x001D\x0001\x0000\x0001\x0030\x0001\x0000\x0001\x0009" + "\x0002\x000D\x0001\x000E\x0001\x001D\x0004\x0000\x0001\x001C\x0001\x001D\x0001\x0000\x0001\x002F" + "\x0001\x0000\x0001\x0009\x0002\x000A\x0001\x000B\x0001\x001E\x0004\x0000\x0002\x001F\x0001\x0000" + "\x0001\x000A\x0001\x0000\x0001\x0009\x0002\x000A\x0001\x000B\x0001\x001F\x0004\x0000\x0001\x001F" + "\x0001\x0020\x0001\x0000\x0001\x000D\x0001\x0000\x0001\x0009\x0002\x000D\x0001\x000E\x0001\x0020" + "\x0004\x0000\x0001\x001F\x0001\x0020\x0001\x0000\x0001\x000A\x0001\x0000\x0001\x0009\x0002\x000A" + "\x0001\x000B\x0001\x0021\x0004\x0000\x0002\x0022\x0001\x0000\x0001\x000B\x0002\x0000
 \x0003\x000B" + "\x0001\x0022\x0004\x0000\x0001\x0022\x0001\x0023\x0001\x0000\x0001\x000E\x0002\x0000\x0003\x000E" + "\x0001\x0023\x0004\x0000\x0001\x0022\x0001\x0023\x0001\x0000\x0001\x000B\x0002\x0000\x0003\x000B" + "\x0001\x0024\x0006\x0000\x0001\x000F\x0006\x0000\x0001\x0025\x0004\x0000\x0001\x0014\x0001\x0015" + "\x0001\x0000\x0001\x0031\x0001\x0000\x0001\x0009\x0002\x002A\x0001\x0000\x0001\x0016\x0004\x0000" + "\x0002\x0017\x0001\x0000\x0001\x002E\x0002\x0000\x0001\x002E\x0002\x0000\x0001\x0028\x0004\x0000" + "\x0002\x0014\x0007\x0000\x0001\x0014\x0004\x0000\x0002\x0018\x0007\x0000\x0001\x0018\x0004\x0000" + "\x0002\x001C\x0007\x0000\x0001\x001C\x0004\x0000\x0002\x001F\x0007\x0000\x0001\x001F\x0004\x0000" + "\x0002\x0022\x0007\x0000\x0001\x0022\x0004\x0000\x0002\x0032\x0007\x0000\x0001\x0032\x0004\x0000" + "\x0002\x0014\x0007\x0000\x0001\x0033\x0004\x0000\x0002\x0032\x0001\x0000\x0001\x002E\x0002\x0000" + "\x0001\x002E\x0002\x0000\x0001\x0032\x0004\x0000\x0002\x0014\x0001\x000
 0\x0001\x0031\x0001\x0000" + 
-			"\x0001\x0009\x0002\x002A\x0001\x0000\x0001\x0014\x0003\x0000";
-		
-		private static int[] ZzUnpackTrans()
-		{
-			int[] result = new int[658];
-			int offset = 0;
-			offset = ZzUnpackTrans(ZZ_TRANS_PACKED_0, offset, result);
-			return result;
-		}
-		
-		private static int ZzUnpackTrans(System.String packed, int offset, int[] result)
-		{
-			int i = 0; /* index in packed string  */
-			int j = offset; /* index in unpacked array */
-			int l = packed.Length;
-			while (i < l)
-			{
-				int count = packed[i++];
-				int value_Renamed = packed[i++];
-				value_Renamed--;
-				do 
-					result[j++] = value_Renamed;
-				while (--count > 0);
-			}
-			return j;
-		}
-		
-		
-		/* error codes */
-		private const int ZZ_UNKNOWN_ERROR = 0;
-		private const int ZZ_NO_MATCH = 1;
-		private const int ZZ_PUSHBACK_2BIG = 2;
-		
-		/* error messages for the codes above */
-		private static readonly System.String[] ZZ_ERROR_MSG = new System.String[]{"Unkown internal scanner error", "Error: could not match input", "Error: pushback value was too large"};
-		
-		/// <summary> ZZ_ATTRIBUTE[aState] contains the attributes of state <c>aState</c></summary>
-		private static readonly int[] ZZ_ATTRIBUTE = ZzUnpackAttribute();
-		
-		private const System.String ZZ_ATTRIBUTE_PACKED_0 = "\x0001\x0000\x0001\x0009\x0003\x0001\x0001\x0009\x0001\x0001\x000B\x0000\x0004\x0001\x0002\x0000" + "\x0001\x0001\x0001\x0000\x000F\x0001\x0001\x0000\x0001\x0001\x0003\x0000\x0005\x0001";
-		
-		private static int[] ZzUnpackAttribute()
-		{
-			int[] result = new int[51];
-			int offset = 0;
-			offset = ZzUnpackAttribute(ZZ_ATTRIBUTE_PACKED_0, offset, result);
-			return result;
-		}
-		
-		private static int ZzUnpackAttribute(System.String packed, int offset, int[] result)
-		{
-			int i = 0; /* index in packed string  */
-			int j = offset; /* index in unpacked array */
-			int l = packed.Length;
-			while (i < l)
-			{
-				int count = packed[i++];
-				int value_Renamed = packed[i++];
-				do 
-					result[j++] = value_Renamed;
-				while (--count > 0);
-			}
-			return j;
-		}
-		
-		/// <summary>the input device </summary>
-		private System.IO.TextReader zzReader;
-		
-		/// <summary>the current state of the DFA </summary>
-		private int zzState;
-		
-		/// <summary>the current lexical state </summary>
-		private int zzLexicalState = YYINITIAL;
-		
-		/// <summary>this buffer contains the current text to be matched and is
-		/// the source of the yytext() string 
-		/// </summary>
-		private char[] zzBuffer = new char[ZZ_BUFFERSIZE];
-		
-		/// <summary>the textposition at the last accepting state </summary>
-		private int zzMarkedPos;
-		
-		/// <summary>the textposition at the last state to be included in yytext </summary>
-		private int zzPushbackPos;
-		
-		/// <summary>the current text position in the buffer </summary>
-		private int zzCurrentPos;
-		
-		/// <summary>startRead marks the beginning of the yytext() string in the buffer </summary>
-		private int zzStartRead;
-		
-		/// <summary>endRead marks the last character in the buffer, that has been read
-		/// from input 
-		/// </summary>
-		private int zzEndRead;
-		
-		/// <summary>number of newlines encountered up to the start of the matched text </summary>
-		private int yyline;
-		
-		/// <summary>the number of characters up to the start of the matched text </summary>
-		private int yychar;
-		
-		/// <summary> the number of characters from the last newline up to the start of the 
-		/// matched text
-		/// </summary>
-		private int yycolumn;
+    
+    
+    /// <summary> This class is a scanner generated by 
+    /// <a href="http://www.jflex.de/">JFlex</a> 1.4.1
+    /// on 9/4/08 6:49 PM from the specification file
+    /// <tt>/tango/mike/src/lucene.standarddigit/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex</tt>
+    /// </summary>
+    class StandardTokenizerImpl
+    {
+        
+        /// <summary>This character denotes the end of file </summary>
+        public const int YYEOF = - 1;
+        
+        /// <summary>initial size of the lookahead buffer </summary>
+        private const int ZZ_BUFFERSIZE = 16384;
+        
+        /// <summary>lexical states </summary>
+        public const int YYINITIAL = 0;
+        
+        /// <summary> Translates characters to character classes</summary>
+        private const System.String ZZ_CMAP_PACKED = "\x0009\x0000\x0001\x0000\x0001\x000D\x0001\x0000\x0001\x0000\x0001\x000C\x0012\x0000\x0001\x0000\x0005\x0000\x0001\x0005" + "\x0001\x0003\x0004\x0000\x0001\x0009\x0001\x0007\x0001\x0004\x0001\x0009\x000A\x0002\x0006\x0000\x0001\x0006\x001A\x000A" + "\x0004\x0000\x0001\x0008\x0001\x0000\x001A\x000A\x002F\x0000\x0001\x000A\x000A\x0000\x0001\x000A\x0004\x0000\x0001\x000A" + "\x0005\x0000\x0017\x000A\x0001\x0000\x001F\x000A\x0001\x0000\u0128\x000A\x0002\x0000\x0012\x000A\x001C\x0000\x005E\x000A" + "\x0002\x0000\x0009\x000A\x0002\x0000\x0007\x000A\x000E\x0000\x0002\x000A\x000E\x0000\x0005\x000A\x0009\x0000\x0001\x000A" + "\x008B\x0000\x0001\x000A\x000B\x0000\x0001\x000A\x0001\x0000\x0003\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0014\x000A" + "\x0001\x0000\x002C\x000A\x0001\x0000\x0008\x000A\x0002\x0000\x001A\x000A\x000C\x0000\x0082\x000A\x000A\x0000\x0039\x000A" + "\x0002\x0000\x0002\x000A\x0002\x0000\x0002\x000A\x0003\x0000\x0026\x
 000A\x0002\x0000\x0002\x000A\x0037\x0000\x0026\x000A" + "\x0002\x0000\x0001\x000A\x0007\x0000\x0027\x000A\x0048\x0000\x001B\x000A\x0005\x0000\x0003\x000A\x002E\x0000\x001A\x000A" + "\x0005\x0000\x000B\x000A\x0015\x0000\x000A\x0002\x0007\x0000\x0063\x000A\x0001\x0000\x0001\x000A\x000F\x0000\x0002\x000A" + "\x0009\x0000\x000A\x0002\x0003\x000A\x0013\x0000\x0001\x000A\x0001\x0000\x001B\x000A\x0053\x0000\x0026\x000A\u015f\x0000" + "\x0035\x000A\x0003\x0000\x0001\x000A\x0012\x0000\x0001\x000A\x0007\x0000\x000A\x000A\x0004\x0000\x000A\x0002\x0015\x0000" + "\x0008\x000A\x0002\x0000\x0002\x000A\x0002\x0000\x0016\x000A\x0001\x0000\x0007\x000A\x0001\x0000\x0001\x000A\x0003\x0000" + "\x0004\x000A\x0022\x0000\x0002\x000A\x0001\x0000\x0003\x000A\x0004\x0000\x000A\x0002\x0002\x000A\x0013\x0000\x0006\x000A" + "\x0004\x0000\x0002\x000A\x0002\x0000\x0016\x000A\x0001\x0000\x0007\x000A\x0001\x0000\x0002\x000A\x0001\x0000\x0002\x000A" + 
+            "\x0001\x0000\x0002\x000A\x001F\x0000\x0004\x000A\x0001\x0000\x0001\x000A\x0007\x0000\x000A\x0002\x0002\x0000\x0003\x000A" + "\x0010\x0000\x0007\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0003\x000A\x0001\x0000\x0016\x000A\x0001\x0000\x0007\x000A" + "\x0001\x0000\x0002\x000A\x0001\x0000\x0005\x000A\x0003\x0000\x0001\x000A\x0012\x0000\x0001\x000A\x000F\x0000\x0001\x000A" + "\x0005\x0000\x000A\x0002\x0015\x0000\x0008\x000A\x0002\x0000\x0002\x000A\x0002\x0000\x0016\x000A\x0001\x0000\x0007\x000A" + "\x0001\x0000\x0002\x000A\x0002\x0000\x0004\x000A\x0003\x0000\x0001\x000A\x001E\x0000\x0002\x000A\x0001\x0000\x0003\x000A" + "\x0004\x0000\x000A\x0002\x0015\x0000\x0006\x000A\x0003\x0000\x0003\x000A\x0001\x0000\x0004\x000A\x0003\x0000\x0002\x000A" + "\x0001\x0000\x0001\x000A\x0001\x0000\x0002\x000A\x0003\x0000\x0002\x000A\x0003\x0000\x0003\x000A\x0003\x0000\x0008\x000A" + "\x0001\x0000\x0003\x000A\x002D\x0000\x0009\x0002\x0015\x0000\x0008\x000A\x0001\x0000\x0003\x000A\x0001\x0000\
 x0017\x000A" + "\x0001\x0000\x000A\x000A\x0001\x0000\x0005\x000A\x0026\x0000\x0002\x000A\x0004\x0000\x000A\x0002\x0015\x0000\x0008\x000A" + "\x0001\x0000\x0003\x000A\x0001\x0000\x0017\x000A\x0001\x0000\x000A\x000A\x0001\x0000\x0005\x000A\x0024\x0000\x0001\x000A" + "\x0001\x0000\x0002\x000A\x0004\x0000\x000A\x0002\x0015\x0000\x0008\x000A\x0001\x0000\x0003\x000A\x0001\x0000\x0017\x000A" + "\x0001\x0000\x0010\x000A\x0026\x0000\x0002\x000A\x0004\x0000\x000A\x0002\x0015\x0000\x0012\x000A\x0003\x0000\x0018\x000A" + "\x0001\x0000\x0009\x000A\x0001\x0000\x0001\x000A\x0002\x0000\x0007\x000A\x0039\x0000\x0001\x0001\x0030\x000A\x0001\x0001" + "\x0002\x000A\x000C\x0001\x0007\x000A\x0009\x0001\x000A\x0002\x0027\x0000\x0002\x000A\x0001\x0000\x0001\x000A\x0002\x0000" + "\x0002\x000A\x0001\x0000\x0001\x000A\x0002\x0000\x0001\x000A\x0006\x0000\x0004\x000A\x0001\x0000\x0007\x000A\x0001\x0000" + "\x0003\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0001\x000A\x0002\x0000\x0002\x000A\x0001\x0000\x0004\x00
 0A\x0001\x0000" + 
+            "\x0002\x000A\x0009\x0000\x0001\x000A\x0002\x0000\x0005\x000A\x0001\x0000\x0001\x000A\x0009\x0000\x000A\x0002\x0002\x0000" + "\x0002\x000A\x0022\x0000\x0001\x000A\x001F\x0000\x000A\x0002\x0016\x0000\x0008\x000A\x0001\x0000\x0022\x000A\x001D\x0000" + "\x0004\x000A\x0074\x0000\x0022\x000A\x0001\x0000\x0005\x000A\x0001\x0000\x0002\x000A\x0015\x0000\x000A\x0002\x0006\x0000" + "\x0006\x000A\x004A\x0000\x0026\x000A\x000A\x0000\x0027\x000A\x0009\x0000\x005A\x000A\x0005\x0000\x0044\x000A\x0005\x0000" + "\x0052\x000A\x0006\x0000\x0007\x000A\x0001\x0000\x003F\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0004\x000A\x0002\x0000" + "\x0007\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0004\x000A\x0002\x0000\x0027\x000A\x0001\x0000\x0001\x000A\x0001\x0000" + "\x0004\x000A\x0002\x0000\x001F\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0004\x000A\x0002\x0000\x0007\x000A\x0001\x0000" + "\x0001\x000A\x0001\x0000\x0004\x000A\x0002\x0000\x0007\x000A\x0001\x0000\x0007\x000A\x0001\x0000\x0017\x000A\
 x0001\x0000" + "\x001F\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0004\x000A\x0002\x0000\x0007\x000A\x0001\x0000\x0027\x000A\x0001\x0000" + "\x0013\x000A\x000E\x0000\x0009\x0002\x002E\x0000\x0055\x000A\x000C\x0000\u026c\x000A\x0002\x0000\x0008\x000A\x000A\x0000" + "\x001A\x000A\x0005\x0000\x004B\x000A\x0095\x0000\x0034\x000A\x002C\x0000\x000A\x0002\x0026\x0000\x000A\x0002\x0006\x0000" + "\x0058\x000A\x0008\x0000\x0029\x000A\u0557\x0000\x009C\x000A\x0004\x0000\x005A\x000A\x0006\x0000\x0016\x000A\x0002\x0000" + "\x0006\x000A\x0002\x0000\x0026\x000A\x0002\x0000\x0006\x000A\x0002\x0000\x0008\x000A\x0001\x0000\x0001\x000A\x0001\x0000" + "\x0001\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x001F\x000A\x0002\x0000\x0035\x000A\x0001\x0000\x0007\x000A\x0001\x0000" + "\x0001\x000A\x0003\x0000\x0003\x000A\x0001\x0000\x0007\x000A\x0003\x0000\x0004\x000A\x0002\x0000\x0006\x000A\x0004\x0000" + "\x000D\x000A\x0005\x0000\x0003\x000A\x0001\x0000\x0007\x000A\x0082\x0000\x0001\x000A\x0082\x0000\x0001\x00
 0A\x0004\x0000" + 
+            "\x0001\x000A\x0002\x0000\x000A\x000A\x0001\x0000\x0001\x000A\x0003\x0000\x0005\x000A\x0006\x0000\x0001\x000A\x0001\x0000" + "\x0001\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0004\x000A\x0001\x0000\x0003\x000A\x0001\x0000\x0007\x000A\u0ecb\x0000" + "\x0002\x000A\x002A\x0000\x0005\x000A\x000A\x0000\x0001\x000B\x0054\x000B\x0008\x000B\x0002\x000B\x0002\x000B\x005A\x000B" + "\x0001\x000B\x0003\x000B\x0006\x000B\x0028\x000B\x0003\x000B\x0001\x0000\x005E\x000A\x0011\x0000\x0018\x000A\x0038\x0000" + "\x0010\x000B\u0100\x0000\x0080\x000B\x0080\x0000\u19b6\x000B\x000A\x000B\x0040\x0000\u51a6\x000B\x005A\x000B\u048d\x000A" + "\u0773\x0000\u2ba4\x000A\u215c\x0000\u012e\x000B\x00D2\x000B\x0007\x000A\x000C\x0000\x0005\x000A\x0005\x0000\x0001\x000A" + "\x0001\x0000\x000A\x000A\x0001\x0000\x000D\x000A\x0001\x0000\x0005\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0002\x000A" + "\x0001\x0000\x0002\x000A\x0001\x0000\x006C\x000A\x0021\x0000\u016b\x000A\x0012\x0000\x0040\x000A\x0002\x0000\
 x0036\x000A" + "\x0028\x0000\x000C\x000A\x0074\x0000\x0003\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0087\x000A\x0013\x0000\x000A\x0002" + "\x0007\x0000\x001A\x000A\x0006\x0000\x001A\x000A\x000A\x0000\x0001\x000B\x003A\x000B\x001F\x000A\x0003\x0000\x0006\x000A" + "\x0002\x0000\x0006\x000A\x0002\x0000\x0006\x000A\x0002\x0000\x0003\x000A\x0023\x0000";
+        
+        /// <summary> Translates characters to character classes</summary>
+        private static readonly char[] ZZ_CMAP = ZzUnpackCMap(ZZ_CMAP_PACKED);
+        
+        /// <summary> Translates DFA states to action switch labels.</summary>
+        private static readonly int[] ZZ_ACTION = ZzUnpackAction();
+        
+        private const System.String ZZ_ACTION_PACKED_0 = "\x0001\x0000\x0001\x0001\x0003\x0002\x0001\x0003\x0001\x0001\x000B\x0000\x0001\x0002\x0003\x0004" + "\x0002\x0000\x0001\x0005\x0001\x0000\x0001\x0005\x0003\x0004\x0006\x0005\x0001\x0006\x0001\x0004" + "\x0002\x0007\x0001\x0008\x0001\x0000\x0001\x0008\x0003\x0000\x0002\x0008\x0001\x0009\x0001\x000A" + "\x0001\x0004";
+        
+        private static int[] ZzUnpackAction()
+        {
+            int[] result = new int[51];
+            int offset = 0;
+            offset = ZzUnpackAction(ZZ_ACTION_PACKED_0, offset, result);
+            return result;
+        }
+        
+        private static int ZzUnpackAction(System.String packed, int offset, int[] result)
+        {
+            int i = 0; /* index in packed string  */
+            int j = offset; /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int count = packed[i++];
+                int value_Renamed = packed[i++];
+                do 
+                    result[j++] = value_Renamed;
+                while (--count > 0);
+            }
+            return j;
+        }
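+        // Illustrative example (editorial, not part of the generated code): the
+        // packed string is a run-length encoding of (count, value) char pairs.
+        // The prefix "\x0001\x0000\x0001\x0001\x0003\x0002" of ZZ_ACTION_PACKED_0
+        // unpacks to { 0, 1, 2, 2, 2 }: one 0, one 1, then three 2s.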
+        
+        
+        /// <summary> Translates a state to a row index in the transition table</summary>
+        private static readonly int[] ZZ_ROWMAP = ZzUnpackRowMap();
+        
+        private const System.String ZZ_ROWMAP_PACKED_0 = "\x0000\x0000\x0000\x000E\x0000\x001C\x0000\x002A\x0000\x0038\x0000\x000E\x0000\x0046\x0000\x0054" + "\x0000\x0062\x0000\x0070\x0000\x007E\x0000\x008C\x0000\x009A\x0000\x00A8\x0000\x00B6\x0000\x00C4" + "\x0000\x00D2\x0000\x00E0\x0000\x00EE\x0000\x00FC\x0000\u010a\x0000\u0118\x0000\u0126\x0000\u0134" + "\x0000\u0142\x0000\u0150\x0000\u015e\x0000\u016c\x0000\u017a\x0000\u0188\x0000\u0196\x0000\u01a4" + "\x0000\u01b2\x0000\u01c0\x0000\u01ce\x0000\u01dc\x0000\u01ea\x0000\u01f8\x0000\x00D2\x0000\u0206" + "\x0000\u0214\x0000\u0222\x0000\u0230\x0000\u023e\x0000\u024c\x0000\u025a\x0000\x0054\x0000\x008C" + "\x0000\u0268\x0000\u0276\x0000\u0284";
+        
+        private static int[] ZzUnpackRowMap()
+        {
+            int[] result = new int[51];
+            int offset = 0;
+            offset = ZzUnpackRowMap(ZZ_ROWMAP_PACKED_0, offset, result);
+            return result;
+        }
+        
+        private static int ZzUnpackRowMap(System.String packed, int offset, int[] result)
+        {
+            int i = 0; /* index in packed string  */
+            int j = offset; /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int high = packed[i++] << 16;
+                result[j++] = high | packed[i++];
+            }
+            return j;
+        }
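+        // Illustrative example (editorial, not part of the generated code):
+        // unlike the run-length tables, each row-map entry is packed as two
+        // consecutive chars, high half first, recombined as (high << 16) | low.
+        // The leading pair "\x0000\x0000" of ZZ_ROWMAP_PACKED_0 decodes to 0,
+        // and the next pair "\x0000\x000E" to 14.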
+        
+        /// <summary> The transition table of the DFA</summary>
+        private static readonly int[] ZZ_TRANS = ZzUnpackTrans();
+        
+        private const System.String ZZ_TRANS_PACKED_0 = "\x0001\x0002\x0001\x0003\x0001\x0004\x0007\x0002\x0001\x0005\x0001\x0006\x0001\x0007\x0001\x0002" + "\x000F\x0000\x0002\x0003\x0001\x0000\x0001\x0008\x0001\x0000\x0001\x0009\x0002\x000A\x0001\x000B" + "\x0001\x0003\x0004\x0000\x0001\x0003\x0001\x0004\x0001\x0000\x0001\x000C\x0001\x0000\x0001\x0009" + "\x0002\x000D\x0001\x000E\x0001\x0004\x0004\x0000\x0001\x0003\x0001\x0004\x0001\x000F\x0001\x0010" + "\x0001\x0011\x0001\x0012\x0002\x000A\x0001\x000B\x0001\x0013\x0010\x0000\x0001\x0002\x0001\x0000" + "\x0001\x0014\x0001\x0015\x0007\x0000\x0001\x0016\x0004\x0000\x0002\x0017\x0007\x0000\x0001\x0017" + "\x0004\x0000\x0001\x0018\x0001\x0019\x0007\x0000\x0001\x001A\x0005\x0000\x0001\x001B\x0007\x0000" + "\x0001\x000B\x0004\x0000\x0001\x001C\x0001\x001D\x0007\x0000\x0001\x001E\x0004\x0000\x0001\x001F" + "\x0001\x0020\x0007\x0000\x0001\x0021\x0004\x0000\x0001\x0022\x0001\x0023\x0007\x0000\x0001\x0024" + "\x000D\x0000\x0001\x0025\x0004\
 x0000\x0001\x0014\x0001\x0015\x0007\x0000\x0001\x0026\x000D\x0000" + "\x0001\x0027\x0004\x0000\x0002\x0017\x0007\x0000\x0001\x0028\x0004\x0000\x0001\x0003\x0001\x0004" + "\x0001\x000F\x0001\x0008\x0001\x0011\x0001\x0012\x0002\x000A\x0001\x000B\x0001\x0013\x0004\x0000" + "\x0002\x0014\x0001\x0000\x0001\x0029\x0001\x0000\x0001\x0009\x0002\x002A\x0001\x0000\x0001\x0014" + "\x0004\x0000\x0001\x0014\x0001\x0015\x0001\x0000\x0001\x002B\x0001\x0000\x0001\x0009\x0002\x002C" + "\x0001\x002D\x0001\x0015\x0004\x0000\x0001\x0014\x0001\x0015\x0001\x0000\x0001\x0029\x0001\x0000" + "\x0001\x0009\x0002\x002A\x0001\x0000\x0001\x0016\x0004\x0000\x0002\x0017\x0001\x0000\x0001\x002E" + "\x0002\x0000\x0001\x002E\x0002\x0000\x0001\x0017\x0004\x0000\x0002\x0018\x0001\x0000\x0001\x002A" + "\x0001\x0000\x0001\x0009\x0002\x002A\x0001\x0000\x0001\x0018\x0004\x0000\x0001\x0018\x0001\x0019" + "\x0001\x0000\x0001\x002C\x0001\x0000\x0001\x0009\x0002\x002C\x0001\x002D\x0001\x0019\x0004\x0000" + 
+            "\x0001\x0018\x0001\x0019\x0001\x0000\x0001\x002A\x0001\x0000\x0001\x0009\x0002\x002A\x0001\x0000" + "\x0001\x001A\x0005\x0000\x0001\x001B\x0001\x0000\x0001\x002D\x0002\x0000\x0003\x002D\x0001\x001B" + "\x0004\x0000\x0002\x001C\x0001\x0000\x0001\x002F\x0001\x0000\x0001\x0009\x0002\x000A\x0001\x000B" + "\x0001\x001C\x0004\x0000\x0001\x001C\x0001\x001D\x0001\x0000\x0001\x0030\x0001\x0000\x0001\x0009" + "\x0002\x000D\x0001\x000E\x0001\x001D\x0004\x0000\x0001\x001C\x0001\x001D\x0001\x0000\x0001\x002F" + "\x0001\x0000\x0001\x0009\x0002\x000A\x0001\x000B\x0001\x001E\x0004\x0000\x0002\x001F\x0001\x0000" + "\x0001\x000A\x0001\x0000\x0001\x0009\x0002\x000A\x0001\x000B\x0001\x001F\x0004\x0000\x0001\x001F" + "\x0001\x0020\x0001\x0000\x0001\x000D\x0001\x0000\x0001\x0009\x0002\x000D\x0001\x000E\x0001\x0020" + "\x0004\x0000\x0001\x001F\x0001\x0020\x0001\x0000\x0001\x000A\x0001\x0000\x0001\x0009\x0002\x000A" + "\x0001\x000B\x0001\x0021\x0004\x0000\x0002\x0022\x0001\x0000\x0001\x000B\x0
 002\x0000\x0003\x000B" + "\x0001\x0022\x0004\x0000\x0001\x0022\x0001\x0023\x0001\x0000\x0001\x000E\x0002\x0000\x0003\x000E" + "\x0001\x0023\x0004\x0000\x0001\x0022\x0001\x0023\x0001\x0000\x0001\x000B\x0002\x0000\x0003\x000B" + "\x0001\x0024\x0006\x0000\x0001\x000F\x0006\x0000\x0001\x0025\x0004\x0000\x0001\x0014\x0001\x0015" + "\x0001\x0000\x0001\x0031\x0001\x0000\x0001\x0009\x0002\x002A\x0001\x0000\x0001\x0016\x0004\x0000" + "\x0002\x0017\x0001\x0000\x0001\x002E\x0002\x0000\x0001\x002E\x0002\x0000\x0001\x0028\x0004\x0000" + "\x0002\x0014\x0007\x0000\x0001\x0014\x0004\x0000\x0002\x0018\x0007\x0000\x0001\x0018\x0004\x0000" + "\x0002\x001C\x0007\x0000\x0001\x001C\x0004\x0000\x0002\x001F\x0007\x0000\x0001\x001F\x0004\x0000" + "\x0002\x0022\x0007\x0000\x0001\x0022\x0004\x0000\x0002\x0032\x0007\x0000\x0001\x0032\x0004\x0000" + "\x0002\x0014\x0007\x0000\x0001\x0033\x0004\x0000\x0002\x0032\x0001\x0000\x0001\x002E\x0002\x0000" + "\x0001\x002E\x0002\x0000\x0001\x0032\x0004\x0000\x0002\x0014\x
 0001\x0000\x0001\x0031\x0001\x0000" + 
+            "\x0001\x0009\x0002\x002A\x0001\x0000\x0001\x0014\x0003\x0000";
+        
+        private static int[] ZzUnpackTrans()
+        {
+            int[] result = new int[658];
+            int offset = 0;
+            offset = ZzUnpackTrans(ZZ_TRANS_PACKED_0, offset, result);
+            return result;
+        }
+        
+        private static int ZzUnpackTrans(System.String packed, int offset, int[] result)
+        {
+            int i = 0; /* index in packed string  */
+            int j = offset; /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int count = packed[i++];
+                int value_Renamed = packed[i++];
+                value_Renamed--;
+                do 
+                    result[j++] = value_Renamed;
+                while (--count > 0);
+            }
+            return j;
+        }
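+        // Illustrative example (editorial, not part of the generated code):
+        // transition values are stored shifted up by one so that -1, meaning
+        // "no transition", can be encoded as the char \x0000; e.g. the pair
+        // "\x000F\x0000" unpacks to fifteen entries of -1.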
+        
+        
+        /* error codes */
+        private const int ZZ_UNKNOWN_ERROR = 0;
+        private const int ZZ_NO_MATCH = 1;
+        private const int ZZ_PUSHBACK_2BIG = 2;
+        
+        /* error messages for the codes above */
+        private static readonly System.String[] ZZ_ERROR_MSG = new System.String[]{"Unknown internal scanner error", "Error: could not match input", "Error: pushback value was too large"};
+        
+        /// <summary> ZZ_ATTRIBUTE[aState] contains the attributes of state <c>aState</c></summary>
+        private static readonly int[] ZZ_ATTRIBUTE = ZzUnpackAttribute();
+        
+        private const System.String ZZ_ATTRIBUTE_PACKED_0 = "\x0001\x0000\x0001\x0009\x0003\x0001\x0001\x0009\x0001\x0001\x000B\x0000\x0004\x0001\x0002\x0000" + "\x0001\x0001\x0001\x0000\x000F\x0001\x0001\x0000\x0001\x0001\x0003\x0000\x0005\x0001";
+        
+        private static int[] ZzUnpackAttribute()
+        {
+            int[] result = new int[51];
+            int offset = 0;
+            offset = ZzUnpackAttribute(ZZ_ATTRIBUTE_PACKED_0, offset, result);
+            return result;
+        }
+        
+        private static int ZzUnpackAttribute(System.String packed, int offset, int[] result)
+        {
+            int i = 0; /* index in packed string  */
+            int j = offset; /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int count = packed[i++];
+                int value_Renamed = packed[i++];
+                do 
+                    result[j++] = value_Renamed;
+                while (--count > 0);
+            }
+            return j;
+        }
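+        // Illustrative note (editorial, not part of the generated code): the
+        // attribute values are bit flags consumed by GetNextToken(): flag 1
+        // marks an accepting state and flag 8 additionally ends the DFA loop
+        // at once, so the packed value \x0009 = 1 | 8 marks a final accepting
+        // state.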
+        
+        /// <summary>the input device </summary>
+        private System.IO.TextReader zzReader;
+        
+        /// <summary>the current state of the DFA </summary>
+        private int zzState;
+        
+        /// <summary>the current lexical state </summary>
+        private int zzLexicalState = YYINITIAL;
+        
+        /// <summary>this buffer contains the current text to be matched and is
+        /// the source of the yytext() string 
+        /// </summary>
+        private char[] zzBuffer = new char[ZZ_BUFFERSIZE];
+        
+        /// <summary>the text position at the last accepting state </summary>
+        private int zzMarkedPos;
+        
+        /// <summary>the text position at the last state to be included in yytext </summary>
+        private int zzPushbackPos;
+        
+        /// <summary>the current text position in the buffer </summary>
+        private int zzCurrentPos;
+        
+        /// <summary>startRead marks the beginning of the yytext() string in the buffer </summary>
+        private int zzStartRead;
+        
+        /// <summary>endRead marks the last character in the buffer that has been read
+        /// from input 
+        /// </summary>
+        private int zzEndRead;
+        
+        /// <summary>number of newlines encountered up to the start of the matched text </summary>
+        private int yyline;
+        
+        /// <summary>the number of characters up to the start of the matched text </summary>
+        private int yychar;
+        
+        /// <summary> the number of characters from the last newline up to the start of the 
+        /// matched text
+        /// </summary>
+        private int yycolumn;
 
         /// <summary> zzAtBOL == true &lt;=&gt; the scanner is currently at the beginning of a line</summary>
-		private bool zzAtBOL = true;
+        private bool zzAtBOL = true;
 
         /// <summary>zzAtEOF == true &lt;=&gt; the scanner is at the EOF </summary>
-		private bool zzAtEOF;
-		
-		/* user code: */
-		
-		public static readonly int ALPHANUM;
-		public static readonly int APOSTROPHE;
-		public static readonly int ACRONYM;
-		public static readonly int COMPANY;
-		public static readonly int EMAIL;
-		public static readonly int HOST;
-		public static readonly int NUM;
-		public static readonly int CJ;
-		/// <deprecated> this solves a bug where HOSTs that end with '.' are identified
-		/// as ACRONYMs.
-		/// </deprecated>
+        private bool zzAtEOF;
+        
+        /* user code: */
+        
+        public static readonly int ALPHANUM;
+        public static readonly int APOSTROPHE;
+        public static readonly int ACRONYM;
+        public static readonly int COMPANY;
+        public static readonly int EMAIL;
+        public static readonly int HOST;
+        public static readonly int NUM;
+        public static readonly int CJ;
+        /// <deprecated> this solves a bug where HOSTs that end with '.' are identified
+        /// as ACRONYMs.
+        /// </deprecated>
         [Obsolete("this solves a bug where HOSTs that end with '.' are identified as ACRONYMs")]
-		public static readonly int ACRONYM_DEP;
-		
-		public static readonly System.String[] TOKEN_TYPES;
-		
-		public int Yychar()
-		{
-			return yychar;
-		}
+        public static readonly int ACRONYM_DEP;
+        
+        public static readonly System.String[] TOKEN_TYPES;
+        
+        public int Yychar()
+        {
+            return yychar;
+        }
 
         /*
         * Resets the Tokenizer to a new Reader.
@@ -270,438 +270,438 @@ namespace Lucene.Net.Analysis.Standard
             }
             Yyreset(r);
         }
-		
-		/// <summary> Fills Lucene token with the current token text.</summary>
-		internal void  GetText(Token t)
-		{
-			t.SetTermBuffer(zzBuffer, zzStartRead, zzMarkedPos - zzStartRead);
-		}
-		
-		/// <summary> Fills TermAttribute with the current token text.</summary>
-		internal void  GetText(ITermAttribute t)
-		{
-			t.SetTermBuffer(zzBuffer, zzStartRead, zzMarkedPos - zzStartRead);
-		}
-		
-		
-		/// <summary> Creates a new scanner
-		/// There is also a java.io.InputStream version of this constructor.
-		/// 
-		/// </summary>
+        
+        /// <summary> Fills Lucene token with the current token text.</summary>
+        internal void  GetText(Token t)
+        {
+            t.SetTermBuffer(zzBuffer, zzStartRead, zzMarkedPos - zzStartRead);
+        }
+        
+        /// <summary> Fills TermAttribute with the current token text.</summary>
+        internal void  GetText(ITermAttribute t)
+        {
+            t.SetTermBuffer(zzBuffer, zzStartRead, zzMarkedPos - zzStartRead);
+        }
+        
+        
+        /// <summary> Creates a new scanner.
+        /// There is also a java.io.InputStream version of this constructor.
+        /// 
+        /// </summary>
         /// <param name="in_Renamed"> the java.io.Reader to read input from.
-		/// </param>
-		internal StandardTokenizerImpl(System.IO.TextReader in_Renamed)
-		{
-			this.zzReader = in_Renamed;
-		}
-		
-		/// <summary> Creates a new scanner.
-		/// There is also java.io.Reader version of this constructor.
-		/// 
-		/// </summary>
+        /// </param>
+        internal StandardTokenizerImpl(System.IO.TextReader in_Renamed)
+        {
+            this.zzReader = in_Renamed;
+        }
+        
+        /// <summary> Creates a new scanner.
+        /// There is also a java.io.Reader version of this constructor.
+        /// 
+        /// </summary>
         /// <param name="in_Renamed"> the java.io.Inputstream to read input from.
-		/// </param>
-		internal StandardTokenizerImpl(System.IO.Stream in_Renamed):this(new System.IO.StreamReader(in_Renamed, System.Text.Encoding.Default))
-		{
-		}
-		
-		/// <summary> Unpacks the compressed character translation table.
-		/// 
-		/// </summary>
-		/// <param name="packed">  the packed character translation table
-		/// </param>
-		/// <returns>         the unpacked character translation table
-		/// </returns>
-		private static char[] ZzUnpackCMap(System.String packed)
-		{
-			char[] map = new char[0x10000];
-			int i = 0; /* index in packed string  */
-			int j = 0; /* index in unpacked array */
-			while (i < 1154)
-			{
-				int count = packed[i++];
-				char value_Renamed = packed[i++];
-				do 
-					map[j++] = value_Renamed;
-				while (--count > 0);
-			}
-			return map;
-		}
-		
-		
-		/// <summary> Refills the input buffer.
-		/// </summary>
-		/// <returns><c>false</c>, iff there was new input.
-		/// 
-		/// </returns>
-		/// <exception cref="System.IO.IOException"> if any I/O-Error occurs
-		/// </exception>
-		private bool ZzRefill()
-		{
-			
-			/* first: make room (if you can) */
-			if (zzStartRead > 0)
-			{
-				Array.Copy(zzBuffer, zzStartRead, zzBuffer, 0, zzEndRead - zzStartRead);
-				
-				/* translate stored positions */
-				zzEndRead -= zzStartRead;
-				zzCurrentPos -= zzStartRead;
-				zzMarkedPos -= zzStartRead;
-				zzPushbackPos -= zzStartRead;
-				zzStartRead = 0;
-			}
-			
-			/* is the buffer big enough? */
-			if (zzCurrentPos >= zzBuffer.Length)
-			{
-				/* if not: blow it up */
-				char[] newBuffer = new char[zzCurrentPos * 2];
-				Array.Copy(zzBuffer, 0, newBuffer, 0, zzBuffer.Length);
-				zzBuffer = newBuffer;
-			}
-			
-			/* finally: fill the buffer with new input */
-			int numRead = zzReader.Read(zzBuffer, zzEndRead, zzBuffer.Length - zzEndRead);
-			
-			if (numRead <= 0)
-			{
-				return true;
-			}
-			else
-			{
-				zzEndRead += numRead;
-				return false;
-			}
-		}
-		
-		
-		/// <summary> Closes the input stream.</summary>
-		public void  Yyclose()
-		{
-			zzAtEOF = true; /* indicate end of file */
-			zzEndRead = zzStartRead; /* invalidate buffer    */
-			
-			if (zzReader != null)
-				zzReader.Close();
-		}
-		
-		
-		/// <summary> Resets the scanner to read from a new input stream.
-		/// Does not close the old reader.
-		/// 
-		/// All internal variables are reset, the old input stream 
-		/// <b>cannot</b> be reused (internal buffer is discarded and lost).
-		/// Lexical state is set to <tt>ZZ_INITIAL</tt>.
-		/// 
-		/// </summary>
-		/// <param name="reader">  the new input stream 
-		/// </param>
-		public void  Yyreset(System.IO.TextReader reader)
-		{
-			zzReader = reader;
-			zzAtBOL = true;
-			zzAtEOF = false;
-			zzEndRead = zzStartRead = 0;
-			zzCurrentPos = zzMarkedPos = zzPushbackPos = 0;
-			yyline = yychar = yycolumn = 0;
-			zzLexicalState = YYINITIAL;
-		}
-		
-		
-		/// <summary> Returns the current lexical state.</summary>
-		public int Yystate()
-		{
-			return zzLexicalState;
-		}
-		
-		
-		/// <summary> Enters a new lexical state
-		/// 
-		/// </summary>
-		/// <param name="newState">the new lexical state
-		/// </param>
-		public void  Yybegin(int newState)
-		{
-			zzLexicalState = newState;
-		}
-		
-		
-		/// <summary> Returns the text matched by the current regular expression.</summary>
-		public System.String Yytext()
-		{
-			return new System.String(zzBuffer, zzStartRead, zzMarkedPos - zzStartRead);
-		}
-		
-		
-		/// <summary> Returns the character at position <tt>pos</tt> from the 
-		/// matched text. 
-		/// 
-		/// It is equivalent to yytext().charAt(pos), but faster
-		/// 
-		/// </summary>
-		/// <param name="pos">the position of the character to fetch. 
-		/// A value from 0 to yylength()-1.
-		/// 
-		/// </param>
-		/// <returns> the character at position pos
-		/// </returns>
-		public char Yycharat(int pos)
-		{
-			return zzBuffer[zzStartRead + pos];
-		}
-		
-		
-		/// <summary> Returns the length of the matched text region.</summary>
-		public int Yylength()
-		{
-			return zzMarkedPos - zzStartRead;
-		}
-		
-		
-		/// <summary> Reports an error that occured while scanning.
-		/// 
-		/// In a wellformed scanner (no or only correct usage of 
-		/// yypushback(int) and a match-all fallback rule) this method 
-		/// will only be called with things that "Can't Possibly Happen".
-		/// If this method is called, something is seriously wrong
-		/// (e.g. a JFlex bug producing a faulty scanner etc.).
-		/// 
-		/// Usual syntax/scanner level error handling should be done
-		/// in error fallback rules.
-		/// 
-		/// </summary>
-		/// <param name="errorCode"> the code of the errormessage to display
-		/// </param>
-		private void  ZzScanError(int errorCode)
-		{
-			System.String message;
-			try
-			{
-				message = ZZ_ERROR_MSG[errorCode];
-			}
-			catch (System.IndexOutOfRangeException)
-			{
-				message = ZZ_ERROR_MSG[ZZ_UNKNOWN_ERROR];
-			}
-			
-			throw new System.ApplicationException(message);
-		}
-		
-		
-		/// <summary> Pushes the specified amount of characters back into the input stream.
-		/// 
-		/// They will be read again by then next call of the scanning method
-		/// 
-		/// </summary>
-		/// <param name="number"> the number of characters to be read again.
-		/// This number must not be greater than yylength()!
-		/// </param>
-		public virtual void  Yypushback(int number)
-		{
-			if (number > Yylength())
-				ZzScanError(ZZ_PUSHBACK_2BIG);
-			
-			zzMarkedPos -= number;
-		}
-		
-		
-		/// <summary> Resumes scanning until the next regular expression is matched,
-		/// the end of input is encountered or an I/O-Error occurs.
-		/// 
-		/// </summary>
-		/// <returns>      the next token
-		/// </returns>
-		/// <exception cref="System.IO.IOException"> if any I/O-Error occurs
-		/// </exception>
-		public virtual int GetNextToken()
-		{
-			int zzInput;
-			int zzAction;
-			
-			// cached fields:
-			int zzCurrentPosL;
-			int zzMarkedPosL;
-			int zzEndReadL = zzEndRead;
-			char[] zzBufferL = zzBuffer;
-			char[] zzCMapL = ZZ_CMAP;
-			
-			int[] zzTransL = ZZ_TRANS;
-			int[] zzRowMapL = ZZ_ROWMAP;
-			int[] zzAttrL = ZZ_ATTRIBUTE;
-			
-			while (true)
-			{
-				zzMarkedPosL = zzMarkedPos;
-				
-				yychar += zzMarkedPosL - zzStartRead;
-				
-				zzAction = - 1;
-				
-				zzCurrentPosL = zzCurrentPos = zzStartRead = zzMarkedPosL;
-				
-				zzState = zzLexicalState;
-				
-				
-				{
-					while (true)
-					{
-						
-						if (zzCurrentPosL < zzEndReadL)
-							zzInput = zzBufferL[zzCurrentPosL++];
-						else if (zzAtEOF)
-						{
-							zzInput = YYEOF;
-							goto zzForAction_brk;   // {{Aroush-2.9}} this 'goto' maybe in the wrong place
-						}
-						else
-						{
-							// store back cached positions
-							zzCurrentPos = zzCurrentPosL;
-							zzMarkedPos = zzMarkedPosL;
-							bool eof = ZzRefill();
-							// get translated positions and possibly new buffer
-							zzCurrentPosL = zzCurrentPos;
-							zzMarkedPosL = zzMarkedPos;
-							zzBufferL = zzBuffer;
-							zzEndReadL = zzEndRead;
-							if (eof)
-							{
-								zzInput = YYEOF;
-								goto zzForAction_brk;   // {{Aroush-2.9}} this 'goto' maybe in the wrong place
-							}
-							else
-							{
-								zzInput = zzBufferL[zzCurrentPosL++];
-							}
-						}
-						int zzNext = zzTransL[zzRowMapL[zzState] + zzCMapL[zzInput]];
-						if (zzNext == - 1)
-						{
-							goto zzForAction_brk;   // {{Aroush-2.9}} this 'goto' maybe in the wrong place
-						}
-						zzState = zzNext;
-						
-						int zzAttributes = zzAttrL[zzState];
-						if ((zzAttributes & 1) == 1)
-						{
-							zzAction = zzState;
-							zzMarkedPosL = zzCurrentPosL;
-							if ((zzAttributes & 8) == 8)
-							{
-								goto zzForAction_brk;   // {{Aroush-2.9}} this 'goto' maybe in the wrong place
-							}
-						}
-					}
-				}
+        /// </param>
+        internal StandardTokenizerImpl(System.IO.Stream in_Renamed):this(new System.IO.StreamReader(in_Renamed, System.Text.Encoding.Default))
+        {
+        }
+        
+        /// <summary> Unpacks the compressed character translation table.
+        /// 
+        /// </summary>
+        /// <param name="packed">  the packed character translation table
+        /// </param>
+        /// <returns>         the unpacked character translation table
+        /// </returns>
+        private static char[] ZzUnpackCMap(System.String packed)
+        {
+            char[] map = new char[0x10000];
+            int i = 0; /* index in packed string  */
+            int j = 0; /* index in unpacked array */
+            while (i < 1154)
+            {
+                int count = packed[i++];
+                char value_Renamed = packed[i++];
+                do 
+                    map[j++] = value_Renamed;
+                while (--count > 0);
+            }
+            return map;
+        }
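+        // Illustrative example (editorial, not part of the generated code): the
+        // unpacked map assigns a character class to each of the 0x10000 BMP code
+        // units (1154 is the length of ZZ_CMAP_PACKED). For instance, the pair
+        // "\x000A\x0002" covers the ten digits '0'-'9' with class 2, and
+        // "\x001A\x000A" covers the letters 'A'-'Z' with class 10.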
+        
+        
+        /// <summary> Refills the input buffer.
+        /// </summary>
+        /// <returns><c>false</c> iff there was new input;
+        /// <c>true</c> iff the end of the input was reached.
+        /// </returns>
+        /// <exception cref="System.IO.IOException"> if any I/O-Error occurs
+        /// </exception>
+        private bool ZzRefill()
+        {
+            
+            /* first: make room (if you can) */
+            if (zzStartRead > 0)
+            {
+                Array.Copy(zzBuffer, zzStartRead, zzBuffer, 0, zzEndRead - zzStartRead);
+                
+                /* translate stored positions */
+                zzEndRead -= zzStartRead;
+                zzCurrentPos -= zzStartRead;
+                zzMarkedPos -= zzStartRead;
+                zzPushbackPos -= zzStartRead;
+                zzStartRead = 0;
+            }
+            
+            /* is the buffer big enough? */
+            if (zzCurrentPos >= zzBuffer.Length)
+            {
+                /* if not: blow it up */
+                char[] newBuffer = new char[zzCurrentPos * 2];
+                Array.Copy(zzBuffer, 0, newBuffer, 0, zzBuffer.Length);
+                zzBuffer = newBuffer;
+            }
+            
+            /* finally: fill the buffer with new input */
+            int numRead = zzReader.Read(zzBuffer, zzEndRead, zzBuffer.Length - zzEndRead);
+            
+            if (numRead <= 0)
+            {
+                return true;
+            }
+            else
+            {
+                zzEndRead += numRead;
+                return false;
+            }
+        }
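+        // Note (editorial): the return value is intentionally inverted relative
+        // to TextReader.Read(): ZzRefill() returns true when the reader is
+        // exhausted, so callers treat it as an EOF flag, as in GetNextToken():
+        //   bool eof = ZzRefill();
+        //   if (eof) { zzInput = YYEOF; /* stop scanning */ }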
+        
+        
+        /// <summary> Closes the input stream.</summary>
+        public void  Yyclose()
+        {
+            zzAtEOF = true; /* indicate end of file */
+            zzEndRead = zzStartRead; /* invalidate buffer    */
+            
+            if (zzReader != null)
+                zzReader.Close();
+        }
+        
+        
+        /// <summary> Resets the scanner to read from a new input stream.
+        /// Does not close the old reader.
+        /// 
+        /// All internal variables are reset, the old input stream 
+        /// <b>cannot</b> be reused (internal buffer is discarded and lost).
+        /// Lexical state is set to <tt>ZZ_INITIAL</tt>.
+        /// 
+        /// </summary>
+        /// <param name="reader">  the new input stream 
+        /// </param>
+        public void  Yyreset(System.IO.TextReader reader)
+        {
+            zzReader = reader;
+            zzAtBOL = true;
+            zzAtEOF = false;
+            zzEndRead = zzStartRead = 0;
+            zzCurrentPos = zzMarkedPos = zzPushbackPos = 0;
+            yyline = yychar = yycolumn = 0;
+            zzLexicalState = YYINITIAL;
+        }
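+        // Note (editorial): StandardTokenizer.Reset(TextReader) reaches this
+        // method via scanner.Reset(reader), so reusing the tokenizer on a new
+        // document starts again with yychar == 0 and fresh buffer positions.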
+        
+        
+        /// <summary> Returns the current lexical state.</summary>
+        public int Yystate()
+        {
+            return zzLexicalState;
+        }
+        
+        
+        /// <summary> Enters a new lexical state
+        /// 
+        /// </summary>
+        /// <param name="newState">the new lexical state
+        /// </param>
+        public void  Yybegin(int newState)
+        {
+            zzLexicalState = newState;
+        }
+        
+        
+        /// <summary> Returns the text matched by the current regular expression.</summary>
+        public System.String Yytext()
+        {
+            return new System.String(zzBuffer, zzStartRead, zzMarkedPos - zzStartRead);
+        }
+        
+        
+        /// <summary> Returns the character at position <tt>pos</tt> from the 
+        /// matched text. 
+        /// 
+        /// It is equivalent to yytext().charAt(pos), but faster
+        /// 
+        /// </summary>
+        /// <param name="pos">the position of the character to fetch. 
+        /// A value from 0 to yylength()-1.
+        /// 
+        /// </param>
+        /// <returns> the character at position pos
+        /// </returns>
+        public char Yycharat(int pos)
+        {
+            return zzBuffer[zzStartRead + pos];
+        }
+        
+        
+        /// <summary> Returns the length of the matched text region.</summary>
+        public int Yylength()
+        {
+            return zzMarkedPos - zzStartRead;
+        }
+        
+        
+        /// <summary> Reports an error that occurred while scanning.
+        /// 
+        /// In a well-formed scanner (no or only correct usage of 
+        /// yypushback(int) and a match-all fallback rule) this method 
+        /// will only be called with things that "Can't Possibly Happen".
+        /// If this method is called, something is seriously wrong
+        /// (e.g. a JFlex bug producing a faulty scanner etc.).
+        /// 
+        /// Usual syntax/scanner level error handling should be done
+        /// in error fallback rules.
+        /// 
+        /// </summary>
+        /// <param name="errorCode"> the code of the errormessage to display
+        /// </param>
+        private void  ZzScanError(int errorCode)
+        {
+            System.String message;
+            try
+            {
+                message = ZZ_ERROR_MSG[errorCode];
+            }
+            catch (System.IndexOutOfRangeException)
+            {
+                message = ZZ_ERROR_MSG[ZZ_UNKNOWN_ERROR];
+            }
+            
+            throw new System.ApplicationException(message);
+        }
+        
+        
+        /// <summary> Pushes the specified number of characters back into the input stream.
+        /// 
+        /// They will be read again by the next call of the scanning method.
+        /// 
+        /// </summary>
+        /// <param name="number"> the number of characters to be read again.
+        /// This number must not be greater than yylength()!
+        /// </param>
+        public virtual void  Yypushback(int number)
+        {
+            if (number > Yylength())
+                ZzScanError(ZZ_PUSHBACK_2BIG);
+            
+            zzMarkedPos -= number;
+        }
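        // Illustrative sketch, not part of this patch: a rule action would
        // typically use Yypushback to return trailing characters to the
        // input so the next scan re-reads them, e.g. trimming a final '.':
        //
        //     if (Yylength() > 1 && Yycharat(Yylength() - 1) == '.')
        //         Yypushback(1);   // never more than Yylength(), per the doc above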
+        
+        
+        /// <summary> Resumes scanning until the next regular expression is matched,
+        /// the end of input is encountered or an I/O-Error occurs.
+        /// 
+        /// </summary>
+        /// <returns>      the next token
+        /// </returns>
+        /// <exception cref="System.IO.IOException"> if any I/O-Error occurs
+        /// </exception>
+        public virtual int GetNextToken()
+        {
+            int zzInput;
+            int zzAction;
+            
+            // cached fields:
+            int zzCurrentPosL;
+            int zzMarkedPosL;
+            int zzEndReadL = zzEndRead;
+            char[] zzBufferL = zzBuffer;
+            char[] zzCMapL = ZZ_CMAP;
+            
+            int[] zzTransL = ZZ_TRANS;
+            int[] zzRowMapL = ZZ_ROWMAP;
+            int[] zzAttrL = ZZ_ATTRIBUTE;
+            
+            while (true)
+            {
+                zzMarkedPosL = zzMarkedPos;
+                
+                yychar += zzMarkedPosL - zzStartRead;
+                
+                zzAction = - 1;
+                
+                zzCurrentPosL = zzCurrentPos = zzStartRead = zzMarkedPosL;
+                
+                zzState = zzLexicalState;
+                
+                
+                {
+                    while (true)
+                    {
+                        
+                        if (zzCurrentPosL < zzEndReadL)
+                            zzInput = zzBufferL[zzCurrentPosL++];
+                        else if (zzAtEOF)
+                        {
+                            zzInput = YYEOF;
+                            goto zzForAction_brk;   // {{Aroush-2.9}} this 'goto' may be in the wrong place
+                        }
+                        else
+                        {
+                            // store back cached positions
+                            zzCurrentPos = zzCurrentPosL;
+                            zzMarkedPos = zzMarkedPosL;
+                            bool eof = ZzRefill();
+                            // get translated positions and possibly new buffer
+                            zzCurrentPosL = zzCurrentPos;
+                            zzMarkedPosL = zzMarkedPos;
+                            zzBufferL = zzBuffer;
+                            zzEndReadL = zzEndRead;
+                            if (eof)
+                            {
+                                zzInput = YYEOF;
+                                goto zzForAction_brk;   // {{Aroush-2.9}} this 'goto' may be in the wrong place
+                            }
+                            else
+                            {
+                                zzInput = zzBufferL[zzCurrentPosL++];
+                            }
+                        }
+                        int zzNext = zzTransL[zzRowMapL[zzState] + zzCMapL[zzInput]];
+                        if (zzNext == - 1)
+                        {
+                            goto zzForAction_brk;   // {{Aroush-2.9}} this 'goto' may be in the wrong place
+                        }
+                        zzState = zzNext;
+                        
+                        int zzAttributes = zzAttrL[zzState];
+                        if ((zzAttributes & 1) == 1)
+                        {
+                            zzAction = zzState;
+                            zzMarkedPosL = zzCurrentPosL;
+                            if ((zzAttributes & 8) == 8)
+                            {
+                                goto zzForAction_brk;   // {{Aroush-2.9}} this 'goto' may be in the wrong place
+                            }
+                        }
+                    }
+                }
 
zzForAction_brk: ;  // {{Aroush-2.9}} this 'label' may be in the wrong place
-				
-				
-				// store back cached position
-				zzMarkedPos = zzMarkedPosL;
-				
-				switch (zzAction < 0?zzAction:ZZ_ACTION[zzAction])
-				{
-					
-					case 4: 
-					{
-						return HOST;
-					}
-					
-					case 11:  break;
-					
-					case 9: 
-					{
-						return ACRONYM;
-					}
-					
-					case 12:  break;
-					
-					case 8: 
-					{
-						return ACRONYM_DEP;
-					}
-					
-					case 13:  break;
-					
-					case 1: 
-						{
-							/* ignore */
-						}
-						goto case 14;
-					
-					case 14:  break;
-					
-					case 5: 
-					{
-						return NUM;
-					}
-					
-					case 15:  break;
-					
-					case 3: 
-					{
-						return CJ;
-					}
-					
-					case 16:  break;
-					
-					case 2: 
-					{
-						return ALPHANUM;
-					}
-					
-					case 17:  break;
-					
-					case 7: 
-					{
-						return COMPANY;
-					}
-					
-					case 18:  break;
-					
-					case 6: 
-					{
-						return APOSTROPHE;
-					}
-					
-					case 19:  break;
-					
-					case 10: 
-					{
-						return EMAIL;
-					}
-					
-					case 20:  break;
-					
-					default: 
-						if (zzInput == YYEOF && zzStartRead == zzCurrentPos)
-						{
-							zzAtEOF = true;
-							return YYEOF;
-						}
-						else
-						{
-							ZzScanError(ZZ_NO_MATCH);
-						}
-						break;
-					
-				}
-			}
-		}
-		static StandardTokenizerImpl()
-		{
-			ALPHANUM = StandardTokenizer.ALPHANUM;
-			APOSTROPHE = StandardTokenizer.APOSTROPHE;
-			ACRONYM = StandardTokenizer.ACRONYM;
-			COMPANY = StandardTokenizer.COMPANY;
-			EMAIL = StandardTokenizer.EMAIL;
-			HOST = StandardTokenizer.HOST;
-			NUM = StandardTokenizer.NUM;
-			CJ = StandardTokenizer.CJ;
-			ACRONYM_DEP = StandardTokenizer.ACRONYM_DEP;
-			TOKEN_TYPES = StandardTokenizer.TOKEN_TYPES;
-		}
-	}
+                
+                
+                // store back cached position
+                zzMarkedPos = zzMarkedPosL;
+                
+                switch (zzAction < 0 ? zzAction : ZZ_ACTION[zzAction])
+                {
+                    
+                    case 4: 
+                    {
+                        return HOST;
+                    }
+                    
+                    case 11:  break;
+                    
+                    case 9: 
+                    {
+                        return ACRONYM;
+                    }
+                    
+                    case 12:  break;
+                    
+                    case 8: 
+                    {
+                        return ACRONYM_DEP;
+                    }
+                    
+                    case 13:  break;
+                    
+                    case 1: 
+                        {
+                            /* ignore */
+                        }
+                        goto case 14;
+                    
+                    case 14:  break;
+                    
+                    case 5: 
+                    {
+                        return NUM;
+                    }
+                    
+                    case 15:  break;
+                    
+                    case 3: 
+                    {
+                        return CJ;
+                    }
+                    
+                    case 16:  break;
+                    
+                    case 2: 
+                    {
+                        return ALPHANUM;
+                    }
+                    
+                    case 17:  break;
+                    
+                    case 7: 
+                    {
+                        return COMPANY;
+                    }
+                    
+                    case 18:  break;
+                    
+                    case 6: 
+                    {
+                        return APOSTROPHE;
+                    }
+                    
+                    case 19:  break;
+                    
+                    case 10: 
+                    {
+                        return EMAIL;
+                    }
+                    
+                    case 20:  break;
+                    
+                    default: 
+                        if (zzInput == YYEOF && zzStartRead == zzCurrentPos)
+                        {
+                            zzAtEOF = true;
+                            return YYEOF;
+                        }
+                        else
+                        {
+                            ZzScanError(ZZ_NO_MATCH);
+                        }
+                        break;
+                    
+                }
+            }
+        }
+        static StandardTokenizerImpl()
+        {
+            ALPHANUM = StandardTokenizer.ALPHANUM;
+            APOSTROPHE = StandardTokenizer.APOSTROPHE;
+            ACRONYM = StandardTokenizer.ACRONYM;
+            COMPANY = StandardTokenizer.COMPANY;
+            EMAIL = StandardTokenizer.EMAIL;
+            HOST = StandardTokenizer.HOST;
+            NUM = StandardTokenizer.NUM;
+            CJ = StandardTokenizer.CJ;
+            ACRONYM_DEP = StandardTokenizer.ACRONYM_DEP;
+            TOKEN_TYPES = StandardTokenizer.TOKEN_TYPES;
+        }
+    }
 }
\ No newline at end of file
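
A minimal sketch of driving the scanner above. It assumes the generated
constructor taking a System.IO.TextReader and the public YYEOF constant, both
in the full file but outside this hunk; the class is internal, so this only
compiles from inside the Lucene.Net assembly:

    var scanner = new StandardTokenizerImpl(new System.IO.StringReader("hello world"));
    int tokenType;
    while ((tokenType = scanner.GetNextToken()) != StandardTokenizerImpl.YYEOF)
    {
        // Yytext() returns the characters matched by the rule that fired
        System.Console.WriteLine("{0}: {1}", tokenType, scanner.Yytext());
    }
    scanner.Yyreset(new System.IO.StringReader("second input"));  // reuse the instance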


[10/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/IndexReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IndexReader.cs b/src/core/Index/IndexReader.cs
index 5c3bd9b..91427c0 100644
--- a/src/core/Index/IndexReader.cs
+++ b/src/core/Index/IndexReader.cs
@@ -25,712 +25,712 @@ using Similarity = Lucene.Net.Search.Similarity;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>IndexReader is an abstract class, providing an interface for accessing an
-	/// index.  Search of an index is done entirely through this abstract interface,
-	/// so that any subclass which implements it is searchable.
-	/// <p/> Concrete subclasses of IndexReader are usually constructed with a call to
-	/// one of the static <c>open()</c> methods, e.g. <see cref="Open(Lucene.Net.Store.Directory, bool)" />
-	///.
-	/// <p/> For efficiency, in this API documents are often referred to via
-	/// <i>document numbers</i>, non-negative integers which each name a unique
-	/// document in the index.  These document numbers are ephemeral--they may change
-	/// as documents are added to and deleted from an index.  Clients should thus not
-	/// rely on a given document having the same number between sessions.
-	/// <p/> An IndexReader can be opened on a directory for which an IndexWriter is
-	/// opened already, but it cannot be used to delete documents from the index then.
-	/// <p/>
-	/// <b>NOTE</b>: for backwards API compatibility, several methods are not listed 
-	/// as abstract, but have no useful implementations in this base class and 
-	/// instead always throw UnsupportedOperationException.  Subclasses are 
-	/// strongly encouraged to override these methods, but in many cases may not 
-	/// need to.
-	/// <p/>
-	/// <p/>
-	/// <b>NOTE</b>: as of 2.4, it's possible to open a read-only
-	/// IndexReader using the static open methods that accepts the
-	/// boolean readOnly parameter.  Such a reader has better
-	/// better concurrency as it's not necessary to synchronize on the
-	/// isDeleted method.  You must explicitly specify false
-	/// if you want to make changes with the resulting IndexReader.
-	/// <p/>
-	/// <a name="thread-safety"></a><p/><b>NOTE</b>: <see cref="IndexReader" />
-	/// instances are completely thread
-	/// safe, meaning multiple threads can call any of its methods,
-	/// concurrently.  If your application requires external
-	/// synchronization, you should <b>not</b> synchronize on the
-	/// <c>IndexReader</c> instance; use your own
-	/// (non-Lucene) objects instead.
-	/// </summary>
-	public abstract class IndexReader : System.ICloneable, System.IDisposable
-	{
-		private class AnonymousClassFindSegmentsFile : SegmentInfos.FindSegmentsFile
-		{
-			private void  InitBlock(Lucene.Net.Store.Directory directory2)
-			{
-				this.directory2 = directory2;
-			}
-			private Lucene.Net.Store.Directory directory2;
-			internal AnonymousClassFindSegmentsFile(Lucene.Net.Store.Directory directory2, Lucene.Net.Store.Directory Param1):base(Param1)
-			{
-				InitBlock(directory2);
-			}
-			public override System.Object DoBody(System.String segmentFileName)
-			{
-				return (long) directory2.FileModified(segmentFileName);
-			}
-		}
-		
-		/// <summary> Constants describing field properties, for example used for
-		/// <see cref="IndexReader.GetFieldNames(FieldOption)" />.
-		/// </summary>
-		public sealed class FieldOption
-		{
-			private readonly System.String option;
-			internal FieldOption()
-			{
-			}
-			internal FieldOption(System.String option)
-			{
-				this.option = option;
-			}
-			public override System.String ToString()
-			{
-				return this.option;
-			}
-			/// <summary>All fields </summary>
-			public static readonly FieldOption ALL = new FieldOption("ALL");
-			/// <summary>All indexed fields </summary>
-			public static readonly FieldOption INDEXED = new FieldOption("INDEXED");
-			/// <summary>All fields that store payloads </summary>
-			public static readonly FieldOption STORES_PAYLOADS = new FieldOption("STORES_PAYLOADS");
-			/// <summary>All fields that omit tf </summary>
-			public static readonly FieldOption OMIT_TERM_FREQ_AND_POSITIONS = new FieldOption("OMIT_TERM_FREQ_AND_POSITIONS");
-			/// <summary>All fields which are not indexed </summary>
-			public static readonly FieldOption UNINDEXED = new FieldOption("UNINDEXED");
-			/// <summary>All fields which are indexed with termvectors enabled </summary>
-			public static readonly FieldOption INDEXED_WITH_TERMVECTOR = new FieldOption("INDEXED_WITH_TERMVECTOR");
-			/// <summary>All fields which are indexed but don't have termvectors enabled </summary>
-			public static readonly FieldOption INDEXED_NO_TERMVECTOR = new FieldOption("INDEXED_NO_TERMVECTOR");
-			/// <summary>All fields with termvectors enabled. Please note that only standard termvector fields are returned </summary>
-			public static readonly FieldOption TERMVECTOR = new FieldOption("TERMVECTOR");
-			/// <summary>All fields with termvectors with position values enabled </summary>
-			public static readonly FieldOption TERMVECTOR_WITH_POSITION = new FieldOption("TERMVECTOR_WITH_POSITION");
-			/// <summary>All fields with termvectors with offset values enabled </summary>
-			public static readonly FieldOption TERMVECTOR_WITH_OFFSET = new FieldOption("TERMVECTOR_WITH_OFFSET");
-			/// <summary>All fields with termvectors with offset values and position values enabled </summary>
-			public static readonly FieldOption TERMVECTOR_WITH_POSITION_OFFSET = new FieldOption("TERMVECTOR_WITH_POSITION_OFFSET");
-		}
-		
-		private bool closed;
-		protected internal bool hasChanges;
-		
-		private int refCount;
-		
-		protected internal static int DEFAULT_TERMS_INDEX_DIVISOR = 1;
+    
+    /// <summary>IndexReader is an abstract class, providing an interface for accessing an
+    /// index.  Search of an index is done entirely through this abstract interface,
+    /// so that any subclass which implements it is searchable.
+    /// <p/> Concrete subclasses of IndexReader are usually constructed with a call to
+    /// one of the static <c>open()</c> methods, e.g. <see cref="Open(Lucene.Net.Store.Directory, bool)" />.
+    /// <p/> For efficiency, in this API documents are often referred to via
+    /// <i>document numbers</i>, non-negative integers which each name a unique
+    /// document in the index.  These document numbers are ephemeral--they may change
+    /// as documents are added to and deleted from an index.  Clients should thus not
+    /// rely on a given document having the same number between sessions.
+    /// <p/> An IndexReader can be opened on a directory for which an IndexWriter is
+    /// opened already, but it cannot be used to delete documents from the index then.
+    /// <p/>
+    /// <b>NOTE</b>: for backwards API compatibility, several methods are not listed 
+    /// as abstract, but have no useful implementations in this base class and 
+    /// instead always throw UnsupportedOperationException.  Subclasses are 
+    /// strongly encouraged to override these methods, but in many cases may not 
+    /// need to.
+    /// <p/>
+    /// <b>NOTE</b>: as of 2.4, it's possible to open a read-only
+    /// IndexReader using the static open methods that accept the
+    /// boolean readOnly parameter.  Such a reader has better
+    /// concurrency as it's not necessary to synchronize on the
+    /// isDeleted method.  You must explicitly specify false
+    /// if you want to make changes with the resulting IndexReader.
+    /// <p/>
+    /// <a name="thread-safety"></a><p/><b>NOTE</b>: <see cref="IndexReader" />
+    /// instances are completely thread
+    /// safe, meaning multiple threads can call any of its methods,
+    /// concurrently.  If your application requires external
+    /// synchronization, you should <b>not</b> synchronize on the
+    /// <c>IndexReader</c> instance; use your own
+    /// (non-Lucene) objects instead.
+    /// </summary>
+    public abstract class IndexReader : System.ICloneable, System.IDisposable
+    {
+        private class AnonymousClassFindSegmentsFile : SegmentInfos.FindSegmentsFile
+        {
+            private void  InitBlock(Lucene.Net.Store.Directory directory2)
+            {
+                this.directory2 = directory2;
+            }
+            private Lucene.Net.Store.Directory directory2;
+            internal AnonymousClassFindSegmentsFile(Lucene.Net.Store.Directory directory2, Lucene.Net.Store.Directory Param1):base(Param1)
+            {
+                InitBlock(directory2);
+            }
+            public override System.Object DoBody(System.String segmentFileName)
+            {
+                return (long) directory2.FileModified(segmentFileName);
+            }
+        }
+        
+        /// <summary> Constants describing field properties, for example used for
+        /// <see cref="IndexReader.GetFieldNames(FieldOption)" />.
+        /// </summary>
+        public sealed class FieldOption
+        {
+            private readonly System.String option;
+            internal FieldOption()
+            {
+            }
+            internal FieldOption(System.String option)
+            {
+                this.option = option;
+            }
+            public override System.String ToString()
+            {
+                return this.option;
+            }
+            /// <summary>All fields </summary>
+            public static readonly FieldOption ALL = new FieldOption("ALL");
+            /// <summary>All indexed fields </summary>
+            public static readonly FieldOption INDEXED = new FieldOption("INDEXED");
+            /// <summary>All fields that store payloads </summary>
+            public static readonly FieldOption STORES_PAYLOADS = new FieldOption("STORES_PAYLOADS");
+            /// <summary>All fields that omit tf </summary>
+            public static readonly FieldOption OMIT_TERM_FREQ_AND_POSITIONS = new FieldOption("OMIT_TERM_FREQ_AND_POSITIONS");
+            /// <summary>All fields which are not indexed </summary>
+            public static readonly FieldOption UNINDEXED = new FieldOption("UNINDEXED");
+            /// <summary>All fields which are indexed with termvectors enabled </summary>
+            public static readonly FieldOption INDEXED_WITH_TERMVECTOR = new FieldOption("INDEXED_WITH_TERMVECTOR");
+            /// <summary>All fields which are indexed but don't have termvectors enabled </summary>
+            public static readonly FieldOption INDEXED_NO_TERMVECTOR = new FieldOption("INDEXED_NO_TERMVECTOR");
+            /// <summary>All fields with termvectors enabled. Please note that only standard termvector fields are returned </summary>
+            public static readonly FieldOption TERMVECTOR = new FieldOption("TERMVECTOR");
+            /// <summary>All fields with termvectors with position values enabled </summary>
+            public static readonly FieldOption TERMVECTOR_WITH_POSITION = new FieldOption("TERMVECTOR_WITH_POSITION");
+            /// <summary>All fields with termvectors with offset values enabled </summary>
+            public static readonly FieldOption TERMVECTOR_WITH_OFFSET = new FieldOption("TERMVECTOR_WITH_OFFSET");
+            /// <summary>All fields with termvectors with offset values and position values enabled </summary>
+            public static readonly FieldOption TERMVECTOR_WITH_POSITION_OFFSET = new FieldOption("TERMVECTOR_WITH_POSITION_OFFSET");
+        }
+        
+        private bool closed;
+        protected internal bool hasChanges;
+        
+        private int refCount;
+        
+        protected internal static int DEFAULT_TERMS_INDEX_DIVISOR = 1;
 
-	    /// <summary>Expert: returns the current refCount for this reader </summary>
-	    public virtual int RefCount
-	    {
-	        get
-	        {
-	            lock (this)
-	            {
-	                return refCount;
-	            }
-	        }
-	    }
+        /// <summary>Expert: returns the current refCount for this reader </summary>
+        public virtual int RefCount
+        {
+            get
+            {
+                lock (this)
+                {
+                    return refCount;
+                }
+            }
+        }
 
-	    /// <summary> Expert: increments the refCount of this IndexReader
-		/// instance.  RefCounts are used to determine when a
-		/// reader can be closed safely, i.e. as soon as there are
-		/// no more references.  Be sure to always call a
-		/// corresponding <see cref="DecRef" />, in a finally clause;
-		/// otherwise the reader may never be closed.  Note that
-		/// <see cref="Close" /> simply calls decRef(), which means that
-		/// the IndexReader will not really be closed until <see cref="DecRef" />
-		/// has been called for all outstanding
-		/// references.
-		/// 
-		/// </summary>
-		/// <seealso cref="DecRef">
-		/// </seealso>
-		public virtual void  IncRef()
-		{
-			lock (this)
-			{
-				System.Diagnostics.Debug.Assert(refCount > 0);
-				EnsureOpen();
-				refCount++;
-			}
-		}
-		
-		/// <summary> Expert: decreases the refCount of this IndexReader
-		/// instance.  If the refCount drops to 0, then pending
-		/// changes (if any) are committed to the index and this
-		/// reader is closed.
-		/// 
-		/// </summary>
-		/// <throws>  IOException in case an IOException occurs in commit() or doClose() </throws>
-		/// <summary> 
-		/// </summary>
-		/// <seealso cref="IncRef">
-		/// </seealso>
-		public virtual void  DecRef()
-		{
-			lock (this)
-			{
-				System.Diagnostics.Debug.Assert(refCount > 0);
-				EnsureOpen();
-				if (refCount == 1)
-				{
-					Commit();
-					DoClose();
-				}
-				refCount--;
-			}
-		}
-		
-		protected internal IndexReader()
-		{
-			refCount = 1;
-		}
-		
-		/// <throws>  AlreadyClosedException if this IndexReader is closed </throws>
+        /// <summary> Expert: increments the refCount of this IndexReader
+        /// instance.  RefCounts are used to determine when a
+        /// reader can be closed safely, i.e. as soon as there are
+        /// no more references.  Be sure to always call a
+        /// corresponding <see cref="DecRef" />, in a finally clause;
+        /// otherwise the reader may never be closed.  Note that
+        /// <see cref="Close" /> simply calls decRef(), which means that
+        /// the IndexReader will not really be closed until <see cref="DecRef" />
+        /// has been called for all outstanding
+        /// references.
+        /// 
+        /// </summary>
+        /// <seealso cref="DecRef">
+        /// </seealso>
+        public virtual void  IncRef()
+        {
+            lock (this)
+            {
+                System.Diagnostics.Debug.Assert(refCount > 0);
+                EnsureOpen();
+                refCount++;
+            }
+        }
+        
+        /// <summary> Expert: decreases the refCount of this IndexReader
+        /// instance.  If the refCount drops to 0, then pending
+        /// changes (if any) are committed to the index and this
+        /// reader is closed.
+        /// 
+        /// </summary>
+        /// <throws>  IOException in case an IOException occurs in commit() or doClose() </throws>
+        /// <summary> 
+        /// </summary>
+        /// <seealso cref="IncRef">
+        /// </seealso>
+        public virtual void  DecRef()
+        {
+            lock (this)
+            {
+                System.Diagnostics.Debug.Assert(refCount > 0);
+                EnsureOpen();
+                if (refCount == 1)
+                {
+                    Commit();
+                    DoClose();
+                }
+                refCount--;
+            }
+        }
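        // Illustrative sketch, not part of this patch: the pairing the doc
        // comments above prescribe, with DecRef() in a finally clause so the
        // reference is released even if the work in between throws:
        //
        //     reader.IncRef();
        //     try
        //     {
        //         // ... use the reader ...
        //     }
        //     finally
        //     {
        //         reader.DecRef();
        //     }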
+        
+        protected internal IndexReader()
+        {
+            refCount = 1;
+        }
+        
+        /// <throws>  AlreadyClosedException if this IndexReader is closed </throws>
         protected internal void EnsureOpen()
-		{
-		    if (refCount <= 0)
-		    {
-		        throw new AlreadyClosedException("this IndexReader is closed");
-		    }
-		}
-		
-		/// <summary>Returns an IndexReader reading the index in the given
-		/// Directory.  You should pass readOnly=true, since it
-		/// gives much better concurrent performance, unless you
-		/// intend to do write operations (delete documents or
-		/// change norms) with the reader.
-		/// </summary>
-		/// <param name="directory">the index directory</param>
+        {
+            if (refCount <= 0)
+            {
+                throw new AlreadyClosedException("this IndexReader is closed");
+            }
+        }
+        
+        /// <summary>Returns an IndexReader reading the index in the given
+        /// Directory.  You should pass readOnly=true, since it
+        /// gives much better concurrent performance, unless you
+        /// intend to do write operations (delete documents or
+        /// change norms) with the reader.
+        /// </summary>
+        /// <param name="directory">the index directory</param>
         /// <param name="readOnly">true if no changes (deletions, norms) will be made with this IndexReader</param>
         /// <exception cref="CorruptIndexException">CorruptIndexException if the index is corrupt</exception>
         /// <exception cref="System.IO.IOException">IOException if there is a low-level IO error</exception>
-		public static IndexReader Open(Directory directory, bool readOnly)
-		{
-			return Open(directory, null, null, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
-		}
-		
-		/// <summary>Expert: returns an IndexReader reading the index in the given
-		/// <see cref="IndexCommit" />.  You should pass readOnly=true, since it
-		/// gives much better concurrent performance, unless you
-		/// intend to do write operations (delete documents or
-		/// change norms) with the reader.
-		/// </summary>
-		/// <param name="commit">the commit point to open
-		/// </param>
-		/// <param name="readOnly">true if no changes (deletions, norms) will be made with this IndexReader
-		/// </param>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		public static IndexReader Open(IndexCommit commit, bool readOnly)
-		{
-			return Open(commit.Directory, null, commit, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
-		}
-		
-		/// <summary>Expert: returns an IndexReader reading the index in
-		/// the given Directory, with a custom <see cref="IndexDeletionPolicy" />
-		///.  You should pass readOnly=true,
-		/// since it gives much better concurrent performance,
-		/// unless you intend to do write operations (delete
-		/// documents or change norms) with the reader.
-		/// </summary>
-		/// <param name="directory">the index directory
-		/// </param>
-		/// <param name="deletionPolicy">a custom deletion policy (only used
-		/// if you use this reader to perform deletes or to set
-		/// norms); see <see cref="IndexWriter" /> for details.
-		/// </param>
-		/// <param name="readOnly">true if no changes (deletions, norms) will be made with this IndexReader
-		/// </param>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		public static IndexReader Open(Directory directory, IndexDeletionPolicy deletionPolicy, bool readOnly)
-		{
-			return Open(directory, deletionPolicy, null, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
-		}
+        public static IndexReader Open(Directory directory, bool readOnly)
+        {
+            return Open(directory, null, null, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
+        }
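        // Illustrative sketch, not part of this patch: typical read-only
        // usage over an existing index (the path is hypothetical):
        //
        //     Directory dir = FSDirectory.Open(new System.IO.DirectoryInfo("/path/to/index"));
        //     IndexReader reader = IndexReader.Open(dir, true);  // readOnly: better concurrency
        //     try { /* search against reader */ } finally { reader.Close(); }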
+        
+        /// <summary>Expert: returns an IndexReader reading the index in the given
+        /// <see cref="IndexCommit" />.  You should pass readOnly=true, since it
+        /// gives much better concurrent performance, unless you
+        /// intend to do write operations (delete documents or
+        /// change norms) with the reader.
+        /// </summary>
+        /// <param name="commit">the commit point to open
+        /// </param>
+        /// <param name="readOnly">true if no changes (deletions, norms) will be made with this IndexReader
+        /// </param>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
+        public static IndexReader Open(IndexCommit commit, bool readOnly)
+        {
+            return Open(commit.Directory, null, commit, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
+        }
+        
+        /// <summary>Expert: returns an IndexReader reading the index in
+        /// the given Directory, with a custom <see cref="IndexDeletionPolicy" />.
+        /// You should pass readOnly=true,
+        /// since it gives much better concurrent performance,
+        /// unless you intend to do write operations (delete
+        /// documents or change norms) with the reader.
+        /// </summary>
+        /// <param name="directory">the index directory
+        /// </param>
+        /// <param name="deletionPolicy">a custom deletion policy (only used
+        /// if you use this reader to perform deletes or to set
+        /// norms); see <see cref="IndexWriter" /> for details.
+        /// </param>
+        /// <param name="readOnly">true if no changes (deletions, norms) will be made with this IndexReader
+        /// </param>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
+        public static IndexReader Open(Directory directory, IndexDeletionPolicy deletionPolicy, bool readOnly)
+        {
+            return Open(directory, deletionPolicy, null, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
+        }
 
-		/// <summary>Expert: returns an IndexReader reading the index in
-		/// the given Directory, with a custom <see cref="IndexDeletionPolicy" />
-		///.  You should pass readOnly=true,
-		/// since it gives much better concurrent performance,
-		/// unless you intend to do write operations (delete
-		/// documents or change norms) with the reader.
-		/// </summary>
-		/// <param name="directory">the index directory
-		/// </param>
-		/// <param name="deletionPolicy">a custom deletion policy (only used
-		/// if you use this reader to perform deletes or to set
-		/// norms); see <see cref="IndexWriter" /> for details.
-		/// </param>
-		/// <param name="readOnly">true if no changes (deletions, norms) will be made with this IndexReader
-		/// </param>
-		/// <param name="termInfosIndexDivisor">Subsamples which indexed
-		/// terms are loaded into RAM. This has the same effect as <see>
-		///                                                          <cref>IndexWriter.SetTermIndexInterval</cref>
-		///                                                        </see> except that setting
-		/// must be done at indexing time while this setting can be
-		/// set per reader.  When set to N, then one in every
-		/// N*termIndexInterval terms in the index is loaded into
-		/// memory.  By setting this to a value > 1 you can reduce
-		/// memory usage, at the expense of higher latency when
-		/// loading a TermInfo.  The default value is 1.  Set this
-		/// to -1 to skip loading the terms index entirely.
-		/// </param>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		public static IndexReader Open(Directory directory, IndexDeletionPolicy deletionPolicy, bool readOnly, int termInfosIndexDivisor)
-		{
-			return Open(directory, deletionPolicy, null, readOnly, termInfosIndexDivisor);
-		}
-		
-		/// <summary>Expert: returns an IndexReader reading the index in
-		/// the given Directory, using a specific commit and with
-		/// a custom <see cref="IndexDeletionPolicy" />.  You should pass
-		/// readOnly=true, since it gives much better concurrent
-		/// performance, unless you intend to do write operations
-		/// (delete documents or change norms) with the reader.
-		/// </summary>
-		/// <param name="commit">the specific <see cref="IndexCommit" /> to open;
-		/// see <see cref="IndexReader.ListCommits" /> to list all commits
-		/// in a directory
-		/// </param>
-		/// <param name="deletionPolicy">a custom deletion policy (only used
-		/// if you use this reader to perform deletes or to set
-		/// norms); see <see cref="IndexWriter" /> for details.
-		/// </param>
-		/// <param name="readOnly">true if no changes (deletions, norms) will be made with this IndexReader
-		/// </param>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		public static IndexReader Open(IndexCommit commit, IndexDeletionPolicy deletionPolicy, bool readOnly)
-		{
-			return Open(commit.Directory, deletionPolicy, commit, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
-		}
+        /// <summary>Expert: returns an IndexReader reading the index in
+        /// the given Directory, with a custom <see cref="IndexDeletionPolicy" />.
+        /// You should pass readOnly=true,
+        /// since it gives much better concurrent performance,
+        /// unless you intend to do write operations (delete
+        /// documents or change norms) with the reader.
+        /// </summary>
+        /// <param name="directory">the index directory
+        /// </param>
+        /// <param name="deletionPolicy">a custom deletion policy (only used
+        /// if you use this reader to perform deletes or to set
+        /// norms); see <see cref="IndexWriter" /> for details.
+        /// </param>
+        /// <param name="readOnly">true if no changes (deletions, norms) will be made with this IndexReader
+        /// </param>
+        /// <param name="termInfosIndexDivisor">Subsamples which indexed
+        /// terms are loaded into RAM. This has the same effect as
+        /// <see cref="IndexWriter.SetTermIndexInterval" /> except that setting
+        /// must be done at indexing time while this setting can be
+        /// set per reader.  When set to N, then one in every
+        /// N*termIndexInterval terms in the index is loaded into
+        /// memory.  By setting this to a value > 1 you can reduce
+        /// memory usage, at the expense of higher latency when
+        /// loading a TermInfo.  The default value is 1.  Set this
+        /// to -1 to skip loading the terms index entirely.
+        /// </param>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
+        public static IndexReader Open(Directory directory, IndexDeletionPolicy deletionPolicy, bool readOnly, int termInfosIndexDivisor)
+        {
+            return Open(directory, deletionPolicy, null, readOnly, termInfosIndexDivisor);
+        }
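        // Illustrative sketch, not part of this patch: per the doc above, a
        // divisor of 4 loads one in every 4*termIndexInterval indexed terms
        // into RAM, trading term-lookup latency for memory:
        //
        //     IndexReader reader = IndexReader.Open(dir, null, true, 4);
        //     // ... or pass -1 to skip loading the terms index entirely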
+        
+        /// <summary>Expert: returns an IndexReader reading the index in
+        /// the given Directory, using a specific commit and with
+        /// a custom <see cref="IndexDeletionPolicy" />.  You should pass
+        /// readOnly=true, since it gives much better concurrent
+        /// performance, unless you intend to do write operations
+        /// (delete documents or change norms) with the reader.
+        /// </summary>
+        /// <param name="commit">the specific <see cref="IndexCommit" /> to open;
+        /// see <see cref="IndexReader.ListCommits" /> to list all commits
+        /// in a directory
+        /// </param>
+        /// <param name="deletionPolicy">a custom deletion policy (only used
+        /// if you use this reader to perform deletes or to set
+        /// norms); see <see cref="IndexWriter" /> for details.
+        /// </param>
+        /// <param name="readOnly">true if no changes (deletions, norms) will be made with this IndexReader
+        /// </param>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
+        public static IndexReader Open(IndexCommit commit, IndexDeletionPolicy deletionPolicy, bool readOnly)
+        {
+            return Open(commit.Directory, deletionPolicy, commit, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
+        }
 
-		/// <summary>Expert: returns an IndexReader reading the index in
-		/// the given Directory, using a specific commit and with
-		/// a custom <see cref="IndexDeletionPolicy" />.  You should pass
-		/// readOnly=true, since it gives much better concurrent
-		/// performance, unless you intend to do write operations
-		/// (delete documents or change norms) with the reader.
-		/// </summary>
-		/// <param name="commit">the specific <see cref="IndexCommit" /> to open;
-		/// see <see cref="IndexReader.ListCommits" /> to list all commits
-		/// in a directory
-		/// </param>
-		/// <param name="deletionPolicy">a custom deletion policy (only used
-		/// if you use this reader to perform deletes or to set
-		/// norms); see <see cref="IndexWriter" /> for details.
-		/// </param>
-		/// <param name="readOnly">true if no changes (deletions, norms) will be made with this IndexReader
-		/// </param>
-		/// <param name="termInfosIndexDivisor">Subsambles which indexed
-		/// terms are loaded into RAM. This has the same effect as <see>
-		///                                                          <cref>IndexWriter.SetTermIndexInterval</cref>
-		///                                                        </see> except that setting
-		/// must be done at indexing time while this setting can be
-		/// set per reader.  When set to N, then one in every
-		/// N*termIndexInterval terms in the index is loaded into
-		/// memory.  By setting this to a value > 1 you can reduce
-		/// memory usage, at the expense of higher latency when
-		/// loading a TermInfo.  The default value is 1.  Set this
-		/// to -1 to skip loading the terms index entirely.
-		/// </param>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		public static IndexReader Open(IndexCommit commit, IndexDeletionPolicy deletionPolicy, bool readOnly, int termInfosIndexDivisor)
-		{
-			return Open(commit.Directory, deletionPolicy, commit, readOnly, termInfosIndexDivisor);
-		}
-		
-		private static IndexReader Open(Directory directory, IndexDeletionPolicy deletionPolicy, IndexCommit commit, bool readOnly, int termInfosIndexDivisor)
-		{
-			return DirectoryReader.Open(directory, deletionPolicy, commit, readOnly, termInfosIndexDivisor);
-		}
-		
-		/// <summary> Refreshes an IndexReader if the index has changed since this instance 
-		/// was (re)opened. 
-		/// <p/>
-		/// Opening an IndexReader is an expensive operation. This method can be used
-		/// to refresh an existing IndexReader to reduce these costs. This method 
-		/// tries to only load segments that have changed or were created after the 
-		/// IndexReader was (re)opened.
-		/// <p/>
-		/// If the index has not changed since this instance was (re)opened, then this
-		/// call is a NOOP and returns this instance. Otherwise, a new instance is 
-		/// returned. The old instance is <b>not</b> closed and remains usable.<br/>
-		/// <p/>   
-		/// If the reader is reopened, even though they share
-		/// resources internally, it's safe to make changes
-		/// (deletions, norms) with the new reader.  All shared
-		/// mutable state obeys "copy on write" semantics to ensure
-		/// the changes are not seen by other readers.
-		/// <p/>
-		/// You can determine whether a reader was actually reopened by comparing the
-		/// old instance with the instance returned by this method: 
+        /// <summary>Expert: returns an IndexReader reading the index in
+        /// the given Directory, using a specific commit and with
+        /// a custom <see cref="IndexDeletionPolicy" />.  You should pass
+        /// readOnly=true, since it gives much better concurrent
+        /// performance, unless you intend to do write operations
+        /// (delete documents or change norms) with the reader.
+        /// </summary>
+        /// <param name="commit">the specific <see cref="IndexCommit" /> to open;
+        /// see <see cref="IndexReader.ListCommits" /> to list all commits
+        /// in a directory
+        /// </param>
+        /// <param name="deletionPolicy">a custom deletion policy (only used
+        /// if you use this reader to perform deletes or to set
+        /// norms); see <see cref="IndexWriter" /> for details.
+        /// </param>
+        /// <param name="readOnly">true if no changes (deletions, norms) will be made with this IndexReader
+        /// </param>
+        /// <param name="termInfosIndexDivisor">Subsambles which indexed
+        /// terms are loaded into RAM. This has the same effect as <see>
+        ///                                                          <cref>IndexWriter.SetTermIndexInterval</cref>
+        ///                                                        </see> except that setting
+        /// must be done at indexing time while this setting can be
+        /// set per reader.  When set to N, then one in every
+        /// N*termIndexInterval terms in the index is loaded into
+        /// memory.  By setting this to a value > 1 you can reduce
+        /// memory usage, at the expense of higher latency when
+        /// loading a TermInfo.  The default value is 1.  Set this
+        /// to -1 to skip loading the terms index entirely.
+        /// </param>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
+        public static IndexReader Open(IndexCommit commit, IndexDeletionPolicy deletionPolicy, bool readOnly, int termInfosIndexDivisor)
+        {
+            return Open(commit.Directory, deletionPolicy, commit, readOnly, termInfosIndexDivisor);
+        }
+        
+        private static IndexReader Open(Directory directory, IndexDeletionPolicy deletionPolicy, IndexCommit commit, bool readOnly, int termInfosIndexDivisor)
+        {
+            return DirectoryReader.Open(directory, deletionPolicy, commit, readOnly, termInfosIndexDivisor);
+        }
+        
+        /// <summary> Refreshes an IndexReader if the index has changed since this instance 
+        /// was (re)opened. 
+        /// <p/>
+        /// Opening an IndexReader is an expensive operation. This method can be used
+        /// to refresh an existing IndexReader to reduce these costs. This method 
+        /// tries to only load segments that have changed or were created after the 
+        /// IndexReader was (re)opened.
+        /// <p/>
+        /// If the index has not changed since this instance was (re)opened, then this
+        /// call is a NOOP and returns this instance. Otherwise, a new instance is 
+        /// returned. The old instance is <b>not</b> closed and remains usable.<br/>
+        /// <p/>   
+        /// If the reader is reopened, then even though the old and new
+        /// instances share resources internally, it's safe to make changes
+        /// (deletions, norms) with the new reader.  All shared
+        /// mutable state obeys "copy on write" semantics to ensure
+        /// the changes are not seen by other readers.
+        /// <p/>
+        /// You can determine whether a reader was actually reopened by comparing the
+        /// old instance with the instance returned by this method: 
         /// <code>
-		/// IndexReader reader = ... 
-		/// ...
-		/// IndexReader newReader = r.reopen();
-		/// if (newReader != reader) {
-		/// ...     // reader was reopened
-		/// reader.close(); 
-		/// }
-		/// reader = newReader;
-		/// ...
+        /// IndexReader reader = ... 
+        /// ...
+        /// IndexReader newReader = reader.Reopen();
+        /// if (newReader != reader) {
+        /// ...     // reader was reopened
+        /// reader.Close();
+        /// }
+        /// reader = newReader;
+        /// ...
         /// </code>
-		/// 
-		/// Be sure to synchronize that code so that other threads,
-		/// if present, can never use reader after it has been
-		/// closed and before it's switched to newReader.
-		/// 
-		/// <p/><b>NOTE</b>: If this reader is a near real-time
-		/// reader (obtained from <see cref="IndexWriter.GetReader()" />,
-		/// reopen() will simply call writer.getReader() again for
-		/// you, though this may change in the future.
-		/// 
-		/// </summary>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		public virtual IndexReader Reopen()
-		{
-			lock (this)
-			{
-				throw new NotSupportedException("This reader does not support reopen().");
-			}
-		}
-		
-		
-		/// <summary>Just like <see cref="Reopen()" />, except you can change the
-		/// readOnly of the original reader.  If the index is
-		/// unchanged but readOnly is different then a new reader
-		/// will be returned. 
-		/// </summary>
-		public virtual IndexReader Reopen(bool openReadOnly)
-		{
-			lock (this)
-			{
-				throw new NotSupportedException("This reader does not support reopen().");
-			}
-		}
-		
-		/// <summary>Expert: reopen this reader on a specific commit point.
-		/// This always returns a readOnly reader.  If the
-		/// specified commit point matches what this reader is
-		/// already on, and this reader is already readOnly, then
-		/// this same instance is returned; if it is not already
-		/// readOnly, a readOnly clone is returned. 
-		/// </summary>
-		public virtual IndexReader Reopen(IndexCommit commit)
-		{
-			lock (this)
-			{
-				throw new NotSupportedException("This reader does not support reopen(IndexCommit).");
-			}
-		}
-		
-		/// <summary> Efficiently clones the IndexReader (sharing most
-		/// internal state).
-		/// <p/>
-		/// On cloning a reader with pending changes (deletions,
-		/// norms), the original reader transfers its write lock to
-		/// the cloned reader.  This means only the cloned reader
-		/// may make further changes to the index, and commit the
-		/// changes to the index on close, but the old reader still
-		/// reflects all changes made up until it was cloned.
-		/// <p/>
-		/// Like <see cref="Reopen()" />, it's safe to make changes to
-		/// either the original or the cloned reader: all shared
-		/// mutable state obeys "copy on write" semantics to ensure
-		/// the changes are not seen by other readers.
-		/// <p/>
-		/// </summary>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		public virtual System.Object Clone()
-		{
-			throw new System.NotSupportedException("This reader does not implement clone()");
-		}
-		
-		/// <summary> Clones the IndexReader and optionally changes readOnly.  A readOnly 
-		/// reader cannot open a writeable reader.  
-		/// </summary>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		public virtual IndexReader Clone(bool openReadOnly)
-		{
-			lock (this)
-			{
-				throw new System.NotSupportedException("This reader does not implement clone()");
-			}
-		}
-		
-		/// <summary> Returns the directory associated with this index.  The Default 
-		/// implementation returns the directory specified by subclasses when 
-		/// delegating to the IndexReader(Directory) constructor, or throws an 
-		/// UnsupportedOperationException if one was not specified.
-		/// </summary>
-		/// <throws>  UnsupportedOperationException if no directory </throws>
-		public virtual Directory Directory()
-		{
-			EnsureOpen();
+        /// 
+        /// Be sure to synchronize that code so that other threads,
+        /// if present, can never use reader after it has been
+        /// closed and before it's switched to newReader.
+        /// 
+        /// <p/><b>NOTE</b>: If this reader is a near real-time
+        /// reader (obtained from <see cref="IndexWriter.GetReader()" />),
+        /// Reopen() will simply call writer.GetReader() again for
+        /// you, though this may change in the future.
+        /// 
+        /// </summary>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
+        public virtual IndexReader Reopen()
+        {
+            lock (this)
+            {
+                throw new NotSupportedException("This reader does not support reopen().");
+            }
+        }
+        
+        
+        /// <summary>Just like <see cref="Reopen()" />, except you can change the
+        /// readOnly of the original reader.  If the index is
+        /// unchanged but readOnly is different then a new reader
+        /// will be returned. 
+        /// </summary>
+        public virtual IndexReader Reopen(bool openReadOnly)
+        {
+            lock (this)
+            {
+                throw new NotSupportedException("This reader does not support reopen().");
+            }
+        }
+        
+        /// <summary>Expert: reopen this reader on a specific commit point.
+        /// This always returns a readOnly reader.  If the
+        /// specified commit point matches what this reader is
+        /// already on, and this reader is already readOnly, then
+        /// this same instance is returned; if it is not already
+        /// readOnly, a readOnly clone is returned. 
+        /// </summary>
+        public virtual IndexReader Reopen(IndexCommit commit)
+        {
+            lock (this)
+            {
+                throw new NotSupportedException("This reader does not support reopen(IndexCommit).");
+            }
+        }
+        
+        /// <summary> Efficiently clones the IndexReader (sharing most
+        /// internal state).
+        /// <p/>
+        /// On cloning a reader with pending changes (deletions,
+        /// norms), the original reader transfers its write lock to
+        /// the cloned reader.  This means only the cloned reader
+        /// may make further changes to the index, and commit the
+        /// changes to the index on close, but the old reader still
+        /// reflects all changes made up until it was cloned.
+        /// <p/>
+        /// Like <see cref="Reopen()" />, it's safe to make changes to
+        /// either the original or the cloned reader: all shared
+        /// mutable state obeys "copy on write" semantics to ensure
+        /// the changes are not seen by other readers.
+        /// <p/>
+        /// </summary>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
+        public virtual System.Object Clone()
+        {
+            throw new System.NotSupportedException("This reader does not implement clone()");
+        }
+        
+        /// <summary> Clones the IndexReader and optionally changes readOnly.  A readOnly 
+        /// reader cannot open a writeable reader.  
+        /// </summary>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
+        public virtual IndexReader Clone(bool openReadOnly)
+        {
+            lock (this)
+            {
+                throw new System.NotSupportedException("This reader does not implement clone()");
+            }
+        }
+        
+        /// <summary> Returns the directory associated with this index.  The default
+        /// implementation returns the directory specified by subclasses when 
+        /// delegating to the IndexReader(Directory) constructor, or throws an 
+        /// UnsupportedOperationException if one was not specified.
+        /// </summary>
+        /// <throws>  UnsupportedOperationException if no directory </throws>
+        public virtual Directory Directory()
+        {
+            EnsureOpen();
             throw new NotSupportedException("This reader does not support this method.");
-		}
-		
-		/// <summary> Returns the time the index in the named directory was last modified. 
-		/// Do not use this to check whether the reader is still up-to-date, use
-		/// <see cref="IsCurrent()" /> instead. 
-		/// </summary>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		public static long LastModified(Directory directory2)
-		{
-			return (long) ((System.Int64) new AnonymousClassFindSegmentsFile(directory2, directory2).Run());
-		}
-		
-		/// <summary> Reads version number from segments files. The version number is
-		/// initialized with a timestamp and then increased by one for each change of
-		/// the index.
-		/// 
-		/// </summary>
-		/// <param name="directory">where the index resides.
-		/// </param>
-		/// <returns> version number.
-		/// </returns>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		public static long GetCurrentVersion(Directory directory)
-		{
-			return SegmentInfos.ReadCurrentVersion(directory);
-		}
+        }
+        
+        /// <summary> Returns the time the index in the named directory was last modified. 
+        /// Do not use this to check whether the reader is still up-to-date, use
+        /// <see cref="IsCurrent()" /> instead. 
+        /// </summary>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
+        public static long LastModified(Directory directory2)
+        {
+            return (long) ((System.Int64) new AnonymousClassFindSegmentsFile(directory2, directory2).Run());
+        }
+        
+        /// <summary> Reads version number from segments files. The version number is
+        /// initialized with a timestamp and then increased by one for each change of
+        /// the index.
+        /// 
+        /// </summary>
+        /// <param name="directory">where the index resides.
+        /// </param>
+        /// <returns> version number.
+        /// </returns>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
+        public static long GetCurrentVersion(Directory directory)
+        {
+            return SegmentInfos.ReadCurrentVersion(directory);
+        }
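        // Illustrative sketch, not part of this patch: since the version is
        // advanced on every change, two reads bracket an intervening commit:
        //
        //     long before = IndexReader.GetCurrentVersion(dir);
        //     // ... some IndexWriter commits ...
        //     long after = IndexReader.GetCurrentVersion(dir);
        //     bool indexChanged = after != before;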
 
         /// <summary> Reads commitUserData, previously passed to 
         /// <see cref="IndexWriter.Commit(System.Collections.Generic.IDictionary{string, string})" />,
-		/// from current index segments file.  This will return null if 
+        /// from current index segments file.  This will return null if 
         /// <see cref="IndexWriter.Commit(System.Collections.Generic.IDictionary{string, string})" />
-		/// has never been called for this index.
-		/// </summary>
-		/// <param name="directory">where the index resides.
-		/// </param>
-		/// <returns> commit userData.
-		/// </returns>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		/// <summary> 
-		/// </summary>
-		/// <seealso cref="GetCommitUserData(Store.Directory)">
-		/// </seealso>
+        /// has never been called for this index.
+        /// </summary>
+        /// <param name="directory">where the index resides.
+        /// </param>
+        /// <returns> commit userData.
+        /// </returns>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
+        /// <seealso cref="GetCommitUserData(Store.Directory)">
+        /// </seealso>
         public static System.Collections.Generic.IDictionary<string, string> GetCommitUserData(Directory directory)
-		{
-			return SegmentInfos.ReadCurrentUserData(directory);
-		}
+        {
+            return SegmentInfos.ReadCurrentUserData(directory);
+        }
 
-	    /// <summary> Version number when this IndexReader was opened. Not implemented in the
-	    /// IndexReader base class.
-	    /// 
-	    /// <p/>
-	    /// If this reader is based on a Directory (ie, was created by calling
-	    /// <see cref="Open(Lucene.Net.Store.Directory, bool)" />, or <see cref="Reopen()" /> 
-	    /// on a reader based on a Directory), then
-	    /// this method returns the version recorded in the commit that the reader
-	    /// opened. This version is advanced every time <see cref="IndexWriter.Commit()" /> is
-	    /// called.
-	    /// <p/>
-	    /// 
-	    /// <p/>
-	    /// If instead this reader is a near real-time reader (ie, obtained by a call
-	    /// to <see cref="IndexWriter.GetReader()" />, or by calling <see cref="Reopen()" /> on a near
-	    /// real-time reader), then this method returns the version of the last
-	    /// commit done by the writer. Note that even as further changes are made
-	    /// with the writer, the version will not changed until a commit is
-	    /// completed. Thus, you should not rely on this method to determine when a
-	    /// near real-time reader should be opened. Use <see cref="IsCurrent" /> instead.
-	    /// <p/>
-	    /// 
-	    /// </summary>
-	    /// <throws>  UnsupportedOperationException </throws>
-	    /// <summary>             unless overridden in subclass
-	    /// </summary>
-	    public virtual long Version
-	    {
-	        get { throw new System.NotSupportedException("This reader does not support this method."); }
-	    }
+        /// <summary> Version number when this IndexReader was opened. Not implemented in the
+        /// IndexReader base class.
+        /// 
+        /// <p/>
+        /// If this reader is based on a Directory (ie, was created by calling
+        /// <see cref="Open(Lucene.Net.Store.Directory, bool)" />, or <see cref="Reopen()" /> 
+        /// on a reader based on a Directory), then
+        /// this method returns the version recorded in the commit that the reader
+        /// opened. This version is advanced every time <see cref="IndexWriter.Commit()" /> is
+        /// called.
+        /// <p/>
+        /// 
+        /// <p/>
+        /// If instead this reader is a near real-time reader (ie, obtained by a call
+        /// to <see cref="IndexWriter.GetReader()" />, or by calling <see cref="Reopen()" /> on a near
+        /// real-time reader), then this method returns the version of the last
+        /// commit done by the writer. Note that even as further changes are made
+        /// with the writer, the version will not change until a commit is
+        /// completed. Thus, you should not rely on this method to determine when a
+        /// near real-time reader should be opened. Use <see cref="IsCurrent" /> instead.
+        /// <p/>
+        /// 
+        /// </summary>
+        /// <throws>  UnsupportedOperationException unless overridden in subclass </throws>
+        public virtual long Version
+        {
+            get { throw new System.NotSupportedException("This reader does not support this method."); }
+        }
 
-	    /// <summary> Retrieve the String userData optionally passed to
-	    /// <see cref="IndexWriter.Commit(System.Collections.Generic.IDictionary{string, string})" />.  
-	    /// This will return null if 
-	    /// <see cref="IndexWriter.Commit(System.Collections.Generic.IDictionary{string, string})" />
-	    /// has never been called for this index.
-	    /// </summary>
-	    /// <seealso cref="GetCommitUserData(Store.Directory)">
-	    /// </seealso>
-	    public virtual IDictionary<string, string> CommitUserData
-	    {
-	        get { throw new System.NotSupportedException("This reader does not support this method."); }
-	    }
+        /// <summary> Retrieve the String userData optionally passed to
+        /// <see cref="IndexWriter.Commit(System.Collections.Generic.IDictionary{string, string})" />.  
+        /// This will return null if 
+        /// <see cref="IndexWriter.Commit(System.Collections.Generic.IDictionary{string, string})" />
+        /// has never been called for this index.
+        /// </summary>
+        /// <seealso cref="GetCommitUserData(Store.Directory)">
+        /// </seealso>
+        public virtual IDictionary<string, string> CommitUserData
+        {
+            get { throw new System.NotSupportedException("This reader does not support this method."); }
+        }
 
-		/// <summary> Check whether any new changes have occurred to the index since this
-		/// reader was opened.
-		/// 
-		/// <p/>
-		/// If this reader is based on a Directory (ie, was created by calling
-		/// <see>
-		///   <cref>Open(Store.Directory)</cref>
-		/// </see> , or <see cref="Reopen()" /> on a reader based on a Directory), then
-		/// this method checks if any further commits (see <see cref="IndexWriter.Commit()" />
-		/// have occurred in that directory).
-		/// <p/>
-		/// 
-		/// <p/>
-		/// If instead this reader is a near real-time reader (ie, obtained by a call
-		/// to <see cref="IndexWriter.GetReader()" />, or by calling <see cref="Reopen()" /> on a near
-		/// real-time reader), then this method checks if either a new commmit has
-		/// occurred, or any new uncommitted changes have taken place via the writer.
-		/// Note that even if the writer has only performed merging, this method will
-		/// still return false.
-		/// <p/>
-		/// 
-		/// <p/>
-		/// In any event, if this returns false, you should call <see cref="Reopen()" /> to
-		/// get a new reader that sees the changes.
-		/// <p/>
-		/// 
-		/// </summary>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		/// <throws>  UnsupportedOperationException unless overridden in subclass </throws>
-		public virtual bool IsCurrent()
-	    {
-	        throw new NotSupportedException("This reader does not support this method.");
-	    }
+        /// <summary> Check whether any new changes have occurred to the index since this
+        /// reader was opened.
+        /// 
+        /// <p/>
+        /// If this reader is based on a Directory (ie, was created by calling
+        /// <see>
+        ///   <cref>Open(Store.Directory)</cref>
+        /// </see>, or <see cref="Reopen()" /> on a reader based on a Directory), then
+        /// this method checks if any further commits (see <see cref="IndexWriter.Commit()" />)
+        /// have occurred in that directory.
+        /// <p/>
+        /// 
+        /// <p/>
+        /// If instead this reader is a near real-time reader (ie, obtained by a call
+        /// to <see cref="IndexWriter.GetReader()" />, or by calling <see cref="Reopen()" /> on a near
+        /// real-time reader), then this method checks if either a new commit has
+        /// occurred, or any new uncommitted changes have taken place via the writer.
+        /// Note that even if the writer has only performed merging, this method will
+        /// still return false.
+        /// <p/>
+        /// 
+        /// <p/>
+        /// In any event, if this returns false, you should call <see cref="Reopen()" /> to
+        /// get a new reader that sees the changes.
+        /// <p/>
+        /// 
+        /// </summary>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
+        /// <throws>  UnsupportedOperationException unless overridden in subclass </throws>
+        public virtual bool IsCurrent()
+        {
+            throw new NotSupportedException("This reader does not support this method.");
+        }
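
The refresh pattern implied above, as a sketch (reader is an already-open
IndexReader; Reopen returns the same instance when nothing has changed, and
disposing the stale reader is assumed to follow the usual IDisposable
conventions):

    if (!reader.IsCurrent())
    {
        IndexReader newReader = reader.Reopen();
        if (newReader != reader)
        {
            reader.Dispose();     // release the stale reader
            reader = newReader;   // switch to the refreshed view
        }
    }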
 
-	    /// <summary> Checks is the index is optimized (if it has a single segment and 
-	    /// no deletions).  Not implemented in the IndexReader base class.
-	    /// </summary>
-	    /// <returns> &amp;lt;c&amp;gt;true&amp;lt;/c&amp;gt; if the index is optimized; &amp;lt;c&amp;gt;false&amp;lt;/c&amp;gt; otherwise </returns>
-	    /// <throws>  UnsupportedOperationException unless overridden in subclass </throws>
-	    public virtual bool IsOptimized()
-	    {
-	        throw new NotSupportedException("This reader does not support this method.");
-	    }
+        /// <summary> Checks whether the index is optimized (if it has a single segment and 
+        /// no deletions).  Not implemented in the IndexReader base class.
+        /// </summary>
+        /// <returns> <c>true</c> if the index is optimized; <c>false</c> otherwise </returns>
+        /// <throws>  UnsupportedOperationException unless overridden in subclass </throws>
+        public virtual bool IsOptimized()
+        {
+            throw new NotSupportedException("This reader does not support this method.");
+        }
 
-	    /// <summary> Return an array of term frequency vectors for the specified document.
-		/// The array contains a vector for each vectorized field in the document.
-		/// Each vector contains terms and frequencies for all terms in a given vectorized field.
-		/// If no such fields existed, the method returns null. The term vectors that are
-		/// returned may either be of type <see cref="ITermFreqVector" />
-		/// or of type <see cref="TermPositionVector" /> if
-		/// positions or offsets have been stored.
-		/// 
-		/// </summary>
-		/// <param name="docNumber">document for which term frequency vectors are returned
-		/// </param>
-		/// <returns> array of term frequency vectors. May be null if no term vectors have been
-		/// stored for the specified document.
-		/// </returns>
-		/// <throws>  IOException if index cannot be accessed </throws>
-		/// <seealso cref="Lucene.Net.Documents.Field.TermVector">
-		/// </seealso>
-		abstract public ITermFreqVector[] GetTermFreqVectors(int docNumber);
-		
-		
-		/// <summary> Return a term frequency vector for the specified document and field. The
-		/// returned vector contains terms and frequencies for the terms in
-		/// the specified field of this document, if the field had the storeTermVector
-		/// flag set. If termvectors had been stored with positions or offsets, a 
-		/// <see cref="TermPositionVector" /> is returned.
-		/// 
-		/// </summary>
-		/// <param name="docNumber">document for which the term frequency vector is returned
-		/// </param>
-		/// <param name="field">field for which the term frequency vector is returned.
-		/// </param>
-		/// <returns> term frequency vector May be null if field does not exist in the specified
-		/// document or term vector was not stored.
-		/// </returns>
-		/// <throws>  IOException if index cannot be accessed </throws>
-		/// <seealso cref="Lucene.Net.Documents.Field.TermVector">
-		/// </seealso>
-		abstract public ITermFreqVector GetTermFreqVector(int docNumber, String field);
-		
-		/// <summary> Load the Term Vector into a user-defined data structure instead of relying on the parallel arrays of
-		/// the <see cref="ITermFreqVector" />.
-		/// </summary>
-		/// <param name="docNumber">The number of the document to load the vector for
-		/// </param>
-		/// <param name="field">The name of the field to load
-		/// </param>
-		/// <param name="mapper">The <see cref="TermVectorMapper" /> to process the vector.  Must not be null
-		/// </param>
-		/// <throws>  IOException if term vectors cannot be accessed or if they do not exist on the field and doc. specified. </throws>
-		/// <summary> 
-		/// </summary>
-		abstract public void  GetTermFreqVector(int docNumber, String field, TermVectorMapper mapper);
-		
-		/// <summary> Map all the term vectors for all fields in a Document</summary>
-		/// <param name="docNumber">The number of the document to load the vector for
-		/// </param>
-		/// <param name="mapper">The <see cref="TermVectorMapper" /> to process the vector.  Must not be null
-		/// </param>
-		/// <throws>  IOException if term vectors cannot be accessed or if they do not exist on the field and doc. specified. </throws>
-		abstract public void  GetTermFreqVector(int docNumber, TermVectorMapper mapper);
-		
-		/// <summary> Returns <c>true</c> if an index exists at the specified directory.
-		/// If the directory does not exist or if there is no index in it.
-		/// </summary>
-		/// <param name="directory">the directory to check for an index
-		/// </param>
-		/// <returns> <c>true</c> if an index exists; <c>false</c> otherwise
-		/// </returns>
-		/// <throws>  IOException if there is a problem with accessing the index </throws>
-		public static bool IndexExists(Directory directory)
-		{
-			return SegmentInfos.GetCurrentSegmentGeneration(directory) != - 1;
-		}
+        /// <summary> Return an array of term frequency vectors for the specified document.
+        /// The array contains a vector for each vectorized field in the document.
+        /// Each vector contains terms and frequencies for all terms in a given vectorized field.
+        /// If no such fields existed, the method returns null. The term vectors that are
+        /// returned may either be of type <see cref="ITermFreqVector" />
+        /// or of type <see cref="TermPositionVector" /> if
+        /// positions or offsets have been stored.
+        /// 
+        /// </summary>
+        /// <param name="docNumber">document for which term frequency vectors are returned
+        /// </param>
+        /// <returns> array of term frequency vectors. May be null if no term vectors have been
+        /// stored for the specified document.
+        /// </returns>
+        /// <throws>  IOException if index cannot be accessed </throws>
+        /// <seealso cref="Lucene.Net.Documents.Field.TermVector">
+        /// </seealso>
+        abstract public ITermFreqVector[] GetTermFreqVectors(int docNumber);
+        
+        
+        /// <summary> Return a term frequency vector for the specified document and field. The
+        /// returned vector contains terms and frequencies for the terms in
+        /// the specified field of this document, if the field had the storeTermVector
+        /// flag set. If termvectors had been stored with positions or offsets, a 
+        /// <see cref="TermPositionVector" /> is returned.
+        /// 
+        /// </summary>
+        /// <param name="docNumber">document for which the term frequency vector is returned
+        /// </param>
+        /// <param name="field">field for which the term frequency vector is returned.
+        /// </param>
+        /// <returns> term frequency vector. May be null if the field does not exist in the specified
+        /// document or term vector was not stored.
+        /// </returns>
+        /// <throws>  IOException if index cannot be accessed </throws>
+        /// <seealso cref="Lucene.Net.Documents.Field.TermVector">
+        /// </seealso>
+        abstract public ITermFreqVector GetTermFreqVector(int docNumber, String field);
+        
+        /// <summary> Load the Term Vector into a user-defined data structure instead of relying on the parallel arrays of
+        /// the <see cref="ITermFreqVector" />.
+        /// </summary>
+        /// <param name="docNumber">The number of the document to load the vector for
+        /// </param>
+        /// <param name="field">The name of the field to load
+        /// </param>
+        /// <param name="mapper">The <see cref="TermVectorMapper" /> to process the vector.  Must not be null
+        /// </param>
+        /// <throws>  IOException if term vectors cannot be accessed or if they do not exist on the field and doc. specified. </throws>
+        abstract public void  GetTermFreqVector(int docNumber, String field, TermVectorMapper mapper);
+        
+        /// <summary> Map all the term vectors for all fields in a Document</summary>
+        /// <param name="docNumber">The number of the document to load the vector for
+        /// </param>
+        /// <param name="mapper">The <see cref="TermVectorMapper" /> to process the vector.  Must not be null
+        /// </param>
+        /// <throws>  IOException if term vectors cannot be accessed or if they do not exist on the field and doc. specified. </throws>
+        abstract public void  GetTermFreqVector(int docNumber, TermVectorMapper mapper);
+        
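
A sketch of consuming the parallel arrays these methods expose (illustrative
only; "body" is a hypothetical vectorized field, docId a document number, and
GetTerms / GetTermFrequencies the assumed ITermFreqVector accessors):

    ITermFreqVector tfv = reader.GetTermFreqVector(docId, "body");
    if (tfv != null)   // null when no term vector was stored for the field
    {
        System.String[] terms = tfv.GetTerms();
        int[] freqs = tfv.GetTermFrequencies();
        for (int i = 0; i < terms.Length; i++)
            System.Console.WriteLine(terms[i] + ": " + freqs[i]);
    }
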
+        /// <summary> Returns <c>true</c> if an index exists at the specified directory,
+        /// and <c>false</c> if the directory does not exist or there is no index in it.
+        /// </summary>
+        /// <param name="directory">the directory to check for an index
+        /// </param>
+        /// <returns> <c>true</c> if an index exists; <c>false</c> otherwise
+        /// </returns>
+        /// <throws>  IOException if there is a problem with accessing the index </throws>
+        public static bool IndexExists(Directory directory)
+        {
+            return SegmentInfos.GetCurrentSegmentGeneration(directory) != -1;
+        }
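
For example (a sketch; the read-only Open overload is the one cross-referenced
above):

    if (IndexReader.IndexExists(dir))
    {
        IndexReader reader = IndexReader.Open(dir, true);   // true = read-only
        // ... search against the reader, then dispose it ...
    }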
 
-	    /// <summary>Returns the number of documents in this index. </summary>
+        /// <summary>Returns the number of documents in this index. </summary>
         [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1024:UsePropertiesWhereAppropriate")]
         public abstract int NumDocs();
 
-	    /// <summary>Returns one greater than the largest possible document number.
-	    /// This may be used to, e.g., determine how big to allocate an array which
-	    /// will have an element for every document number in an index.
-	    /// </summary>
-	    public abstract int MaxDoc { get; }
+        /// <summary>Returns one greater than the largest possible document number.
+        /// This may be used to, e.g., determine how big to allocate an array which
+        /// will have an element for every document number in an index.
+        /// </summary>
+        public abstract int MaxDoc { get; }
 
-	    /// <summary>Returns the number of deleted documents. </summary>
-	    public virtual int NumDeletedDocs
-	    {
-	        get { return MaxDoc - NumDocs(); }
-	    }
+        /// <summary>Returns the number of deleted documents. </summary>
+        public virtual int NumDeletedDocs
+        {
+            get { return MaxDoc - NumDocs(); }
+        }
 
-	    /// <summary> Returns the stored fields of the <c>n</c><sup>th</sup>
-		/// <c>Document</c> in this index.
-		/// <p/>
-		/// <b>NOTE:</b> for performance reasons, this method does not check if the
-		/// requested document is deleted, and therefore asking for a deleted document
-		/// may yield unspecified results. Usually this is not required, however you
-		/// can call <see cref="IsDeleted(int)" /> with the requested document ID to verify
-		/// the document is not deleted.
-		/// 
-		/// </summary>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		public virtual Document Document(int n)
-		{
-			EnsureOpen();
-			return Document(n, null);
-		}
+        /// <summary> Returns the stored fields of the <c>n</c><sup>th</sup>
+        /// <c>Document</c> in this index.
+        /// <p/>
+        /// <b>NOTE:</b> for performance reasons, this method does not check if the
+        /// requested document is deleted, and therefore asking for a deleted document
+        /// may yield unspecified results. Usually this is not required; however, you
+        /// can call <see cref="IsDeleted(int)" /> with the requested document ID to verify
+        /// the document is not deleted.
+        /// 
+        /// </summary>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
+        public virtual Document Document(int n)
+        {
+            EnsureOpen();
+            return Document(n, null);
+        }
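
A sketch of the IsDeleted check recommended above when scanning documents by
number ("title" is a hypothetical stored field):

    for (int n = 0; n < reader.MaxDoc; n++)
    {
        if (reader.IsDeleted(n))
            continue;                         // skip deleted slots
        Document d = reader.Document(n);
        System.String title = d.Get("title");
    }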
 
         /// <summary> Returns the stored fields of the <c>n</c><sup>th</sup>
         /// <c>Document</c> in this index.
@@ -744,119 +744,119 @@ namespace Lucene.Net.Index
         /// </summary>
         /// <throws>  CorruptIndexException if the index is corrupt </throws>
         /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-	    public Document this[int doc]
-	    {
-	        get { return Document(doc); }
-	    }
-		
-		/// <summary> Get the <see cref="Lucene.Net.Documents.Document" /> at the <c>n</c>
-		/// <sup>th</sup> position. The <see cref="FieldSelector" /> may be used to determine
-		/// what <see cref="Lucene.Net.Documents.Field" />s to load and how they should
-		/// be loaded. <b>NOTE:</b> If this Reader (more specifically, the underlying
-		/// <c>FieldsReader</c>) is closed before the lazy
-		/// <see cref="Lucene.Net.Documents.Field" /> is loaded an exception may be
-		/// thrown. If you want the value of a lazy
-		/// <see cref="Lucene.Net.Documents.Field" /> to be available after closing you
-		/// must explicitly load it or fetch the Document again with a new loader.
-		/// <p/>
-		/// <b>NOTE:</b> for performance reasons, this method does not check if the
-		/// requested document is deleted, and therefore asking for a deleted document
-		/// may yield unspecified results. Usually this is not required, however you
-		/// can call <see cref="IsDeleted(int)" /> with the requested document ID to verify
-		/// the document is not deleted.
-		/// 
-		/// </summary>
-		/// <param name="n">Get the document at the <c>n</c><sup>th</sup> position
-		/// </param>
-		/// <param name="fieldSelector">The <see cref="FieldSelector" /> to use to determine what
-		/// Fields should be loaded on the Document. May be null, in which case
-		/// all Fields will be loaded.
-		/// </param>
-		/// <returns> The stored fields of the
-		/// <see cref="Lucene.Net.Documents.Document" /> at the nth position
-		/// </returns>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		/// <seealso cref="IFieldable">
-		/// </seealso>
-		/// <seealso cref="Lucene.Net.Documents.FieldSelector">
-		/// </seealso>
-		/// <seealso cref="Lucene.Net.Documents.SetBasedFieldSelector">
-		/// </seealso>
-		/// <seealso cref="Lucene.Net.Documents.LoadFirstFieldSelector">
-		/// </seealso>
-		// TODO (1.5): When we convert to JDK 1.5 make this Set<String>
-		public abstract Document Document(int n, FieldSelector fieldSelector);
-		
-		/// <summary>Returns true if document <i>n</i> has been deleted </summary>
-		public abstract bool IsDeleted(int n);
+        public Document this[int doc]
+        {
+            get { return Document(doc); }
+        }
+        
+        /// <summary> Get the <see cref="Lucene.Net.Documents.Document" /> at the <c>n</c>
+        /// <sup>th</sup> position. The <see cref="FieldSelector" /> may be used to determine
+        /// what <see cref="Lucene.Net.Documents.Field" />s to load and how they should
+        /// be loaded. <b>NOTE:</b> If this Reader (more specifically, the underlying
+        /// <c>FieldsReader</c>) is closed before the lazy
+        /// <see cref="Lucene.Net.Documents.Field" /> is loaded an exception may be
+        /// thrown. If you want the value of a lazy
+        /// <see cref="Lucene.Net.Documents.Field" /> to be available after closing you
+        /// must explicitly load it or fetch the Document again with a new loader.
+        /// <p/>
+        /// <b>NOTE:</b> for performance reasons, this method does not check if the
+        /// requested document is deleted, and therefore asking for a deleted document
+        /// may yield unspecified results. Usually this is not required, however you
+        /// can call <see cref="IsDeleted(int)" /> with the requested document ID to verify
+        /// the document is not deleted.
+        /// 
+        /// </summary>
+        /// <param name="n">Get the document at the <c>n</c><sup>th</sup> position
+        /// </param>
+        /// <param name="fieldSelector">The <see cref="FieldSelector" /> to use to determine what
+        /// Fields should be loaded on the Document. May be null, in which case
+        /// all Fields will be loaded.
+        /// </param>
+        /// <returns> The stored fields of the
+        /// <see cref="Lucene.Net.Documents.Document" /> at the nth position
+        /// </returns>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
+        /// <seealso cref="IFieldable">
+        /// </seealso>
+        /// <seealso cref="Lucene.Net.Documents.FieldSelector">
+        /// </seealso>
+        /// <seealso cref="Lucene.Net.Documents.SetBasedFieldSelector">
+        /// </seealso>
+        /// <seealso cref="Lucene.Net.Documents.LoadFirstFieldSelector">
+        /// </seealso>
+        // TODO (1.5): When we convert to JDK 1.5 make this Set<String>
+        public abstract Document Document(int n, FieldSelector fieldSelector);
+        
+        /// <summary>Returns true if document <i>n</i> has been deleted </summary>
+        public abstract bool IsDeleted(int n);
 
-	    /// <summary>Returns true if any documents have been deleted </summary>
-	    public abstract bool HasDeletions { get; }
+        /// <summary>Returns true if any documents have been deleted </summary>
+        public abstract bool HasDeletions { get; }
 
-	    /// <summary>Returns true if there are norms stored for this field. </summary>
-		public virtual bool HasNorms(System.String field)
-		{
-			// backward compatible implementation.
-			// SegmentReader has an efficient implementation.
-			EnsureOpen();
-			return Norms(field) != null;
-		}
+        /// <summary>Returns true if there are norms stored for this field. </summary>
+        public virtual bool HasNorms(System.String field)
+        {
+            // backward compatible implementation.
+            // SegmentReader has an efficient implementation.
+            EnsureOpen();
+            return Norms(field) != null;
+        }
 
-		/// <summary>
-		/// Returns the byte-encoded normalization factor for the named field of
-		/// every document.  This is used by the search code to score documents.
-		/// </summary>
+        /// <summary>
+        /// Returns the byte-encoded normalization factor for the named field of
+        /// every document.  This is used by the search code to score documents.
+        /// </summary>
+        /// <seealso cref="Lucene.Net.Documents.AbstractField.Boost" />
+        public abstract byte[] Norms(System.String field);
+        
+        /// <summary>
+        /// Reads the byte-encoded normalization factor for the named field of every
+        /// document.  This is used by the search code to score documents.
+        /// </summary>
         /// <seealso cref="Lucene.Net.Documents.AbstractField.Boost" />
-		public abstract byte[] Norms(System.String field);
-		
-		/// <summary>
-		/// Reads the byte-encoded normalization factor for the named field of every
-		/// document.  This is used by the search code to score documents.
-		/// </summary>
-		/// <seealso cref="Lucene.Net.Documents.AbstractField.Boost" />
-		public abstract void  Norms(System.String field, byte[] bytes, int offset);
-		
-		/// <summary>Expert: Resets the normalization factor for the named field of the named
-		/// document.  The norm represents the product of the field's <see cref="IFieldable.Boost">boost</see>
+        public abstract void  Norms(System.String field, byte[] bytes, int offset);
+        
+        /// <summary>Expert: Resets the normalization factor for the named field of the named
+        /// document.  The norm represents the product of the field's <see cref="IFieldable.Boost">boost</see>
         /// and its <see cref="Similarity.LengthNorm(String,int)">length normalization</see>.  Thus, to preserve the length normalization
-		/// values when resetting this, one should base the new value upon the old.
-		/// 
-		/// <b>NOTE:</b> If this field does not store norms, then
-		/// this method call will silently do nothing.
-		/// </summary>
-		/// <seealso cref="Norms(String)" />
-		/// <seealso cref="Similarity.DecodeNorm(byte)" />
-		/// <exception cref="StaleReaderException">
+        /// values when resetting this, one should base the new value upon the old.
+        /// 
+        /// <b>NOTE:</b> If this field does not store norms, then
+        /// this method call will silently do nothing.
+        /// </summary>
+        /// <seealso cref="Norms(String)" />
+        /// <seealso cref="Similarity.DecodeNorm(byte)" />
+        /// <exception cref="StaleReaderException">
         /// If the index has changed since this reader was opened
-		/// </exception>
+        /// </exception>
         /// <exception cref="CorruptIndexException">
         /// If the index is corrupt
-		/// </exception>
-		/// <exception cref="LockObtainFailedException">
+        /// </exception>
+        /// <exception cref="LockObtainFailedException">
         /// If another writer has this index open (<c>write.lock</c> could not be obtained)
-		/// </exception>
-		/// <exception cref="System.IO.IOException">
+        /// </exception>
+        /// <exception cref="System.IO.IOException">
         /// If there is a low-level IO error
-		/// </exception>
-		public virtual void  SetNorm(int doc, String field, byte value)
-		{
-			lock (this)
-			{
-				EnsureOpen();
-				AcquireWriteLock();
-				hasChanges = true;
-				DoSetNorm(doc, field, value);
-			}
-		}
-		
-		/// <summary>Implements setNorm in subclass.</summary>
-		protected internal abstract void  DoSetNorm(int doc, System.String field, byte value_Renamed);
-		
-		/// <summary>
-		/// Expert: Resets the normalization factor for the named field of the named document.
-		/// </summary>
-		/// <seealso cref="Norms(String)" />
+        /// </exception>
+        public virtual void  SetNorm(int doc, String field, byte value)
+        {
+            lock (this)
+            {
+                EnsureOpen();
+                AcquireWriteLock();
+                hasChanges = true;
+                DoSetNorm(doc, field, value);
+            }
+        }
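
The "base the new value upon the old" advice, sketched with the Norms and
DecodeNorm helpers cross-referenced above (the 2.0f boost is illustrative):

    byte[] norms = reader.Norms("body");
    if (norms != null)   // the field may omit norms entirely
    {
        float old = Similarity.DecodeNorm(norms[docId]);
        reader.SetNorm(docId, "body", old * 2.0f);   // the float overload re-encodes
    }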
+        
+        /// <summary>Implements setNorm in subclass.</summary>
+        protected internal abstract void  DoSetNorm(int doc, System.String field, byte value_Renamed);
+        
+        /// <summary>
+        /// Expert: Resets the normalization factor for the named field of the named document.
+        /// </summary>
+        /// <seealso cref="Norms(String)" />
         /// <seealso cref="Similarity.DecodeNorm(byte)" />
         /// <exception cref="StaleReaderException">
         /// If the index has changed since this reader was opened
@@ -870,139 +870,139 @@ namespace Lucene.Net.Index
         /// <exception cref="System.IO.IOException">
         /// If there is a low-level IO error
         /// </exception>
-		public virtual void  SetNorm(int doc, System.String field, float value)
-		{
-			EnsureOpen();
-			SetNorm(doc, field, Similarity.EncodeNorm(value));
-		}
-		
-		/// <summary>Returns an enumeration of all the terms in the index. The
-		/// enumeration is ordered by Term.compareTo(). Each term is greater
-		/// than all that precede it in the enumeration. Note that after
-		/// calling terms(), <see cref="TermEnum.Next()" /> must be called
-		/// on the resulting enumeration before calling other methods such as
-		/// <see cref="TermEnum.Term" />.
-		/// </summary>
-		/// <exception cref="System.IO.IOException">
+        public virtual void  SetNorm(int doc, System.String field, float value)
+        {
+            EnsureOpen();
+            SetNorm(doc, field, Similarity.EncodeNorm(value));
+        }
+        
+        /// <summary>Returns an enumeration of all the terms in the index. The
+        /// enumeration is ordered by Term.CompareTo(). Each term is greater
+        /// than all that precede it in the enumeration. Note that after
+        /// calling Terms(), <see cref="TermEnum.Next()" /> must be called
+        /// on the resulting enumeration before calling other methods such as
+        /// <see cref="TermEnum.Term" />.
+        /// </summary>
+        /// <exception cref="System.IO.IOException">
         /// If there is a low-level IO error 
-		/// </exception>
-		public abstract TermEnum Terms();
-		
-		/// <summary>Returns an enumeration of all terms starting at a given term. If
-		/// the given term does not exist, the enumeration is positioned at the
-		/// first term greater than the supplied term. The enumeration is
-		/// ordered by Term.compareTo(). Each term is greater than all that
-		/// precede it in the enumeration.
+        /// </exception>
+        public abstract TermEnum Terms();
+        
+        /// <summary>Returns an enumeration of all terms starting at a given term. If
+        /// the given term does not exist, the enumeration is positioned at the
+        /// first term greater than the supplied term. The enumeration is
+        /// ordered by Term.CompareTo(). Each term is greater than all that
+        /// precede it in the enumeration.
         /// </summary>
         /// <exception cref="System.IO.IOException">
         /// If there is a low-level IO error
         /// </exception>
-		public abstract TermEnum Terms(Term t);
+        public abstract TermEnum Terms(Term t);
 
         /// <summary>Returns the number of documents containing the term <c>t</c>.</summary>
         /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		public abstract int DocFreq(Term t);
-		
-		/// <summary>Returns an enumeration of all the documents which contain
-		/// <c>term</c>. For each document, the document number, the frequency of
-		/// the term in that document is also provided, for use in
-		/// search scoring.  If term is null, then all non-deleted
-		/// docs are returned with freq=1.
-		/// Thus, this method implements the mapping:
-		/// <p/><list>
-		/// Term &#160;&#160; =&gt; &#160;&#160; &lt;docNum, freq&gt;<sup>*</sup>
-		/// </list>
-		/// <p/>The enumeration is ordered by document number.  Each document number
-		/// is greater than all that precede it in the enumeration.
+        public abstract int DocFreq(Term t);
+        
+        /// <summary>Returns an enumeration of all the documents which contain
+        /// <c>term</c>. For each document, the document number, the frequency of
+        /// the term in that document is also provided, for use in
+        /// search scoring.  If term is null, then all non-deleted
+        /// docs are returned with freq=1.
+        /// Thus, this method implements the mapping:
+        /// <p/><list>
+        /// Term &#160;&#160; =&gt; &#160;&#160; &lt;docNum, freq&gt;<sup>*</sup>
+        /// </list>
+        /// <p/>The enumeration is ordered by document number.  Each document number
+        /// is greater than all that precede it in the enumeration.
         /// </summary>
         /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		public virtual TermDocs TermDocs(Term term)
-		{
-			EnsureOpen();
-			TermDocs termDocs = TermDocs();
-			termDocs.Seek(term);
-			return termDocs;
-		}
+        public virtual TermDocs TermDocs(Term term)
+        {
+            EnsureOpen();
+            TermDocs termDocs = TermDocs();
+            termDocs.Seek(term);
+            return termDocs;
+        }
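
Iterating the <docNum, freq> pairs described above, as a sketch (Doc and Freq
are assumed to be the property forms of the enumerator):

    TermDocs td = reader.TermDocs(new Term("body", "lucene"));
    while (td.Next())
    {
        int docNum = td.Doc;   // ascending document numbers
        int freq = td.Freq;    // occurrences of the term in that document
    }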
 
         /// <summary>Returns an unpositioned <see cref="Lucene.Net.Index.TermDocs" /> enumerator.</summary>
         /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		public abstract TermDocs TermDocs();
-		
-		/// <summary>Returns an enumeration of all the documents which contain
-		/// <c>term</c>.  For each document, in addition to the document number
-		/// and frequency of the term in that document, a list of all of the ordinal
-		/// positions of the term in the document is available.  Thus, this method
-		/// implements the mapping:
-		/// 
-		/// <p/><list>
-		/// Term &#160;&#160; =&gt; &#160;&#160; &lt;docNum, freq,
-		/// &lt;pos<sub>1</sub>, pos<sub>2</sub>, ...
-		/// pos<sub>freq-1</sub>&gt;
-		/// &gt;<sup>*</sup>
-		/// </list>
-		/// <p/> This positional information facilitates phrase and proximity searching.
-		/// <p/>The enumeration is ordered by document number.  Each document number is
-		/// greater than all that precede it in the enumeration.
+        public abstract TermDocs TermDocs();
+        
+        /// <summary>Returns an enumeration of all the documents which contain
+        /// <c>term</c>.  For each document, in addition to the document number
+        /// and frequency of the term in that document, a list of all of the ordinal
+        /// positions of the term in the document is available.  Thus, this method
+        /// implements the mapping:
+        /// 
+        /// <p/><list>
+        /// Term &#160;&#160; =&gt; &#160;&#160; &lt;docNum, freq,
+        /// &lt;pos<sub>1</sub>, pos<sub>2</sub>, ...
+        /// pos<sub>freq-1</sub>&gt;
+        /// &gt;<sup>*</sup>
+        /// </list>
+        /// <p/> This positional information facilitates phrase and proximity searching.
+        /// <p/>The enumeration is ordered by document number.  Each document number is
+        /// greater than all that precede it in the enumeration.
         /// </summary>
         /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		public virtual TermPositions TermPositions(Term term)
-		{
-			EnsureOpen();
-			TermPositions termPositions = TermPositions();
-			termPositions.Seek(term);
-			return termPositions;
-		}
+        public virtual TermPositions TermPositions(Term term)
+        {
+            EnsureOpen();
+            TermPositions termPositions = TermPositions();
+            termPositions.Seek(term);
+            return termPositions;
+        }
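
And the positional variant, again as a sketch: for each document, Freq
positions are pulled with NextPosition():

    TermPositions tp = reader.TermPositions(new Term("body", "lucene"));
    while (tp.Next())
    {
        for (int i = 0; i < tp.Freq; i++)
        {
            int pos = tp.NextPosition();   // ordinal position within the document
        }
    }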
 
         /// <summary>Returns an unpositioned <see cref="Lucene.Net.Index.TermPositions" /> enumerator.</summary>
         /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		public abstract TermPositions TermPositions();
-		
-		
-		
-		/// <summary>
-		/// Deletes the document numbered <c>docNum</c>.  Once a document is
-		/// deleted it will not appear in TermDocs or TermPostitions enumerations.
-		/// Attempts to read its field with the <see cref="Document(int)" />
-		/// method will result in an error.  The presence of this document may still be
-		/// reflected in the <see cref="DocFreq" /> statistic, though
-		/// this will be corrected eventually as the index is further modified.
-		/// </summary>
-		/// <exception cref="StaleReaderException">
+        public abstract TermPositions TermPositions();
+        
+        
+        
+        /// <summary>
+        /// Deletes the document numbered <c>docNum</c>.  Once a document is
+        /// deleted it will not appear in TermDocs or TermPositions enumerations.
+        /// Attempts to read its fields with the <see cref="Document(int)" />
+        /// method will result in an error.  The presence of this document may still be
+        /// reflected in the <see cref="DocFreq" /> statistic, though
+        /// this will be corrected eventually as the index is further modified.
+        /// </summary>
+        /// <exception cref="StaleReaderException">
         /// If the index has changed since this reader was opened
-		/// </exception>
-		/// <exception cref="CorruptIndexException">If the index is corrupt</exception>
-		/// <exception cref="LockObtainFailedException">
+        /// </exception>
+        /// <exception cref="CorruptIndexException">If the index is corrupt</exception>
+        /// <exception cref="LockObtainFailedException">
         /// If another writer has this index open (<c>write.lock</c> could not be obtained)
         /// </exception>
         /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
-		public virtual void  DeleteDocument(int docNum)
-		{
-			lock (this)
-		

<TRUNCATED>

[48/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/DistributedSearch/Distributed/Indexing/IndexSets.cs
----------------------------------------------------------------------
diff --git a/src/contrib/DistributedSearch/Distributed/Indexing/IndexSets.cs b/src/contrib/DistributedSearch/Distributed/Indexing/IndexSets.cs
index bf56282..e0403d8 100644
--- a/src/contrib/DistributedSearch/Distributed/Indexing/IndexSets.cs
+++ b/src/contrib/DistributedSearch/Distributed/Indexing/IndexSets.cs
@@ -48,26 +48,26 @@ namespace Lucene.Net.Distributed.Indexing
     /// </code>
     /// </summary>
     public class IndexSets
-	{
-		private bool _bCompoundFile;
-		private string _strDeltaDirectory;
-		private IndexSet[] _arIndexSet;
+    {
+        private bool _bCompoundFile;
+        private string _strDeltaDirectory;
+        private IndexSet[] _arIndexSet;
 
         /// <summary>
         /// Accessor method for the configurable master indexes.
         /// </summary>
         public static IndexSets GetConfig
-		{
+        {
             get { return (IndexSets)ConfigurationManager.GetSection("IndexSets"); }
-		}
+        }
 
         /// <summary>
         /// Strongly-typed array of IndexSet objects as defined in a configuration section.
         /// </summary>
         /// <param name="node">XmlNode definition for a given IndexSet</param>
         public void LoadIndexSetArray(XmlNode node)
-		{
-			XmlAttributeCollection attributeCollection = node.Attributes;
+        {
+            XmlAttributeCollection attributeCollection = node.Attributes;
 
             try
             {
@@ -91,66 +91,66 @@ namespace Lucene.Net.Distributed.Indexing
                 throw new ConfigurationErrorsException("No indexset definitions found " + Environment.NewLine + node.OuterXml);
             this._arIndexSet = new IndexSet[node.ChildNodes.Count];
             
-			int x=0;
-			foreach (XmlNode c in node.ChildNodes)
-			{
-				if (c.Name.ToLower()=="IndexSet")
-				{
-					IndexSet idxSet = new IndexSet(c);
-					this._arIndexSet[x] = idxSet;
-					x++;
-				}
+            int x=0;
+            foreach (XmlNode c in node.ChildNodes)
+            {
+                if (c.Name.ToLower()=="indexset")   // compare against a lower-case literal; ToLower() can never yield "IndexSet"
+                {
+                    IndexSet idxSet = new IndexSet(c);
+                    this._arIndexSet[x] = idxSet;
+                    x++;
+                }
 
-			}
-		}
+            }
+        }
 
         /// <summary>
         /// Public constructor for IndexSets. An IndexSet is defined in XML configuration 
         /// and is loaded via a custom configuration handler.
         /// </summary>
         public IndexSets()
-		{
-		}
+        {
+        }
 
         /// <summary>
         /// Indicates if the indexes under configuration should be built in the Compound format.
         /// </summary>
-		public bool CompoundFile
-		{
-			get {return this._bCompoundFile;}
-		}
+        public bool CompoundFile
+        {
+            get {return this._bCompoundFile;}
+        }
 
         /// <summary>
         /// Filesystem location of where pending update IndexDocuments are retrieved.
         /// </summary>
-		public string DeltaDirectory
-		{
-			get {return this._strDeltaDirectory;}
-		}
+        public string DeltaDirectory
+        {
+            get {return this._strDeltaDirectory;}
+        }
 
         /// <summary>
         /// Strongly-typed array of IndexSet objects as defined in a configuration section.
         /// </summary>
         public IndexSet[] IndexSetArray
-		{
-			get {return this._arIndexSet;}
-		}
+        {
+            get {return this._arIndexSet;}
+        }
 
         /// <summary>
         /// Returns an IndexSet object for a given IndexDocument id value
         /// </summary>
         /// <param name="deleteId">Id value of the IndexDocument</param>
         /// <returns>The IndexSet containing the referenced IndexDocument</returns>
-		public IndexSet GetIndexSet(int deleteId)
-		{
-			IndexSet getSet=null;
-			foreach(IndexSet idxSet in this._arIndexSet)
-			{
-				if ((deleteId>=idxSet.BottomId)&&(deleteId<=idxSet.TopId))
-					getSet=idxSet;
-			}
-			return getSet;
-		}
+        public IndexSet GetIndexSet(int deleteId)
+        {
+            IndexSet getSet=null;
+            foreach(IndexSet idxSet in this._arIndexSet)
+            {
+                if ((deleteId>=idxSet.BottomId)&&(deleteId<=idxSet.TopId))
+                    getSet=idxSet;
+            }
+            return getSet;
+        }
 
         /// <summary>
         /// Queries the DeltaDirectory to access any IndexDocument files.  All IndexDocuments
@@ -159,112 +159,112 @@ namespace Lucene.Net.Distributed.Indexing
         /// IndexSet.
         /// </summary>
         /// <param name="sourceDir">Filesystem path to the DeltaDirectory</param>
-		public void LoadIndexDocuments(string sourceDir)
-		{
-			DirectoryInfo oDirectoryInfo = new DirectoryInfo(sourceDir);
-			FileInfo[] arFiles = oDirectoryInfo.GetFiles("*.bin");
-			Array.Sort(arFiles, new FileNameComparer());
-			IndexSet idxSet;
+        public void LoadIndexDocuments(string sourceDir)
+        {
+            DirectoryInfo oDirectoryInfo = new DirectoryInfo(sourceDir);
+            FileInfo[] arFiles = oDirectoryInfo.GetFiles("*.bin");
+            Array.Sort(arFiles, new FileNameComparer());
+            IndexSet idxSet;
 
-			foreach (FileInfo fi in arFiles)
-			{
-				FileStream fs = new FileStream(fi.FullName, FileMode.Open);
-				IndexDocument iDoc = (IndexDocument)IndexDocument.Formatter.Deserialize(fs);
+            foreach (FileInfo fi in arFiles)
+            {
+                FileStream fs = new FileStream(fi.FullName, FileMode.Open);
+                IndexDocument iDoc = (IndexDocument)IndexDocument.Formatter.Deserialize(fs);
 
-				idxSet = this.GetIndexSet(iDoc.RecordId);
-				if (idxSet != null)
-				{
-					idxSet.FileSystemDocuments.Add(fi.FullName);
-					if (idxSet.IndexDocuments.ContainsKey(iDoc.RecordId))
-					{
-						IndexDocument curDoc = (IndexDocument)idxSet.IndexDocuments[iDoc.RecordId];
-						idxSet.IndexDocuments.Add(iDoc.RecordId, iDoc);
-					}
-					else
-						idxSet.IndexDocuments.Add(iDoc.RecordId, iDoc);
-				}
-				else
-				{
-					//Handling exceptions -- write file out somewhere else?
-					if (ConfigurationManager.AppSettings["ExceptionsBasePath"] != null)
-						iDoc.Save(ConfigurationManager.AppSettings["ExceptionsBasePath"]);
-				}
-				fs.Close();
-			}
-			oDirectoryInfo=null;
-			arFiles=null;
-		}
+                idxSet = this.GetIndexSet(iDoc.RecordId);
+                if (idxSet != null)
+                {
+                    idxSet.FileSystemDocuments.Add(fi.FullName);
+                    if (idxSet.IndexDocuments.ContainsKey(iDoc.RecordId))
+                    {
+                        // Replace the existing entry: a second Add with the same key
+                        // would throw, and the newer document should win.
+                        idxSet.IndexDocuments[iDoc.RecordId] = iDoc;
+                    }
+                    else
+                        idxSet.IndexDocuments.Add(iDoc.RecordId, iDoc);
+                }
+                else
+                {
+                    //Handling exceptions -- write file out somewhere else?
+                    if (ConfigurationManager.AppSettings["ExceptionsBasePath"] != null)
+                        iDoc.Save(ConfigurationManager.AppSettings["ExceptionsBasePath"]);
+                }
+                fs.Close();
+            }
+            oDirectoryInfo=null;
+            arFiles=null;
+        }
 
         /// <summary>
         /// Method to apply pending updates (additions &amp; deletions) for all configured IndexSet objects.
         /// </summary>
-		public void ProcessIndexDocuments()
-		{
-			foreach(IndexSet idxSet in this._arIndexSet)
-			{
-				if (idxSet.IndexDocuments.Count>0)
-				{
-					idxSet.CurrentIndex.ProcessLocalIndexDeletes(idxSet.GetDeletionCollection());
-					idxSet.CurrentIndex.ProcessLocalIndexAdditions(idxSet.Analzyer, idxSet.Documents, this.CompoundFile);
-				}
-			}
-		}
+        public void ProcessIndexDocuments()
+        {
+            foreach(IndexSet idxSet in this._arIndexSet)
+            {
+                if (idxSet.IndexDocuments.Count>0)
+                {
+                    idxSet.CurrentIndex.ProcessLocalIndexDeletes(idxSet.GetDeletionCollection());
+                    idxSet.CurrentIndex.ProcessLocalIndexAdditions(idxSet.Analzyer, idxSet.Documents, this.CompoundFile);
+                }
+            }
+        }
 
         /// <summary>
         /// Method to apply updated index files from master index to slave indexes
         /// </summary>
-		public void CopyUpdatedFiles()
-		{
-			Hashtable htUpdates = new Hashtable();
-			bool bCopy=false;
+        public void CopyUpdatedFiles()
+        {
+            Hashtable htUpdates = new Hashtable();
+            bool bCopy=false;
             foreach (IndexSet idxSet in this._arIndexSet)
-			{
-				bCopy=false;
-				if (idxSet.CurrentIndex!=null && idxSet.CurrentIndex.CanCopy)
-					bCopy=idxSet.CurrentIndex.CopyIncremental();
-				if (bCopy && !htUpdates.ContainsKey(idxSet.CurrentIndex.StatusDirectory))
-					htUpdates.Add(idxSet.CurrentIndex.StatusDirectory, idxSet.CurrentIndex);
-			}
+            {
+                bCopy=false;
+                if (idxSet.CurrentIndex!=null && idxSet.CurrentIndex.CanCopy)
+                    bCopy=idxSet.CurrentIndex.CopyIncremental();
+                if (bCopy && !htUpdates.ContainsKey(idxSet.CurrentIndex.StatusDirectory))
+                    htUpdates.Add(idxSet.CurrentIndex.StatusDirectory, idxSet.CurrentIndex);
+            }
 
-			foreach(DictionaryEntry de in htUpdates)
-			{
-				string sTargetDir = de.Key.ToString();
-				CurrentIndex ci = (CurrentIndex)de.Value;
-				ci.UpdateRefresh();
-			}
-		}
+            foreach(DictionaryEntry de in htUpdates)
+            {
+                string sTargetDir = de.Key.ToString();
+                CurrentIndex ci = (CurrentIndex)de.Value;
+                ci.UpdateRefresh();
+            }
+        }
 
         /// <summary>
         /// Method to apply updated index files from master index to slave indexes
         /// </summary>
         /// <returns>Hashtable of updated indexes</returns>
-		public Hashtable CopyAllFiles()
-		{
-			Hashtable htUpdates = new Hashtable();
-			bool bCopy = false;
+        public Hashtable CopyAllFiles()
+        {
+            Hashtable htUpdates = new Hashtable();
+            bool bCopy = false;
             foreach (IndexSet idxSet in this._arIndexSet)
-			{
-				bCopy = false;
-				if (idxSet.CurrentIndex != null && idxSet.CurrentIndex.CanCopy)
-					bCopy = idxSet.CurrentIndex.Copy();
-				if (bCopy && !htUpdates.ContainsKey(idxSet.CurrentIndex.StatusDirectory))
-					htUpdates.Add(idxSet.CurrentIndex.StatusDirectory, idxSet.CurrentIndex);
-			}
+            {
+                bCopy = false;
+                if (idxSet.CurrentIndex != null && idxSet.CurrentIndex.CanCopy)
+                    bCopy = idxSet.CurrentIndex.Copy();
+                if (bCopy && !htUpdates.ContainsKey(idxSet.CurrentIndex.StatusDirectory))
+                    htUpdates.Add(idxSet.CurrentIndex.StatusDirectory, idxSet.CurrentIndex);
+            }
 
-			foreach (DictionaryEntry de in htUpdates)
-			{
-				string sTargetDir = de.Key.ToString();
-				CurrentIndex ci = (CurrentIndex)de.Value;
-				ci.UpdateRefresh();
-			}
+            foreach (DictionaryEntry de in htUpdates)
+            {
+                string sTargetDir = de.Key.ToString();
+                CurrentIndex ci = (CurrentIndex)de.Value;
+                ci.UpdateRefresh();
+            }
 
-			return htUpdates;
-		}
+            return htUpdates;
+        }
 
         /// <summary>
         /// Method to execute an index optimization for each configured IndexSet object
         /// </summary>
-		public void OptimizeIndexes()
+        public void OptimizeIndexes()
         {
             foreach (IndexSet idxSet in this._arIndexSet)
                 idxSet.Optimize();
@@ -274,20 +274,20 @@ namespace Lucene.Net.Distributed.Indexing
         /// Method to finalize update process for each IndexSet object
         /// </summary>
         public void CompleteUpdate()
-		{
+        {
             foreach (IndexSet idxSet in this._arIndexSet)
-			{
-				if (idxSet.FileSystemDocuments.Count>0)
-				{
-					foreach(string s in idxSet.FileSystemDocuments)
-					{
-						FileInfo fi = new FileInfo(s);
-						fi.Delete();
-					}
-					idxSet.Reset();
-				}
-			}
-		}
+            {
+                if (idxSet.FileSystemDocuments.Count>0)
+                {
+                    foreach(string s in idxSet.FileSystemDocuments)
+                    {
+                        FileInfo fi = new FileInfo(s);
+                        fi.Delete();
+                    }
+                    idxSet.Reset();
+                }
+            }
+        }
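
Taken together, the methods above form the master-side update cycle; a sketch
using only members defined in this class:

    IndexSets sets = IndexSets.GetConfig;
    sets.LoadIndexDocuments(sets.DeltaDirectory);   // pull pending *.bin documents
    sets.ProcessIndexDocuments();                   // apply deletes, then additions
    sets.CopyUpdatedFiles();                        // push incremental files to slaves
    sets.OptimizeIndexes();                         // optional per-set optimization
    sets.CompleteUpdate();                          // delete processed files and reset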
 
-	}
+    }
 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/DistributedSearch/Distributed/Search/DistributedSearchable.cs
----------------------------------------------------------------------
diff --git a/src/contrib/DistributedSearch/Distributed/Search/DistributedSearchable.cs b/src/contrib/DistributedSearch/Distributed/Search/DistributedSearchable.cs
index 87d7665..3488e26 100644
--- a/src/contrib/DistributedSearch/Distributed/Search/DistributedSearchable.cs
+++ b/src/contrib/DistributedSearch/Distributed/Search/DistributedSearchable.cs
@@ -53,19 +53,19 @@ namespace Lucene.Net.Distributed.Search
         public override object InitializeLifetimeService()
         {
             DistributedSearchable.leaseTimeSpan = new TimeSpan(0, 0, DistributedSearchable.initialLeaseTime + DistributedSearchable.TIME_FACTOR);
-			if (DistributedSearchable.initialLeaseTime == -1)
-			{
-				return null;		//Permanent TTL; never get's GC'd
-			}
-			else
-			{
-				ILease oLease = (ILease) base.InitializeLifetimeService();
-				if (oLease.CurrentState == LeaseState.Initial)
-				{
-					oLease.InitialLeaseTime = TimeSpan.FromSeconds(DistributedSearchable.leaseTimeSpan.TotalSeconds);
-					oLease.RenewOnCallTime = TimeSpan.FromSeconds(DistributedSearchable.TIME_FACTOR);
-				}
-				return oLease;
+            if (DistributedSearchable.initialLeaseTime == -1)
+            {
+                return null;        //Permanent TTL; never gets GC'd
+            }
+            else
+            {
+                ILease oLease = (ILease) base.InitializeLifetimeService();
+                if (oLease.CurrentState == LeaseState.Initial)
+                {
+                    oLease.InitialLeaseTime = TimeSpan.FromSeconds(DistributedSearchable.leaseTimeSpan.TotalSeconds);
+                    oLease.RenewOnCallTime = TimeSpan.FromSeconds(DistributedSearchable.TIME_FACTOR);
+                }
+                return oLease;
             }
         }
 

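The lease handling above is the standard .NET Remoting lifetime pattern: returning null from InitializeLifetimeService() gives the remoted object an infinite lease, otherwise the initial lease time and per-call renewal are set while the lease is still in LeaseState.Initial. A minimal self-contained sketch of the same pattern, with illustrative constants:

    using System;
    using System.Runtime.Remoting.Lifetime;

    // Same lease pattern as DistributedSearchable, reduced to essentials.
    public class PingableService : MarshalByRefObject
    {
        private static readonly int InitialLeaseSeconds = 300;  // illustrative
        private static readonly int RenewOnCallSeconds = 60;    // illustrative

        public override object InitializeLifetimeService()
        {
            if (InitialLeaseSeconds <= 0)
                return null;    // permanent lease; never collected by the lease manager

            var lease = (ILease)base.InitializeLifetimeService();
            if (lease.CurrentState == LeaseState.Initial)
            {
                lease.InitialLeaseTime = TimeSpan.FromSeconds(InitialLeaseSeconds);
                lease.RenewOnCallTime = TimeSpan.FromSeconds(RenewOnCallSeconds);
            }
            return lease;
        }
    }
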
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/DistributedSearch/LuceneMonitor/LuceneMonitor.cs
----------------------------------------------------------------------
diff --git a/src/contrib/DistributedSearch/LuceneMonitor/LuceneMonitor.cs b/src/contrib/DistributedSearch/LuceneMonitor/LuceneMonitor.cs
index 153d6f7..e36beb7 100644
--- a/src/contrib/DistributedSearch/LuceneMonitor/LuceneMonitor.cs
+++ b/src/contrib/DistributedSearch/LuceneMonitor/LuceneMonitor.cs
@@ -36,149 +36,149 @@ namespace Lucene.Net.Distributed.Operations
     /// <summary>
     /// A Windows service that provides system ping checking against LuceneServer.
     /// </summary>
-	public class LuceneMonitor : System.ServiceProcess.ServiceBase
-	{
-		/// <summary> 
-		/// Required designer variable.
-		/// </summary>
-		private System.ComponentModel.Container components = null;
-		private ServiceController scMonitor = new ServiceController();
-		private Thread serviceThread;
-		private int sleepTime = 5000;
-		private bool bRun = true;
-		private string ipAddress = "";
-		private int port = 0;
-		private static readonly log4net.ILog oLog = log4net.LogManager.GetLogger(System.Reflection.MethodBase.GetCurrentMethod().DeclaringType);
+    public class LuceneMonitor : System.ServiceProcess.ServiceBase
+    {
+        /// <summary> 
+        /// Required designer variable.
+        /// </summary>
+        private System.ComponentModel.Container components = null;
+        private ServiceController scMonitor = new ServiceController();
+        private Thread serviceThread;
+        private int sleepTime = 5000;
+        private bool bRun = true;
+        private string ipAddress = "";
+        private int port = 0;
+        private static readonly log4net.ILog oLog = log4net.LogManager.GetLogger(System.Reflection.MethodBase.GetCurrentMethod().DeclaringType);
 
 
         public LuceneMonitor()
-		{
-			// This call is required by the Windows.Forms Component Designer.
-			InitializeComponent();
+        {
+            // This call is required by the Windows.Forms Component Designer.
+            InitializeComponent();
 
-			// TODO: Add any initialization after the InitComponent call
-		}
+            // TODO: Add any initialization after the InitComponent call
+        }
 
-		// The main entry point for the process
-		static void Main()
-		{
-			System.ServiceProcess.ServiceBase[] ServicesToRun;
+        // The main entry point for the process
+        static void Main()
+        {
+            System.ServiceProcess.ServiceBase[] ServicesToRun;
             ServicesToRun = new System.ServiceProcess.ServiceBase[] { new LuceneMonitor() };
-			System.ServiceProcess.ServiceBase.Run(ServicesToRun);
-		}
-
-		/// <summary> 
-		/// Required method for Designer support - do not modify 
-		/// the contents of this method with the code editor.
-		/// </summary>
-		private void InitializeComponent()
-		{
-			components = new System.ComponentModel.Container();
-			this.ServiceName = "LuceneMonitor";
-		}
-
-		/// <summary>
-		/// Clean up any resources being used.
-		/// </summary>
-		protected override void Dispose( bool disposing )
-		{
-			if( disposing )
-			{
-				if (components != null) 
-				{
-					components.Dispose();
-				}
-			}
-			base.Dispose( disposing );
-		}
-
-		/// <summary>
-		/// Set things in motion so your service can do its work.
-		/// </summary>
-		protected override void OnStart(string[] args)
-		{
-			ThreadStart threadStart = new ThreadStart(MonitorService);
-			serviceThread = new Thread(threadStart);
-			serviceThread.Start();
-		}
-
-		private void LogMessage(string message)
-		{
-			this.LogMessage(message, Level.Info);
-		}
-		private void LogMessage(string message, Level msgLevel)
-		{
-			if (msgLevel==Level.Info)
-			{
-				if (oLog.IsInfoEnabled)
-					oLog.Info(message);
-			}
-			else if (msgLevel==Level.Warn)
-			{
-				if (oLog.IsWarnEnabled)
-					oLog.Warn(message);
-			}
-		}
-		private void LogMessage(string message, Level msgLevel, int ErrorLevel)
-		{
-			if (msgLevel==Level.Error)
-			{
-				if (oLog.IsErrorEnabled)
-				{
-					oLog.Error(message);
-					EventLog.WriteEntry(this.ServiceName, message, EventLogEntryType.Error, ErrorLevel);
-				}
-			}
-		}
-
-		private void MonitorService()
-		{
-			this.LogMessage(this.ServiceName+" started");
-			scMonitor.ServiceName="LuceneServer";
+            System.ServiceProcess.ServiceBase.Run(ServicesToRun);
+        }
+
+        /// <summary> 
+        /// Required method for Designer support - do not modify 
+        /// the contents of this method with the code editor.
+        /// </summary>
+        private void InitializeComponent()
+        {
+            components = new System.ComponentModel.Container();
+            this.ServiceName = "LuceneMonitor";
+        }
+
+        /// <summary>
+        /// Clean up any resources being used.
+        /// </summary>
+        protected override void Dispose( bool disposing )
+        {
+            if( disposing )
+            {
+                if (components != null) 
+                {
+                    components.Dispose();
+                }
+            }
+            base.Dispose( disposing );
+        }
+
+        /// <summary>
+        /// Set things in motion so your service can do its work.
+        /// </summary>
+        protected override void OnStart(string[] args)
+        {
+            ThreadStart threadStart = new ThreadStart(MonitorService);
+            serviceThread = new Thread(threadStart);
+            serviceThread.Start();
+        }
+
+        private void LogMessage(string message)
+        {
+            this.LogMessage(message, Level.Info);
+        }
+        private void LogMessage(string message, Level msgLevel)
+        {
+            if (msgLevel==Level.Info)
+            {
+                if (oLog.IsInfoEnabled)
+                    oLog.Info(message);
+            }
+            else if (msgLevel==Level.Warn)
+            {
+                if (oLog.IsWarnEnabled)
+                    oLog.Warn(message);
+            }
+        }
+        private void LogMessage(string message, Level msgLevel, int ErrorLevel)
+        {
+            if (msgLevel==Level.Error)
+            {
+                if (oLog.IsErrorEnabled)
+                {
+                    oLog.Error(message);
+                    EventLog.WriteEntry(this.ServiceName, message, EventLogEntryType.Error, ErrorLevel);
+                }
+            }
+        }
+
+        private void MonitorService()
+        {
+            this.LogMessage(this.ServiceName+" started");
+            scMonitor.ServiceName="LuceneServer";
             this.sleepTime = (ConfigurationManager.AppSettings["ServiceSleepTime"] != null ? Convert.ToInt32(ConfigurationManager.AppSettings["ServiceSleepTime"]) : this.sleepTime);
             this.ipAddress = (ConfigurationManager.AppSettings["IPAddress"] != null ? ConfigurationManager.AppSettings["IPAddress"] : "");
             this.port = (ConfigurationManager.AppSettings["Port"] != null ? Convert.ToInt32(ConfigurationManager.AppSettings["Port"]) : 0);
-			this.LogMessage("ServiceSleepTime = "+this.sleepTime.ToString()+"; ipAddress="+this.ipAddress+"; port="+this.port.ToString());
-
-			while (bRun)
-			{
-				this.CheckService();
-				Thread.Sleep(sleepTime);
-			}
-		}
-
-		private void CheckService()
-		{
-			try
-			{
-				scMonitor.Refresh();
-
-				if (scMonitor.Status.Equals(ServiceControllerStatus.StopPending))
-					scMonitor.WaitForStatus(ServiceControllerStatus.Stopped);
-
-				if (scMonitor.Status.Equals(ServiceControllerStatus.Stopped))
-				{
-					// Start the service if the current status is stopped.
-					foreach (IChannel ic in ChannelServices.RegisteredChannels)
-						ChannelServices.UnregisterChannel(ic);
-					scMonitor.Start();
-					this.LogMessage(scMonitor.ServiceName + " started (Service stopped or StopPending)", Level.Error, 99);
-				}
-			}
-			catch (Exception e)
-			{
-				this.LogMessage(scMonitor.ServiceName + " error: "+e.Message+e.StackTrace, Level.Error, 199);
-			}
-
-		}
-
-
-		/// <summary>
-		/// Stop this service.
-		/// </summary>
-		protected override void OnStop()
-		{
-			this.bRun=false;
-		}
-	}
+            this.LogMessage("ServiceSleepTime = "+this.sleepTime.ToString()+"; ipAddress="+this.ipAddress+"; port="+this.port.ToString());
+
+            while (bRun)
+            {
+                this.CheckService();
+                Thread.Sleep(sleepTime);
+            }
+        }
+
+        private void CheckService()
+        {
+            try
+            {
+                scMonitor.Refresh();
+
+                if (scMonitor.Status.Equals(ServiceControllerStatus.StopPending))
+                    scMonitor.WaitForStatus(ServiceControllerStatus.Stopped);
+
+                if (scMonitor.Status.Equals(ServiceControllerStatus.Stopped))
+                {
+                    // Start the service if the current status is stopped.
+                    foreach (IChannel ic in ChannelServices.RegisteredChannels)
+                        ChannelServices.UnregisterChannel(ic);
+                    scMonitor.Start();
+                    this.LogMessage(scMonitor.ServiceName + " started (Service stopped or StopPending)", Level.Error, 99);
+                }
+            }
+            catch (Exception e)
+            {
+                this.LogMessage(scMonitor.ServiceName + " error: "+e.Message+e.StackTrace, Level.Error, 199);
+            }
+
+        }
+
+
+        /// <summary>
+        /// Stop this service.
+        /// </summary>
+        protected override void OnStop()
+        {
+            this.bRun=false;
+        }
+    }
 }

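MonitorService() above pulls ServiceSleepTime, IPAddress and Port from appSettings and then loops over CheckService(). The watchdog core can be exercised outside the service host; a standalone sketch (the bounded wait for Running is an assumption, the committed code does not wait after Start()):

    using System;
    using System.ServiceProcess;

    class WatchdogSketch
    {
        static void Main()
        {
            // Same check/restart logic as CheckService(), against the
            // service name hard-coded in MonitorService().
            using (var sc = new ServiceController("LuceneServer"))
            {
                sc.Refresh();
                if (sc.Status == ServiceControllerStatus.StopPending)
                    sc.WaitForStatus(ServiceControllerStatus.Stopped);

                if (sc.Status == ServiceControllerStatus.Stopped)
                {
                    sc.Start();
                    sc.WaitForStatus(ServiceControllerStatus.Running,
                                     TimeSpan.FromSeconds(30));
                }
            }
        }
    }
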
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/DistributedSearch/LuceneMonitor/ProjectInstaller.cs
----------------------------------------------------------------------
diff --git a/src/contrib/DistributedSearch/LuceneMonitor/ProjectInstaller.cs b/src/contrib/DistributedSearch/LuceneMonitor/ProjectInstaller.cs
index b802cb0..9b635b5 100644
--- a/src/contrib/DistributedSearch/LuceneMonitor/ProjectInstaller.cs
+++ b/src/contrib/DistributedSearch/LuceneMonitor/ProjectInstaller.cs
@@ -24,118 +24,118 @@ using Microsoft.Win32;
 
 namespace LuceneMonitorInstall
 {
-	/// <summary>
-	/// Summary description for ProjectInstaller.
-	/// </summary>
-	[RunInstallerAttribute(true)]
-	public class ProjectInstaller : Installer
-	{
-		private ServiceProcessInstaller processInstaller;
-		private ServiceInstaller serviceInstaller;
-		/// <summary>
-		/// Required designer variable.
-		/// </summary>
-		private System.ComponentModel.Container components = null;
-
-		public ProjectInstaller()
-		{
-			// This call is required by the Designer.
-			InitializeComponent();
-
-			// TODO: Add any initialization after the InitializeComponent call
-		}
-
-		/// <summary> 
-		/// Clean up any resources being used.
-		/// </summary>
-		protected override void Dispose( bool disposing )
-		{
-			if( disposing )
-			{
-				if(components != null)
-				{
-					components.Dispose();
-				}
-			}
-			base.Dispose( disposing );
-		}
-
-
-		#region Component Designer generated code
-		/// <summary>
-		/// Required method for Designer support - do not modify
-		/// the contents of this method with the code editor.
-		/// </summary>
-		private void InitializeComponent()
-		{
-			this.processInstaller = new ServiceProcessInstaller();
-			this.serviceInstaller = new ServiceInstaller();
-			this.processInstaller.Account = ServiceAccount.LocalSystem;
-
-			this.serviceInstaller.ServiceName = "LuceneMonitor";
-			this.serviceInstaller.StartType = ServiceStartMode.Manual;
-
-			Installers.Add(this.processInstaller);
-			Installers.Add(this.serviceInstaller);
-
-		}
-		#endregion
-
-		public override void Install(IDictionary stateSaver)
-		{
-			RegistryKey system;
-			RegistryKey currentControlSet;	//HKEY_LOCAL_MACHINE\Services\CurrentControlSet
-			RegistryKey services;			//...\Services
-			RegistryKey service;			//...\<Service Name>
-
-			try
-			{
-				//Let the project installer do its job
-				base.Install(stateSaver);
-
-				system = Microsoft.Win32.Registry.LocalMachine.OpenSubKey("System");	//Open the HKEY_LOCAL_MACHINE\SYSTEM key
-				currentControlSet = system.OpenSubKey("CurrentControlSet");				//Open CurrentControlSet
-				services = currentControlSet.OpenSubKey("Services");					//Go to the services key
-				service = services.OpenSubKey(this.serviceInstaller.ServiceName, true);	//Open the key for serviceInstaller
-
-				service.SetValue("Description", "Lucene Monitor");
-
-
-			}
-			catch(Exception e)
-			{
-				Console.WriteLine("An exception was thrown during service installation:\n" + e.ToString());
-			}
-		}
-
-		public override void Uninstall(IDictionary savedState)
-		{
-			RegistryKey system;
-			RegistryKey currentControlSet;	//HKEY_LOCAL_MACHINE\Services\CurrentControlSet
-			RegistryKey services;			//...\Services
-			RegistryKey service;			//...\<Service Name>
-
-			try
-			{
-				//Drill down to the service key and open it with write permission
-				system = Registry.LocalMachine.OpenSubKey("System");
-				currentControlSet = system.OpenSubKey("CurrentControlSet");
-				services = currentControlSet.OpenSubKey("Services");
-				service = services.OpenSubKey(this.serviceInstaller.ServiceName, true);
-				service.DeleteSubKeyTree("Description");		//Delete keys created during installation
-
-			}
-			catch(Exception e)
-			{
-				Console.WriteLine("Exception encountered while uninstalling service:\n" + e.ToString());
-			}
-			finally
-			{
-				//Let the project installer do its job
-				base.Uninstall(savedState);
-			}
-		}
-
-
-	}
+    /// <summary>
+    /// Summary description for ProjectInstaller.
+    /// </summary>
+    [RunInstallerAttribute(true)]
+    public class ProjectInstaller : Installer
+    {
+        private ServiceProcessInstaller processInstaller;
+        private ServiceInstaller serviceInstaller;
+        /// <summary>
+        /// Required designer variable.
+        /// </summary>
+        private System.ComponentModel.Container components = null;
+
+        public ProjectInstaller()
+        {
+            // This call is required by the Designer.
+            InitializeComponent();
+
+            // TODO: Add any initialization after the InitializeComponent call
+        }
+
+        /// <summary> 
+        /// Clean up any resources being used.
+        /// </summary>
+        protected override void Dispose( bool disposing )
+        {
+            if( disposing )
+            {
+                if(components != null)
+                {
+                    components.Dispose();
+                }
+            }
+            base.Dispose( disposing );
+        }
+
+
+        #region Component Designer generated code
+        /// <summary>
+        /// Required method for Designer support - do not modify
+        /// the contents of this method with the code editor.
+        /// </summary>
+        private void InitializeComponent()
+        {
+            this.processInstaller = new ServiceProcessInstaller();
+            this.serviceInstaller = new ServiceInstaller();
+            this.processInstaller.Account = ServiceAccount.LocalSystem;
+
+            this.serviceInstaller.ServiceName = "LuceneMonitor";
+            this.serviceInstaller.StartType = ServiceStartMode.Manual;
+
+            Installers.Add(this.processInstaller);
+            Installers.Add(this.serviceInstaller);
+
+        }
+        #endregion
+
+        public override void Install(IDictionary stateSaver)
+        {
+            RegistryKey system;
+            RegistryKey currentControlSet;    //HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet
+            RegistryKey services;            //...\Services
+            RegistryKey service;            //...\<Service Name>
+
+            try
+            {
+                //Let the project installer do its job
+                base.Install(stateSaver);
+
+                system = Microsoft.Win32.Registry.LocalMachine.OpenSubKey("System");    //Open the HKEY_LOCAL_MACHINE\SYSTEM key
+                currentControlSet = system.OpenSubKey("CurrentControlSet");                //Open CurrentControlSet
+                services = currentControlSet.OpenSubKey("Services");                    //Go to the services key
+                service = services.OpenSubKey(this.serviceInstaller.ServiceName, true);    //Open the key for serviceInstaller
+
+                service.SetValue("Description", "Lucene Monitor");
+
+
+            }
+            catch(Exception e)
+            {
+                Console.WriteLine("An exception was thrown during service installation:\n" + e.ToString());
+            }
+        }
+
+        public override void Uninstall(IDictionary savedState)
+        {
+            RegistryKey system;
+            RegistryKey currentControlSet;    //HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet
+            RegistryKey services;            //...\Services
+            RegistryKey service;            //...\<Service Name>
+
+            try
+            {
+                //Drill down to the service key and open it with write permission
+                system = Registry.LocalMachine.OpenSubKey("System");
+                currentControlSet = system.OpenSubKey("CurrentControlSet");
+                services = currentControlSet.OpenSubKey("Services");
+                service = services.OpenSubKey(this.serviceInstaller.ServiceName, true);
+                service.DeleteSubKeyTree("Description");        //Delete keys created during installation
+
+            }
+            catch(Exception e)
+            {
+                Console.WriteLine("Exception encountered while uninstalling service:\n" + e.ToString());
+            }
+            finally
+            {
+                //Let the project installer do its job
+                base.Uninstall(savedState);
+            }
+        }
+
+
+    }
 }

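Install() above writes a Description value under the service's registry key; the installer itself is typically run through InstallUtil.exe. A small read-only check of that value, assuming the service name set in InitializeComponent():

    using System;
    using Microsoft.Win32;

    class VerifyInstallSketch
    {
        static void Main()
        {
            // Read back the Description value written by Install().
            using (RegistryKey service = Registry.LocalMachine.OpenSubKey(
                       @"SYSTEM\CurrentControlSet\Services\LuceneMonitor"))
            {
                Console.WriteLine(service == null
                    ? "LuceneMonitor is not installed."
                    : "Description: " + service.GetValue("Description"));
            }
        }
    }
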
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/FastVectorHighlighter/Properties/AssemblyInfo.cs
----------------------------------------------------------------------
diff --git a/src/contrib/FastVectorHighlighter/Properties/AssemblyInfo.cs b/src/contrib/FastVectorHighlighter/Properties/AssemblyInfo.cs
index 9a545d9..e44909c 100644
--- a/src/contrib/FastVectorHighlighter/Properties/AssemblyInfo.cs
+++ b/src/contrib/FastVectorHighlighter/Properties/AssemblyInfo.cs
@@ -1,4 +1,4 @@
-/*
+/*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/FastVectorHighlighter/StringUtils.cs
----------------------------------------------------------------------
diff --git a/src/contrib/FastVectorHighlighter/StringUtils.cs b/src/contrib/FastVectorHighlighter/StringUtils.cs
index 7b27259..995d714 100644
--- a/src/contrib/FastVectorHighlighter/StringUtils.cs
+++ b/src/contrib/FastVectorHighlighter/StringUtils.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/FastVectorHighlighter/VectorHighlightMapper.cs
----------------------------------------------------------------------
diff --git a/src/contrib/FastVectorHighlighter/VectorHighlightMapper.cs b/src/contrib/FastVectorHighlighter/VectorHighlightMapper.cs
index da2966c..c9c6ced 100644
--- a/src/contrib/FastVectorHighlighter/VectorHighlightMapper.cs
+++ b/src/contrib/FastVectorHighlighter/VectorHighlightMapper.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Highlighter/DefaultEncoder.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Highlighter/DefaultEncoder.cs b/src/contrib/Highlighter/DefaultEncoder.cs
index 54500a5..ff1efb7 100644
--- a/src/contrib/Highlighter/DefaultEncoder.cs
+++ b/src/contrib/Highlighter/DefaultEncoder.cs
@@ -17,12 +17,12 @@
 
 namespace Lucene.Net.Search.Highlight
 {
-	/// <summary>Simple <see cref="IEncoder"/> implementation that does not modify the output</summary>
-	public class DefaultEncoder : IEncoder
-	{
-		public virtual System.String EncodeText(System.String originalText)
-		{
-			return originalText;
-		}
-	}
+    /// <summary>Simple <see cref="IEncoder"/> implementation that does not modify the output</summary>
+    public class DefaultEncoder : IEncoder
+    {
+        public virtual System.String EncodeText(System.String originalText)
+        {
+            return originalText;
+        }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Highlighter/GradientFormatter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Highlighter/GradientFormatter.cs b/src/contrib/Highlighter/GradientFormatter.cs
index 6b102a0..3b50d42 100644
--- a/src/contrib/Highlighter/GradientFormatter.cs
+++ b/src/contrib/Highlighter/GradientFormatter.cs
@@ -19,194 +19,194 @@ using System;
 
 namespace Lucene.Net.Search.Highlight
 {
-	/// <summary>
-	/// Formats text with different color intensity depending on the score of the term.
-	/// </summary>
-	public class GradientFormatter : IFormatter
-	{
-		private float maxScore;
+    /// <summary>
+    /// Formats text with different color intensity depending on the score of the term.
+    /// </summary>
+    public class GradientFormatter : IFormatter
+    {
+        private float maxScore;
 
         protected internal int fgRMin, fgGMin, fgBMin;
         protected internal int fgRMax, fgGMax, fgBMax;
-		protected bool highlightForeground;
+        protected bool highlightForeground;
         protected internal int bgRMin, bgGMin, bgBMin;
         protected internal int bgRMax, bgGMax, bgBMax;
-		protected bool highlightBackground;
-		
-		/// <summary> Sets the color range for the IDF scores</summary>
+        protected bool highlightBackground;
+        
+        /// <summary> Sets the color range for the IDF scores</summary>
         /// <param name="maxScore">
-		/// The score (and above) displayed as maxColor (See QueryScorer.getMaxWeight 
-		/// which can be used to callibrate scoring scale)
-		/// </param>
+        /// The score (and above) displayed as maxColor (see QueryScorer.getMaxWeight,
+        /// which can be used to calibrate the scoring scale)
+        /// </param>
         /// <param name="minForegroundColor">
-		/// The hex color used for representing IDF scores of zero eg
-		/// #FFFFFF (white) or null if no foreground color required
-		/// </param>
+        /// The hex color used for representing IDF scores of zero eg
+        /// #FFFFFF (white) or null if no foreground color required
+        /// </param>
         /// <param name="maxForegroundColor">
-		/// The largest hex color used for representing IDF scores eg
-		/// #000000 (black) or null if no foreground color required
-		/// </param>
+        /// The largest hex color used for representing IDF scores eg
+        /// #000000 (black) or null if no foreground color required
+        /// </param>
         /// <param name="minBackgroundColor">
-		/// The hex color used for representing IDF scores of zero eg
-		/// #FFFFFF (white) or null if no background color required
-		/// </param>
+        /// The hex color used for representing IDF scores of zero eg
+        /// #FFFFFF (white) or null if no background color required
+        /// </param>
         /// <param name="maxBackgroundColor">
-		/// The largest hex color used for representing IDF scores eg
-		/// #000000 (black) or null if no background color required
-		/// </param>
-		public GradientFormatter(float maxScore, string minForegroundColor, string maxForegroundColor, string minBackgroundColor, string maxBackgroundColor)
-		{
-		    highlightForeground = (minForegroundColor != null) && (maxForegroundColor != null);
+        /// The largest hex color used for representing IDF scores eg
+        /// #000000 (black) or null if no background color required
+        /// </param>
+        public GradientFormatter(float maxScore, string minForegroundColor, string maxForegroundColor, string minBackgroundColor, string maxBackgroundColor)
+        {
+            highlightForeground = (minForegroundColor != null) && (maxForegroundColor != null);
 
             if (highlightForeground)
-			{
-				if (minForegroundColor.Length != 7)
-				{
-					throw new ArgumentException("minForegroundColor is not 7 bytes long eg a hex " + "RGB value such as #FFFFFF");
-				}
-				if (maxForegroundColor.Length != 7)
-				{
-					throw new ArgumentException("minForegroundColor is not 7 bytes long eg a hex " + "RGB value such as #FFFFFF");
-				}
-				fgRMin = HexToInt(minForegroundColor.Substring(1, 2));
-				fgGMin = HexToInt(minForegroundColor.Substring(3, 2));
-				fgBMin = HexToInt(minForegroundColor.Substring(5, 2));
-				
-				fgRMax = HexToInt(maxForegroundColor.Substring(1, 2));
-				fgGMax = HexToInt(maxForegroundColor.Substring(3, 2));
-				fgBMax = HexToInt(maxForegroundColor.Substring(5, 2));
-			}
-			
-			highlightBackground = (minBackgroundColor != null) && (maxBackgroundColor != null);
-			if (highlightBackground)
-			{
-				if (minBackgroundColor.Length != 7)
-				{
-					throw new System.ArgumentException("minBackgroundColor is not 7 bytes long eg a hex " + "RGB value such as #FFFFFF");
-				}
-				if (maxBackgroundColor.Length != 7)
-				{
-					throw new System.ArgumentException("minBackgroundColor is not 7 bytes long eg a hex " + "RGB value such as #FFFFFF");
-				}
-				bgRMin = HexToInt(minBackgroundColor.Substring(1, 2));
-				bgGMin = HexToInt(minBackgroundColor.Substring(3, 2));
-				bgBMin = HexToInt(minBackgroundColor.Substring(5, 2));
-				
-				bgRMax = HexToInt(maxBackgroundColor.Substring(1, 2));
-				bgGMax = HexToInt(maxBackgroundColor.Substring(3, 2));
-				bgBMax = HexToInt(maxBackgroundColor.Substring(5, 2));
-			}
-			//        this.corpusReader = corpusReader;
-			this.maxScore = maxScore;
-			//        totalNumDocs = corpusReader.numDocs();
-		}
-		
-		public virtual string HighlightTerm(string originalText, TokenGroup tokenGroup)
-		{
-			if (tokenGroup.TotalScore == 0)
-				return originalText;
-			float score = tokenGroup.TotalScore;
-			if (score == 0)
-			{
-				return originalText;
-			}
+            {
+                if (minForegroundColor.Length != 7)
+                {
+                    throw new ArgumentException("minForegroundColor is not 7 bytes long eg a hex " + "RGB value such as #FFFFFF");
+                }
+                if (maxForegroundColor.Length != 7)
+                {
+                    throw new ArgumentException("minForegroundColor is not 7 bytes long eg a hex " + "RGB value such as #FFFFFF");
+                }
+                fgRMin = HexToInt(minForegroundColor.Substring(1, 2));
+                fgGMin = HexToInt(minForegroundColor.Substring(3, 2));
+                fgBMin = HexToInt(minForegroundColor.Substring(5, 2));
+                
+                fgRMax = HexToInt(maxForegroundColor.Substring(1, 2));
+                fgGMax = HexToInt(maxForegroundColor.Substring(3, 2));
+                fgBMax = HexToInt(maxForegroundColor.Substring(5, 2));
+            }
+            
+            highlightBackground = (minBackgroundColor != null) && (maxBackgroundColor != null);
+            if (highlightBackground)
+            {
+                if (minBackgroundColor.Length != 7)
+                {
+                    throw new System.ArgumentException("minBackgroundColor is not 7 bytes long eg a hex " + "RGB value such as #FFFFFF");
+                }
+                if (maxBackgroundColor.Length != 7)
+                {
+                    throw new System.ArgumentException("minBackgroundColor is not 7 bytes long eg a hex " + "RGB value such as #FFFFFF");
+                }
+                bgRMin = HexToInt(minBackgroundColor.Substring(1, 2));
+                bgGMin = HexToInt(minBackgroundColor.Substring(3, 2));
+                bgBMin = HexToInt(minBackgroundColor.Substring(5, 2));
+                
+                bgRMax = HexToInt(maxBackgroundColor.Substring(1, 2));
+                bgGMax = HexToInt(maxBackgroundColor.Substring(3, 2));
+                bgBMax = HexToInt(maxBackgroundColor.Substring(5, 2));
+            }
+            //        this.corpusReader = corpusReader;
+            this.maxScore = maxScore;
+            //        totalNumDocs = corpusReader.numDocs();
+        }
+        
+        public virtual string HighlightTerm(string originalText, TokenGroup tokenGroup)
+        {
+            if (tokenGroup.TotalScore == 0)
+                return originalText;
+            float score = tokenGroup.TotalScore;
+            if (score == 0)
+            {
+                return originalText;
+            }
 
-			var sb = new System.Text.StringBuilder();
-			sb.Append("<font ");
-			if (highlightForeground)
-			{
-				sb.Append("color=\"");
-				sb.Append(GetForegroundColorString(score));
-				sb.Append("\" ");
-			}
-			if (highlightBackground)
-			{
-				sb.Append("bgcolor=\"");
-				sb.Append(GetBackgroundColorString(score));
-				sb.Append("\" ");
-			}
-			sb.Append(">");
-			sb.Append(originalText);
-			sb.Append("</font>");
-			return sb.ToString();
-		}
-		
-		protected internal virtual string GetForegroundColorString(float score)
-		{
-			int rVal = GetColorVal(fgRMin, fgRMax, score);
-			int gVal = GetColorVal(fgGMin, fgGMax, score);
-			int bVal = GetColorVal(fgBMin, fgBMax, score);
-			var sb = new System.Text.StringBuilder();
-			sb.Append("#");
-			sb.Append(IntToHex(rVal));
-			sb.Append(IntToHex(gVal));
-			sb.Append(IntToHex(bVal));
-			return sb.ToString();
-		}
-		
-		protected internal virtual string GetBackgroundColorString(float score)
-		{
-			int rVal = GetColorVal(bgRMin, bgRMax, score);
-			int gVal = GetColorVal(bgGMin, bgGMax, score);
-			int bVal = GetColorVal(bgBMin, bgBMax, score);
-			var sb = new System.Text.StringBuilder();
-			sb.Append("#");
-			sb.Append(IntToHex(rVal));
-			sb.Append(IntToHex(gVal));
-			sb.Append(IntToHex(bVal));
-			return sb.ToString();
-		}
-		
-		private int GetColorVal(int colorMin, int colorMax, float score)
-		{
-			if (colorMin == colorMax)
-			{
-				return colorMin;
-			}
-			float scale = Math.Abs(colorMin - colorMax);
-			float relScorePercent = Math.Min(maxScore, score) / maxScore;
-			float colScore = scale * relScorePercent;
-			return Math.Min(colorMin, colorMax) + (int) colScore;
-		}
-		
-		private static char[] hexDigits = new char[]{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'};
-		
-		private static string IntToHex(int i)
-		{
-			return "" + hexDigits[(i & 0xF0) >> 4] + hexDigits[i & 0x0F];
-		}
-		
-		/// <summary> Converts a hex string into an int. Integer.parseInt(hex, 16) assumes the
-		/// input is nonnegative unless there is a preceding minus sign. This method
-		/// reads the input as twos complement instead, so if the input is 8 bytes
-		/// long, it will correctly restore a negative int produced by
-		/// Integer.toHexString() but not neccesarily one produced by
-		/// Integer.toString(x,16) since that method will produce a string like '-FF'
-		/// for negative integer values.
-		/// 
-		/// </summary>
+            var sb = new System.Text.StringBuilder();
+            sb.Append("<font ");
+            if (highlightForeground)
+            {
+                sb.Append("color=\"");
+                sb.Append(GetForegroundColorString(score));
+                sb.Append("\" ");
+            }
+            if (highlightBackground)
+            {
+                sb.Append("bgcolor=\"");
+                sb.Append(GetBackgroundColorString(score));
+                sb.Append("\" ");
+            }
+            sb.Append(">");
+            sb.Append(originalText);
+            sb.Append("</font>");
+            return sb.ToString();
+        }
+        
+        protected internal virtual string GetForegroundColorString(float score)
+        {
+            int rVal = GetColorVal(fgRMin, fgRMax, score);
+            int gVal = GetColorVal(fgGMin, fgGMax, score);
+            int bVal = GetColorVal(fgBMin, fgBMax, score);
+            var sb = new System.Text.StringBuilder();
+            sb.Append("#");
+            sb.Append(IntToHex(rVal));
+            sb.Append(IntToHex(gVal));
+            sb.Append(IntToHex(bVal));
+            return sb.ToString();
+        }
+        
+        protected internal virtual string GetBackgroundColorString(float score)
+        {
+            int rVal = GetColorVal(bgRMin, bgRMax, score);
+            int gVal = GetColorVal(bgGMin, bgGMax, score);
+            int bVal = GetColorVal(bgBMin, bgBMax, score);
+            var sb = new System.Text.StringBuilder();
+            sb.Append("#");
+            sb.Append(IntToHex(rVal));
+            sb.Append(IntToHex(gVal));
+            sb.Append(IntToHex(bVal));
+            return sb.ToString();
+        }
+        
+        private int GetColorVal(int colorMin, int colorMax, float score)
+        {
+            if (colorMin == colorMax)
+            {
+                return colorMin;
+            }
+            float scale = Math.Abs(colorMin - colorMax);
+            float relScorePercent = Math.Min(maxScore, score) / maxScore;
+            float colScore = scale * relScorePercent;
+            return Math.Min(colorMin, colorMax) + (int) colScore;
+        }
+        
+        private static char[] hexDigits = new char[]{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'};
+        
+        private static string IntToHex(int i)
+        {
+            return "" + hexDigits[(i & 0xF0) >> 4] + hexDigits[i & 0x0F];
+        }
+        
+        /// <summary> Converts a hex string into an int. Integer.parseInt(hex, 16) assumes the
+        /// input is nonnegative unless there is a preceding minus sign. This method
+        /// reads the input as two's complement instead, so if the input is 8 characters
+        /// long, it will correctly restore a negative int produced by
+        /// Integer.toHexString() but not necessarily one produced by
+        /// Integer.toString(x,16) since that method will produce a string like '-FF'
+        /// for negative integer values.
+        /// 
+        /// </summary>
         /// <param name="hex">
-		/// A string in capital or lower case hex, of no more then 16
-		/// characters.
-		/// </param>
+        /// A string in capital or lower case hex, of no more than 16
+        /// characters.
+        /// </param>
         /// <exception cref="FormatException">if the string is more than 16 characters long, or if any
-		/// character is not in the set [0-9a-fA-f]</exception>
-		public static int HexToInt(string hex)
-		{
-			int len = hex.Length;
-			if (len > 16)
-				throw new FormatException();
-			
-			int l = 0;
-			for (int i = 0; i < len; i++)
-			{
-				l <<= 4;
-				int c = (int) System.Char.GetNumericValue(hex[i]);
-				if (c < 0)
-					throw new FormatException();
-				l |= c;
-			}
-			return l;
-		}
-	}
+        /// character is not in the set [0-9a-fA-F]</exception>
+        public static int HexToInt(string hex)
+        {
+            int len = hex.Length;
+            if (len > 16)
+                throw new FormatException();
+            
+            int l = 0;
+            for (int i = 0; i < len; i++)
+            {
+                l <<= 4;
+                int c = "0123456789abcdef".IndexOf(char.ToLowerInvariant(hex[i]));    // Char.GetNumericValue returns -1 for 'A'-'F'; map hex digits explicitly
+                if (c < 0)
+                    throw new FormatException();
+                l |= c;
+            }
+            return l;
+        }
+    }
 }
\ No newline at end of file

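In the constructor above, passing null for a color pair disables that channel, so the sketch below shades only the background from white (score 0) to red (score >= maxScore). Only the constructor and the public HexToInt helper shown above are used; wiring the formatter into a Highlighter is omitted.

    using Lucene.Net.Search.Highlight;

    class GradientFormatterSketch
    {
        static void Main()
        {
            // Background-only gradient: white at score 0, red at maxScore.
            var formatter = new GradientFormatter(1.0f,
                                                  null, null,           // no foreground highlight
                                                  "#FFFFFF", "#FF0000");

            // HexToInt is public and usable on its own:
            System.Console.WriteLine(GradientFormatter.HexToInt("FF"));  // 255
        }
    }
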
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Highlighter/Highlighter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Highlighter/Highlighter.cs b/src/contrib/Highlighter/Highlighter.cs
index 28e920e..239c0f7 100644
--- a/src/contrib/Highlighter/Highlighter.cs
+++ b/src/contrib/Highlighter/Highlighter.cs
@@ -233,10 +233,10 @@ namespace Lucene.Net.Search.Highlight
 
                     tokenGroup.AddToken(_fragmentScorer.GetTokenScore());
 
-                    //				if(lastEndOffset>maxDocBytesToAnalyze)
-                    //				{
-                    //					break;
-                    //				}
+                    //                if(lastEndOffset>maxDocBytesToAnalyze)
+                    //                {
+                    //                    break;
+                    //                }
                 }
                 currentFrag.Score = _fragmentScorer.FragmentScore;
 
@@ -256,10 +256,10 @@ namespace Lucene.Net.Search.Highlight
 
                 //Test what remains of the original text beyond the point where we stopped analyzing 
                 if (
-                    //					if there is text beyond the last token considered..
+                    //                    if there is text beyond the last token considered..
                     (lastEndOffset < text.Length)
                     &&
-                    //					and that text is not too large...
+                    //                    and that text is not too large...
                     (text.Length <= _maxDocCharsToAnalyze)
                     )
                 {

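The _maxDocCharsToAnalyze cut-off referenced in this hunk is configurable on the highlighter instance. A one-line sketch, assuming the property follows the usual port of setMaxDocCharsToAnalyze and a highlighter variable is in scope:

    // Assumed property name; caps analysis at roughly the first 50K characters.
    highlighter.MaxDocCharsToAnalyze = 50 * 1024;
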
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Highlighter/IEncoder.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Highlighter/IEncoder.cs b/src/contrib/Highlighter/IEncoder.cs
index d8cff13..0e45d49 100644
--- a/src/contrib/Highlighter/IEncoder.cs
+++ b/src/contrib/Highlighter/IEncoder.cs
@@ -17,10 +17,10 @@
 
 namespace Lucene.Net.Search.Highlight
 {
-	/// <summary>Encodes original text. The IEncoder works with the Formatter to generate the output.</summary>
-	public interface IEncoder
-	{
-		/// <param name="originalText">The section of text being output</param>
-		string EncodeText(System.String originalText);
-	}
+    /// <summary>Encodes original text. The IEncoder works with the Formatter to generate the output.</summary>
+    public interface IEncoder
+    {
+        /// <param name="originalText">The section of text being output</param>
+        string EncodeText(System.String originalText);
+    }
 }
\ No newline at end of file

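DefaultEncoder (earlier in this diff) returns text unchanged; a non-trivial IEncoder usually HTML-escapes the fragment so the highlight markup stays safe to embed in a page. A hedged sketch using WebUtility (the contrib library also ships an HTML-escaping encoder, if present in your build):

    using System.Net;
    using Lucene.Net.Search.Highlight;

    // IEncoder that HTML-escapes the original text before markup is applied.
    public class HtmlEscapingEncoder : IEncoder
    {
        public string EncodeText(string originalText)
        {
            return WebUtility.HtmlEncode(originalText);
        }
    }
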
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Highlighter/IFormatter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Highlighter/IFormatter.cs b/src/contrib/Highlighter/IFormatter.cs
index 90ca8b3..2164afd 100644
--- a/src/contrib/Highlighter/IFormatter.cs
+++ b/src/contrib/Highlighter/IFormatter.cs
@@ -17,13 +17,13 @@
 
 namespace Lucene.Net.Search.Highlight
 {
-	/// <summary> Processes terms found in the original text, typically by applying some form 
-	/// of mark-up to highlight terms in HTML search results pages.</summary>
-	public interface IFormatter
-	{
-		/// <param name="originalText">The section of text being considered for markup</param>
-		/// <param name="tokenGroup">contains one or several overlapping Tokens along with
-		/// their scores and positions.</param>
-		string HighlightTerm(System.String originalText, TokenGroup tokenGroup);
-	}
+    /// <summary> Processes terms found in the original text, typically by applying some form 
+    /// of mark-up to highlight terms in HTML search results pages.</summary>
+    public interface IFormatter
+    {
+        /// <param name="originalText">The section of text being considered for markup</param>
+        /// <param name="tokenGroup">contains one or several overlapping Tokens along with
+        /// their scores and positions.</param>
+        string HighlightTerm(System.String originalText, TokenGroup tokenGroup);
+    }
 }
\ No newline at end of file

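A custom IFormatter only has to honor the contract above; SimpleHTMLFormatter (later in this diff) is the canonical implementation. A sketch that emits a CSS class instead of fixed tags, skipping zero-score token groups the same way:

    using Lucene.Net.Search.Highlight;

    // Wrap scoring terms in a span with a CSS class instead of fixed tags.
    public class CssClassFormatter : IFormatter
    {
        public string HighlightTerm(string originalText, TokenGroup tokenGroup)
        {
            if (tokenGroup.TotalScore <= 0)
                return originalText;
            return "<span class=\"hit\">" + originalText + "</span>";
        }
    }
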
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Highlighter/NullFragmenter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Highlighter/NullFragmenter.cs b/src/contrib/Highlighter/NullFragmenter.cs
index fa73736..c86dda1 100644
--- a/src/contrib/Highlighter/NullFragmenter.cs
+++ b/src/contrib/Highlighter/NullFragmenter.cs
@@ -19,18 +19,18 @@ using Lucene.Net.Analysis;
 
 namespace Lucene.Net.Search.Highlight
 {
-	
-	/// <summary> <see cref="IFragmenter"/> implementation which does not fragment the text.
-	/// This is useful for highlighting the entire content of a document or field.
-	/// </summary>
-	public class NullFragmenter : IFragmenter
-	{
-	    public virtual void Start(string originalText, TokenStream tokenStream)
-	    { }
+    
+    /// <summary> <see cref="IFragmenter"/> implementation which does not fragment the text.
+    /// This is useful for highlighting the entire content of a document or field.
+    /// </summary>
+    public class NullFragmenter : IFragmenter
+    {
+        public virtual void Start(string originalText, TokenStream tokenStream)
+        { }
 
-	    public virtual bool IsNewFragment()
-	    {
+        public virtual bool IsNewFragment()
+        {
             return false;
-	    }
-	}
+        }
+    }
 }
\ No newline at end of file

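With NullFragmenter the whole field comes back as a single fragment. A usage sketch, assuming highlighter, analyzer and text variables are in scope and that the port exposes TextFragmenter and GetBestFragment under their usual names:

    // Highlight the entire field content as one fragment.
    highlighter.TextFragmenter = new NullFragmenter();
    string whole = highlighter.GetBestFragment(analyzer, "content", text);
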
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Highlighter/QueryTermExtractor.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Highlighter/QueryTermExtractor.cs b/src/contrib/Highlighter/QueryTermExtractor.cs
index ba2e1e2..2a5bc7e 100644
--- a/src/contrib/Highlighter/QueryTermExtractor.cs
+++ b/src/contrib/Highlighter/QueryTermExtractor.cs
@@ -24,36 +24,36 @@ using Lucene.Net.Util;
 
 namespace Lucene.Net.Search.Highlight
 {
-	
-	/// <summary> Utility class used to extract the terms used in a query, plus any weights.
-	/// This class will not find terms for MultiTermQuery, RangeQuery and PrefixQuery classes
-	/// so the caller must pass a rewritten query (see Query.rewrite) to obtain a list of 
-	/// expanded terms.</summary>
-	public static class QueryTermExtractor
-	{
-		
-		/// <summary> Extracts all terms texts of a given Query into an array of WeightedTerms
-		/// 
-		/// </summary>
-		/// <param name="query">     Query to extract term texts from
-		/// </param>
-		/// <returns> an array of the terms used in a query, plus their weights.
-		/// </returns>
-		public static WeightedTerm[] GetTerms(Query query)
-		{
-			return GetTerms(query, false);
-		}
-		
-		/// <summary> Extracts all terms texts of a given Query into an array of WeightedTerms
-		/// 
-		/// </summary>
-		/// <param name="query">Query to extract term texts from</param>
-		/// <param name="reader">used to compute IDF which can be used to a) score selected fragments better 
-		/// b) use graded highlights eg chaning intensity of font color</param>
-		/// <param name="fieldName">the field on which Inverse Document Frequency (IDF) calculations are based</param>
-		/// <returns> an array of the terms used in a query, plus their weights.</returns>
-		public static WeightedTerm[] GetIdfWeightedTerms(Query query, IndexReader reader, string fieldName)
-		{
+    
+    /// <summary> Utility class used to extract the terms used in a query, plus any weights.
+    /// This class will not find terms for MultiTermQuery, RangeQuery and PrefixQuery classes
+    /// so the caller must pass a rewritten query (see Query.rewrite) to obtain a list of 
+    /// expanded terms.</summary>
+    public static class QueryTermExtractor
+    {
+        
+        /// <summary> Extracts all term texts of a given Query into an array of WeightedTerms
+        /// 
+        /// </summary>
+        /// <param name="query">     Query to extract term texts from
+        /// </param>
+        /// <returns> an array of the terms used in a query, plus their weights.
+        /// </returns>
+        public static WeightedTerm[] GetTerms(Query query)
+        {
+            return GetTerms(query, false);
+        }
+        
+        /// <summary> Extracts all term texts of a given Query into an array of WeightedTerms
+        /// 
+        /// </summary>
+        /// <param name="query">Query to extract term texts from</param>
+        /// <param name="reader">used to compute IDF which can be used to a) score selected fragments better 
+        /// b) use graded highlights, e.g. changing the intensity of font color</param>
+        /// <param name="fieldName">the field on which Inverse Document Frequency (IDF) calculations are based</param>
+        /// <returns> an array of the terms used in a query, plus their weights.</returns>
+        public static WeightedTerm[] GetIdfWeightedTerms(Query query, IndexReader reader, string fieldName)
+        {
             WeightedTerm[] terms = GetTerms(query, false, fieldName);
             int totalNumDocs = reader.NumDocs();
             foreach (WeightedTerm t in terms)
@@ -75,16 +75,16 @@ namespace Lucene.Net.Search.Highlight
                     //ignore 
                 }
             }
-		    return terms;
-		}
-		
-		/// <summary>Extracts all terms texts of a given Query into an array of WeightedTerms</summary>
-		/// <param name="query">Query to extract term texts from</param>
-		/// <param name="prohibited"><c>true</c> to extract "prohibited" terms, too </param>
-		/// <param name="fieldName"> The fieldName used to filter query terms</param>
-		/// <returns>an array of the terms used in a query, plus their weights.</returns>
-		public static WeightedTerm[] GetTerms(Query query, bool prohibited, string fieldName)
-		{
+            return terms;
+        }
+        
+        /// <summary>Extracts all term texts of a given Query into an array of WeightedTerms</summary>
+        /// <param name="query">Query to extract term texts from</param>
+        /// <param name="prohibited"><c>true</c> to extract "prohibited" terms, too </param>
+        /// <param name="fieldName"> The fieldName used to filter query terms</param>
+        /// <returns>an array of the terms used in a query, plus their weights.</returns>
+        public static WeightedTerm[] GetTerms(Query query, bool prohibited, string fieldName)
+        {
             var terms = new HashSet<WeightedTerm>();
             if (fieldName != null)
             {
@@ -92,23 +92,23 @@ namespace Lucene.Net.Search.Highlight
             }
             GetTerms(query, terms, prohibited, fieldName);
             return terms.ToArray();
-		}
-		
-		/// <summary> Extracts all terms texts of a given Query into an array of WeightedTerms
-		/// 
-		/// </summary>
-		/// <param name="query">     Query to extract term texts from
-		/// </param>
-		/// <param name="prohibited"><c>true</c> to extract "prohibited" terms, too
-		/// </param>
-		/// <returns> an array of the terms used in a query, plus their weights.
-		/// </returns>
-		public static WeightedTerm[] GetTerms(Query query, bool prohibited)
-		{
-			return GetTerms(query, prohibited, null);
-		}
-		
-		//fieldname MUST be interned prior to this call
+        }
+        
+        /// <summary> Extracts all term texts of a given Query into an array of WeightedTerms
+        /// 
+        /// </summary>
+        /// <param name="query">     Query to extract term texts from
+        /// </param>
+        /// <param name="prohibited"><c>true</c> to extract "prohibited" terms, too
+        /// </param>
+        /// <returns> an array of the terms used in a query, plus their weights.
+        /// </returns>
+        public static WeightedTerm[] GetTerms(Query query, bool prohibited)
+        {
+            return GetTerms(query, prohibited, null);
+        }
+        
+        //fieldname MUST be interned prior to this call
         private static void GetTerms(Query query, HashSet<WeightedTerm> terms, bool prohibited, string fieldName)
         {
             try
@@ -136,28 +136,28 @@ namespace Lucene.Net.Search.Highlight
             }
         }
 
-	    /// <summary> extractTerms is currently the only query-independent means of introspecting queries but it only reveals
-		/// a list of terms for that query - not the boosts each individual term in that query may or may not have.
-		/// "Container" queries such as BooleanQuery should be unwrapped to get at the boost info held
-		/// in each child element. 
-		/// Some discussion around this topic here:
-		/// http://www.gossamer-threads.com/lists/lucene/java-dev/34208?search_string=introspection;#34208
-		/// Unfortunately there seemed to be limited interest in requiring all Query objects to implement
-		/// something common which would allow access to child queries so what follows here are query-specific
-		/// implementations for accessing embedded query elements. 
-		/// </summary>
-		private static void  GetTermsFromBooleanQuery(BooleanQuery query, HashSet<WeightedTerm> terms, bool prohibited, string fieldName)
-		{
+        /// <summary> extractTerms is currently the only query-independent means of introspecting queries but it only reveals
+        /// a list of terms for that query - not the boosts each individual term in that query may or may not have.
+        /// "Container" queries such as BooleanQuery should be unwrapped to get at the boost info held
+        /// in each child element. 
+        /// Some discussion around this topic here:
+        /// http://www.gossamer-threads.com/lists/lucene/java-dev/34208?search_string=introspection;#34208
+        /// Unfortunately there seemed to be limited interest in requiring all Query objects to implement
+        /// something common which would allow access to child queries so what follows here are query-specific
+        /// implementations for accessing embedded query elements. 
+        /// </summary>
+        private static void  GetTermsFromBooleanQuery(BooleanQuery query, HashSet<WeightedTerm> terms, bool prohibited, string fieldName)
+        {
             BooleanClause[] queryClauses = query.GetClauses();
             for (int i = 0; i < queryClauses.Length; i++)
             {
                 if (prohibited || queryClauses[i].Occur != Occur.MUST_NOT)
                     GetTerms(queryClauses[i].Query, terms, prohibited, fieldName);
             }
-		}
+        }
         private static void GetTermsFromFilteredQuery(FilteredQuery query, HashSet<WeightedTerm> terms, bool prohibited, string fieldName)
-		{
-			GetTerms(query.Query, terms, prohibited, fieldName);
-		}
-	}
+        {
+            GetTerms(query.Query, terms, prohibited, fieldName);
+        }
+    }
 }

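As the class summary above stresses, multi-term queries must be rewritten before extraction. A typical call sequence, assuming query and reader variables are in scope:

    // Expand wildcard/prefix/range terms first, then extract.
    Query rewritten = query.Rewrite(reader);
    WeightedTerm[] terms = QueryTermExtractor.GetTerms(rewritten);

    // IDF-weighted variant for graded highlighting on one field:
    WeightedTerm[] weighted =
        QueryTermExtractor.GetIdfWeightedTerms(rewritten, reader, "content");
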
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Highlighter/SimpleHTMLFormatter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Highlighter/SimpleHTMLFormatter.cs b/src/contrib/Highlighter/SimpleHTMLFormatter.cs
index 0013f2a..ea51b05 100644
--- a/src/contrib/Highlighter/SimpleHTMLFormatter.cs
+++ b/src/contrib/Highlighter/SimpleHTMLFormatter.cs
@@ -17,47 +17,47 @@
 
 namespace Lucene.Net.Search.Highlight
 {
-	/// <summary> Simple <see cref="IFormatter"/> implementation to highlight terms with a pre and post tag</summary>
-	/// <author>  MAHarwood
-	/// 
-	/// </author>
-	public class SimpleHTMLFormatter : IFormatter
-	{
-		internal System.String preTag;
-		internal System.String postTag;
-		
-		
-		public SimpleHTMLFormatter(System.String preTag, System.String postTag)
-		{
-			this.preTag = preTag;
-			this.postTag = postTag;
-		}
-		
-		/// <summary> Default constructor uses HTML: &lt;B&gt; tags to markup terms
-		/// 
-		/// 
-		/// </summary>
-		public SimpleHTMLFormatter()
-		{
-			this.preTag = "<B>";
-			this.postTag = "</B>";
-		}
-		
-		/* (non-Javadoc)
-		* <see cref="Lucene.Net.Highlight.Formatter.highlightTerm(java.lang.String, Lucene.Net.Highlight.TokenGroup)"/>
-		*/
-		public virtual System.String HighlightTerm(System.String originalText, TokenGroup tokenGroup)
-		{
-			System.Text.StringBuilder returnBuffer;
-			if (tokenGroup.TotalScore > 0)
-			{
-				returnBuffer = new System.Text.StringBuilder();
-				returnBuffer.Append(preTag);
-				returnBuffer.Append(originalText);
-				returnBuffer.Append(postTag);
-				return returnBuffer.ToString();
-			}
-			return originalText;
-		}
-	}
+    /// <summary> Simple <see cref="IFormatter"/> implementation to highlight terms with a pre and post tag</summary>
+    /// <author>  MAHarwood
+    /// 
+    /// </author>
+    public class SimpleHTMLFormatter : IFormatter
+    {
+        internal System.String preTag;
+        internal System.String postTag;
+        
+        
+        public SimpleHTMLFormatter(System.String preTag, System.String postTag)
+        {
+            this.preTag = preTag;
+            this.postTag = postTag;
+        }
+        
+        /// <summary> Default constructor uses HTML: &lt;B&gt; tags to markup terms
+        /// 
+        /// 
+        /// </summary>
+        public SimpleHTMLFormatter()
+        {
+            this.preTag = "<B>";
+            this.postTag = "</B>";
+        }
+        
+        /* (non-Javadoc)
+        * <see cref="Lucene.Net.Highlight.Formatter.highlightTerm(java.lang.String, Lucene.Net.Highlight.TokenGroup)"/>
+        */
+        public virtual System.String HighlightTerm(System.String originalText, TokenGroup tokenGroup)
+        {
+            System.Text.StringBuilder returnBuffer;
+            if (tokenGroup.TotalScore > 0)
+            {
+                returnBuffer = new System.Text.StringBuilder();
+                returnBuffer.Append(preTag);
+                returnBuffer.Append(originalText);
+                returnBuffer.Append(postTag);
+                return returnBuffer.ToString();
+            }
+            return originalText;
+        }
+    }
 }
\ No newline at end of file

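Both constructors above in use; the tags are plain strings, so any markup can replace the default <B>...</B> pair:

    var bold = new SimpleHTMLFormatter();                    // wraps hits in <B>...</B>
    var em   = new SimpleHTMLFormatter("<em>", "</em>");     // custom tags
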
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Highlighter/TokenSources.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Highlighter/TokenSources.cs b/src/contrib/Highlighter/TokenSources.cs
index dc44bb5..745ac2a 100644
--- a/src/contrib/Highlighter/TokenSources.cs
+++ b/src/contrib/Highlighter/TokenSources.cs
@@ -144,8 +144,8 @@ namespace Lucene.Net.Search.Highlight
         /// - reanalyzing the original content - 980 milliseconds
         /// 
         /// The re-analyze timings will typically vary depending on -
-        /// 	1) The complexity of the analyzer code (timings above were using a 
-        /// 	   stemmer/lowercaser/stopword combo)
+        ///     1) The complexity of the analyzer code (timings above were using a 
+        ///        stemmer/lowercaser/stopword combo)
         ///  2) The  number of other fields (Lucene reads ALL fields off the disk 
         ///     when accessing just one document field - can cost dear!)
         ///  3) Use of compression on field storage - could be faster due to compression (less disk IO)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Highlighter/WeightedSpanTerm.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Highlighter/WeightedSpanTerm.cs b/src/contrib/Highlighter/WeightedSpanTerm.cs
index 7d94383..d89b915 100644
--- a/src/contrib/Highlighter/WeightedSpanTerm.cs
+++ b/src/contrib/Highlighter/WeightedSpanTerm.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Queries/BooleanFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Queries/BooleanFilter.cs b/src/contrib/Queries/BooleanFilter.cs
index e964498..de4b8fd 100644
--- a/src/contrib/Queries/BooleanFilter.cs
+++ b/src/contrib/Queries/BooleanFilter.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Queries/BoostingQuery.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Queries/BoostingQuery.cs b/src/contrib/Queries/BoostingQuery.cs
index d2716be..5d57d2c 100644
--- a/src/contrib/Queries/BoostingQuery.cs
+++ b/src/contrib/Queries/BoostingQuery.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Queries/DuplicateFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Queries/DuplicateFilter.cs b/src/contrib/Queries/DuplicateFilter.cs
index 274f8ab..d5f4745 100644
--- a/src/contrib/Queries/DuplicateFilter.cs
+++ b/src/contrib/Queries/DuplicateFilter.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Queries/FilterClause.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Queries/FilterClause.cs b/src/contrib/Queries/FilterClause.cs
index 06f6ef4..f292914 100644
--- a/src/contrib/Queries/FilterClause.cs
+++ b/src/contrib/Queries/FilterClause.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Queries/FuzzyLikeThisQuery.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Queries/FuzzyLikeThisQuery.cs b/src/contrib/Queries/FuzzyLikeThisQuery.cs
index c28d94c..09f1c8c 100644
--- a/src/contrib/Queries/FuzzyLikeThisQuery.cs
+++ b/src/contrib/Queries/FuzzyLikeThisQuery.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Queries/Properties/AssemblyInfo.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Queries/Properties/AssemblyInfo.cs b/src/contrib/Queries/Properties/AssemblyInfo.cs
index fd774f2..0938b45 100644
--- a/src/contrib/Queries/Properties/AssemblyInfo.cs
+++ b/src/contrib/Queries/Properties/AssemblyInfo.cs
@@ -1,4 +1,4 @@
-/*
+/*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Queries/Similar/MoreLikeThis.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Queries/Similar/MoreLikeThis.cs b/src/contrib/Queries/Similar/MoreLikeThis.cs
index 7b5f335..62883a8 100644
--- a/src/contrib/Queries/Similar/MoreLikeThis.cs
+++ b/src/contrib/Queries/Similar/MoreLikeThis.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -780,30 +780,30 @@ namespace Lucene.Net.Search.Similar
         private void AddTermFrequencies(System.IO.TextReader r, IDictionary<string,Int> termFreqMap, System.String fieldName)
         {
             TokenStream ts = analyzer.TokenStream(fieldName, r);
-			int tokenCount=0;
-			// for every token
+            int tokenCount=0;
+            // for every token
             ITermAttribute termAtt = ts.AddAttribute<ITermAttribute>();
-			
-			while (ts.IncrementToken()) {
-				string word = termAtt.Term;
-				tokenCount++;
-				if(tokenCount>maxNumTokensParsed)
-				{
-					break;
-				}
-				if(IsNoiseWord(word)){
-					continue;
-				}
-				
-				// increment frequency
-				Int cnt = termFreqMap[word];
-				if (cnt == null) {
+            
+            while (ts.IncrementToken()) {
+                string word = termAtt.Term;
+                tokenCount++;
+                if(tokenCount>maxNumTokensParsed)
+                {
+                    break;
+                }
+                if(IsNoiseWord(word)){
+                    continue;
+                }
+                
+                // increment frequency
+                Int cnt = termFreqMap[word];
+                if (cnt == null) {
                     termFreqMap[word] = new Int();
-				}
-				else {
-					cnt.x++;
-				}
-			}
+                }
+                else {
+                    cnt.x++;
+                }
+            }
         }
 
 

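A note on the AddTermFrequencies loop above: the 'termFreqMap[word] == null' check assumes a Java-style map (such as Lucene.Net's Support HashMap) whose indexer yields null for a missing key; the BCL Dictionary would throw instead. A hedged sketch of the same counting pattern written against a plain Dictionary<string, Int>:

    Int cnt;
    if (!termFreqMap.TryGetValue(word, out cnt))
        termFreqMap[word] = new Int();   // first occurrence: insert a fresh counter
    else
        cnt.x++;                         // later occurrences: bump the mutable box
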
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Queries/Similar/MoreLikeThisQuery.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Queries/Similar/MoreLikeThisQuery.cs b/src/contrib/Queries/Similar/MoreLikeThisQuery.cs
index 7cd4681..ae071c0 100644
--- a/src/contrib/Queries/Similar/MoreLikeThisQuery.cs
+++ b/src/contrib/Queries/Similar/MoreLikeThisQuery.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Queries/Similar/SimilarityQueries.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Queries/Similar/SimilarityQueries.cs b/src/contrib/Queries/Similar/SimilarityQueries.cs
index 8a1f38b..2bbd5d4 100644
--- a/src/contrib/Queries/Similar/SimilarityQueries.cs
+++ b/src/contrib/Queries/Similar/SimilarityQueries.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Queries/TermsFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Queries/TermsFilter.cs b/src/contrib/Queries/TermsFilter.cs
index 263d2f5..3426732 100644
--- a/src/contrib/Queries/TermsFilter.cs
+++ b/src/contrib/Queries/TermsFilter.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Regex/CSharpRegexCapabilities.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Regex/CSharpRegexCapabilities.cs b/src/contrib/Regex/CSharpRegexCapabilities.cs
index 312ec4a..21174a2 100644
--- a/src/contrib/Regex/CSharpRegexCapabilities.cs
+++ b/src/contrib/Regex/CSharpRegexCapabilities.cs
@@ -1,4 +1,4 @@
-/* 
+/* 
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -19,73 +19,73 @@ using System;
 
 namespace Contrib.Regex
 {
-	/// <summary>
-	/// C# Regex based implementation of <see cref="IRegexCapabilities"/>.
-	/// </summary>
-	/// <remarks>http://www.java2s.com/Open-Source/Java-Document/Net/lucene-connector/org/apache/lucene/search/regex/JavaUtilRegexCapabilities.java.htm</remarks>
-	public class CSharpRegexCapabilities : IRegexCapabilities, IEquatable<CSharpRegexCapabilities>
-	{
-		private System.Text.RegularExpressions.Regex _rPattern;
+    /// <summary>
+    /// C# Regex based implementation of <see cref="IRegexCapabilities"/>.
+    /// </summary>
+    /// <remarks>http://www.java2s.com/Open-Source/Java-Document/Net/lucene-connector/org/apache/lucene/search/regex/JavaUtilRegexCapabilities.java.htm</remarks>
+    public class CSharpRegexCapabilities : IRegexCapabilities, IEquatable<CSharpRegexCapabilities>
+    {
+        private System.Text.RegularExpressions.Regex _rPattern;
 
-		/// <summary>
-		/// Called by the constructor of <see cref="RegexTermEnum"/> allowing implementations to cache 
-		/// a compiled version of the regular expression pattern.
-		/// </summary>
-		/// <param name="pattern">regular expression pattern</param>
-		public void Compile(string pattern)
-		{
-			_rPattern = new System.Text.RegularExpressions.Regex(pattern, 
-				System.Text.RegularExpressions.RegexOptions.Compiled);
-		}
+        /// <summary>
+        /// Called by the constructor of <see cref="RegexTermEnum"/> allowing implementations to cache 
+        /// a compiled version of the regular expression pattern.
+        /// </summary>
+        /// <param name="pattern">regular expression pattern</param>
+        public void Compile(string pattern)
+        {
+            _rPattern = new System.Text.RegularExpressions.Regex(pattern, 
+                System.Text.RegularExpressions.RegexOptions.Compiled);
+        }
 
-		/// <summary>
-		/// True on match.
-		/// </summary>
-		/// <param name="s">text to match</param>
-		/// <returns>true on match</returns>
-		public bool Match(string s)
-		{
-			return _rPattern.IsMatch(s);
-		}
+        /// <summary>
+        /// True on match.
+        /// </summary>
+        /// <param name="s">text to match</param>
+        /// <returns>true on match</returns>
+        public bool Match(string s)
+        {
+            return _rPattern.IsMatch(s);
+        }
 
-		/// <summary>
-		/// A wise prefix implementation can reduce the term enumeration (and thus performance)
-		/// of RegexQuery dramatically.
-		/// </summary>
-		/// <returns>static non-regex prefix of the pattern last passed to <see cref="IRegexCapabilities.Compile"/>.
-		///   May return null</returns>
-		public string Prefix()
-		{
-			return null;
-		}
+        /// <summary>
+        /// A wise prefix implementation can reduce the term enumeration (and thus performance)
+        /// of RegexQuery dramatically.
+        /// </summary>
+        /// <returns>static non-regex prefix of the pattern last passed to <see cref="IRegexCapabilities.Compile"/>.
+        ///   May return null</returns>
+        public string Prefix()
+        {
+            return null;
+        }
 
-		/// <summary>
-		/// Indicates whether the current object is equal to another object of the same type.
-		/// </summary>
-		/// <returns>
-		/// true if the current object is equal to the <paramref name="other"/> parameter; otherwise, false.
-		/// </returns>
-		/// <param name="other">An object to compare with this object</param>
-		public bool Equals(CSharpRegexCapabilities other)
-		{
-			if (other == null) return false;
-			if (this == other) return true;
+        /// <summary>
+        /// Indicates whether the current object is equal to another object of the same type.
+        /// </summary>
+        /// <returns>
+        /// true if the current object is equal to the <paramref name="other"/> parameter; otherwise, false.
+        /// </returns>
+        /// <param name="other">An object to compare with this object</param>
+        public bool Equals(CSharpRegexCapabilities other)
+        {
+            if (other == null) return false;
+            if (this == other) return true;
 
-			if (_rPattern != null ? !_rPattern.Equals(other._rPattern) : other._rPattern != null)
-				return false;
+            if (_rPattern != null ? !_rPattern.Equals(other._rPattern) : other._rPattern != null)
+                return false;
 
-			return true;
-		}
+            return true;
+        }
 
-		public override bool Equals(object obj)
-		{
-			if (obj as CSharpRegexCapabilities == null) return false;
-			return Equals((CSharpRegexCapabilities) obj);
-		}
+        public override bool Equals(object obj)
+        {
+            if (obj as CSharpRegexCapabilities == null) return false;
+            return Equals((CSharpRegexCapabilities) obj);
+        }
 
-		public override int GetHashCode()
-		{
-			return (_rPattern != null ? _rPattern.GetHashCode() : 0);
-		}
-	}
+        public override int GetHashCode()
+        {
+            return (_rPattern != null ? _rPattern.GetHashCode() : 0);
+        }
+    }
 }

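A minimal usage sketch for the class above (the pattern and term text are illustrative):

    var caps = new CSharpRegexCapabilities();
    caps.Compile("luc.*");             // cached as a compiled .NET Regex
    bool hit = caps.Match("lucene");   // true: IsMatch against the cached pattern
    string p = caps.Prefix();          // null: this implementation opts out of
                                       // prefix-based term-enum pruning
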
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Regex/IRegexCapabilities.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Regex/IRegexCapabilities.cs b/src/contrib/Regex/IRegexCapabilities.cs
index 64f71ea..e5225eb 100644
--- a/src/contrib/Regex/IRegexCapabilities.cs
+++ b/src/contrib/Regex/IRegexCapabilities.cs
@@ -1,4 +1,4 @@
-/* 
+/* 
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -17,32 +17,32 @@
 
 namespace Contrib.Regex
 {
-	/// <summary>
-	/// Defines basic operations needed by <see cref="RegexQuery"/> for a regular expression implementation.
-	/// </summary>
-	/// <remarks>http://www.java2s.com/Open-Source/Java-Document/Net/lucene-connector/org/apache/lucene/search/regex/RegexCapabilities.java.htm</remarks>
-	public interface IRegexCapabilities
-	{
-		/// <summary>
-		/// Called by the constructor of <see cref="RegexTermEnum"/> allowing implementations to cache 
-		/// a compiled version of the regular expression pattern.
-		/// </summary>
-		/// <param name="pattern">regular expression pattern</param>
-		void Compile(string pattern);
+    /// <summary>
+    /// Defines basic operations needed by <see cref="RegexQuery"/> for a regular expression implementation.
+    /// </summary>
+    /// <remarks>http://www.java2s.com/Open-Source/Java-Document/Net/lucene-connector/org/apache/lucene/search/regex/RegexCapabilities.java.htm</remarks>
+    public interface IRegexCapabilities
+    {
+        /// <summary>
+        /// Called by the constructor of <see cref="RegexTermEnum"/> allowing implementations to cache 
+        /// a compiled version of the regular expression pattern.
+        /// </summary>
+        /// <param name="pattern">regular expression pattern</param>
+        void Compile(string pattern);
 
-		/// <summary>
-		/// True on match.
-		/// </summary>
-		/// <param name="s">text to match</param>
-		/// <returns>true on match</returns>
-		bool Match(string s);
+        /// <summary>
+        /// True on match.
+        /// </summary>
+        /// <param name="s">text to match</param>
+        /// <returns>true on match</returns>
+        bool Match(string s);
 
-		/// <summary>
-		/// A wise prefix implementation can reduce the term enumeration (and thus performance)
-		/// of RegexQuery dramatically.
-		/// </summary>
-		/// <returns>static non-regex prefix of the pattern last passed to <see cref="Compile"/>.
-		///   May return null</returns>
-		string Prefix();
-	}
+        /// <summary>
+        /// A wise prefix implementation can reduce the term enumeration (and thus performance)
+        /// of RegexQuery dramatically.
+        /// </summary>
+        /// <returns>static non-regex prefix of the pattern last passed to <see cref="Compile"/>.
+        ///   May return null</returns>
+        string Prefix();
+    }
 }

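As the Prefix() docs above suggest, an implementation that reports the pattern's static head can prune term enumeration substantially. A hypothetical sketch of such an implementation (this class is illustrative, not part of the patch):

    public class PrefixAwareRegexCapabilities : IRegexCapabilities
    {
        private System.Text.RegularExpressions.Regex _rx;
        private string _pattern;

        public void Compile(string pattern)
        {
            _pattern = pattern;
            _rx = new System.Text.RegularExpressions.Regex(
                pattern, System.Text.RegularExpressions.RegexOptions.Compiled);
        }

        public bool Match(string s)
        {
            return _rx.IsMatch(s);
        }

        public string Prefix()
        {
            // Everything before the first regex metacharacter is a literal
            // prefix; return null when the pattern starts with one.
            int i = _pattern.IndexOfAny(".*+?[({\\|^$".ToCharArray());
            return i > 0 ? _pattern.Substring(0, i) : null;
        }
    }
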
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Regex/IRegexQueryCapable.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Regex/IRegexQueryCapable.cs b/src/contrib/Regex/IRegexQueryCapable.cs
index d9692b6..b65513b 100644
--- a/src/contrib/Regex/IRegexQueryCapable.cs
+++ b/src/contrib/Regex/IRegexQueryCapable.cs
@@ -1,4 +1,4 @@
-/* 
+/* 
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -17,12 +17,12 @@
 
 namespace Contrib.Regex
 {
-	/// <summary>
-	/// Defines methods for regular expression supporting queries to use.
-	/// </summary>
-	/// <remarks>http://www.java2s.com/Open-Source/Java-Document/Net/lucene-connector/org/apache/lucene/search/regex/RegexQueryCapable.java.htm</remarks>
-	public interface IRegexQueryCapable
-	{
-	    IRegexCapabilities RegexImplementation { set; get; }
-	}
+    /// <summary>
+    /// Defines methods for regular expression supporting queries to use.
+    /// </summary>
+    /// <remarks>http://www.java2s.com/Open-Source/Java-Document/Net/lucene-connector/org/apache/lucene/search/regex/RegexQueryCapable.java.htm</remarks>
+    public interface IRegexQueryCapable
+    {
+        IRegexCapabilities RegexImplementation { set; get; }
+    }
 }

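A usage sketch for the property above, assuming Contrib.Regex's RegexQuery implements IRegexQueryCapable (the constructor shown is illustrative):

    var query = new RegexQuery(new Term("body", "luc[ei]ne"));
    query.RegexImplementation = new CSharpRegexCapabilities();  // swap in the
                                                                // .NET regex engine
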
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Regex/Properties/AssemblyInfo.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Regex/Properties/AssemblyInfo.cs b/src/contrib/Regex/Properties/AssemblyInfo.cs
index 36cbb93..c9ff782 100644
--- a/src/contrib/Regex/Properties/AssemblyInfo.cs
+++ b/src/contrib/Regex/Properties/AssemblyInfo.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.


[02/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/SegmentReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/SegmentReader.cs b/src/core/Index/SegmentReader.cs
index 8cc5d3b..e233718 100644
--- a/src/core/Index/SegmentReader.cs
+++ b/src/core/Index/SegmentReader.cs
@@ -30,126 +30,126 @@ using DefaultSimilarity = Lucene.Net.Search.DefaultSimilarity;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <version>  $Id 
-	/// </version>
-	/// <summary> <p/><b>NOTE:</b> This API is new and still experimental
-	/// (subject to change suddenly in the next release)<p/>
-	/// </summary>
-	public class SegmentReader : IndexReader
-	{
-		public SegmentReader()
-		{
-			InitBlock();
-		}
-		private void  InitBlock()
-		{
-			fieldsReaderLocal = new FieldsReaderLocal(this);
-		}
-		protected internal bool readOnly;
-		
-		private SegmentInfo si;
-		private int readBufferSize;
-		
-		internal CloseableThreadLocal<FieldsReader> fieldsReaderLocal;
+    
+    /// <version>  $Id 
+    /// </version>
+    /// <summary> <p/><b>NOTE:</b> This API is new and still experimental
+    /// (subject to change suddenly in the next release)<p/>
+    /// </summary>
+    public class SegmentReader : IndexReader
+    {
+        public SegmentReader()
+        {
+            InitBlock();
+        }
+        private void  InitBlock()
+        {
+            fieldsReaderLocal = new FieldsReaderLocal(this);
+        }
+        protected internal bool readOnly;
+        
+        private SegmentInfo si;
+        private int readBufferSize;
+        
+        internal CloseableThreadLocal<FieldsReader> fieldsReaderLocal;
         internal CloseableThreadLocal<TermVectorsReader> termVectorsLocal = new CloseableThreadLocal<TermVectorsReader>();
-		
-		internal BitVector deletedDocs = null;
-		internal Ref deletedDocsRef = null;
-		private bool deletedDocsDirty = false;
-		private bool normsDirty = false;
-		private int pendingDeleteCount;
-		
-		private bool rollbackHasChanges = false;
-		private bool rollbackDeletedDocsDirty = false;
-		private bool rollbackNormsDirty = false;
+        
+        internal BitVector deletedDocs = null;
+        internal Ref deletedDocsRef = null;
+        private bool deletedDocsDirty = false;
+        private bool normsDirty = false;
+        private int pendingDeleteCount;
+        
+        private bool rollbackHasChanges = false;
+        private bool rollbackDeletedDocsDirty = false;
+        private bool rollbackNormsDirty = false;
         private SegmentInfo rollbackSegmentInfo;
-		private int rollbackPendingDeleteCount;
-		
-		// optionally used for the .nrm file shared by multiple norms
-		private IndexInput singleNormStream;
-		private Ref singleNormRef;
-		
-		internal CoreReaders core;
-		
-		// Holds core readers that are shared (unchanged) when
-		// SegmentReader is cloned or reopened
-		public /*internal*/ sealed class CoreReaders
-		{
-			
-			// Counts how many other reader share the core objects
-			// (freqStream, proxStream, tis, etc.) of this reader;
-			// when coreRef drops to 0, these core objects may be
-			// closed.  A given insance of SegmentReader may be
-			// closed, even those it shares core objects with other
-			// SegmentReaders:
-			private readonly Ref ref_Renamed = new Ref();
-			
-			internal System.String segment;
-			internal FieldInfos fieldInfos;
-			internal IndexInput freqStream;
-			internal IndexInput proxStream;
-			internal TermInfosReader tisNoIndex;
-			
-			internal Directory dir;
-			internal Directory cfsDir;
-			internal int readBufferSize;
-			internal int termsIndexDivisor;
+        private int rollbackPendingDeleteCount;
+        
+        // optionally used for the .nrm file shared by multiple norms
+        private IndexInput singleNormStream;
+        private Ref singleNormRef;
+        
+        internal CoreReaders core;
+        
+        // Holds core readers that are shared (unchanged) when
+        // SegmentReader is cloned or reopened
+        public /*internal*/ sealed class CoreReaders
+        {
+            
+            // Counts how many other readers share the core objects
+            // (freqStream, proxStream, tis, etc.) of this reader;
+            // when coreRef drops to 0, these core objects may be
+            // closed.  A given instance of SegmentReader may be
+            // closed, even though it shares core objects with other
+            // SegmentReaders:
+            private readonly Ref ref_Renamed = new Ref();
+            
+            internal System.String segment;
+            internal FieldInfos fieldInfos;
+            internal IndexInput freqStream;
+            internal IndexInput proxStream;
+            internal TermInfosReader tisNoIndex;
+            
+            internal Directory dir;
+            internal Directory cfsDir;
+            internal int readBufferSize;
+            internal int termsIndexDivisor;
 
             internal SegmentReader origInstance;
-			
-			internal TermInfosReader tis;
-			internal FieldsReader fieldsReaderOrig;
-			internal TermVectorsReader termVectorsReaderOrig;
-			internal CompoundFileReader cfsReader;
-			internal CompoundFileReader storeCFSReader;
+            
+            internal TermInfosReader tis;
+            internal FieldsReader fieldsReaderOrig;
+            internal TermVectorsReader termVectorsReaderOrig;
+            internal CompoundFileReader cfsReader;
+            internal CompoundFileReader storeCFSReader;
 
             internal CoreReaders(SegmentReader origInstance, Directory dir, SegmentInfo si, int readBufferSize, int termsIndexDivisor)
-			{
-				segment = si.name;
-				this.readBufferSize = readBufferSize;
-				this.dir = dir;
-				
-				bool success = false;
-				
-				try
-				{
-					Directory dir0 = dir;
-					if (si.GetUseCompoundFile())
-					{
-						cfsReader = new CompoundFileReader(dir, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION, readBufferSize);
-						dir0 = cfsReader;
-					}
-					cfsDir = dir0;
-					
-					fieldInfos = new FieldInfos(cfsDir, segment + "." + IndexFileNames.FIELD_INFOS_EXTENSION);
-					
-					this.termsIndexDivisor = termsIndexDivisor;
-					var reader = new TermInfosReader(cfsDir, segment, fieldInfos, readBufferSize, termsIndexDivisor);
-					if (termsIndexDivisor == - 1)
-					{
-						tisNoIndex = reader;
-					}
-					else
-					{
-						tis = reader;
-						tisNoIndex = null;
-					}
-					
-					// make sure that all index files have been read or are kept open
-					// so that if an index update removes them we'll still have them
-					freqStream = cfsDir.OpenInput(segment + "." + IndexFileNames.FREQ_EXTENSION, readBufferSize);
-					
-					proxStream = fieldInfos.HasProx() ? cfsDir.OpenInput(segment + "." + IndexFileNames.PROX_EXTENSION, readBufferSize) : null;
-					success = true;
-				}
-				finally
-				{
-					if (!success)
-					{
-						DecRef();
-					}
-				}
+            {
+                segment = si.name;
+                this.readBufferSize = readBufferSize;
+                this.dir = dir;
+                
+                bool success = false;
+                
+                try
+                {
+                    Directory dir0 = dir;
+                    if (si.GetUseCompoundFile())
+                    {
+                        cfsReader = new CompoundFileReader(dir, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION, readBufferSize);
+                        dir0 = cfsReader;
+                    }
+                    cfsDir = dir0;
+                    
+                    fieldInfos = new FieldInfos(cfsDir, segment + "." + IndexFileNames.FIELD_INFOS_EXTENSION);
+                    
+                    this.termsIndexDivisor = termsIndexDivisor;
+                    var reader = new TermInfosReader(cfsDir, segment, fieldInfos, readBufferSize, termsIndexDivisor);
+                    if (termsIndexDivisor == - 1)
+                    {
+                        tisNoIndex = reader;
+                    }
+                    else
+                    {
+                        tis = reader;
+                        tisNoIndex = null;
+                    }
+                    
+                    // make sure that all index files have been read or are kept open
+                    // so that if an index update removes them we'll still have them
+                    freqStream = cfsDir.OpenInput(segment + "." + IndexFileNames.FREQ_EXTENSION, readBufferSize);
+                    
+                    proxStream = fieldInfos.HasProx() ? cfsDir.OpenInput(segment + "." + IndexFileNames.PROX_EXTENSION, readBufferSize) : null;
+                    success = true;
+                }
+                finally
+                {
+                    if (!success)
+                    {
+                        DecRef();
+                    }
+                }
 
 
                 // Must assign this at the end -- if we hit an
@@ -157,522 +157,522 @@ namespace Lucene.Net.Index
                 // purge the FieldCache (will hit NPE because core is
                 // not assigned yet).
                 this.origInstance = origInstance;
-			}
-			
-			internal TermVectorsReader GetTermVectorsReaderOrig()
-			{
-				lock (this)
-				{
-					return termVectorsReaderOrig;
-				}
-			}
-			
-			internal FieldsReader GetFieldsReaderOrig()
-			{
-				lock (this)
-				{
-					return fieldsReaderOrig;
-				}
-			}
-			
-			internal void  IncRef()
-			{
-				lock (this)
-				{
-					ref_Renamed.IncRef();
-				}
-			}
-			
-			internal Directory GetCFSReader()
-			{
-				lock (this)
-				{
-					return cfsReader;
-				}
-			}
-			
-			internal TermInfosReader GetTermsReader()
-			{
-				lock (this)
-				{
-					if (tis != null)
-					{
-						return tis;
-					}
-					else
-					{
-						return tisNoIndex;
-					}
-				}
-			}
-			
-			internal bool TermsIndexIsLoaded()
-			{
-				lock (this)
-				{
-					return tis != null;
-				}
-			}
-			
-			// NOTE: only called from IndexWriter when a near
-			// real-time reader is opened, or applyDeletes is run,
-			// sharing a segment that's still being merged.  This
-			// method is not fully thread safe, and relies on the
-			// synchronization in IndexWriter
-			internal void  LoadTermsIndex(SegmentInfo si, int termsIndexDivisor)
-			{
-				lock (this)
-				{
-					if (tis == null)
-					{
-						Directory dir0;
-						if (si.GetUseCompoundFile())
-						{
-							// In some cases, we were originally opened when CFS
-							// was not used, but then we are asked to open the
-							// terms reader with index, the segment has switched
-							// to CFS
-							if (cfsReader == null)
-							{
-								cfsReader = new CompoundFileReader(dir, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION, readBufferSize);
-							}
-							dir0 = cfsReader;
-						}
-						else
-						{
-							dir0 = dir;
-						}
-						
-						tis = new TermInfosReader(dir0, segment, fieldInfos, readBufferSize, termsIndexDivisor);
-					}
-				}
-			}
-			
-			internal void  DecRef()
-			{
-				lock (this)
-				{
-					
-					if (ref_Renamed.DecRef() == 0)
-					{
-						
-						// close everything, nothing is shared anymore with other readers
-						if (tis != null)
-						{
-							tis.Dispose();
-							// null so if an app hangs on to us we still free most ram
-							tis = null;
-						}
-						
-						if (tisNoIndex != null)
-						{
-							tisNoIndex.Dispose();
-						}
-						
-						if (freqStream != null)
-						{
-							freqStream.Close();
-						}
-						
-						if (proxStream != null)
-						{
-							proxStream.Close();
-						}
-						
-						if (termVectorsReaderOrig != null)
-						{
-							termVectorsReaderOrig.Dispose();
-						}
-						
-						if (fieldsReaderOrig != null)
-						{
+            }
+            
+            internal TermVectorsReader GetTermVectorsReaderOrig()
+            {
+                lock (this)
+                {
+                    return termVectorsReaderOrig;
+                }
+            }
+            
+            internal FieldsReader GetFieldsReaderOrig()
+            {
+                lock (this)
+                {
+                    return fieldsReaderOrig;
+                }
+            }
+            
+            internal void  IncRef()
+            {
+                lock (this)
+                {
+                    ref_Renamed.IncRef();
+                }
+            }
+            
+            internal Directory GetCFSReader()
+            {
+                lock (this)
+                {
+                    return cfsReader;
+                }
+            }
+            
+            internal TermInfosReader GetTermsReader()
+            {
+                lock (this)
+                {
+                    if (tis != null)
+                    {
+                        return tis;
+                    }
+                    else
+                    {
+                        return tisNoIndex;
+                    }
+                }
+            }
+            
+            internal bool TermsIndexIsLoaded()
+            {
+                lock (this)
+                {
+                    return tis != null;
+                }
+            }
+            
+            // NOTE: only called from IndexWriter when a near
+            // real-time reader is opened, or applyDeletes is run,
+            // sharing a segment that's still being merged.  This
+            // method is not fully thread safe, and relies on the
+            // synchronization in IndexWriter
+            internal void  LoadTermsIndex(SegmentInfo si, int termsIndexDivisor)
+            {
+                lock (this)
+                {
+                    if (tis == null)
+                    {
+                        Directory dir0;
+                        if (si.GetUseCompoundFile())
+                        {
+                            // In some cases, we were originally opened when CFS
+                            // was not used, but then we are asked to open the
+                            // terms reader with an index after the segment has
+                            // switched to CFS
+                            if (cfsReader == null)
+                            {
+                                cfsReader = new CompoundFileReader(dir, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION, readBufferSize);
+                            }
+                            dir0 = cfsReader;
+                        }
+                        else
+                        {
+                            dir0 = dir;
+                        }
+                        
+                        tis = new TermInfosReader(dir0, segment, fieldInfos, readBufferSize, termsIndexDivisor);
+                    }
+                }
+            }
+            
+            internal void  DecRef()
+            {
+                lock (this)
+                {
+                    
+                    if (ref_Renamed.DecRef() == 0)
+                    {
+                        
+                        // close everything, nothing is shared anymore with other readers
+                        if (tis != null)
+                        {
+                            tis.Dispose();
+                            // null so if an app hangs on to us we still free most ram
+                            tis = null;
+                        }
+                        
+                        if (tisNoIndex != null)
+                        {
+                            tisNoIndex.Dispose();
+                        }
+                        
+                        if (freqStream != null)
+                        {
+                            freqStream.Close();
+                        }
+                        
+                        if (proxStream != null)
+                        {
+                            proxStream.Close();
+                        }
+                        
+                        if (termVectorsReaderOrig != null)
+                        {
+                            termVectorsReaderOrig.Dispose();
+                        }
+                        
+                        if (fieldsReaderOrig != null)
+                        {
                             fieldsReaderOrig.Dispose();
-						}
-						
-						if (cfsReader != null)
-						{
-							cfsReader.Close();
-						}
-						
-						if (storeCFSReader != null)
-						{
-							storeCFSReader.Close();
-						}
+                        }
+                        
+                        if (cfsReader != null)
+                        {
+                            cfsReader.Close();
+                        }
+                        
+                        if (storeCFSReader != null)
+                        {
+                            storeCFSReader.Close();
+                        }
 
                         // Force FieldCache to evict our entries at this point
                         if (origInstance != null)
                         {
                             Lucene.Net.Search.FieldCache_Fields.DEFAULT.Purge(origInstance);
                         }
-					}
-				}
-			}
-			
-			internal void  OpenDocStores(SegmentInfo si)
-			{
-				lock (this)
-				{
-					
-					System.Diagnostics.Debug.Assert(si.name.Equals(segment));
-					
-					if (fieldsReaderOrig == null)
-					{
-						Directory storeDir;
-						if (si.DocStoreOffset != - 1)
-						{
-							if (si.DocStoreIsCompoundFile)
-							{
-								System.Diagnostics.Debug.Assert(storeCFSReader == null);
-								storeCFSReader = new CompoundFileReader(dir, si.DocStoreSegment + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION, readBufferSize);
-								storeDir = storeCFSReader;
-								System.Diagnostics.Debug.Assert(storeDir != null);
-							}
-							else
-							{
-								storeDir = dir;
-								System.Diagnostics.Debug.Assert(storeDir != null);
-							}
-						}
-						else if (si.GetUseCompoundFile())
-						{
-							// In some cases, we were originally opened when CFS
-							// was not used, but then we are asked to open doc
-							// stores after the segment has switched to CFS
-							if (cfsReader == null)
-							{
-								cfsReader = new CompoundFileReader(dir, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION, readBufferSize);
-							}
-							storeDir = cfsReader;
-							System.Diagnostics.Debug.Assert(storeDir != null);
-						}
-						else
-						{
-							storeDir = dir;
-							System.Diagnostics.Debug.Assert(storeDir != null);
-						}
+                    }
+                }
+            }
+            
+            internal void  OpenDocStores(SegmentInfo si)
+            {
+                lock (this)
+                {
+                    
+                    System.Diagnostics.Debug.Assert(si.name.Equals(segment));
+                    
+                    if (fieldsReaderOrig == null)
+                    {
+                        Directory storeDir;
+                        if (si.DocStoreOffset != - 1)
+                        {
+                            if (si.DocStoreIsCompoundFile)
+                            {
+                                System.Diagnostics.Debug.Assert(storeCFSReader == null);
+                                storeCFSReader = new CompoundFileReader(dir, si.DocStoreSegment + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION, readBufferSize);
+                                storeDir = storeCFSReader;
+                                System.Diagnostics.Debug.Assert(storeDir != null);
+                            }
+                            else
+                            {
+                                storeDir = dir;
+                                System.Diagnostics.Debug.Assert(storeDir != null);
+                            }
+                        }
+                        else if (si.GetUseCompoundFile())
+                        {
+                            // In some cases, we were originally opened when CFS
+                            // was not used, but then we are asked to open doc
+                            // stores after the segment has switched to CFS
+                            if (cfsReader == null)
+                            {
+                                cfsReader = new CompoundFileReader(dir, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION, readBufferSize);
+                            }
+                            storeDir = cfsReader;
+                            System.Diagnostics.Debug.Assert(storeDir != null);
+                        }
+                        else
+                        {
+                            storeDir = dir;
+                            System.Diagnostics.Debug.Assert(storeDir != null);
+                        }
 
-						string storesSegment = si.DocStoreOffset != - 1 ? si.DocStoreSegment : segment;
-						
-						fieldsReaderOrig = new FieldsReader(storeDir, storesSegment, fieldInfos, readBufferSize, si.DocStoreOffset, si.docCount);
-						
-						// Verify two sources of "maxDoc" agree:
-						if (si.DocStoreOffset == - 1 && fieldsReaderOrig.Size() != si.docCount)
-						{
-							throw new CorruptIndexException("doc counts differ for segment " + segment + ": fieldsReader shows " + fieldsReaderOrig.Size() + " but segmentInfo shows " + si.docCount);
-						}
-						
-						if (fieldInfos.HasVectors())
-						{
-							// open term vector files only as needed
-							termVectorsReaderOrig = new TermVectorsReader(storeDir, storesSegment, fieldInfos, readBufferSize, si.DocStoreOffset, si.docCount);
-						}
-					}
-				}
-			}
+                        string storesSegment = si.DocStoreOffset != - 1 ? si.DocStoreSegment : segment;
+                        
+                        fieldsReaderOrig = new FieldsReader(storeDir, storesSegment, fieldInfos, readBufferSize, si.DocStoreOffset, si.docCount);
+                        
+                        // Verify two sources of "maxDoc" agree:
+                        if (si.DocStoreOffset == - 1 && fieldsReaderOrig.Size() != si.docCount)
+                        {
+                            throw new CorruptIndexException("doc counts differ for segment " + segment + ": fieldsReader shows " + fieldsReaderOrig.Size() + " but segmentInfo shows " + si.docCount);
+                        }
+                        
+                        if (fieldInfos.HasVectors())
+                        {
+                            // open term vector files only as needed
+                            termVectorsReaderOrig = new TermVectorsReader(storeDir, storesSegment, fieldInfos, readBufferSize, si.DocStoreOffset, si.docCount);
+                        }
+                    }
+                }
+            }
 
             public FieldInfos fieldInfos_ForNUnit
             {
                 get { return fieldInfos; }
             }
-		}
-		
-		/// <summary> Sets the initial value </summary>
-		private class FieldsReaderLocal : CloseableThreadLocal<FieldsReader>
-		{
-			public FieldsReaderLocal(SegmentReader enclosingInstance)
-			{
-				InitBlock(enclosingInstance);
-			}
-			private void  InitBlock(SegmentReader enclosingInstance)
-			{
-				this.enclosingInstance = enclosingInstance;
-			}
-			private SegmentReader enclosingInstance;
-			public SegmentReader Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			public /*protected internal*/ override FieldsReader InitialValue()
-			{
-				return (FieldsReader) Enclosing_Instance.core.GetFieldsReaderOrig().Clone();
-			}
-		}
-		
-		public /*internal*/ class Ref
-		{
-			private int refCount = 1;
-			
-			public override System.String ToString()
-			{
-				return "refcount: " + refCount;
-			}
-			
-			public virtual int RefCount()
-			{
-				lock (this)
-				{
-					return refCount;
-				}
-			}
-			
-			public virtual int IncRef()
-			{
-				lock (this)
-				{
-					System.Diagnostics.Debug.Assert(refCount > 0);
-					refCount++;
-					return refCount;
-				}
-			}
-			
-			public virtual int DecRef()
-			{
-				lock (this)
-				{
-					System.Diagnostics.Debug.Assert(refCount > 0);
-					refCount--;
-					return refCount;
-				}
-			}
-		}
-		
-		/// <summary> Byte[] referencing is used because a new norm object needs 
-		/// to be created for each clone, and the byte array is all 
-		/// that is needed for sharing between cloned readers.  The 
-		/// current norm referencing is for sharing between readers 
-		/// whereas the byte[] referencing is for copy on write which 
-		/// is independent of reader references (i.e. incRef, decRef).
-		/// </summary>
-		
-		public /*internal*/ sealed class Norm : System.ICloneable
-		{
-			private void  InitBlock(SegmentReader enclosingInstance)
-			{
-				this.enclosingInstance = enclosingInstance;
-			}
-			private SegmentReader enclosingInstance;
-			public SegmentReader Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			internal /*private*/ int refCount = 1;
-			
-			// If this instance is a clone, the originalNorm
-			// references the Norm that has a real open IndexInput:
-			private Norm origNorm;
-			
-			private IndexInput in_Renamed;
-			private readonly long normSeek;
-			
-			// null until bytes is set
-			private Ref bytesRef;
-			internal /*private*/ byte[] bytes;
-			internal /*private*/ bool dirty;
-			internal /*private*/ int number;
-			internal /*private*/ bool rollbackDirty;
-			
-			public Norm(SegmentReader enclosingInstance, IndexInput in_Renamed, int number, long normSeek)
-			{
-				InitBlock(enclosingInstance);
-				this.in_Renamed = in_Renamed;
-				this.number = number;
-				this.normSeek = normSeek;
-			}
-			
-			public void  IncRef()
-			{
-				lock (this)
-				{
-					System.Diagnostics.Debug.Assert(refCount > 0 &&(origNorm == null || origNorm.refCount > 0));
-					refCount++;
-				}
-			}
-			
-			private void  CloseInput()
-			{
-				if (in_Renamed != null)
-				{
-					if (in_Renamed != Enclosing_Instance.singleNormStream)
-					{
-						// It's private to us -- just close it
-						in_Renamed.Dispose();
-					}
-					else
-					{
-						// We are sharing this with others -- decRef and
-						// maybe close the shared norm stream
-						if (Enclosing_Instance.singleNormRef.DecRef() == 0)
-						{
-							Enclosing_Instance.singleNormStream.Dispose();
-							Enclosing_Instance.singleNormStream = null;
-						}
-					}
-					
-					in_Renamed = null;
-				}
-			}
-			
-			public void  DecRef()
-			{
-				lock (this)
-				{
-					System.Diagnostics.Debug.Assert(refCount > 0 &&(origNorm == null || origNorm.refCount > 0));
-					
-					if (--refCount == 0)
-					{
-						if (origNorm != null)
-						{
-							origNorm.DecRef();
-							origNorm = null;
-						}
-						else
-						{
-							CloseInput();
-						}
-						
-						if (bytes != null)
-						{
-							System.Diagnostics.Debug.Assert(bytesRef != null);
-							bytesRef.DecRef();
-							bytes = null;
-							bytesRef = null;
-						}
-						else
-						{
-							System.Diagnostics.Debug.Assert(bytesRef == null);
-						}
-					}
-				}
-			}
-			
-			// Load bytes but do not cache them if they were not
-			// already cached
-			public void  Bytes(byte[] bytesOut, int offset, int len)
-			{
-				lock (this)
-				{
-					System.Diagnostics.Debug.Assert(refCount > 0 &&(origNorm == null || origNorm.refCount > 0));
-					if (bytes != null)
-					{
-						// Already cached -- copy from cache:
-						System.Diagnostics.Debug.Assert(len <= Enclosing_Instance.MaxDoc);
-						Array.Copy(bytes, 0, bytesOut, offset, len);
-					}
-					else
-					{
-						// Not cached
-						if (origNorm != null)
-						{
-							// Ask origNorm to load
-							origNorm.Bytes(bytesOut, offset, len);
-						}
-						else
-						{
-							// We are orig -- read ourselves from disk:
-							lock (in_Renamed)
-							{
-								in_Renamed.Seek(normSeek);
-								in_Renamed.ReadBytes(bytesOut, offset, len, false);
-							}
-						}
-					}
-				}
-			}
-			
-			// Load & cache full bytes array.  Returns bytes.
-			public byte[] Bytes()
-			{
-				lock (this)
-				{
-					System.Diagnostics.Debug.Assert(refCount > 0 &&(origNorm == null || origNorm.refCount > 0));
-					if (bytes == null)
-					{
-						// value not yet read
-						System.Diagnostics.Debug.Assert(bytesRef == null);
-						if (origNorm != null)
-						{
-							// Ask origNorm to load so that for a series of
-							// reopened readers we share a single read-only
-							// byte[]
-							bytes = origNorm.Bytes();
-							bytesRef = origNorm.bytesRef;
-							bytesRef.IncRef();
-							
-							// Once we've loaded the bytes we no longer need
-							// origNorm:
-							origNorm.DecRef();
-							origNorm = null;
-						}
-						else
-						{
-							// We are the origNorm, so load the bytes for real
-							// ourself:
-							int count = Enclosing_Instance.MaxDoc;
-							bytes = new byte[count];
-							
-							// Since we are orig, in must not be null
-							System.Diagnostics.Debug.Assert(in_Renamed != null);
-							
-							// Read from disk.
-							lock (in_Renamed)
-							{
-								in_Renamed.Seek(normSeek);
-								in_Renamed.ReadBytes(bytes, 0, count, false);
-							}
-							
-							bytesRef = new Ref();
-							CloseInput();
-						}
-					}
-					
-					return bytes;
-				}
-			}
-			
-			// Only for testing
-			public /*internal*/ Ref BytesRef()
-			{
-				return bytesRef;
-			}
-			
-			// Called if we intend to change a norm value.  We make a
-			// private copy of bytes if it's shared with others:
-			public byte[] CopyOnWrite()
-			{
-				lock (this)
-				{
-					System.Diagnostics.Debug.Assert(refCount > 0 &&(origNorm == null || origNorm.refCount > 0));
-					Bytes();
-					System.Diagnostics.Debug.Assert(bytes != null);
-					System.Diagnostics.Debug.Assert(bytesRef != null);
-					if (bytesRef.RefCount() > 1)
-					{
-						// I cannot be the origNorm for another norm
-						// instance if I'm being changed.  Ie, only the
-						// "head Norm" can be changed:
-						System.Diagnostics.Debug.Assert(refCount == 1);
-						Ref oldRef = bytesRef;
-						bytes = Enclosing_Instance.CloneNormBytes(bytes);
-						bytesRef = new Ref();
-						oldRef.DecRef();
-					}
-					dirty = true;
-					return bytes;
-				}
-			}
-			
-			// Returns a copy of this Norm instance that shares
-			// IndexInput & bytes with the original one
-			public System.Object Clone()
-			{
+        }
+        
+        /// <summary> Sets the initial value </summary>
+        private class FieldsReaderLocal : CloseableThreadLocal<FieldsReader>
+        {
+            public FieldsReaderLocal(SegmentReader enclosingInstance)
+            {
+                InitBlock(enclosingInstance);
+            }
+            private void  InitBlock(SegmentReader enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
+            private SegmentReader enclosingInstance;
+            public SegmentReader Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+                
+            }
+            public /*protected internal*/ override FieldsReader InitialValue()
+            {
+                return (FieldsReader) Enclosing_Instance.core.GetFieldsReaderOrig().Clone();
+            }
+        }
+        
+        public /*internal*/ class Ref
+        {
+            private int refCount = 1;
+            
+            public override System.String ToString()
+            {
+                return "refcount: " + refCount;
+            }
+            
+            public virtual int RefCount()
+            {
+                lock (this)
+                {
+                    return refCount;
+                }
+            }
+            
+            public virtual int IncRef()
+            {
+                lock (this)
+                {
+                    System.Diagnostics.Debug.Assert(refCount > 0);
+                    refCount++;
+                    return refCount;
+                }
+            }
+            
+            public virtual int DecRef()
+            {
+                lock (this)
+                {
+                    System.Diagnostics.Debug.Assert(refCount > 0);
+                    refCount--;
+                    return refCount;
+                }
+            }
+        }
+        
+        /// <summary> Byte[] referencing is used because a new norm object needs 
+        /// to be created for each clone, and the byte array is all 
+        /// that is needed for sharing between cloned readers.  The 
+        /// current norm referencing is for sharing between readers 
+        /// whereas the byte[] referencing is for copy on write which 
+        /// is independent of reader references (i.e. incRef, decRef).
+        /// </summary>
+        
+        public /*internal*/ sealed class Norm : System.ICloneable
+        {
+            private void  InitBlock(SegmentReader enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
+            private SegmentReader enclosingInstance;
+            public SegmentReader Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+                
+            }
+            internal /*private*/ int refCount = 1;
+            
+            // If this instance is a clone, the originalNorm
+            // references the Norm that has a real open IndexInput:
+            private Norm origNorm;
+            
+            private IndexInput in_Renamed;
+            private readonly long normSeek;
+            
+            // null until bytes is set
+            private Ref bytesRef;
+            internal /*private*/ byte[] bytes;
+            internal /*private*/ bool dirty;
+            internal /*private*/ int number;
+            internal /*private*/ bool rollbackDirty;
+            
+            public Norm(SegmentReader enclosingInstance, IndexInput in_Renamed, int number, long normSeek)
+            {
+                InitBlock(enclosingInstance);
+                this.in_Renamed = in_Renamed;
+                this.number = number;
+                this.normSeek = normSeek;
+            }
+            
+            public void  IncRef()
+            {
+                lock (this)
+                {
+                    System.Diagnostics.Debug.Assert(refCount > 0 &&(origNorm == null || origNorm.refCount > 0));
+                    refCount++;
+                }
+            }
+            
+            private void  CloseInput()
+            {
+                if (in_Renamed != null)
+                {
+                    if (in_Renamed != Enclosing_Instance.singleNormStream)
+                    {
+                        // It's private to us -- just close it
+                        in_Renamed.Dispose();
+                    }
+                    else
+                    {
+                        // We are sharing this with others -- decRef and
+                        // maybe close the shared norm stream
+                        if (Enclosing_Instance.singleNormRef.DecRef() == 0)
+                        {
+                            Enclosing_Instance.singleNormStream.Dispose();
+                            Enclosing_Instance.singleNormStream = null;
+                        }
+                    }
+                    
+                    in_Renamed = null;
+                }
+            }
+            
+            public void  DecRef()
+            {
+                lock (this)
+                {
+                    System.Diagnostics.Debug.Assert(refCount > 0 &&(origNorm == null || origNorm.refCount > 0));
+                    
+                    if (--refCount == 0)
+                    {
+                        if (origNorm != null)
+                        {
+                            origNorm.DecRef();
+                            origNorm = null;
+                        }
+                        else
+                        {
+                            CloseInput();
+                        }
+                        
+                        if (bytes != null)
+                        {
+                            System.Diagnostics.Debug.Assert(bytesRef != null);
+                            bytesRef.DecRef();
+                            bytes = null;
+                            bytesRef = null;
+                        }
+                        else
+                        {
+                            System.Diagnostics.Debug.Assert(bytesRef == null);
+                        }
+                    }
+                }
+            }
+            
+            // Load bytes but do not cache them if they were not
+            // already cached
+            public void  Bytes(byte[] bytesOut, int offset, int len)
+            {
+                lock (this)
+                {
+                    System.Diagnostics.Debug.Assert(refCount > 0 &&(origNorm == null || origNorm.refCount > 0));
+                    if (bytes != null)
+                    {
+                        // Already cached -- copy from cache:
+                        System.Diagnostics.Debug.Assert(len <= Enclosing_Instance.MaxDoc);
+                        Array.Copy(bytes, 0, bytesOut, offset, len);
+                    }
+                    else
+                    {
+                        // Not cached
+                        if (origNorm != null)
+                        {
+                            // Ask origNorm to load
+                            origNorm.Bytes(bytesOut, offset, len);
+                        }
+                        else
+                        {
+                            // We are orig -- read ourselves from disk:
+                            lock (in_Renamed)
+                            {
+                                in_Renamed.Seek(normSeek);
+                                in_Renamed.ReadBytes(bytesOut, offset, len, false);
+                            }
+                        }
+                    }
+                }
+            }
+            
+            // Load & cache full bytes array.  Returns bytes.
+            public byte[] Bytes()
+            {
+                lock (this)
+                {
+                    System.Diagnostics.Debug.Assert(refCount > 0 &&(origNorm == null || origNorm.refCount > 0));
+                    if (bytes == null)
+                    {
+                        // value not yet read
+                        System.Diagnostics.Debug.Assert(bytesRef == null);
+                        if (origNorm != null)
+                        {
+                            // Ask origNorm to load so that for a series of
+                            // reopened readers we share a single read-only
+                            // byte[]
+                            bytes = origNorm.Bytes();
+                            bytesRef = origNorm.bytesRef;
+                            bytesRef.IncRef();
+                            
+                            // Once we've loaded the bytes we no longer need
+                            // origNorm:
+                            origNorm.DecRef();
+                            origNorm = null;
+                        }
+                        else
+                        {
+                            // We are the origNorm, so load the bytes for real
+                            // ourself:
+                            int count = Enclosing_Instance.MaxDoc;
+                            bytes = new byte[count];
+                            
+                            // Since we are orig, in must not be null
+                            System.Diagnostics.Debug.Assert(in_Renamed != null);
+                            
+                            // Read from disk.
+                            lock (in_Renamed)
+                            {
+                                in_Renamed.Seek(normSeek);
+                                in_Renamed.ReadBytes(bytes, 0, count, false);
+                            }
+                            
+                            bytesRef = new Ref();
+                            CloseInput();
+                        }
+                    }
+                    
+                    return bytes;
+                }
+            }
+            
+            // Only for testing
+            public /*internal*/ Ref BytesRef()
+            {
+                return bytesRef;
+            }
+            
+            // Called if we intend to change a norm value.  We make a
+            // private copy of bytes if it's shared with others:
+            public byte[] CopyOnWrite()
+            {
+                lock (this)
+                {
+                    System.Diagnostics.Debug.Assert(refCount > 0 && (origNorm == null || origNorm.refCount > 0));
+                    Bytes();
+                    System.Diagnostics.Debug.Assert(bytes != null);
+                    System.Diagnostics.Debug.Assert(bytesRef != null);
+                    if (bytesRef.RefCount() > 1)
+                    {
+                        // I cannot be the origNorm for another norm
+                        // instance if I'm being changed.  Ie, only the
+                        // "head Norm" can be changed:
+                        System.Diagnostics.Debug.Assert(refCount == 1);
+                        Ref oldRef = bytesRef;
+                        bytes = Enclosing_Instance.CloneNormBytes(bytes);
+                        bytesRef = new Ref();
+                        oldRef.DecRef();
+                    }
+                    dirty = true;
+                    return bytes;
+                }
+            }
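
The copy-on-write step above is the heart of the Norm ref-counting scheme, and the
same pattern reappears later for the deletedDocs BitVector in DoDelete.  A minimal,
self-contained C# sketch of the idea (the Ref and CowBuffer names here are
illustrative, not Lucene.Net API):

    // Illustrative only: a buffer shared between readers that is cloned
    // the first time a writer touches it while others still hold it.
    internal sealed class Ref
    {
        private int count = 1;                       // creator holds the first reference
        public int RefCount() { lock (this) return count; }
        public void IncRef()  { lock (this) count++; }
        public int  DecRef()  { lock (this) return --count; }
    }

    internal sealed class CowBuffer
    {
        private byte[] bytes;
        private Ref bytesRef;

        public CowBuffer(byte[] shared, Ref sharedRef)
        {
            bytes = shared;
            bytesRef = sharedRef;
            bytesRef.IncRef();                       // one more holder of the shared array
        }

        public byte[] CopyOnWrite()
        {
            lock (this)
            {
                if (bytesRef.RefCount() > 1)
                {
                    Ref oldRef = bytesRef;
                    bytes = (byte[]) bytes.Clone();  // private, writable copy
                    bytesRef = new Ref();            // count = 1: only we hold the copy
                    oldRef.DecRef();                 // other holders keep the old array alive
                }
                return bytes;
            }
        }
    }
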
+            
+            // Returns a copy of this Norm instance that shares
+            // IndexInput & bytes with the original one
+            public System.Object Clone()
+            {
                 lock (this) //LUCENENET-375
                 {
                     System.Diagnostics.Debug.Assert(refCount > 0 && (origNorm == null || origNorm.refCount > 0));
@@ -713,30 +713,30 @@ namespace Lucene.Net.Index
 
                     return clone;
                 }
-			}
-			
-			// Flush all pending changes to the next generation
-			// separate norms file.
-			public void  ReWrite(SegmentInfo si)
-			{
-				System.Diagnostics.Debug.Assert(refCount > 0 && (origNorm == null || origNorm.refCount > 0), "refCount=" + refCount + " origNorm=" + origNorm);
-				
-				// NOTE: norms are re-written in regular directory, not cfs
-				si.AdvanceNormGen(this.number);
-				string normFileName = si.GetNormFileName(this.number);
+            }
+            
+            // Flush all pending changes to the next generation
+            // separate norms file.
+            public void  ReWrite(SegmentInfo si)
+            {
+                System.Diagnostics.Debug.Assert(refCount > 0 && (origNorm == null || origNorm.refCount > 0), "refCount=" + refCount + " origNorm=" + origNorm);
+                
+                // NOTE: norms are re-written in regular directory, not cfs
+                si.AdvanceNormGen(this.number);
+                string normFileName = si.GetNormFileName(this.number);
                 IndexOutput @out = enclosingInstance.Directory().CreateOutput(normFileName);
                 bool success = false;
-				try
-				{
-					try {
+                try
+                {
+                    try {
                         @out.WriteBytes(bytes, enclosingInstance.MaxDoc);
                     } finally {
                         @out.Close();
                     }
                     success = true;
-				}
-				finally
-				{
+                }
+                finally
+                {
                     if (!success)
                     {
                         try
@@ -749,62 +749,62 @@ namespace Lucene.Net.Index
                             // original exception
                         }
                     }
-				}
-				this.dirty = false;
-			}
-		}
-		
-		internal System.Collections.Generic.IDictionary<string, Norm> norms = new HashMap<string, Norm>();
-		
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  IOException if there is a low-level IO error </throws>
-		public static SegmentReader Get(bool readOnly, SegmentInfo si, int termInfosIndexDivisor)
-		{
-			return Get(readOnly, si.dir, si, BufferedIndexInput.BUFFER_SIZE, true, termInfosIndexDivisor);
-		}
-		
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  IOException if there is a low-level IO error </throws>
-		public static SegmentReader Get(bool readOnly, Directory dir, SegmentInfo si, int readBufferSize, bool doOpenStores, int termInfosIndexDivisor)
-		{
-			SegmentReader instance = readOnly ? new ReadOnlySegmentReader() : new SegmentReader();
-			instance.readOnly = readOnly;
-			instance.si = si;
-			instance.readBufferSize = readBufferSize;
-			
-			bool success = false;
-			
-			try
-			{
-				instance.core = new CoreReaders(instance, dir, si, readBufferSize, termInfosIndexDivisor);
-				if (doOpenStores)
-				{
-					instance.core.OpenDocStores(si);
-				}
-				instance.LoadDeletedDocs();
-				instance.OpenNorms(instance.core.cfsDir, readBufferSize);
-				success = true;
-			}
-			finally
-			{
-				
-				// With lock-less commits, it's entirely possible (and
-				// fine) to hit a FileNotFound exception above.  In
-				// this case, we want to explicitly close any subset
-				// of things that were opened so that we don't have to
-				// wait for a GC to do so.
-				if (!success)
-				{
-					instance.DoClose();
-				}
-			}
-			return instance;
-		}
-		
-		internal virtual void  OpenDocStores()
-		{
-			core.OpenDocStores(si);
-		}
+                }
+                this.dirty = false;
+            }
+        }
+        
+        internal System.Collections.Generic.IDictionary<string, Norm> norms = new HashMap<string, Norm>();
+        
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  IOException if there is a low-level IO error </throws>
+        public static SegmentReader Get(bool readOnly, SegmentInfo si, int termInfosIndexDivisor)
+        {
+            return Get(readOnly, si.dir, si, BufferedIndexInput.BUFFER_SIZE, true, termInfosIndexDivisor);
+        }
+        
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  IOException if there is a low-level IO error </throws>
+        public static SegmentReader Get(bool readOnly, Directory dir, SegmentInfo si, int readBufferSize, bool doOpenStores, int termInfosIndexDivisor)
+        {
+            SegmentReader instance = readOnly ? new ReadOnlySegmentReader() : new SegmentReader();
+            instance.readOnly = readOnly;
+            instance.si = si;
+            instance.readBufferSize = readBufferSize;
+            
+            bool success = false;
+            
+            try
+            {
+                instance.core = new CoreReaders(instance, dir, si, readBufferSize, termInfosIndexDivisor);
+                if (doOpenStores)
+                {
+                    instance.core.OpenDocStores(si);
+                }
+                instance.LoadDeletedDocs();
+                instance.OpenNorms(instance.core.cfsDir, readBufferSize);
+                success = true;
+            }
+            finally
+            {
+                
+                // With lock-less commits, it's entirely possible (and
+                // fine) to hit a FileNotFound exception above.  In
+                // this case, we want to explicitly close any subset
+                // of things that were opened so that we don't have to
+                // wait for a GC to do so.
+                if (!success)
+                {
+                    instance.DoClose();
+                }
+            }
+            return instance;
+        }
+        
+        internal virtual void  OpenDocStores()
+        {
+            core.OpenDocStores(si);
+        }
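
For orientation, the Get factory above is the usual way a segment is opened.  A
hedged usage sketch: si is an existing SegmentInfo, the divisor 1 loads every
indexed term, and Dispose() as the cleanup call is an assumption (older versions
of the port expose Close() instead):

    // Hypothetical call site for SegmentReader.Get.
    SegmentReader reader = SegmentReader.Get(true /* readOnly */, si, 1);
    try
    {
        int live = reader.NumDocs();  // MaxDoc minus deleted documents
        System.Console.WriteLine("live docs: " + live);
    }
    finally
    {
        reader.Dispose();             // releases core readers, norms and deletions
    }
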
 
         private bool CheckDeletedCounts()
         {
@@ -820,178 +820,178 @@ namespace Lucene.Net.Index
 
             return true;
         }
-		
-		private void  LoadDeletedDocs()
-		{
-			// NOTE: the bitvector is stored using the regular directory, not cfs
+        
+        private void  LoadDeletedDocs()
+        {
+            // NOTE: the bitvector is stored using the regular directory, not cfs
             //if(HasDeletions(si))
-			if (si.HasDeletions())
-			{
-				deletedDocs = new BitVector(Directory(), si.GetDelFileName());
-				deletedDocsRef = new Ref();
+            if (si.HasDeletions())
+            {
+                deletedDocs = new BitVector(Directory(), si.GetDelFileName());
+                deletedDocsRef = new Ref();
 
                 System.Diagnostics.Debug.Assert(CheckDeletedCounts());
-			}
-			else 
-				System.Diagnostics.Debug.Assert(si.GetDelCount() == 0);
-		}
-		
-		/// <summary> Clones the norm bytes.  May be overridden by subclasses.  New and experimental.</summary>
-		/// <param name="bytes">Byte array to clone
-		/// </param>
-		/// <returns> New BitVector
-		/// </returns>
-		protected internal virtual byte[] CloneNormBytes(byte[] bytes)
-		{
-			var cloneBytes = new byte[bytes.Length];
-			Array.Copy(bytes, 0, cloneBytes, 0, bytes.Length);
-			return cloneBytes;
-		}
-		
-		/// <summary> Clones the deleteDocs BitVector.  May be overridden by subclasses. New and experimental.</summary>
-		/// <param name="bv">BitVector to clone
-		/// </param>
-		/// <returns> New BitVector
-		/// </returns>
-		protected internal virtual BitVector CloneDeletedDocs(BitVector bv)
-		{
-			return (BitVector) bv.Clone();
-		}
-		
-		public override System.Object Clone()
-		{
+            }
+            else 
+                System.Diagnostics.Debug.Assert(si.GetDelCount() == 0);
+        }
+        
+        /// <summary> Clones the norm bytes.  May be overridden by subclasses.  New and experimental.</summary>
+        /// <param name="bytes">Byte array to clone
+        /// </param>
+        /// <returns> Cloned byte array
+        /// </returns>
+        protected internal virtual byte[] CloneNormBytes(byte[] bytes)
+        {
+            var cloneBytes = new byte[bytes.Length];
+            Array.Copy(bytes, 0, cloneBytes, 0, bytes.Length);
+            return cloneBytes;
+        }
+        
+        /// <summary> Clones the deleteDocs BitVector.  May be overridden by subclasses. New and experimental.</summary>
+        /// <param name="bv">BitVector to clone
+        /// </param>
+        /// <returns> New BitVector
+        /// </returns>
+        protected internal virtual BitVector CloneDeletedDocs(BitVector bv)
+        {
+            return (BitVector) bv.Clone();
+        }
+        
+        public override System.Object Clone()
+        {
             lock (this)
             {
                 try
                 {
                     return Clone(readOnly); // Preserve current readOnly
                 }
-                catch (System.Exception ex)
+                catch (System.Exception ex)
+                {
+                    throw new System.SystemException(ex.Message, ex);
+                }
+            }
+        }
+        
+        public override IndexReader Clone(bool openReadOnly)
+        {
+            lock (this)
+            {
+                return ReopenSegment(si, true, openReadOnly);
+            }
+        }
+        
+        internal virtual SegmentReader ReopenSegment(SegmentInfo si, bool doClone, bool openReadOnly)
+        {
+            lock (this)
+            {
+                bool deletionsUpToDate = (this.si.HasDeletions() == si.HasDeletions()) && (!si.HasDeletions() || this.si.GetDelFileName().Equals(si.GetDelFileName()));
+                bool normsUpToDate = true;
+                
+                bool[] fieldNormsChanged = new bool[core.fieldInfos.Size()];
+                int fieldCount = core.fieldInfos.Size();
+                for (int i = 0; i < fieldCount; i++)
+                {
+                    if (!this.si.GetNormFileName(i).Equals(si.GetNormFileName(i)))
+                    {
+                        normsUpToDate = false;
+                        fieldNormsChanged[i] = true;
+                    }
+                }
+                
+                // if we're cloning we need to run through the reopenSegment logic
+                // also if both old and new readers aren't readonly, we clone to avoid sharing modifications
+                if (normsUpToDate && deletionsUpToDate && !doClone && openReadOnly && readOnly)
+                {
+                    return this;
+                }
+                
+                // When cloning, the incoming SegmentInfos should not
+                // have any changes in it:
+                System.Diagnostics.Debug.Assert(!doClone || (normsUpToDate && deletionsUpToDate));
+                
+                // clone reader
+                SegmentReader clone = openReadOnly ? new ReadOnlySegmentReader() : new SegmentReader();
+                
+                bool success = false;
+                try
+                {
+                    core.IncRef();
+                    clone.core = core;
+                    clone.readOnly = openReadOnly;
+                    clone.si = si;
+                    clone.readBufferSize = readBufferSize;
+                    
+                    if (!openReadOnly && hasChanges)
+                    {
+                        // My pending changes transfer to the new reader
+                        clone.pendingDeleteCount = pendingDeleteCount;
+                        clone.deletedDocsDirty = deletedDocsDirty;
+                        clone.normsDirty = normsDirty;
+                        clone.hasChanges = hasChanges;
+                        hasChanges = false;
+                    }
+                    
+                    if (doClone)
+                    {
+                        if (deletedDocs != null)
+                        {
+                            deletedDocsRef.IncRef();
+                            clone.deletedDocs = deletedDocs;
+                            clone.deletedDocsRef = deletedDocsRef;
+                        }
+                    }
+                    else
+                    {
+                        if (!deletionsUpToDate)
+                        {
+                            // load deleted docs
+                            System.Diagnostics.Debug.Assert(clone.deletedDocs == null);
+                            clone.LoadDeletedDocs();
+                        }
+                        else if (deletedDocs != null)
+                        {
+                            deletedDocsRef.IncRef();
+                            clone.deletedDocs = deletedDocs;
+                            clone.deletedDocsRef = deletedDocsRef;
+                        }
+                    }
+                    
+                    clone.norms = new HashMap<string, Norm>();
+                    
+                    // Clone norms
+                    for (int i = 0; i < fieldNormsChanged.Length; i++)
+                    {
+                        
+                        // Clone unchanged norms to the cloned reader
+                        if (doClone || !fieldNormsChanged[i])
+                        {
+                            System.String curField = core.fieldInfos.FieldInfo(i).name;
+                            Norm norm = this.norms[curField];
+                            if (norm != null)
+                                clone.norms[curField] = (Norm)norm.Clone();
+                        }
+                    }
+                    
+                    // If we are not cloning, then this will open anew
+                    // any norms that have changed:
+                    clone.OpenNorms(si.GetUseCompoundFile() ? core.GetCFSReader() : Directory(), readBufferSize);
+                    
+                    success = true;
+                }
+                finally
                 {
-                    throw new System.SystemException(ex.Message, ex);
+                    if (!success)
+                    {
+                        // An exception occurred during reopen; we have to decRef the norms
+                        // that we incRef'ed already and close singleNormStream and FieldsReader
+                        clone.DecRef();
+                    }
                 }
+                
+                return clone;
             }
-		}
-		
-		public override IndexReader Clone(bool openReadOnly)
-		{
-			lock (this)
-			{
-				return ReopenSegment(si, true, openReadOnly);
-			}
-		}
-		
-		internal virtual SegmentReader ReopenSegment(SegmentInfo si, bool doClone, bool openReadOnly)
-		{
-			lock (this)
-			{
-				bool deletionsUpToDate = (this.si.HasDeletions() == si.HasDeletions()) && (!si.HasDeletions() || this.si.GetDelFileName().Equals(si.GetDelFileName()));
-				bool normsUpToDate = true;
-				
-				bool[] fieldNormsChanged = new bool[core.fieldInfos.Size()];
-				int fieldCount = core.fieldInfos.Size();
-				for (int i = 0; i < fieldCount; i++)
-				{
-					if (!this.si.GetNormFileName(i).Equals(si.GetNormFileName(i)))
-					{
-						normsUpToDate = false;
-						fieldNormsChanged[i] = true;
-					}
-				}
-				
-				// if we're cloning we need to run through the reopenSegment logic
-				// also if both old and new readers aren't readonly, we clone to avoid sharing modifications
-				if (normsUpToDate && deletionsUpToDate && !doClone && openReadOnly && readOnly)
-				{
-					return this;
-				}
-				
-				// When cloning, the incoming SegmentInfos should not
-				// have any changes in it:
-				System.Diagnostics.Debug.Assert(!doClone ||(normsUpToDate && deletionsUpToDate));
-				
-				// clone reader
-				SegmentReader clone = openReadOnly ? new ReadOnlySegmentReader() : new SegmentReader();
-				
-				bool success = false;
-				try
-				{
-					core.IncRef();
-					clone.core = core;
-					clone.readOnly = openReadOnly;
-					clone.si = si;
-					clone.readBufferSize = readBufferSize;
-					
-					if (!openReadOnly && hasChanges)
-					{
-						// My pending changes transfer to the new reader
-						clone.pendingDeleteCount = pendingDeleteCount;
-						clone.deletedDocsDirty = deletedDocsDirty;
-						clone.normsDirty = normsDirty;
-						clone.hasChanges = hasChanges;
-						hasChanges = false;
-					}
-					
-					if (doClone)
-					{
-						if (deletedDocs != null)
-						{
-							deletedDocsRef.IncRef();
-							clone.deletedDocs = deletedDocs;
-							clone.deletedDocsRef = deletedDocsRef;
-						}
-					}
-					else
-					{
-						if (!deletionsUpToDate)
-						{
-							// load deleted docs
-							System.Diagnostics.Debug.Assert(clone.deletedDocs == null);
-							clone.LoadDeletedDocs();
-						}
-						else if (deletedDocs != null)
-						{
-							deletedDocsRef.IncRef();
-							clone.deletedDocs = deletedDocs;
-							clone.deletedDocsRef = deletedDocsRef;
-						}
-					}
-					
-					clone.norms = new HashMap<string, Norm>();
-					
-					// Clone norms
-					for (int i = 0; i < fieldNormsChanged.Length; i++)
-					{
-						
-						// Clone unchanged norms to the cloned reader
-						if (doClone || !fieldNormsChanged[i])
-						{
-							System.String curField = core.fieldInfos.FieldInfo(i).name;
-							Norm norm = this.norms[curField];
-							if (norm != null)
-								clone.norms[curField] = (Norm)norm.Clone();
-						}
-					}
-					
-					// If we are not cloning, then this will open anew
-					// any norms that have changed:
-					clone.OpenNorms(si.GetUseCompoundFile()?core.GetCFSReader():Directory(), readBufferSize);
-					
-					success = true;
-				}
-				finally
-				{
-					if (!success)
-					{
-						// An exception occured during reopen, we have to decRef the norms
-						// that we incRef'ed already and close singleNormsStream and FieldsReader
-						clone.DecRef();
-					}
-				}
-				
-				return clone;
-			}
-		}
+        }
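
ReopenSegment returns this when nothing changed and both readers are read-only, so
callers must compare instances before releasing the old one.  A hedged sketch of
that idiom ('reader' is an already-open IndexReader; Dispose() is an assumption,
as above):

    IndexReader current = reader.Reopen();
    if (current != reader)
    {
        reader.Dispose();   // the old snapshot is no longer needed
        reader = current;   // switch to the fresher view of the index
    }
    // else: the index was unchanged and the very same instance came back
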
 
         protected internal override void DoCommit(System.Collections.Generic.IDictionary<string, string> commitUserData)
         {
@@ -1071,605 +1071,605 @@ namespace Lucene.Net.Index
             hasChanges = false;
         }
         
-		internal virtual FieldsReader GetFieldsReader()
-		{
-			return fieldsReaderLocal.Get();
-		}
-		
-		protected internal override void  DoClose()
-		{
-			termVectorsLocal.Close();
-			fieldsReaderLocal.Close();
-			
-			if (deletedDocs != null)
-			{
-				deletedDocsRef.DecRef();
-				// null so if an app hangs on to us we still free most ram
-				deletedDocs = null;
-			}
-			
-			foreach(Norm norm in norms.Values)
-			{
-				norm.DecRef();
-			}
-			if (core != null)
-			{
-				core.DecRef();
-			}
-		}
-		
+        internal virtual FieldsReader GetFieldsReader()
+        {
+            return fieldsReaderLocal.Get();
+        }
+        
+        protected internal override void  DoClose()
+        {
+            termVectorsLocal.Close();
+            fieldsReaderLocal.Close();
+            
+            if (deletedDocs != null)
+            {
+                deletedDocsRef.DecRef();
+                // null so if an app hangs on to us we still free most ram
+                deletedDocs = null;
+            }
+            
+            foreach(Norm norm in norms.Values)
+            {
+                norm.DecRef();
+            }
+            if (core != null)
+            {
+                core.DecRef();
+            }
+        }
+        
         //internal static bool HasDeletions(SegmentInfo si)
         //{
         //    // Don't call ensureOpen() here (it could affect performance)
         //    return si.HasDeletions();
         //}
 
-	    public override bool HasDeletions
-	    {
-	        get
-	        {
-	            // Don't call ensureOpen() here (it could affect performance)
-	            return deletedDocs != null;
-	        }
-	    }
+        public override bool HasDeletions
+        {
+            get
+            {
+                // Don't call ensureOpen() here (it could affect performance)
+                return deletedDocs != null;
+            }
+        }
 
-	    internal static bool UsesCompoundFile(SegmentInfo si)
-		{
-			return si.GetUseCompoundFile();
-		}
-		
-		internal static bool HasSeparateNorms(SegmentInfo si)
-		{
-			return si.HasSeparateNorms();
-		}
-		
-		protected internal override void  DoDelete(int docNum)
-		{
-			if (deletedDocs == null)
-			{
-				deletedDocs = new BitVector(MaxDoc);
-				deletedDocsRef = new Ref();
-			}
-			// there is more than 1 SegmentReader with a reference to this
-			// deletedDocs BitVector so decRef the current deletedDocsRef,
-			// clone the BitVector, create a new deletedDocsRef
-			if (deletedDocsRef.RefCount() > 1)
-			{
-				Ref oldRef = deletedDocsRef;
-				deletedDocs = CloneDeletedDocs(deletedDocs);
-				deletedDocsRef = new Ref();
-				oldRef.DecRef();
-			}
-			deletedDocsDirty = true;
-			if (!deletedDocs.GetAndSet(docNum))
-				pendingDeleteCount++;
-		}
-		
-		protected internal override void  DoUndeleteAll()
-		{
-			deletedDocsDirty = false;
-			if (deletedDocs != null)
-			{
-				System.Diagnostics.Debug.Assert(deletedDocsRef != null);
-				deletedDocsRef.DecRef();
-				deletedDocs = null;
-				deletedDocsRef = null;
-				pendingDeleteCount = 0;
-				si.ClearDelGen();
-				si.SetDelCount(0);
-			}
-			else
-			{
-				System.Diagnostics.Debug.Assert(deletedDocsRef == null);
-				System.Diagnostics.Debug.Assert(pendingDeleteCount == 0);
-			}
-		}
-		
-		internal virtual System.Collections.Generic.IList<string> Files()
-		{
-			return si.Files();
-		}
-		
-		public override TermEnum Terms()
-		{
-			EnsureOpen();
-			return core.GetTermsReader().Terms();
-		}
-		
-		public override TermEnum Terms(Term t)
-		{
-			EnsureOpen();
-			return core.GetTermsReader().Terms(t);
-		}
-		
-		public /*internal*/ virtual FieldInfos FieldInfos()
-		{
-			return core.fieldInfos;
-		}
-		
-		public override Document Document(int n, FieldSelector fieldSelector)
-		{
-			EnsureOpen();
-			return GetFieldsReader().Doc(n, fieldSelector);
-		}
-		
-		public override bool IsDeleted(int n)
-		{
-			lock (this)
-			{
-				return (deletedDocs != null && deletedDocs.Get(n));
-			}
-		}
-		
-		public override TermDocs TermDocs(Term term)
-		{
-			if (term == null)
-			{
-				return new AllTermDocs(this);
-			}
-			else
-			{
-				return base.TermDocs(term);
-			}
-		}
-		
-		public override TermDocs TermDocs()
-		{
-			EnsureOpen();
-			return new SegmentTermDocs(this);
-		}
-		
-		public override TermPositions TermPositions()
-		{
-			EnsureOpen();
-			return new SegmentTermPositions(this);
-		}
-		
-		public override int DocFreq(Term t)
-		{
-			EnsureOpen();
-			TermInfo ti = core.GetTermsReader().Get(t);
-			if (ti != null)
-				return ti.docFreq;
-			else
-				return 0;
-		}
+        internal static bool UsesCompoundFile(SegmentInfo si)
+        {
+            return si.GetUseCompoundFile();
+        }
+        
+        internal static bool HasSeparateNorms(SegmentInfo si)
+        {
+            return si.HasSeparateNorms();
+        }
+        
+        protected internal override void  DoDelete(int docNum)
+        {
+            if (deletedDocs == null)
+            {
+                deletedDocs = new BitVector(MaxDoc);
+                deletedDocsRef = new Ref();
+            }
+            // there is more than 1 SegmentReader with a reference to this
+            // deletedDocs BitVector so decRef the current deletedDocsRef,
+            // clone the BitVector, create a new deletedDocsRef
+            if (deletedDocsRef.RefCount() > 1)
+            {
+                Ref oldRef = deletedDocsRef;
+                deletedDocs = CloneDeletedDocs(deletedDocs);
+                deletedDocsRef = new Ref();
+                oldRef.DecRef();
+            }
+            deletedDocsDirty = true;
+            if (!deletedDocs.GetAndSet(docNum))
+                pendingDeleteCount++;
+        }
+        
+        protected internal override void  DoUndeleteAll()
+        {
+            deletedDocsDirty = false;
+            if (deletedDocs != null)
+            {
+                System.Diagnostics.Debug.Assert(deletedDocsRef != null);
+                deletedDocsRef.DecRef();
+                deletedDocs = null;
+                deletedDocsRef = null;
+                pendingDeleteCount = 0;
+                si.ClearDelGen();
+                si.SetDelCount(0);
+            }
+            else
+            {
+                System.Diagnostics.Debug.Assert(deletedDocsRef == null);
+                System.Diagnostics.Debug.Assert(pendingDeleteCount == 0);
+            }
+        }
+        
+        internal virtual System.Collections.Generic.IList<string> Files()
+        {
+            return si.Files();
+        }
+        
+        public override TermEnum Terms()
+        {
+            EnsureOpen();
+            return core.GetTermsReader().Terms();
+        }
+        
+        public override TermEnum Terms(Term t)
+        {
+            EnsureOpen();
+            return core.GetTermsReader().Terms(t);
+        }
+        
+        public /*internal*/ virtual FieldInfos FieldInfos()
+        {
+            return core.fieldInfos;
+        }
+        
+        public override Document Document(int n, FieldSelector fieldSelector)
+        {
+            EnsureOpen();
+            return GetFieldsReader().Doc(n, fieldSelector);
+        }
+        
+        public override bool IsDeleted(int n)
+        {
+            lock (this)
+            {
+                return (deletedDocs != null && deletedDocs.Get(n));
+            }
+        }
+        
+        public override TermDocs TermDocs(Term term)
+        {
+            if (term == null)
+            {
+                return new AllTermDocs(this);
+            }
+            else
+            {
+                return base.TermDocs(term);
+            }
+        }
+        
+        public override TermDocs TermDocs()
+        {
+            EnsureOpen();
+            return new SegmentTermDocs(this);
+        }
+        
+        public override TermPositions TermPositions()
+        {
+            EnsureOpen();
+            return new SegmentTermPositions(this);
+        }
+        
+        public override int DocFreq(Term t)
+        {
+            EnsureOpen();
+            TermInfo ti = core.GetTermsReader().Get(t);
+            if (ti != null)
+                return ti.docFreq;
+            else
+                return 0;
+        }
 
-	    public override int NumDocs()
-	    {
-	        // Don't call ensureOpen() here (it could affect performance)
-	        int n = MaxDoc;
-	        if (deletedDocs != null)
-	            n -= deletedDocs.Count();
-	        return n;
-	    }
+        public override int NumDocs()
+        {
+            // Don't call ensureOpen() here (it could affect performance)
+            int n = MaxDoc;
+            if (deletedDocs != null)
+                n -= deletedDocs.Count();
+            return n;
+        }
 
-	    public override int MaxDoc
-	    {
-	        get
-	        {
-	            // Don't call ensureOpen() here (it could affect performance)
-	            return si.docCount;
-	        }
-	    }
+        public override int MaxDoc
+        {
+            get
+            {
+                // Don't call ensureOpen() here (it could affect performance)
+                return si.docCount;
+            }
+        }
 
-	    /// <seealso cref="IndexReader.GetFieldNames(IndexReader.FieldOption)">
-		/// </seealso>
+        /// <seealso cref="IndexReader.GetFieldNames(IndexReader.FieldOption)">
+        /// </seealso>
         public override System.Collections.Generic.ICollection<string> GetFieldNames(IndexReader.FieldOption fieldOption)
-		{
-			EnsureOpen();
+        {
+            EnsureOpen();
 
             System.Collections.Generic.ISet<string> fieldSet = Lucene.Net.Support.Compatibility.SetFactory.CreateHashSet<string>();
-			for (int i = 0; i < core.fieldInfos.Size(); i++)
-			{
-				FieldInfo fi = core.fieldInfos.FieldInfo(i);
-				if (fieldOption == IndexReader.FieldOption.ALL)
-				{
-					fieldSet.Add(fi.name);
-				}
-				else if (!fi.isIndexed && fieldOption == IndexReader.FieldOption.UNINDEXED)
+            for (int i = 0; i < core.fieldInfos.Size(); i++)
+            {
+                FieldInfo fi = core.fieldInfos.FieldInfo(i);
+                if (fieldOption == IndexReader.FieldOption.ALL)
+                {
+                    fieldSet.Add(fi.name);
+                }
+                else if (!fi.isIndexed && fieldOption == IndexReader.FieldOption.UNINDEXED)
                 {
                     fieldSet.Add(fi.name);
-				}
-				else if (fi.omitTermFreqAndPositions && fieldOption == IndexReader.FieldOption.OMIT_TERM_FREQ_AND_POSITIONS)
+                }
+                else if (fi.omitTermFreqAndPositions && fieldOption == IndexReader.FieldOption.OMIT_TERM_FREQ_AND_POSITIONS)
                 {
                     fieldSet.Add(fi.name);
-				}
-				else if (fi.storePayloads && fieldOption == IndexReader.FieldOption.STORES_PAYLOADS)
+                }
+                else if (fi.storePayloads && fieldOption == IndexReader.FieldOption.STORES_PAYLOADS)
                 {
                     fieldSet.Add(fi.name);
-				}
-				else if (fi.isIndexed && fieldOption == IndexReader.FieldOption.INDEXED)
+                }
+                else if (fi.isIndexed && fieldOption == IndexReader.FieldOption.INDEXED)
                 {
                     fieldSet.Add(fi.name);
-				}
-				else if (fi.isIndexed && fi.storeTermVector == false && fieldOption == IndexReader.FieldOption.INDEXED_NO_TERMVECTOR)
+                }
+                else if (fi.isIndexed && fi.storeTermVector == false && fieldOption == IndexReader.FieldOption.INDEXED_NO_TERMVECTOR)
                 {
                     fieldSet.Add(fi.name);
-				}
-				else if (fi.storeTermVector == true && fi.storePositionWithTermVector == false && fi.storeOffsetWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR)
+                }
+                else if (fi.storeTermVector == true && fi.storePositionWithTermVector == false && fi.storeOffsetWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR)
                 {
                     fieldSet.Add(fi.name);
-				}
-				else if (fi.isIndexed && fi.storeTermVector && fieldOption == IndexReader.FieldOption.INDEXED_WITH_TERMVECTOR)
+                }
+                else if (fi.isIndexed && fi.storeTermVector && fieldOption == IndexReader.FieldOption.INDEXED_WITH_TERMVECTOR)
                 {
                     fieldSet.Add(fi.name);
-				}
-				else if (fi.storePositionWithTermVector && fi.storeOffsetWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_POSITION)
+                }
+                else if (fi.storePositionWithTermVector && fi.storeOffsetWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_POSITION)
                 {
                     fieldSet.Add(fi.name);
-				}
-				else if (fi.storeOffsetWithTermVector && fi.storePositionWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET)
+                }
+                else if (fi.storeOffsetWithTermVector && fi.storePositionWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET)
                 {
                     fieldSet.Add(fi.name);
-				}
+                }
                 else if ((fi.storeOffsetWithTermVector && fi.storePositionWithTermVector) && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_POSITION_OFFSET)
                 {
                     fieldSet.Add(fi.name);
                 }
-			}
-			return fieldSet;
-		}
-		
-		
-		public override bool HasNorms(System.String field)
-		{
-			lock (this)
-			{
-				EnsureOpen();
-				return norms.ContainsKey(field);
-			}
-		}
-		
-		// can return null if norms aren't stored
-		protected internal virtual byte[] GetNorms(System.String field)
-		{
-			lock (this)
-			{
-				Norm norm = norms[field];
-				if (norm == null)
-					return null; // not indexed, or norms not stored
-				return norm.Bytes();
-			}
-		}
-		
-		// returns fake norms if norms aren't available
-		public override byte[] Norms(System.String field)
-		{
-			lock (this)
-			{
-				EnsureOpen();
-				byte[] bytes = GetNorms(field);
-				return bytes;
-			}
-		}
-		
-		protected internal override void  DoSetNorm(int doc, System.String field, byte value_Renamed)
-		{
-			Norm norm = norms[field];
-			if (norm == null)
-			// not an indexed field
-				return ;
-			
-			normsDirty = true;
-			norm.CopyOnWrite()[doc] = value_Renamed; // set the value
-		}
-		
-		/// <summary>Read norms into a pre-allocated array. </summary>
-		public override void Norms(System.String field, byte[] bytes, int offset)
-		{
-			lock (this)
-			{
-				
-				EnsureOpen();
-				Norm norm = norms[field];
-				if (norm == null)
-				{
+            }
+            return fieldSet;
+        }
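
A quick sketch of how the option filter above is typically used ('reader' is an
assumption for an open SegmentReader or other IndexReader):

    // List every field in the segment, then count just the indexed ones.
    foreach (string name in reader.GetFieldNames(IndexReader.FieldOption.ALL))
        System.Console.WriteLine("field: " + name);

    int indexedCount = reader.GetFieldNames(IndexReader.FieldOption.INDEXED).Count;
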
+        
+        
+        public override bool HasNorms(System.String field)
+        {
+            lock (this)
+            {
+                EnsureOpen();
+                return norms.ContainsKey(field);
+            }
+        }
+        
+        // can return null if norms aren't stored
+        protected internal virtual byte[] GetNorms(System.String field)
+        {
+            lock (this)
+            {
+                Norm norm = norms[field];
+                if (norm == null)
+                    return null; // not indexed, or norms not stored
+                return norm.Bytes();
+            }
+        }
+        
+        // returns fake norms if norms aren't available
+        public override byte[] Norms(System.String field)
+        {
+            lock (this)
+            {
+                EnsureOpen();
+                byte[] bytes = GetNorms(field);
+                return bytes;
+            }
+        }
+        
+        protected internal override void  DoSetNorm(int doc, System.String field, byte value_Renamed)
+        {
+            Norm norm = norms[field];
+            if (norm == null)
+                return; // not an indexed field
+            
+            normsDirty = true;
+            norm.CopyOnWrite()[doc] = value_Renamed; // set the value
+        }
+        
+        /// <summary>Read norms into a pre-allocated array. </summary>
+        public override void Norms(System.String field, byte[] bytes, int offset)
+        {
+            lock (this)
+            {
+                
+                EnsureOpen();
+                Norm norm = norms[field];
+                if (norm == null)
+                {
                     for (int i = offset; i < bytes.Length; i++)
                     {
                         bytes[i] = (byte) DefaultSimilarity.EncodeNorm(1.0f);
                     }
-					return ;
-				}
-				
-				norm.Bytes(bytes, offset, MaxDoc);
-			}
-		}
-		
-		
-		private void  OpenNorms(Directory cfsDir, int readBufferSize)
-		{
-			long nextNormSeek = SegmentMerger.NORMS_HEADER.Length; //skip header (header unused for now)
-			int maxDoc = MaxDoc;
-			for (int i = 0; i < core.fieldInfos.Size(); i++)
-			{
-				FieldInfo fi = core.fieldInfos.FieldInfo(i);
-				if (norms.ContainsKey(fi.name))
-				{
-					// in case this SegmentReader is being re-opened, we might be able to
-					// reuse some norm instances and skip loading them here
-					continue;
-				}
-				if (fi.isIndexed && !fi.omitNorms)
-				{
-					Directory d = Directory();
-					System.String fileName = si.GetNormFileName(fi.number);
-					if (!si.HasSeparateNorms(fi.number))
-					{
-						d = cfsDir;
-					}
-					
-					// singleNormFile means multiple norms share this file
-					bool singleNormFile = fileName.EndsWith("." + IndexFileNames.NORMS_EXTENSION);
-					IndexInput normInput = null;
-					long normSeek;
-					
-					if (singleNormFile)
-					{
-						normSeek = nextNormSeek;
-						if (singleNormStream == null)
-						{
-							singleNormStream = d.OpenInput(fileName, readBufferSize);
-							singleNormRef = new Ref();
-						}
-						else
-						{
-							singleNormRef.IncRef();
-						}
-						// All norms in the .nrm file can share a single IndexInput since
-						// they are only used in a synchronized context.
-						// If this were to change in the future, a clone could be done here.
-						normInput = singleNormStream;
-					}
-					else
-					{
-						normSeek = 0;
-						normInput = d.OpenInput(fileName);
-					}
-					
-					norms[fi.name] = new Norm(this, normInput, fi.number, normSeek);
-					nextNormSeek += maxDoc; // increment also if some norms are separate
-				}
-			}
-		}
-		
-		public /*internal*/ virtual bool TermsIndexLoaded()
-		{
-			return core.TermsIndexIsLoaded();
-		}
-		
-		// NOTE: only called from IndexWriter when a near
-		// real-time reader is opened, or applyDeletes is run,
-		// sharing a segment that's still being merged.  This
-		// method is not thread safe, and relies on the
-		// synchronization in IndexWriter
-		internal virtual void  LoadTermsIndex(int termsIndexDivisor)
-		{
-			core.LoadTermsIndex(si, termsIndexDivisor);
-		}
-		
-		// for testing only
-		public /*internal*/ virtual bool NormsClosed()
-		{
-			if (singleNormStream != null)
-			{
-				return false;
-			}
-			return norms.Values.All(norm => norm.refCount <= 0);
-		}
-		
-		// for testing only
-		public /*internal*/ virtual bool NormsClosed(System.String field)
-		{
-			return norms[field].refCount == 0;
-		}
-		
-		/// <summary> Create a clone from the initial TermVectorsReader and store it in the ThreadLocal.</summary>
-		/// <returns> TermVectorsReader
-		/// </returns>
-		internal virtual TermVectorsReader GetTermVectorsReader()
-		{
-			TermVectorsReader tvReader = termVectorsLocal.Get();
-			if (tvReader == null)
-			{
-				TermVectorsReader orig = core.GetTermVectorsReaderOrig();
-				if (orig == null)
-				{
-					return null;
-				}
-				else
-				{
-					try
-					{
-						tvReader = (TermVectorsReader) orig.Clone();
-					}
-					catch (System.Exception)
-					{
-						return null;
-					}
-				}
-				termVectorsLocal.Set(tvReader);
-			}
-			return tvReader;
-		}
-		
-		internal virtual TermVectorsReader GetTermVectorsReaderOrig()
-		{
-			return core.GetTermVectorsReaderOrig();
-		}
-		
-		/// <summary>Return a term frequency vector for the specified document and field. The
-		/// vector returned contains term numbers and frequencies for all terms in
-		/// the specified field of this document, if the field had storeTermVector
-		/// flag set.  If the flag was not set, the method returns null.
-		/// </summary>
-		/// <throws>  IOException </throws>
-		public override ITermFreqVector GetTermFreqVector(int docNumber, System.String field)
-		{
-			// Check if this field is invalid or has no stored term vector
-			EnsureOpen();
-			FieldInfo fi = core.fieldInfos.FieldInfo(field);
-			if (fi == null || !fi.storeTermVector)
-				return null;
-			
-			TermVectorsReader termVectorsReader = GetTermVectorsReader();
-			if (termVectorsReader == null)
-				return null;
-			
-			return termVectorsReader.Get(docNumber, field);
-		}
-		
-		
-		public override void  GetTermFreqVector(int docNumber, System.String field, TermVectorMapper mapper)
-		{
-			EnsureOpen();
-			FieldInfo fi = core.fieldInfos.FieldInfo(field);
-			if (fi == null || !fi.storeTermVector)
-				return;
-			
-			TermVectorsReader termVectorsReader = GetTermVectorsReader();
-			if (termVectorsReader == null)
-			{
-				return;
-			}
-			termVectorsReader.Get(docNumber, field, mapper);
-		}
-		
-		
-		public override void  GetTermFreqVector(int docNumber, TermVectorMapper mapper)
-		{
-			EnsureOpen();
-			
-			TermVectorsReader termVectorsReader = GetTermVectorsReader();
-			if (termVectorsReader == null)
-				return ;
-			
-			termVectorsReader.Get(docNumber, mapper);
-		}
-		
-		/// <summary>Return an array of term frequency vectors for the specified document.
-		/// The array contains a vector for each vectorized field in the document.
-		/// Each vector vector contains term numbers and frequencies for all terms
-		/// in a given vectorized field.
-		/// If no such fields existed, the method returns null.
-		/// </summary>
-		/// <throws>  IOException </throws>
-		public override ITermFreqVector[] GetTermFreqVectors(int docNumber)
-		{
-			EnsureOpen();
-			
-			TermVectorsReader termVectorsReader = GetTermVectorsReader();
-			if (termVectorsReader == null)
-				return null;
-			
-			return termVectorsReader.Get(docNumber);
-		}
+                    return;
+                }
+                
+                norm.Bytes(bytes, offset, MaxDoc);
+            }
+        }
+        
+        
+        private void  OpenNorms(Directory cfsDir, int readBufferSize)
+        {
+            long nextNormSeek = SegmentMerger.NORMS_HEADER.Length; //skip header (header unused for now)
+            int maxDoc = MaxDoc;
+            for (int i = 0; i < core.fieldInfos.Size(); i++)
+            {
+                FieldInfo fi = core.fieldInfos.FieldInfo(i);
+                if (norms.ContainsKey(fi.name))
+                {
+                    // in case this SegmentReader is being re-opened, we might be able to
+                    // reuse some norm instances and skip loading them here
+                    continue;
+                }
+                if (fi.isIndexed && !fi.omitNorms)
+                {
+                    Directory d = Directory();
+                    System.String fileName = si.GetNormFileName(fi.number);
+                    if (!si.HasSeparateNorms(fi.number))
+                    {
+                        d = cfsDir;
+                    }
+                    
+                    // singleNormFile means multiple norms share this file
+                    bool singleNormFile = fileName.EndsWith("." + IndexFileNames.NORMS_EXTENSION);
+                    IndexInput normInput = null;
+                    long normSeek;
+                    
+                    if (singleNormFile)
+                    {
+                        normSeek = nextNormSeek;
+                        if (singleNormStream == null)
+                        {
+                            singleNormStream = d.OpenInput(fileName, readBufferSize);
+                            singleNormRef = new Ref();
+                        }
+                        else
+                        {
+                            singleNormRef.IncRef();
+                        }
+                        // All norms in the .nrm file can share a single IndexInput since
+                        // they are only used in a synchronized context.
+                        // If this were to change in the future, a clone could be done here.
+                        normInput = singleNormStream;
+                    }
+                    else
+                    {
+                        normSeek = 0;
+                        normInput = d.OpenInput(fileName);
+                    }
+                    
+                    norms[fi.name] = new Norm(this, normInput, fi.number, normSeek);
+                    nextNormSeek += maxDoc; // increment also if some norms are separate
+                }
+            }
+        }
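
In a shared .nrm file the layout implied by the loop above is the header first,
then one maxDoc-sized block of norms per field, which is why nextNormSeek advances
by maxDoc on every normed field.  A small sketch of that arithmetic (the method
name is illustrative):

    // Offset of the i-th normed field's block inside a shared norms file.
    // normedFieldOrdinal counts only indexed, non-omitNorms fields, in
    // field-number order, matching the loop above.
    static long NormSeek(long headerLength, int maxDoc, int normedFieldOrdinal)
    {
        return headerLength + (long) maxDoc * normedFieldOrdinal;
    }
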
+        
+        public /*internal*/ virtual bool TermsIndexLoaded()
+        {
+            return core.TermsIndexIsLoaded();
+        }
+        
+        // NOTE: only called from IndexWriter when a near
+        // real-time reader is opened, or applyDeletes is run,
+        // sharing a segment that's still being merged.  This
+        // method is not thread safe, and relies on the
+        // synchronization in IndexWriter
+        internal virtual void  LoadTermsIndex(int termsIndexDivisor)
+        {
+            core.LoadTermsIndex(si, termsIndexDivisor);
+        }
+        
+        // for testing only
+        public /*internal*/ virtual bool NormsClosed()
+        {
+            if (singleNormStream != null)
+            {
+                return false;
+            }
+            return norms.Values.All(norm => norm.refCount <= 0);
+        }
+        
+        // for testing only
+        public /*internal*/ virtual bool NormsClosed(System.String field)
+        {
+            return norms[field].refCount == 0;
+        }
+        
+        /// <summary> Create a clone from the initial TermVectorsReader and store it in the ThreadLocal.</summary>
+        /// <returns> TermVectorsReader
+        /// </returns>
+        internal virtual TermVectorsReader GetTermVectorsReader()
+        {
+            TermVectorsReader tvReader = termVectorsLocal.Get();
+            if (tvReader == null)
+            {
+                TermVectorsReader orig = core.GetTermVectorsReaderOrig();
+                if (orig == null)
+                {
+                    return null;
+                }
+                else
+                {
+                    try
+                    {
+                        tvReader = (TermVectorsReader) orig.Clone();
+                    }
+                    catch (System.Exception)
+                    {
+                        return null;
+                    }
+                }
+                termVectorsLocal.Set(tvReader);
+            }
+            return tvReader;
+        }
+        
+        internal virtual TermVectorsReader GetTermVectorsReaderOrig()
+        {
+            return core.GetTermVectorsReaderOrig();
+        }
+        
+        /// <summary>Return a term frequency vector for the specified document and field. The
+        /// vector returned contains term numbers and frequencies for all terms in
+        /// the specified field of this document, if the field had storeTermVector
+        /// flag set.  If the flag was not set, the method returns null.
+        /// </summary>
+        /// <throws>  IOException </throws>
+        public override ITermFreqVector GetTermFreqVector(int docNumber, System.String field)
+        {
+            // Check if this field is invalid or has no stored term vector
+            EnsureOpen();
+            FieldInfo fi = core.fieldInfos.FieldInfo(field);
+            if (fi == null || !fi.storeTermVector)
+                return null;
+            
+            TermVectorsReader termVectorsReader = GetTermVectorsReader();
+            if (termVectorsReader == null)
+                return null;
+            
+            return termVectorsReader.Get(docNumber, field);
+        }
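
A hedged usage sketch for the accessor above ("body" and docId are assumptions;
the port may expose the term and frequency arrays as properties rather than the
Get* methods shown, so treat those member names as approximate):

    ITermFreqVector vector = reader.GetTermFreqVector(docId, "body");
    if (vector != null)  // null when the field was indexed without term vectors
    {
        string[] terms = vector.GetTerms();
        int[] freqs = vector.GetTermFrequencies();
        for (int i = 0; i < terms.Length; i++)
            System.Console.WriteLine(terms[i] + " x " + freqs[i]);
    }
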
+        
+        
+        public override void  GetTermFreqVector(int docNumber, System.String field, TermVectorMapper mapper)
+        {
+            EnsureOpen();
+            FieldInfo fi = core.fieldInfos.FieldInfo(field);
+            if (fi == null || !fi.storeTermVector)
+                return;
+            
+     

<TRUNCATED>

[06/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/ParallelReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/ParallelReader.cs b/src/core/Index/ParallelReader.cs
index e0b4b04..34e7033 100644
--- a/src/core/Index/ParallelReader.cs
+++ b/src/core/Index/ParallelReader.cs
@@ -25,581 +25,581 @@ using FieldSelectorResult = Lucene.Net.Documents.FieldSelectorResult;
 
 namespace Lucene.Net.Index
 {
-	/// <summary>An IndexReader which reads multiple, parallel indexes.  Each index added
-	/// must have the same number of documents, but typically each contains
-	/// different fields.  Each document contains the union of the fields of all
-	/// documents with the same document number.  When searching, matches for a
-	/// query term are from the first index added that has the field.
-	/// 
-	/// <p/>This is useful, e.g., with collections that have large fields which
-	/// change rarely and small fields that change more frequently.  The smaller
-	/// fields may be re-indexed in a new index and both indexes may be searched
-	/// together.
-	/// 
-	/// <p/><strong>Warning:</strong> It is up to you to make sure all indexes
-	/// are created and modified the same way. For example, if you add
-	/// documents to one index, you need to add the same documents in the
-	/// same order to the other indexes. <em>Failure to do so will result in
-	/// undefined behavior</em>.
-	/// </summary>
-	public class ParallelReader:IndexReader, System.ICloneable
-	{
+    /// <summary>An IndexReader which reads multiple, parallel indexes.  Each index added
+    /// must have the same number of documents, but typically each contains
+    /// different fields.  Each document contains the union of the fields of all
+    /// documents with the same document number.  When searching, matches for a
+    /// query term are from the first index added that has the field.
+    /// 
+    /// <p/>This is useful, e.g., with collections that have large fields which
+    /// change rarely and small fields that change more frequently.  The smaller
+    /// fields may be re-indexed in a new index and both indexes may be searched
+    /// together.
+    /// 
+    /// <p/><strong>Warning:</strong> It is up to you to make sure all indexes
+    /// are created and modified the same way. For example, if you add
+    /// documents to one index, you need to add the same documents in the
+    /// same order to the other indexes. <em>Failure to do so will result in
+    /// undefined behavior</em>.
+    /// </summary>
+    public class ParallelReader:IndexReader, System.ICloneable
+    {
         private List<IndexReader> readers = new List<IndexReader>();
         private List<bool> decrefOnClose = new List<bool>(); // remember which subreaders to decRef on close
-		internal bool incRefReaders = false;
-		private SortedDictionary<string, IndexReader> fieldToReader = new SortedDictionary<string, IndexReader>();
-		private IDictionary<IndexReader, ICollection<string>> readerToFields = new HashMap<IndexReader, ICollection<string>>();
+        internal bool incRefReaders = false;
+        private SortedDictionary<string, IndexReader> fieldToReader = new SortedDictionary<string, IndexReader>();
+        private IDictionary<IndexReader, ICollection<string>> readerToFields = new HashMap<IndexReader, ICollection<string>>();
         private List<IndexReader> storedFieldReaders = new List<IndexReader>();
-		
-		private int maxDoc;
-		private int numDocs;
-		private bool hasDeletions;
-		
-		/// <summary>Construct a ParallelReader. 
-		/// <p/>Note that all subreaders are closed if this ParallelReader is closed.<p/>
-		/// </summary>
-		public ParallelReader():this(true)
-		{
-		}
-		
-		/// <summary>Construct a ParallelReader. </summary>
-		/// <param name="closeSubReaders">indicates whether the subreaders should be closed
-		/// when this ParallelReader is closed
-		/// </param>
-		public ParallelReader(bool closeSubReaders):base()
-		{
-			this.incRefReaders = !closeSubReaders;
-		}
-		
-		/// <summary>Add an IndexReader.</summary>
-		/// <throws>  IOException if there is a low-level IO error </throws>
-		public virtual void  Add(IndexReader reader)
-		{
-			EnsureOpen();
-			Add(reader, false);
-		}
-		
-		/// <summary>Add an IndexReader whose stored fields will not be returned.  This can
-		/// accellerate search when stored fields are only needed from a subset of
-		/// the IndexReaders.
-		/// 
-		/// </summary>
-		/// <throws>  IllegalArgumentException if not all indexes contain the same number </throws>
-		/// <summary>     of documents
-		/// </summary>
-		/// <throws>  IllegalArgumentException if not all indexes have the same value </throws>
-		/// <summary>     of <see cref="IndexReader.MaxDoc" />
-		/// </summary>
-		/// <throws>  IOException if there is a low-level IO error </throws>
-		public virtual void  Add(IndexReader reader, bool ignoreStoredFields)
-		{
-			
-			EnsureOpen();
-			if (readers.Count == 0)
-			{
-				this.maxDoc = reader.MaxDoc;
-				this.numDocs = reader.NumDocs();
-				this.hasDeletions = reader.HasDeletions;
-			}
-			
-			if (reader.MaxDoc != maxDoc)
-			// check compatibility
-				throw new System.ArgumentException("All readers must have same maxDoc: " + maxDoc + "!=" + reader.MaxDoc);
-			if (reader.NumDocs() != numDocs)
-				throw new System.ArgumentException("All readers must have same numDocs: " + numDocs + "!=" + reader.NumDocs());
-			
-			ICollection<string> fields = reader.GetFieldNames(IndexReader.FieldOption.ALL);
-			readerToFields[reader] = fields;
-			foreach(var field in fields)
-			{
-				// update fieldToReader map
+        
+        private int maxDoc;
+        private int numDocs;
+        private bool hasDeletions;
+        
+        /// <summary>Construct a ParallelReader. 
+        /// <p/>Note that all subreaders are closed if this ParallelReader is closed.<p/>
+        /// </summary>
+        public ParallelReader():this(true)
+        {
+        }
+        
+        /// <summary>Construct a ParallelReader. </summary>
+        /// <param name="closeSubReaders">indicates whether the subreaders should be closed
+        /// when this ParallelReader is closed
+        /// </param>
+        public ParallelReader(bool closeSubReaders):base()
+        {
+            this.incRefReaders = !closeSubReaders;
+        }
+        
+        /// <summary>Add an IndexReader.</summary>
+        /// <throws>  IOException if there is a low-level IO error </throws>
+        public virtual void  Add(IndexReader reader)
+        {
+            EnsureOpen();
+            Add(reader, false);
+        }
+        
+        /// <summary>Add an IndexReader whose stored fields will not be returned.  This can
+        /// accelerate search when stored fields are only needed from a subset of
+        /// the IndexReaders.
+        /// 
+        /// </summary>
+        /// <throws>  ArgumentException if not all indexes contain the same number of documents </throws>
+        /// <throws>  ArgumentException if not all indexes have the same value of <see cref="IndexReader.MaxDoc" /> </throws>
+        /// <throws>  IOException if there is a low-level IO error </throws>
+        public virtual void  Add(IndexReader reader, bool ignoreStoredFields)
+        {
+            
+            EnsureOpen();
+            if (readers.Count == 0)
+            {
+                this.maxDoc = reader.MaxDoc;
+                this.numDocs = reader.NumDocs();
+                this.hasDeletions = reader.HasDeletions;
+            }
+            
+            // check compatibility
+            if (reader.MaxDoc != maxDoc)
+                throw new System.ArgumentException("All readers must have same maxDoc: " + maxDoc + "!=" + reader.MaxDoc);
+            if (reader.NumDocs() != numDocs)
+                throw new System.ArgumentException("All readers must have same numDocs: " + numDocs + "!=" + reader.NumDocs());
+            
+            ICollection<string> fields = reader.GetFieldNames(IndexReader.FieldOption.ALL);
+            readerToFields[reader] = fields;
+            foreach(var field in fields)
+            {
+                // update fieldToReader map
                 // Do a ContainsKey check first to mimic Java's HashMap behavior
-				if (!fieldToReader.ContainsKey(field) || fieldToReader[field] == null)
-					fieldToReader[field] = reader;
-			}
-			
-			if (!ignoreStoredFields)
-				storedFieldReaders.Add(reader); // add to storedFieldReaders
-			readers.Add(reader);
-			
-			if (incRefReaders)
-			{
-				reader.IncRef();
-			}
-			decrefOnClose.Add(incRefReaders);
-		}
-		
-		public override System.Object Clone()
-		{
-			try
-			{
-				return DoReopen(true);
-			}
-			catch (System.Exception ex)
-			{
-				throw new System.SystemException(ex.Message, ex);
-			}
-		}
-		
-		/// <summary> Tries to reopen the subreaders.
-		/// <br/>
-		/// If one or more subreaders could be re-opened (i. e. subReader.reopen() 
-		/// returned a new instance != subReader), then a new ParallelReader instance 
-		/// is returned, otherwise this instance is returned.
-		/// <p/>
-		/// A re-opened instance might share one or more subreaders with the old 
-		/// instance. Index modification operations result in undefined behavior
-		/// when performed before the old instance is closed.
-		/// (see <see cref="IndexReader.Reopen()" />).
-		/// <p/>
-		/// If subreaders are shared, then the reference count of those
-		/// readers is increased to ensure that the subreaders remain open
-		/// until the last referring reader is closed.
-		/// 
-		/// </summary>
-		/// <throws>  CorruptIndexException if the index is corrupt </throws>
-		/// <throws>  IOException if there is a low-level IO error  </throws>
-		public override IndexReader Reopen()
-		{
-			lock (this)
-			{
-				return DoReopen(false);
-			}
-		}
-		
-		protected internal virtual IndexReader DoReopen(bool doClone)
-		{
-			EnsureOpen();
-			
-			bool reopened = false;
+                if (!fieldToReader.ContainsKey(field) || fieldToReader[field] == null)
+                    fieldToReader[field] = reader;
+            }
+            
+            if (!ignoreStoredFields)
+                storedFieldReaders.Add(reader); // add to storedFieldReaders
+            readers.Add(reader);
+            
+            if (incRefReaders)
+            {
+                reader.IncRef();
+            }
+            decrefOnClose.Add(incRefReaders);
+        }
+        
+        public override System.Object Clone()
+        {
+            try
+            {
+                return DoReopen(true);
+            }
+            catch (System.Exception ex)
+            {
+                throw new System.SystemException(ex.Message, ex);
+            }
+        }
+        
+        /// <summary> Tries to reopen the subreaders.
+        /// <br/>
+        /// If one or more subreaders could be re-opened (i.e. subReader.Reopen() 
+        /// returned a new instance != subReader), then a new ParallelReader instance 
+        /// is returned, otherwise this instance is returned.
+        /// <p/>
+        /// A re-opened instance might share one or more subreaders with the old 
+        /// instance. Index modification operations result in undefined behavior
+        /// when performed before the old instance is closed.
+        /// (see <see cref="IndexReader.Reopen()" />).
+        /// <p/>
+        /// If subreaders are shared, then the reference count of those
+        /// readers is increased to ensure that the subreaders remain open
+        /// until the last referring reader is closed.
+        /// 
+        /// </summary>
+        /// <throws>  CorruptIndexException if the index is corrupt </throws>
+        /// <throws>  IOException if there is a low-level IO error  </throws>
+        public override IndexReader Reopen()
+        {
+            lock (this)
+            {
+                return DoReopen(false);
+            }
+        }
+        
+        protected internal virtual IndexReader DoReopen(bool doClone)
+        {
+            EnsureOpen();
+            
+            bool reopened = false;
             IList<IndexReader> newReaders = new List<IndexReader>();
-			
-			bool success = false;
-			
-			try
-			{
-				foreach(var oldReader in readers)
-				{
-					IndexReader newReader = null;
-					if (doClone)
-					{
-						newReader = (IndexReader) oldReader.Clone();
-					}
-					else
-					{
-						newReader = oldReader.Reopen();
-					}
-					newReaders.Add(newReader);
-					// if at least one of the subreaders was updated we remember that
-					// and return a new ParallelReader
-					if (newReader != oldReader)
-					{
-						reopened = true;
-					}
-				}
-				success = true;
-			}
-			finally
-			{
-				if (!success && reopened)
-				{
-					for (int i = 0; i < newReaders.Count; i++)
-					{
-						IndexReader r = newReaders[i];
-						if (r != readers[i])
-						{
-							try
-							{
-								r.Close();
-							}
-							catch (System.IO.IOException)
-							{
-								// keep going - we want to clean up as much as possible
-							}
-						}
-					}
-				}
-			}
-			
-			if (reopened)
-			{
+            
+            bool success = false;
+            
+            try
+            {
+                foreach(var oldReader in readers)
+                {
+                    IndexReader newReader = null;
+                    if (doClone)
+                    {
+                        newReader = (IndexReader) oldReader.Clone();
+                    }
+                    else
+                    {
+                        newReader = oldReader.Reopen();
+                    }
+                    newReaders.Add(newReader);
+                    // if at least one of the subreaders was updated we remember that
+                    // and return a new ParallelReader
+                    if (newReader != oldReader)
+                    {
+                        reopened = true;
+                    }
+                }
+                success = true;
+            }
+            finally
+            {
+                if (!success && reopened)
+                {
+                    for (int i = 0; i < newReaders.Count; i++)
+                    {
+                        IndexReader r = newReaders[i];
+                        if (r != readers[i])
+                        {
+                            try
+                            {
+                                r.Close();
+                            }
+                            catch (System.IO.IOException)
+                            {
+                                // keep going - we want to clean up as much as possible
+                            }
+                        }
+                    }
+                }
+            }
+            
+            if (reopened)
+            {
                 List<bool> newDecrefOnClose = new List<bool>();
-				ParallelReader pr = new ParallelReader();
-				for (int i = 0; i < readers.Count; i++)
-				{
-					IndexReader oldReader = readers[i];
-					IndexReader newReader = newReaders[i];
-					if (newReader == oldReader)
-					{
-						newDecrefOnClose.Add(true);
-						newReader.IncRef();
-					}
-					else
-					{
-						// this is a new subreader instance, so on close() we don't
-						// decRef but close it 
-						newDecrefOnClose.Add(false);
-					}
-					pr.Add(newReader, !storedFieldReaders.Contains(oldReader));
-				}
-				pr.decrefOnClose = newDecrefOnClose;
-				pr.incRefReaders = incRefReaders;
-				return pr;
-			}
-			else
-			{
-				// No subreader was refreshed
-				return this;
-			}
-		}
-
-
-	    public override int NumDocs()
-	    {
-	        // Don't call ensureOpen() here (it could affect performance)
-	        return numDocs;
-	    }
-
-	    public override int MaxDoc
-	    {
-	        get
-	        {
-	            // Don't call ensureOpen() here (it could affect performance)
-	            return maxDoc;
-	        }
-	    }
-
-	    public override bool HasDeletions
-	    {
-	        get
-	        {
-	            // Don't call ensureOpen() here (it could affect performance)
-	            return hasDeletions;
-	        }
-	    }
-
-	    // check first reader
-		public override bool IsDeleted(int n)
-		{
-			// Don't call ensureOpen() here (it could affect performance)
-			if (readers.Count > 0)
-				return readers[0].IsDeleted(n);
-			return false;
-		}
-		
-		// delete in all readers
-		protected internal override void  DoDelete(int n)
-		{
-			foreach(var reader in readers)
-			{
-				reader.DeleteDocument(n);
-			}
-			hasDeletions = true;
-		}
-		
-		// undeleteAll in all readers
-		protected internal override void  DoUndeleteAll()
-		{
-			foreach(var reader in readers)
-			{
-				reader.UndeleteAll();
-			}
-			hasDeletions = false;
-		}
-		
-		// append fields from storedFieldReaders
-		public override Document Document(int n, FieldSelector fieldSelector)
-		{
-			EnsureOpen();
-			Document result = new Document();
-			foreach(IndexReader reader in storedFieldReaders)
-			{
-				bool include = (fieldSelector == null);
-				if (!include)
-				{
-				    var fields = readerToFields[reader];
-					foreach(var field in fields)
-					{
+                ParallelReader pr = new ParallelReader();
+                for (int i = 0; i < readers.Count; i++)
+                {
+                    IndexReader oldReader = readers[i];
+                    IndexReader newReader = newReaders[i];
+                    if (newReader == oldReader)
+                    {
+                        newDecrefOnClose.Add(true);
+                        newReader.IncRef();
+                    }
+                    else
+                    {
+                        // this is a new subreader instance, so on close() we don't
+                        // decRef but close it 
+                        newDecrefOnClose.Add(false);
+                    }
+                    pr.Add(newReader, !storedFieldReaders.Contains(oldReader));
+                }
+                pr.decrefOnClose = newDecrefOnClose;
+                pr.incRefReaders = incRefReaders;
+                return pr;
+            }
+            else
+            {
+                // No subreader was refreshed
+                return this;
+            }
+        }
+
+
+        public override int NumDocs()
+        {
+            // Don't call ensureOpen() here (it could affect performance)
+            return numDocs;
+        }
+
+        public override int MaxDoc
+        {
+            get
+            {
+                // Don't call ensureOpen() here (it could affect performance)
+                return maxDoc;
+            }
+        }
+
+        public override bool HasDeletions
+        {
+            get
+            {
+                // Don't call ensureOpen() here (it could affect performance)
+                return hasDeletions;
+            }
+        }
+
+        // check first reader
+        public override bool IsDeleted(int n)
+        {
+            // Don't call ensureOpen() here (it could affect performance)
+            if (readers.Count > 0)
+                return readers[0].IsDeleted(n);
+            return false;
+        }
+        
+        // delete in all readers
+        protected internal override void  DoDelete(int n)
+        {
+            foreach(var reader in readers)
+            {
+                reader.DeleteDocument(n);
+            }
+            hasDeletions = true;
+        }
+        
+        // undeleteAll in all readers
+        protected internal override void  DoUndeleteAll()
+        {
+            foreach(var reader in readers)
+            {
+                reader.UndeleteAll();
+            }
+            hasDeletions = false;
+        }
+        
+        // append fields from storedFieldReaders
+        public override Document Document(int n, FieldSelector fieldSelector)
+        {
+            EnsureOpen();
+            Document result = new Document();
+            foreach(IndexReader reader in storedFieldReaders)
+            {
+                bool include = (fieldSelector == null);
+                if (!include)
+                {
+                    var fields = readerToFields[reader];
+                    foreach(var field in fields)
+                    {
                         if (fieldSelector.Accept(field) != FieldSelectorResult.NO_LOAD)
-						{
-							include = true;
-							break;
-						}
-					}
-				}
-				if (include)
-				{
-				    var fields = reader.Document(n, fieldSelector).GetFields();
-					foreach(var field in fields)
-					{
+                        {
+                            include = true;
+                            break;
+                        }
+                    }
+                }
+                if (include)
+                {
+                    var fields = reader.Document(n, fieldSelector).GetFields();
+                    foreach(var field in fields)
+                    {
                         result.Add(field);
-					}
-				}
-			}
-			return result;
-		}
-		
-		// get all vectors
-		public override ITermFreqVector[] GetTermFreqVectors(int n)
-		{
-			EnsureOpen();
-			IList<ITermFreqVector> results = new List<ITermFreqVector>();
+                    }
+                }
+            }
+            return result;
+        }
+        
+        // get all vectors
+        public override ITermFreqVector[] GetTermFreqVectors(int n)
+        {
+            EnsureOpen();
+            IList<ITermFreqVector> results = new List<ITermFreqVector>();
             foreach(var e in fieldToReader)
-			{
-				System.String field = e.Key;
-				IndexReader reader = e.Value;
-
-				ITermFreqVector vector = reader.GetTermFreqVector(n, field);
-				if (vector != null)
-					results.Add(vector);
-			}
-			return results.ToArray();
-		}
-		
-		public override ITermFreqVector GetTermFreqVector(int n, System.String field)
-		{
-			EnsureOpen();
-			IndexReader reader = (fieldToReader[field]);
-			return reader == null?null:reader.GetTermFreqVector(n, field);
-		}
-		
-		
-		public override void  GetTermFreqVector(int docNumber, System.String field, TermVectorMapper mapper)
-		{
-			EnsureOpen();
-			IndexReader reader = (fieldToReader[field]);
-			if (reader != null)
-			{
-				reader.GetTermFreqVector(docNumber, field, mapper);
-			}
-		}
-		
-		public override void  GetTermFreqVector(int docNumber, TermVectorMapper mapper)
-		{
-			EnsureOpen();
+            {
+                System.String field = e.Key;
+                IndexReader reader = e.Value;
+
+                ITermFreqVector vector = reader.GetTermFreqVector(n, field);
+                if (vector != null)
+                    results.Add(vector);
+            }
+            return results.ToArray();
+        }
+        
+        public override ITermFreqVector GetTermFreqVector(int n, System.String field)
+        {
+            EnsureOpen();
+            IndexReader reader = (fieldToReader[field]);
+            return reader == null?null:reader.GetTermFreqVector(n, field);
+        }
+        
+        
+        public override void  GetTermFreqVector(int docNumber, System.String field, TermVectorMapper mapper)
+        {
+            EnsureOpen();
+            IndexReader reader = (fieldToReader[field]);
+            if (reader != null)
+            {
+                reader.GetTermFreqVector(docNumber, field, mapper);
+            }
+        }
+        
+        public override void  GetTermFreqVector(int docNumber, TermVectorMapper mapper)
+        {
+            EnsureOpen();
 
             foreach(var e in fieldToReader)
-			{
-				System.String field = e.Key;
-				IndexReader reader = e.Value;
-				reader.GetTermFreqVector(docNumber, field, mapper);
-			}
-		}
-		
-		public override bool HasNorms(System.String field)
-		{
-			EnsureOpen();
-			IndexReader reader = fieldToReader[field];
-		    return reader != null && reader.HasNorms(field);
-		}
-		
-		public override byte[] Norms(System.String field)
-		{
-			EnsureOpen();
-			IndexReader reader = fieldToReader[field];
-			return reader == null?null:reader.Norms(field);
-		}
-		
-		public override void  Norms(System.String field, byte[] result, int offset)
-		{
-			EnsureOpen();
-			IndexReader reader = fieldToReader[field];
-			if (reader != null)
-				reader.Norms(field, result, offset);
-		}
-		
-		protected internal override void  DoSetNorm(int n, System.String field, byte value_Renamed)
-		{
-			IndexReader reader = fieldToReader[field];
-			if (reader != null)
-				reader.DoSetNorm(n, field, value_Renamed);
-		}
-		
-		public override TermEnum Terms()
-		{
-			EnsureOpen();
-			return new ParallelTermEnum(this);
-		}
-		
-		public override TermEnum Terms(Term term)
-		{
-			EnsureOpen();
-			return new ParallelTermEnum(this, term);
-		}
-		
-		public override int DocFreq(Term term)
-		{
-			EnsureOpen();
-			IndexReader reader = fieldToReader[term.Field];
-			return reader == null?0:reader.DocFreq(term);
-		}
-		
-		public override TermDocs TermDocs(Term term)
-		{
-			EnsureOpen();
-			return new ParallelTermDocs(this, term);
-		}
-		
-		public override TermDocs TermDocs()
-		{
-			EnsureOpen();
-			return new ParallelTermDocs(this);
-		}
-		
-		public override TermPositions TermPositions(Term term)
-		{
-			EnsureOpen();
-			return new ParallelTermPositions(this, term);
-		}
-		
-		public override TermPositions TermPositions()
-		{
-			EnsureOpen();
-			return new ParallelTermPositions(this);
-		}
-
-	    /// <summary> Checks recursively if all subreaders are up to date. </summary>
-	    public override bool IsCurrent()
-	    {
-	        foreach (var reader in readers)
-	        {
-	            if (!reader.IsCurrent())
-	            {
-	                return false;
-	            }
-	        }
-
-	        // all subreaders are up to date
-	        return true;
-	    }
-
-	    /// <summary> Checks recursively if all subindexes are optimized </summary>
-	    public override bool IsOptimized()
-	    {
-	        foreach (var reader in readers)
-	        {
-	            if (!reader.IsOptimized())
-	            {
-	                return false;
-	            }
-	        }
-
-	        // all subindexes are optimized
-	        return true;
-	    }
-
-
-	    /// <summary>Not implemented.</summary>
-	    /// <throws>  UnsupportedOperationException </throws>
-	    public override long Version
-	    {
-	        get { throw new System.NotSupportedException("ParallelReader does not support this method."); }
-	    }
-
-	    // for testing
-		public /*internal*/ virtual IndexReader[] GetSubReaders()
-		{
-			return readers.ToArray();
-		}
+            {
+                System.String field = e.Key;
+                IndexReader reader = e.Value;
+                reader.GetTermFreqVector(docNumber, field, mapper);
+            }
+        }
+        
+        public override bool HasNorms(System.String field)
+        {
+            EnsureOpen();
+            IndexReader reader = fieldToReader[field];
+            return reader != null && reader.HasNorms(field);
+        }
+        
+        public override byte[] Norms(System.String field)
+        {
+            EnsureOpen();
+            IndexReader reader = fieldToReader[field];
+            return reader == null?null:reader.Norms(field);
+        }
+        
+        public override void  Norms(System.String field, byte[] result, int offset)
+        {
+            EnsureOpen();
+            IndexReader reader = fieldToReader[field];
+            if (reader != null)
+                reader.Norms(field, result, offset);
+        }
+        
+        protected internal override void  DoSetNorm(int n, System.String field, byte value_Renamed)
+        {
+            IndexReader reader = fieldToReader[field];
+            if (reader != null)
+                reader.DoSetNorm(n, field, value_Renamed);
+        }
+        
+        public override TermEnum Terms()
+        {
+            EnsureOpen();
+            return new ParallelTermEnum(this);
+        }
+        
+        public override TermEnum Terms(Term term)
+        {
+            EnsureOpen();
+            return new ParallelTermEnum(this, term);
+        }
+        
+        public override int DocFreq(Term term)
+        {
+            EnsureOpen();
+            IndexReader reader = fieldToReader[term.Field];
+            return reader == null?0:reader.DocFreq(term);
+        }
+        
+        public override TermDocs TermDocs(Term term)
+        {
+            EnsureOpen();
+            return new ParallelTermDocs(this, term);
+        }
+        
+        public override TermDocs TermDocs()
+        {
+            EnsureOpen();
+            return new ParallelTermDocs(this);
+        }
+        
+        public override TermPositions TermPositions(Term term)
+        {
+            EnsureOpen();
+            return new ParallelTermPositions(this, term);
+        }
+        
+        public override TermPositions TermPositions()
+        {
+            EnsureOpen();
+            return new ParallelTermPositions(this);
+        }
+
+        /// <summary> Checks recursively if all subreaders are up to date. </summary>
+        public override bool IsCurrent()
+        {
+            foreach (var reader in readers)
+            {
+                if (!reader.IsCurrent())
+                {
+                    return false;
+                }
+            }
+
+            // all subreaders are up to date
+            return true;
+        }
+
+        /// <summary> Checks recursively if all subindexes are optimized </summary>
+        public override bool IsOptimized()
+        {
+            foreach (var reader in readers)
+            {
+                if (!reader.IsOptimized())
+                {
+                    return false;
+                }
+            }
+
+            // all subindexes are optimized
+            return true;
+        }
+
+
+        /// <summary>Not implemented.</summary>
+        /// <throws>  NotSupportedException </throws>
+        public override long Version
+        {
+            get { throw new System.NotSupportedException("ParallelReader does not support this method."); }
+        }
+
+        // for testing
+        public /*internal*/ virtual IndexReader[] GetSubReaders()
+        {
+            return readers.ToArray();
+        }
 
         protected internal override void DoCommit(IDictionary<string, string> commitUserData)
-		{
-			foreach(var reader in readers)
-				reader.Commit(commitUserData);
-		}
-		
-		protected internal override void  DoClose()
-		{
-			lock (this)
-			{
-				for (int i = 0; i < readers.Count; i++)
-				{
-					if (decrefOnClose[i])
-					{
-						readers[i].DecRef();
-					}
-					else
-					{
-						readers[i].Close();
-					}
-				}
-			}
+        {
+            foreach(var reader in readers)
+                reader.Commit(commitUserData);
+        }
+        
+        protected internal override void  DoClose()
+        {
+            lock (this)
+            {
+                for (int i = 0; i < readers.Count; i++)
+                {
+                    if (decrefOnClose[i])
+                    {
+                        readers[i].DecRef();
+                    }
+                    else
+                    {
+                        readers[i].Close();
+                    }
+                }
+            }
 
             Lucene.Net.Search.FieldCache_Fields.DEFAULT.Purge(this);
-		}
+        }
 
         public override System.Collections.Generic.ICollection<string> GetFieldNames(IndexReader.FieldOption fieldNames)
-		{
-			EnsureOpen();
+        {
+            EnsureOpen();
             ISet<string> fieldSet = Lucene.Net.Support.Compatibility.SetFactory.CreateHashSet<string>();
-			foreach(var reader in readers)
-			{
-				ICollection<string> names = reader.GetFieldNames(fieldNames);
+            foreach(var reader in readers)
+            {
+                ICollection<string> names = reader.GetFieldNames(fieldNames);
                 fieldSet.UnionWith(names);
-			}
-			return fieldSet;
-		}
-		
-		private class ParallelTermEnum : TermEnum
-		{
-			private void  InitBlock(ParallelReader enclosingInstance)
-			{
-				this.enclosingInstance = enclosingInstance;
-			}
-			private ParallelReader enclosingInstance;
-			public ParallelReader Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			private System.String field;
-			private IEnumerator<string> fieldIterator;
-			private TermEnum termEnum;
-
-		    private bool isDisposed;
-			
-			public ParallelTermEnum(ParallelReader enclosingInstance)
-			{
-				InitBlock(enclosingInstance);
-				try
-				{
-					field = Enclosing_Instance.fieldToReader.Keys.First();
-				}
-				catch (ArgumentOutOfRangeException)
-				{
-					// No fields, so keep field == null, termEnum == null
-					return;
-				}
-				if (field != null)
-					termEnum = Enclosing_Instance.fieldToReader[field].Terms();
-			}
-			
-			public ParallelTermEnum(ParallelReader enclosingInstance, Term term)
-			{
-				InitBlock(enclosingInstance);
-				field = term.Field;
-				IndexReader reader = Enclosing_Instance.fieldToReader[field];
-				if (reader != null)
-					termEnum = reader.Terms(term);
-			}
-			
-			public override bool Next()
-			{
-				if (termEnum == null)
-					return false;
-				
-				// another term in this field?
-				if (termEnum.Next() && (System.Object) termEnum.Term.Field == (System.Object) field)
-					return true; // yes, keep going
-				
-				termEnum.Close(); // close old termEnum
-				
-				// find the next field with terms, if any
-				if (fieldIterator == null)
-				{
+            }
+            return fieldSet;
+        }
+        
+        private class ParallelTermEnum : TermEnum
+        {
+            private void  InitBlock(ParallelReader enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
+            private ParallelReader enclosingInstance;
+            public ParallelReader Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+                
+            }
+            private System.String field;
+            private IEnumerator<string> fieldIterator;
+            private TermEnum termEnum;
+
+            private bool isDisposed;
+            
+            public ParallelTermEnum(ParallelReader enclosingInstance)
+            {
+                InitBlock(enclosingInstance);
+                try
+                {
+                    field = Enclosing_Instance.fieldToReader.Keys.First();
+                }
+                catch (System.InvalidOperationException)
+                {
+                    // First() throws InvalidOperationException when there are no
+                    // fields, so keep field == null, termEnum == null
+                    return;
+                }
+                if (field != null)
+                    termEnum = Enclosing_Instance.fieldToReader[field].Terms();
+            }
+            
+            public ParallelTermEnum(ParallelReader enclosingInstance, Term term)
+            {
+                InitBlock(enclosingInstance);
+                field = term.Field;
+                IndexReader reader = Enclosing_Instance.fieldToReader[field];
+                if (reader != null)
+                    termEnum = reader.Terms(term);
+            }
+            
+            public override bool Next()
+            {
+                if (termEnum == null)
+                    return false;
+                
+                // another term in this field?
+                if (termEnum.Next() && (System.Object) termEnum.Term.Field == (System.Object) field)
+                    return true; // yes, keep going
+                
+                termEnum.Close(); // close old termEnum
+                
+                // find the next field with terms, if any
+                if (fieldIterator == null)
+                {
                     var newList = new List<string>();  
                     if (Enclosing_Instance.fieldToReader != null && Enclosing_Instance.fieldToReader.Count > 0)
                     {
@@ -609,39 +609,39 @@ namespace Lucene.Net.Index
                     }
 
                     fieldIterator = newList.Skip(1).GetEnumerator(); // Skip field to get next one
-				}
-				while (fieldIterator.MoveNext())
-				{
-					field = fieldIterator.Current;
-					termEnum = Enclosing_Instance.fieldToReader[field].Terms(new Term(field));
-					Term term = termEnum.Term;
-					if (term != null && (System.Object) term.Field == (System.Object) field)
-						return true;
-					else
-						termEnum.Close();
-				}
-				
-				return false; // no more fields
-			}
-
-		    public override Term Term
-		    {
-		        get
-		        {
-		            if (termEnum == null)
-		                return null;
-
-		            return termEnum.Term;
-		        }
-		    }
-
-		    public override int DocFreq()
-			{
-				if (termEnum == null)
-					return 0;
-				
-				return termEnum.DocFreq();
-			}
+                }
+                while (fieldIterator.MoveNext())
+                {
+                    field = fieldIterator.Current;
+                    termEnum = Enclosing_Instance.fieldToReader[field].Terms(new Term(field));
+                    Term term = termEnum.Term;
+                    if (term != null && (System.Object) term.Field == (System.Object) field)
+                        return true;
+                    else
+                        termEnum.Close();
+                }
+                
+                return false; // no more fields
+            }
+
+            public override Term Term
+            {
+                get
+                {
+                    if (termEnum == null)
+                        return null;
+
+                    return termEnum.Term;
+                }
+            }
+
+            public override int DocFreq()
+            {
+                if (termEnum == null)
+                    return 0;
+                
+                return termEnum.DocFreq();
+            }
 
             protected override void Dispose(bool disposing)
             {
@@ -655,87 +655,87 @@ namespace Lucene.Net.Index
 
                 isDisposed = true;
             }
-		}
-		
-		// wrap a TermDocs in order to support seek(Term)
-		private class ParallelTermDocs : TermDocs
-		{
-			private void  InitBlock(ParallelReader enclosingInstance)
-			{
-				this.enclosingInstance = enclosingInstance;
-			}
-			private ParallelReader enclosingInstance;
-			public ParallelReader Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			protected internal TermDocs termDocs;
-
-		    private bool isDisposed;
-			
-			public ParallelTermDocs(ParallelReader enclosingInstance)
-			{
-				InitBlock(enclosingInstance);
-			}
-			public ParallelTermDocs(ParallelReader enclosingInstance, Term term)
-			{
-				InitBlock(enclosingInstance);
+        }
+        
+        // wrap a TermDocs in order to support seek(Term)
+        private class ParallelTermDocs : TermDocs
+        {
+            private void  InitBlock(ParallelReader enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
+            private ParallelReader enclosingInstance;
+            public ParallelReader Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+                
+            }
+            protected internal TermDocs termDocs;
+
+            private bool isDisposed;
+            
+            public ParallelTermDocs(ParallelReader enclosingInstance)
+            {
+                InitBlock(enclosingInstance);
+            }
+            public ParallelTermDocs(ParallelReader enclosingInstance, Term term)
+            {
+                InitBlock(enclosingInstance);
                 if(term == null)
                     termDocs = (Enclosing_Instance.readers.Count == 0)
                                    ? null
                                    : Enclosing_Instance.readers[0].TermDocs(null);
                 else
                     Seek(term);
-			}
-
-		    public virtual int Doc
-		    {
-		        get { return termDocs.Doc; }
-		    }
-
-		    public virtual int Freq
-		    {
-		        get { return termDocs.Freq; }
-		    }
-
-		    public virtual void  Seek(Term term)
-			{
-				IndexReader reader = Enclosing_Instance.fieldToReader[term.Field];
-				termDocs = reader != null?reader.TermDocs(term):null;
-			}
-			
-			public virtual void  Seek(TermEnum termEnum)
-			{
-				Seek(termEnum.Term);
-			}
-			
-			public virtual bool Next()
-			{
-				if (termDocs == null)
-					return false;
-				
-				return termDocs.Next();
-			}
-			
-			public virtual int Read(int[] docs, int[] freqs)
-			{
-				if (termDocs == null)
-					return 0;
-				
-				return termDocs.Read(docs, freqs);
-			}
-			
-			public virtual bool SkipTo(int target)
-			{
-				if (termDocs == null)
-					return false;
-				
-				return termDocs.SkipTo(target);
-			}
+            }
+
+            public virtual int Doc
+            {
+                get { return termDocs.Doc; }
+            }
+
+            public virtual int Freq
+            {
+                get { return termDocs.Freq; }
+            }
+
+            public virtual void  Seek(Term term)
+            {
+                IndexReader reader = Enclosing_Instance.fieldToReader[term.Field];
+                termDocs = reader != null?reader.TermDocs(term):null;
+            }
+            
+            public virtual void  Seek(TermEnum termEnum)
+            {
+                Seek(termEnum.Term);
+            }
+            
+            public virtual bool Next()
+            {
+                if (termDocs == null)
+                    return false;
+                
+                return termDocs.Next();
+            }
+            
+            public virtual int Read(int[] docs, int[] freqs)
+            {
+                if (termDocs == null)
+                    return 0;
+                
+                return termDocs.Read(docs, freqs);
+            }
+            
+            public virtual bool SkipTo(int target)
+            {
+                if (termDocs == null)
+                    return false;
+                
+                return termDocs.SkipTo(target);
+            }
 
             [Obsolete("Use Dispose() instead")]
             public virtual void Close()
@@ -743,10 +743,10 @@ namespace Lucene.Net.Index
                 Dispose();
             }
 
-		    public void Dispose()
-		    {
-		        Dispose(true);
-		    }
+            public void Dispose()
+            {
+                Dispose(true);
+            }
 
             protected virtual void Dispose(bool disposing)
             {
@@ -760,63 +760,63 @@ namespace Lucene.Net.Index
 
                 isDisposed = true;
             }
-		}
-		
-		private class ParallelTermPositions:ParallelTermDocs, TermPositions
-		{
-			private void  InitBlock(ParallelReader enclosingInstance)
-			{
-				this.enclosingInstance = enclosingInstance;
-			}
-			private ParallelReader enclosingInstance;
-			public new ParallelReader Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			
-			public ParallelTermPositions(ParallelReader enclosingInstance):base(enclosingInstance)
-			{
-				InitBlock(enclosingInstance);
-			}
-			public ParallelTermPositions(ParallelReader enclosingInstance, Term term):base(enclosingInstance)
-			{
-				InitBlock(enclosingInstance);
-				Seek(term);
-			}
-			
-			public override void  Seek(Term term)
-			{
-				IndexReader reader = Enclosing_Instance.fieldToReader[term.Field];
-				termDocs = reader != null?reader.TermPositions(term):null;
-			}
-			
-			public virtual int NextPosition()
-			{
-				// It is an error to call this if there is no next position, e.g. if termDocs==null
-				return ((TermPositions) termDocs).NextPosition();
-			}
-
-		    public virtual int PayloadLength
-		    {
-		        get { return ((TermPositions) termDocs).PayloadLength; }
-		    }
-
-		    public virtual byte[] GetPayload(byte[] data, int offset)
-			{
-				return ((TermPositions) termDocs).GetPayload(data, offset);
-			}
-			
-			
-			// TODO: Remove warning after API has been finalized
-
-		    public virtual bool IsPayloadAvailable
-		    {
-		        get { return ((TermPositions) termDocs).IsPayloadAvailable; }
-		    }
-		}
-	}
+        }
+        
+        private class ParallelTermPositions:ParallelTermDocs, TermPositions
+        {
+            private void  InitBlock(ParallelReader enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
+            private ParallelReader enclosingInstance;
+            public new ParallelReader Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+                
+            }
+            
+            public ParallelTermPositions(ParallelReader enclosingInstance):base(enclosingInstance)
+            {
+                InitBlock(enclosingInstance);
+            }
+            public ParallelTermPositions(ParallelReader enclosingInstance, Term term):base(enclosingInstance)
+            {
+                InitBlock(enclosingInstance);
+                Seek(term);
+            }
+            
+            public override void  Seek(Term term)
+            {
+                IndexReader reader = Enclosing_Instance.fieldToReader[term.Field];
+                termDocs = reader != null?reader.TermPositions(term):null;
+            }
+            
+            public virtual int NextPosition()
+            {
+                // It is an error to call this if there is no next position, e.g. if termDocs==null
+                return ((TermPositions) termDocs).NextPosition();
+            }
+
+            public virtual int PayloadLength
+            {
+                get { return ((TermPositions) termDocs).PayloadLength; }
+            }
+
+            public virtual byte[] GetPayload(byte[] data, int offset)
+            {
+                return ((TermPositions) termDocs).GetPayload(data, offset);
+            }
+            
+            
+            // TODO: Remove warning after API has been finalized
+
+            public virtual bool IsPayloadAvailable
+            {
+                get { return ((TermPositions) termDocs).IsPayloadAvailable; }
+            }
+        }
+    }
 }
\ No newline at end of file
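
For reference, the ParallelReader above is driven entirely through Add(): every
subreader must report the same MaxDoc and NumDocs, and stored fields can be
skipped per subreader. The sketch below is illustrative only: the index paths
and the two-index split are hypothetical, and it assumes both indexes were
written with identical document numbering (Lucene.Net 3.x API):

    using System;
    using System.IO;
    using Lucene.Net.Documents;
    using Lucene.Net.Index;
    using Lucene.Net.Store;

    class ParallelReaderExample
    {
        static void Main()
        {
            // Open two indexes written with identical document numbering;
            // Add() rejects subreaders whose MaxDoc or NumDocs differ.
            IndexReader main = IndexReader.Open(FSDirectory.Open(new DirectoryInfo("index-main")), true);
            IndexReader extras = IndexReader.Open(FSDirectory.Open(new DirectoryInfo("index-extras")), true);

            ParallelReader reader = new ParallelReader(); // closes subreaders when disposed
            reader.Add(main);
            reader.Add(extras, true); // ignoreStoredFields: skip this subreader in Document(n)

            Document doc = reader.Document(0); // fields from both subindexes, merged
            Console.WriteLine(doc);
            reader.Dispose();
        }
    }

Passing true as the second Add argument keeps that subreader out of
storedFieldReaders, so Document(n) never loads stored fields from it.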

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/Payload.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/Payload.cs b/src/core/Index/Payload.cs
index a6f391a..9c00c52 100644
--- a/src/core/Index/Payload.cs
+++ b/src/core/Index/Payload.cs
@@ -22,196 +22,196 @@ using ArrayUtil = Lucene.Net.Util.ArrayUtil;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>  A Payload is metadata that can be stored together with each occurrence 
-	/// of a term. This metadata is stored inline in the posting list of the
-	/// specific term.  
-	/// <p/>
-	/// To store payloads in the index a <see cref="TokenStream"/> has to be used that
-	/// produces payload data.
-	/// <p/>
+    
+    /// <summary>  A Payload is metadata that can be stored together with each occurrence 
+    /// of a term. This metadata is stored inline in the posting list of the
+    /// specific term.  
+    /// <p/>
+    /// To store payloads in the index a <see cref="TokenStream"/> has to be used that
+    /// produces payload data.
+    /// <p/>
     /// Use <see cref="TermPositions.PayloadLength"/> and <see cref="TermPositions.GetPayload(byte[], int)"/>
-	/// to retrieve the payloads from the index.<br/>
-	/// 
-	/// </summary>
-	[Serializable]
-	public class Payload : System.ICloneable
-	{
-		/// <summary>the byte array containing the payload data </summary>
-		protected internal byte[] data;
-		
-		/// <summary>the offset within the byte array </summary>
-		protected internal int internalOffset;
-		
-		/// <summary>the length of the payload data </summary>
-		protected internal int internalLength;
-		
-		/// <summary>Creates an empty payload and does not allocate a byte array. </summary>
-		public Payload()
-		{
-			// nothing to do
-		}
-		
-		/// <summary> Creates a new payload with the the given array as data.
-		/// A reference to the passed-in array is held, i. e. no 
-		/// copy is made.
-		/// 
-		/// </summary>
-		/// <param name="data">the data of this payload
-		/// </param>
-		public Payload(byte[] data):this(data, 0, data.Length)
-		{
-		}
-		
-		/// <summary> Creates a new payload with the the given array as data. 
-		/// A reference to the passed-in array is held, i. e. no 
-		/// copy is made.
-		/// 
-		/// </summary>
-		/// <param name="data">the data of this payload
-		/// </param>
-		/// <param name="offset">the offset in the data byte array
-		/// </param>
-		/// <param name="length">the length of the data
-		/// </param>
-		public Payload(byte[] data, int offset, int length)
-		{
-			if (offset < 0 || offset + length > data.Length)
-			{
-				throw new System.ArgumentException();
-			}
-			this.data = data;
-			this.internalOffset = offset;
-			this.internalLength = length;
-		}
+    /// to retrieve the payloads from the index.<br/>
+    /// 
+    /// </summary>
+    [Serializable]
+    public class Payload : System.ICloneable
+    {
+        /// <summary>the byte array containing the payload data </summary>
+        protected internal byte[] data;
+        
+        /// <summary>the offset within the byte array </summary>
+        protected internal int internalOffset;
+        
+        /// <summary>the length of the payload data </summary>
+        protected internal int internalLength;
+        
+        /// <summary>Creates an empty payload and does not allocate a byte array. </summary>
+        public Payload()
+        {
+            // nothing to do
+        }
+        
+        /// <summary> Creates a new payload with the given array as data.
+        /// A reference to the passed-in array is held, i.e. no 
+        /// copy is made.
+        /// 
+        /// </summary>
+        /// <param name="data">the data of this payload
+        /// </param>
+        public Payload(byte[] data):this(data, 0, data.Length)
+        {
+        }
+        
+        /// <summary> Creates a new payload with the given array as data. 
+        /// A reference to the passed-in array is held, i.e. no 
+        /// copy is made.
+        /// 
+        /// </summary>
+        /// <param name="data">the data of this payload
+        /// </param>
+        /// <param name="offset">the offset in the data byte array
+        /// </param>
+        /// <param name="length">the length of the data
+        /// </param>
+        public Payload(byte[] data, int offset, int length)
+        {
+            if (offset < 0 || offset + length > data.Length)
+            {
+                throw new System.ArgumentException();
+            }
+            this.data = data;
+            this.internalOffset = offset;
+            this.internalLength = length;
+        }
 
-	    /// <summary> Sets this payloads data. 
-		/// A reference to the passed-in array is held, i. e. no 
-		/// copy is made.
-		/// </summary>
-		public virtual void  SetData(byte[] value, int offset, int length)
-		{
-			this.data = value;
-			this.internalOffset = offset;
-			this.internalLength = length;
-		}
+        /// <summary> Sets this payload's data. 
+        /// A reference to the passed-in array is held, i.e. no 
+        /// copy is made.
+        /// </summary>
+        public virtual void  SetData(byte[] value, int offset, int length)
+        {
+            this.data = value;
+            this.internalOffset = offset;
+            this.internalLength = length;
+        }
 
-	    /// <summary> Gets or sets a reference to the underlying byte array
-	    /// that holds this payloads data.  Data is not copied.
-	    /// </summary>
-	    public virtual void SetData(byte[] value)
-	    {
-	        SetData(value, 0, value.Length);
-	    }
+        /// <summary> Sets a reference to the underlying byte array
+        /// that holds this payload's data.  The data is not copied.
+        /// </summary>
+        public virtual void SetData(byte[] value)
+        {
+            SetData(value, 0, value.Length);
+        }
 
-	    /// <summary> Gets or sets a reference to the underlying byte array
-	    /// that holds this payloads data.  Data is not copied.
-	    /// </summary>
-	    public virtual byte[] GetData()
-	    {
-	        return this.data;
-	    }
+        /// <summary> Returns a reference to the underlying byte array
+        /// that holds this payload's data.  The data is not copied.
+        /// </summary>
+        public virtual byte[] GetData()
+        {
+            return this.data;
+        }
 
-	    /// <summary> Returns the offset in the underlying byte array </summary>
-	    public virtual int Offset
-	    {
-	        get { return this.internalOffset; }
-	    }
+        /// <summary> Returns the offset in the underlying byte array </summary>
+        public virtual int Offset
+        {
+            get { return this.internalOffset; }
+        }
 
-	    /// <summary> Returns the length of the payload data. </summary>
-	    public virtual int Length
-	    {
-	        get { return this.internalLength; }
-	    }
+        /// <summary> Returns the length of the payload data. </summary>
+        public virtual int Length
+        {
+            get { return this.internalLength; }
+        }
 
-	    /// <summary> Returns the byte at the given index.</summary>
-		public virtual byte ByteAt(int index)
-		{
-			if (0 <= index && index < this.internalLength)
-			{
-				return this.data[this.internalOffset + index];
-			}
-			throw new System. IndexOutOfRangeException("Index of bound " + index);
-		}
-		
-		/// <summary> Allocates a new byte array, copies the payload data into it and returns it. </summary>
-		public virtual byte[] ToByteArray()
-		{
-			byte[] retArray = new byte[this.internalLength];
-			Array.Copy(this.data, this.internalOffset, retArray, 0, this.internalLength);
-			return retArray;
-		}
-		
-		/// <summary> Copies the payload data to a byte array.
-		/// 
-		/// </summary>
-		/// <param name="target">the target byte array
-		/// </param>
-		/// <param name="targetOffset">the offset in the target byte array
-		/// </param>
-		public virtual void  CopyTo(byte[] target, int targetOffset)
-		{
-			if (this.internalLength > target.Length + targetOffset)
-			{
-				throw new System.IndexOutOfRangeException();
-			}
-			Array.Copy(this.data, this.internalOffset, target, targetOffset, this.internalLength);
-		}
-		
-		/// <summary> Clones this payload by creating a copy of the underlying
-		/// byte array.
-		/// </summary>
-		public virtual System.Object Clone()
-		{
-			try
-			{
-				// Start with a shallow copy of data
-				Payload clone = (Payload) base.MemberwiseClone();
-				// Only copy the part of data that belongs to this Payload
-				if (internalOffset == 0 && internalLength == data.Length)
-				{
-					// It is the whole thing, so just clone it.
-					clone.data = new byte[data.Length];
-					data.CopyTo(clone.data, 0);
-				}
-				else
-				{
-					// Just get the part
-					clone.data = this.ToByteArray();
-					clone.internalOffset = 0;
-				}
-				return clone;
-			}
-			catch (System.Exception e)
-			{
-				throw new System.SystemException(e.Message, e); // shouldn't happen
-			}
-		}
-		
-		public  override bool Equals(System.Object obj)
-		{
-			if (obj == this)
-				return true;
-			if (obj is Payload)
-			{
-				Payload other = (Payload) obj;
-				if (internalLength == other.internalLength)
-				{
-					for (int i = 0; i < internalLength; i++)
-						if (data[internalOffset + i] != other.data[other.internalOffset + i])
-							return false;
-					return true;
-				}
-				else
-					return false;
-			}
-			else
-				return false;
-		}
-		
-		public override int GetHashCode()
-		{
-			return ArrayUtil.HashCode(data, internalOffset, internalOffset + internalLength);
-		}
-	}
+        /// <summary> Returns the byte at the given index.</summary>
+        public virtual byte ByteAt(int index)
+        {
+            if (0 <= index && index < this.internalLength)
+            {
+                return this.data[this.internalOffset + index];
+            }
+            throw new System.IndexOutOfRangeException("Index out of bounds: " + index);
+        }
+        
+        /// <summary> Allocates a new byte array, copies the payload data into it and returns it. </summary>
+        public virtual byte[] ToByteArray()
+        {
+            byte[] retArray = new byte[this.internalLength];
+            Array.Copy(this.data, this.internalOffset, retArray, 0, this.internalLength);
+            return retArray;
+        }
+        
+        /// <summary> Copies the payload data to a byte array.
+        /// 
+        /// </summary>
+        /// <param name="target">the target byte array
+        /// </param>
+        /// <param name="targetOffset">the offset in the target byte array
+        /// </param>
+        public virtual void  CopyTo(byte[] target, int targetOffset)
+        {
+            if (targetOffset + this.internalLength > target.Length)
+            {
+                throw new System.IndexOutOfRangeException();
+            }
+            Array.Copy(this.data, this.internalOffset, target, targetOffset, this.internalLength);
+        }
+        
+        /// <summary> Clones this payload by creating a copy of the underlying
+        /// byte array.
+        /// </summary>
+        public virtual System.Object Clone()
+        {
+            try
+            {
+                // Start with a shallow copy of data
+                Payload clone = (Payload) base.MemberwiseClone();
+                // Only copy the part of data that belongs to this Payload
+                if (internalOffset == 0 && internalLength == data.Length)
+                {
+                    // It is the whole thing, so just clone it.
+                    clone.data = new byte[data.Length];
+                    data.CopyTo(clone.data, 0);
+                }
+                else
+                {
+                    // Just get the part
+                    clone.data = this.ToByteArray();
+                    clone.internalOffset = 0;
+                }
+                return clone;
+            }
+            catch (System.Exception e)
+            {
+                throw new System.SystemException(e.Message, e); // shouldn't happen
+            }
+        }
+        
+        public  override bool Equals(System.Object obj)
+        {
+            if (obj == this)
+                return true;
+            if (obj is Payload)
+            {
+                Payload other = (Payload) obj;
+                if (internalLength == other.internalLength)
+                {
+                    for (int i = 0; i < internalLength; i++)
+                        if (data[internalOffset + i] != other.data[other.internalOffset + i])
+                            return false;
+                    return true;
+                }
+                else
+                    return false;
+            }
+            else
+                return false;
+        }
+        
+        public override int GetHashCode()
+        {
+            return ArrayUtil.HashCode(data, internalOffset, internalOffset + internalLength);
+        }
+    }
 }
\ No newline at end of file
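
The reference-holding semantics documented above are easy to trip over:
SetData and GetData share the caller's array, while ToByteArray and Clone
copy it. A minimal sketch (the byte values are arbitrary):

    using System;
    using Lucene.Net.Index;

    class PayloadExample
    {
        static void Main()
        {
            byte[] buffer = { 0, 1, 2, 3, 4, 5 };

            // Holds a reference to buffer; only bytes [2, 5) belong to this payload.
            Payload payload = new Payload(buffer, 2, 3);
            Console.WriteLine(payload.ByteAt(0));            // 2 (== buffer[2])
            Console.WriteLine(payload.ToByteArray().Length); // 3 (a fresh copy)

            // Clone() copies the underlying bytes, so a later write to buffer
            // is seen by the original payload but not by the clone.
            Payload clone = (Payload) payload.Clone();
            buffer[2] = 42;
            Console.WriteLine(payload.Equals(clone));        // False
        }
    }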

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/PositionBasedTermVectorMapper.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/PositionBasedTermVectorMapper.cs b/src/core/Index/PositionBasedTermVectorMapper.cs
index af548a7..3504262 100644
--- a/src/core/Index/PositionBasedTermVectorMapper.cs
+++ b/src/core/Index/PositionBasedTermVectorMapper.cs
@@ -21,156 +21,156 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> For each Field, store position by position information.  It ignores frequency information
-	/// <p/>
-	/// This is not thread-safe.
-	/// </summary>
-	public class PositionBasedTermVectorMapper:TermVectorMapper
-	{
-		private IDictionary<string, IDictionary<int, TVPositionInfo>> fieldToTerms;
-		
-		private System.String currentField;
-		/// <summary> A Map of Integer and TVPositionInfo</summary>
+    
+    /// <summary> For each field, stores term vector information position by position.  Frequency information is ignored.
+    /// <p/>
+    /// This is not thread-safe.
+    /// </summary>
+    public class PositionBasedTermVectorMapper:TermVectorMapper
+    {
+        private IDictionary<string, IDictionary<int, TVPositionInfo>> fieldToTerms;
+        
+        private System.String currentField;
+        /// <summary> A map from position (int) to TVPositionInfo</summary>
         private IDictionary<int, TVPositionInfo> currentPositions;
-		private bool storeOffsets;
-		
-		public PositionBasedTermVectorMapper():base(false, false)
-		{
-		}
-		
-		public PositionBasedTermVectorMapper(bool ignoringOffsets):base(false, ignoringOffsets)
-		{
-		}
+        private bool storeOffsets;
+        
+        public PositionBasedTermVectorMapper():base(false, false)
+        {
+        }
+        
+        public PositionBasedTermVectorMapper(bool ignoringOffsets):base(false, ignoringOffsets)
+        {
+        }
 
-	    /// <summary> Never ignores positions.  This mapper doesn't make much sense unless there are positions</summary>
-	    /// <value> false </value>
-	    public override bool IsIgnoringPositions
-	    {
-	        get { return false; }
-	    }
+        /// <summary> Never ignores positions.  This mapper doesn't make much sense unless there are positions.</summary>
+        /// <value> false </value>
+        public override bool IsIgnoringPositions
+        {
+            get { return false; }
+        }
 
-	    /// <summary> Callback for the TermVectorReader. </summary>
-		/// <param name="term">
-		/// </param>
-		/// <param name="frequency">
-		/// </param>
-		/// <param name="offsets">
-		/// </param>
-		/// <param name="positions">
-		/// </param>
-		public override void  Map(System.String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions)
-		{
-			for (int i = 0; i < positions.Length; i++)
-			{
-				System.Int32 posVal =  positions[i];
-				TVPositionInfo pos = currentPositions[posVal];
-				if (pos == null)
-				{
-					pos = new TVPositionInfo(positions[i], storeOffsets);
-					currentPositions[posVal] = pos;
-				}
-				pos.addTerm(term, offsets != null ? offsets[i] : TermVectorOffsetInfo.Null);
-			}
-		}
-		
-		/// <summary> Callback mechanism used by the TermVectorReader</summary>
-		/// <param name="field"> The field being read
-		/// </param>
-		/// <param name="numTerms">The number of terms in the vector
-		/// </param>
-		/// <param name="storeOffsets">Whether offsets are available
-		/// </param>
-		/// <param name="storePositions">Whether positions are available
-		/// </param>
-		public override void  SetExpectations(System.String field, int numTerms, bool storeOffsets, bool storePositions)
-		{
-			if (storePositions == false)
-			{
-				throw new System.SystemException("You must store positions in order to use this Mapper");
-			}
-			if (storeOffsets == true)
-			{
-				//ignoring offsets
-			}
-			fieldToTerms = new HashMap<string, IDictionary<int, TVPositionInfo>>(numTerms);
-			this.storeOffsets = storeOffsets;
-			currentField = field;
-			currentPositions = new HashMap<int, TVPositionInfo>();
-			fieldToTerms[currentField] = currentPositions;
-		}
+        /// <summary> Callback for the TermVectorReader. </summary>
+        /// <param name="term">
+        /// </param>
+        /// <param name="frequency">
+        /// </param>
+        /// <param name="offsets">
+        /// </param>
+        /// <param name="positions">
+        /// </param>
+        public override void  Map(System.String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions)
+        {
+            for (int i = 0; i < positions.Length; i++)
+            {
+                System.Int32 posVal =  positions[i];
+                TVPositionInfo pos = currentPositions[posVal];
+                if (pos == null)
+                {
+                    pos = new TVPositionInfo(positions[i], storeOffsets);
+                    currentPositions[posVal] = pos;
+                }
+                pos.addTerm(term, offsets != null ? offsets[i] : TermVectorOffsetInfo.Null);
+            }
+        }
+        
+        /// <summary> Callback mechanism used by the TermVectorReader</summary>
+        /// <param name="field"> The field being read
+        /// </param>
+        /// <param name="numTerms">The number of terms in the vector
+        /// </param>
+        /// <param name="storeOffsets">Whether offsets are available
+        /// </param>
+        /// <param name="storePositions">Whether positions are available
+        /// </param>
+        public override void  SetExpectations(System.String field, int numTerms, bool storeOffsets, bool storePositions)
+        {
+            if (storePositions == false)
+            {
+                throw new System.SystemException("You must store positions in order to use this Mapper");
+            }
+            if (storeOffsets == true)
+            {
+                //ignoring offsets
+            }
+            fieldToTerms = new HashMap<string, IDictionary<int, TVPositionInfo>>(numTerms);
+            this.storeOffsets = storeOffsets;
+            currentField = field;
+            currentPositions = new HashMap<int, TVPositionInfo>();
+            fieldToTerms[currentField] = currentPositions;
+        }
 
-	    /// <summary> Get the mapping between fields and terms, sorted by the comparator
-	    /// 
-	    /// </summary>
-	    /// <value> A map between field names and a Map. The sub-Map key is the position as an integer; the value is <see cref="Lucene.Net.Index.PositionBasedTermVectorMapper.TVPositionInfo" />. </value>
-	    public virtual IDictionary<string, IDictionary<int, TVPositionInfo>> FieldToTerms
-	    {
-	        get { return fieldToTerms; }
-	    }
+        /// <summary> Get the mapping between fields and terms, sorted by the comparator
+        /// 
+        /// </summary>
+        /// <value> A map between field names and a Map. The sub-Map key is the position as an integer; the value is <see cref="Lucene.Net.Index.PositionBasedTermVectorMapper.TVPositionInfo" />. </value>
+        public virtual IDictionary<string, IDictionary<int, TVPositionInfo>> FieldToTerms
+        {
+            get { return fieldToTerms; }
+        }
 
-	    /// <summary> Container for a term at a position</summary>
-		public class TVPositionInfo
-		{
-			/// <summary> </summary>
-			/// <returns> The position of the term
-			/// </returns>
-			virtual public int Position
-			{
-				get
-				{
-					return position;
-				}
-				
-			}
-			/// <summary> Note, there may be multiple terms at the same position</summary>
-			/// <returns> A List of Strings
-			/// </returns>
-			virtual public IList<String> Terms
-			{
-				get
-				{
-					return terms;
-				}
-				
-			}
-			/// <summary> Parallel list (to <see cref="Terms" />) of TermVectorOffsetInfo objects.  
-			/// There may be multiple entries since there may be multiple terms at a position</summary>
-			/// <returns> A List of TermVectorOffsetInfo objects, if offsets are stored.
-			/// </returns>
-			virtual public IList<TermVectorOffsetInfo> Offsets
-			{
-				get
-				{
-					return offsets;
-				}
-				
-			}
-			private int position;
-			//a list of Strings
-			private IList<string> terms;
-			//A list of TermVectorOffsetInfo
-			private IList<TermVectorOffsetInfo> offsets;
-			
-			
-			public TVPositionInfo(int position, bool storeOffsets)
-			{
-				this.position = position;
-				terms = new List<string>();
-				if (storeOffsets)
-				{
-					offsets = new List<TermVectorOffsetInfo>();
-				}
-			}
-			
-			internal virtual void  addTerm(System.String term, TermVectorOffsetInfo info)
-			{
-				terms.Add(term);
-				if (offsets != null)
-				{
-					offsets.Add(info);
-				}
-			}
-		}
-	}
+        /// <summary> Container for a term at a position</summary>
+        public class TVPositionInfo
+        {
+            /// <summary> </summary>
+            /// <returns> The position of the term
+            /// </returns>
+            virtual public int Position
+            {
+                get
+                {
+                    return position;
+                }
+                
+            }
+            /// <summary> Note, there may be multiple terms at the same position</summary>
+            /// <returns> A List of Strings
+            /// </returns>
+            virtual public IList<String> Terms
+            {
+                get
+                {
+                    return terms;
+                }
+                
+            }
+            /// <summary> Parallel list (to <see cref="Terms" />) of TermVectorOffsetInfo objects.  
+            /// There may be multiple entries since there may be multiple terms at a position</summary>
+            /// <returns> A List of TermVectorOffsetInfo objects, if offsets are stored.
+            /// </returns>
+            virtual public IList<TermVectorOffsetInfo> Offsets
+            {
+                get
+                {
+                    return offsets;
+                }
+                
+            }
+            private int position;
+            //a list of Strings
+            private IList<string> terms;
+            //A list of TermVectorOffsetInfo
+            private IList<TermVectorOffsetInfo> offsets;
+            
+            
+            public TVPositionInfo(int position, bool storeOffsets)
+            {
+                this.position = position;
+                terms = new List<string>();
+                if (storeOffsets)
+                {
+                    offsets = new List<TermVectorOffsetInfo>();
+                }
+            }
+            
+            internal virtual void  addTerm(System.String term, TermVectorOffsetInfo info)
+            {
+                terms.Add(term);
+                if (offsets != null)
+                {
+                    offsets.Add(info);
+                }
+            }
+        }
+    }
 }
\ No newline at end of file
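
The Map/SetExpectations callbacks above are driven by the term-vector reader; here is a minimal usage sketch, assuming a Lucene.Net 3.x IndexReader named reader and a field "body" that was indexed with positions (those names are assumptions, not part of this patch):

    var mapper = new PositionBasedTermVectorMapper();
    reader.GetTermFreqVector(0, "body", mapper);   // doc 0: fires SetExpectations, then Map per term
    foreach (var field in mapper.FieldToTerms)     // field name -> (position -> TVPositionInfo)
        foreach (var entry in field.Value)
            foreach (var term in entry.Value.Terms)
                System.Console.WriteLine(field.Key + " @" + entry.Key + ": " + term);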

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/RawPostingList.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/RawPostingList.cs b/src/core/Index/RawPostingList.cs
index bffc2de..c3646b2 100644
--- a/src/core/Index/RawPostingList.cs
+++ b/src/core/Index/RawPostingList.cs
@@ -19,28 +19,28 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	
-	/// <summary>This is the base class for an in-memory posting list,
-	/// keyed by a Token.  <see cref="TermsHash" /> maintains a hash
-	/// table holding one instance of this per unique Token.
-	/// Consumers of TermsHash (<see cref="TermsHashConsumer" />) must
-	/// subclass this class with its own concrete class.
-	/// FreqProxTermsWriter.PostingList is a private inner class used 
-	/// for the freq/prox postings, and 
-	/// TermVectorsTermsWriter.PostingList is a private inner class
-	/// used to hold TermVectors postings. 
-	/// </summary>
-	
-	abstract class RawPostingList
-	{
-		internal static readonly int BYTES_SIZE;
-		internal int textStart;
-		internal int intStart;
-		internal int byteStart;
-		static RawPostingList()
-		{
-			BYTES_SIZE = DocumentsWriter.OBJECT_HEADER_BYTES + 3 * DocumentsWriter.INT_NUM_BYTE;
-		}
-	}
+    
+    
+    /// <summary>This is the base class for an in-memory posting list,
+    /// keyed by a Token.  <see cref="TermsHash" /> maintains a hash
+    /// table holding one instance of this per unique Token.
+    /// Consumers of TermsHash (<see cref="TermsHashConsumer" />) must
+    /// subclass this class with its own concrete class.
+    /// FreqProxTermsWriter.PostingList is a private inner class used 
+    /// for the freq/prox postings, and 
+    /// TermVectorsTermsWriter.PostingList is a private inner class
+    /// used to hold TermVectors postings. 
+    /// </summary>
+    
+    abstract class RawPostingList
+    {
+        internal static readonly int BYTES_SIZE;
+        internal int textStart;
+        internal int intStart;
+        internal int byteStart;
+        static RawPostingList()
+        {
+            BYTES_SIZE = DocumentsWriter.OBJECT_HEADER_BYTES + 3 * DocumentsWriter.INT_NUM_BYTE;
+        }
+    }
 }
\ No newline at end of file
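
The static initializer above only accounts for the shared header: with the DocumentsWriter constants in this codebase (OBJECT_HEADER_BYTES = 8, INT_NUM_BYTE = 4), BYTES_SIZE = 8 + 3 * 4 = 20 bytes per posting. A hedged sketch of how a consumer-specific subclass layers its own state on top (the field names are illustrative, not the actual FreqProxTermsWriter fields):

    sealed class SketchPostingList : RawPostingList
    {
        internal int lastDocID;    // last document this term occurred in
        internal int lastDocCode;  // doc delta, as it will be written to the freq stream
        // Size estimate in the same style as the initializer above:
        // OBJECT_HEADER_BYTES + 5 * INT_NUM_BYTE = 8 + 20 = 28 bytes.
    }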

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/ReadOnlyDirectoryReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/ReadOnlyDirectoryReader.cs b/src/core/Index/ReadOnlyDirectoryReader.cs
index 8f0f3b7..be168d6 100644
--- a/src/core/Index/ReadOnlyDirectoryReader.cs
+++ b/src/core/Index/ReadOnlyDirectoryReader.cs
@@ -21,25 +21,25 @@ using Directory = Lucene.Net.Store.Directory;
 
 namespace Lucene.Net.Index
 {
-	
-	public class ReadOnlyDirectoryReader:DirectoryReader
-	{
-		internal ReadOnlyDirectoryReader(Directory directory, SegmentInfos sis, IndexDeletionPolicy deletionPolicy, int termInfosIndexDivisor):base(directory, sis, deletionPolicy, true, termInfosIndexDivisor)
-		{
-		}
+    
+    public class ReadOnlyDirectoryReader:DirectoryReader
+    {
+        internal ReadOnlyDirectoryReader(Directory directory, SegmentInfos sis, IndexDeletionPolicy deletionPolicy, int termInfosIndexDivisor):base(directory, sis, deletionPolicy, true, termInfosIndexDivisor)
+        {
+        }
 
         internal ReadOnlyDirectoryReader(Directory directory, SegmentInfos infos, SegmentReader[] oldReaders, int[] oldStarts, System.Collections.Generic.IDictionary<string, byte[]> oldNormsCache, bool doClone, int termInfosIndexDivisor)
             : base(directory, infos, oldReaders, oldStarts, oldNormsCache, true, doClone, termInfosIndexDivisor)
         {
         }
 
-	    internal ReadOnlyDirectoryReader(IndexWriter writer, SegmentInfos infos, int termInfosIndexDivisor):base(writer, infos, termInfosIndexDivisor)
-		{
-		}
-		
-		protected internal override void  AcquireWriteLock()
-		{
-			ReadOnlySegmentReader.NoWrite();
-		}
-	}
+        internal ReadOnlyDirectoryReader(IndexWriter writer, SegmentInfos infos, int termInfosIndexDivisor):base(writer, infos, termInfosIndexDivisor)
+        {
+        }
+        
+        protected internal override void  AcquireWriteLock()
+        {
+            ReadOnlySegmentReader.NoWrite();
+        }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/ReadOnlySegmentReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/ReadOnlySegmentReader.cs b/src/core/Index/ReadOnlySegmentReader.cs
index 3c7c916..bd204c8 100644
--- a/src/core/Index/ReadOnlySegmentReader.cs
+++ b/src/core/Index/ReadOnlySegmentReader.cs
@@ -19,24 +19,24 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	public class ReadOnlySegmentReader:SegmentReader
-	{
-		
-		internal static void  NoWrite()
-		{
-			throw new System.NotSupportedException("This IndexReader cannot make any changes to the index (it was opened with readOnly = true)");
-		}
-		
-		protected internal override void  AcquireWriteLock()
-		{
-			NoWrite();
-		}
-		
-		// Not synchronized
-		public override bool IsDeleted(int n)
-		{
-			return deletedDocs != null && deletedDocs.Get(n);
-		}
-	}
+    
+    public class ReadOnlySegmentReader:SegmentReader
+    {
+        
+        internal static void  NoWrite()
+        {
+            throw new System.NotSupportedException("This IndexReader cannot make any changes to the index (it was opened with readOnly = true)");
+        }
+        
+        protected internal override void  AcquireWriteLock()
+        {
+            NoWrite();
+        }
+        
+        // Not synchronized
+        public override bool IsDeleted(int n)
+        {
+            return deletedDocs != null && deletedDocs.Get(n);
+        }
+    }
 }
\ No newline at end of file
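
ReadOnlyDirectoryReader and ReadOnlySegmentReader both route every mutating operation through AcquireWriteLock, so the failure mode is uniform. A minimal sketch of what a caller sees, assuming an existing index in a Directory named dir and the Lucene.Net 3.x IndexReader.Open(directory, readOnly) overload:

    using (IndexReader reader = IndexReader.Open(dir, true))   // readOnly = true
    {
        bool deleted = reader.IsDeleted(0);    // reads work normally
        try
        {
            reader.DeleteDocument(0);          // goes through AcquireWriteLock
        }
        catch (System.NotSupportedException)
        {
            // NoWrite() fired: the reader was opened with readOnly = true.
        }
    }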

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/ReusableStringReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/ReusableStringReader.cs b/src/core/Index/ReusableStringReader.cs
index 54c1b7d..5a3c86e 100644
--- a/src/core/Index/ReusableStringReader.cs
+++ b/src/core/Index/ReusableStringReader.cs
@@ -20,11 +20,11 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>Used by DocumentsWriter to implement a StringReader
-	/// that can be reset to a new string; we use this when
-	/// tokenizing the string value from a Field. 
-	/// </summary>
+    
+    /// <summary>Used by DocumentsWriter to implement a StringReader
+    /// that can be reset to a new string; we use this when
+    /// tokenizing the string value from a Field. 
+    /// </summary>
     sealed class ReusableStringReader : System.IO.TextReader
     {
         internal int upto;
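
The point of this class is allocation reuse: DocumentsWriter resets one reader to each new string value instead of constructing a fresh StringReader per field. A standalone sketch of the same pattern (a re-creation for illustration, not the internal class itself):

    sealed class ResettableStringReader : System.IO.TextReader
    {
        private string s = "";
        private int pos;

        // Point the reader at a new string; no new allocation per value.
        public void Init(string value) { s = value; pos = 0; }

        public override int Read()
        {
            return pos < s.Length ? s[pos++] : -1;
        }

        public override int Read(char[] buffer, int index, int count)
        {
            int n = System.Math.Min(count, s.Length - pos);
            s.CopyTo(pos, buffer, index, n);
            pos += n;
            return n;   // 0 signals end-of-stream to TextReader callers
        }
    }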


[16/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocInverterPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocInverterPerField.cs b/src/core/Index/DocInverterPerField.cs
index 8cd7c0a..0cdd9b6 100644
--- a/src/core/Index/DocInverterPerField.cs
+++ b/src/core/Index/DocInverterPerField.cs
@@ -22,214 +22,214 @@ using TokenStream = Lucene.Net.Analysis.TokenStream;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> Holds state for inverting all occurrences of a single
-	/// field in the document.  This class doesn't do anything
-	/// itself; instead, it forwards the tokens produced by
-	/// analysis to its own consumer
-	/// (InvertedDocConsumerPerField).  It also interacts with an
-	/// endConsumer (InvertedDocEndConsumerPerField).
-	/// </summary>
-	
-	sealed class DocInverterPerField:DocFieldConsumerPerField
-	{
-		
-		private DocInverterPerThread perThread;
-		private FieldInfo fieldInfo;
-		internal InvertedDocConsumerPerField consumer;
-		internal InvertedDocEndConsumerPerField endConsumer;
-		internal DocumentsWriter.DocState docState;
-		internal FieldInvertState fieldState;
-		
-		public DocInverterPerField(DocInverterPerThread perThread, FieldInfo fieldInfo)
-		{
-			this.perThread = perThread;
-			this.fieldInfo = fieldInfo;
-			docState = perThread.docState;
-			fieldState = perThread.fieldState;
-			this.consumer = perThread.consumer.AddField(this, fieldInfo);
-			this.endConsumer = perThread.endConsumer.AddField(this, fieldInfo);
-		}
-		
-		public override void  Abort()
-		{
-			consumer.Abort();
-			endConsumer.Abort();
-		}
-		
-		public override void  ProcessFields(IFieldable[] fields, int count)
-		{
-			
-			fieldState.Reset(docState.doc.Boost);
-			
-			int maxFieldLength = docState.maxFieldLength;
-			
-			bool doInvert = consumer.Start(fields, count);
-			
-			for (int i = 0; i < count; i++)
-			{
-				
-				IFieldable field = fields[i];
-				
-				// TODO FI: this should be "genericized" to querying
-				// consumer if it wants to see this particular field
-				// tokenized.
-				if (field.IsIndexed && doInvert)
-				{
-					
-					bool anyToken;
-					
-					if (fieldState.length > 0)
-						fieldState.position += docState.analyzer.GetPositionIncrementGap(fieldInfo.name);
-					
-					if (!field.IsTokenized)
-					{
-						// un-tokenized field
-						System.String stringValue = field.StringValue;
-						int valueLength = stringValue.Length;
-						perThread.singleToken.Reinit(stringValue, 0, valueLength);
-						fieldState.attributeSource = perThread.singleToken;
-					    consumer.Start(field);
-						
-						bool success = false;
-						try
-						{
-							consumer.Add();
-							success = true;
-						}
-						finally
-						{
-							if (!success)
-								docState.docWriter.SetAborting();
-						}
-						fieldState.offset += valueLength;
-						fieldState.length++;
-						fieldState.position++;
-						anyToken = valueLength > 0;
-					}
-					else
-					{
-						// tokenized field
-						TokenStream stream;
-						TokenStream streamValue = field.TokenStreamValue;
-						
-						if (streamValue != null)
-							stream = streamValue;
-						else
-						{
-							// the field does not have a TokenStream,
-							// so we have to obtain one from the analyzer
-							System.IO.TextReader reader; // find or make Reader
-							System.IO.TextReader readerValue = field.ReaderValue;
-							
-							if (readerValue != null)
-								reader = readerValue;
-							else
-							{
-								System.String stringValue = field.StringValue;
-								if (stringValue == null)
-									throw new System.ArgumentException("field must have either TokenStream, String or Reader value");
-								perThread.stringReader.Init(stringValue);
-								reader = perThread.stringReader;
-							}
-							
-							// Tokenize field and add to postingTable
-							stream = docState.analyzer.ReusableTokenStream(fieldInfo.name, reader);
-						}
-						
-						// reset the TokenStream to the first token
-						stream.Reset();
-						
-						int startLength = fieldState.length;
-						
-						try
-						{
-							int offsetEnd = fieldState.offset - 1;
-							
-							bool hasMoreTokens = stream.IncrementToken();
-							
-							fieldState.attributeSource = stream;
+    
+    /// <summary> Holds state for inverting all occurrences of a single
+    /// field in the document.  This class doesn't do anything
+    /// itself; instead, it forwards the tokens produced by
+    /// analysis to its own consumer
+    /// (InvertedDocConsumerPerField).  It also interacts with an
+    /// endConsumer (InvertedDocEndConsumerPerField).
+    /// </summary>
+    
+    sealed class DocInverterPerField:DocFieldConsumerPerField
+    {
+        
+        private DocInverterPerThread perThread;
+        private FieldInfo fieldInfo;
+        internal InvertedDocConsumerPerField consumer;
+        internal InvertedDocEndConsumerPerField endConsumer;
+        internal DocumentsWriter.DocState docState;
+        internal FieldInvertState fieldState;
+        
+        public DocInverterPerField(DocInverterPerThread perThread, FieldInfo fieldInfo)
+        {
+            this.perThread = perThread;
+            this.fieldInfo = fieldInfo;
+            docState = perThread.docState;
+            fieldState = perThread.fieldState;
+            this.consumer = perThread.consumer.AddField(this, fieldInfo);
+            this.endConsumer = perThread.endConsumer.AddField(this, fieldInfo);
+        }
+        
+        public override void  Abort()
+        {
+            consumer.Abort();
+            endConsumer.Abort();
+        }
+        
+        public override void  ProcessFields(IFieldable[] fields, int count)
+        {
+            
+            fieldState.Reset(docState.doc.Boost);
+            
+            int maxFieldLength = docState.maxFieldLength;
+            
+            bool doInvert = consumer.Start(fields, count);
+            
+            for (int i = 0; i < count; i++)
+            {
+                
+                IFieldable field = fields[i];
+                
+                // TODO FI: this should be "genericized" to querying
+                // consumer if it wants to see this particular field
+                // tokenized.
+                if (field.IsIndexed && doInvert)
+                {
+                    
+                    bool anyToken;
+                    
+                    if (fieldState.length > 0)
+                        fieldState.position += docState.analyzer.GetPositionIncrementGap(fieldInfo.name);
+                    
+                    if (!field.IsTokenized)
+                    {
+                        // un-tokenized field
+                        System.String stringValue = field.StringValue;
+                        int valueLength = stringValue.Length;
+                        perThread.singleToken.Reinit(stringValue, 0, valueLength);
+                        fieldState.attributeSource = perThread.singleToken;
+                        consumer.Start(field);
+                        
+                        bool success = false;
+                        try
+                        {
+                            consumer.Add();
+                            success = true;
+                        }
+                        finally
+                        {
+                            if (!success)
+                                docState.docWriter.SetAborting();
+                        }
+                        fieldState.offset += valueLength;
+                        fieldState.length++;
+                        fieldState.position++;
+                        anyToken = valueLength > 0;
+                    }
+                    else
+                    {
+                        // tokenized field
+                        TokenStream stream;
+                        TokenStream streamValue = field.TokenStreamValue;
+                        
+                        if (streamValue != null)
+                            stream = streamValue;
+                        else
+                        {
+                            // the field does not have a TokenStream,
+                            // so we have to obtain one from the analyzer
+                            System.IO.TextReader reader; // find or make Reader
+                            System.IO.TextReader readerValue = field.ReaderValue;
+                            
+                            if (readerValue != null)
+                                reader = readerValue;
+                            else
+                            {
+                                System.String stringValue = field.StringValue;
+                                if (stringValue == null)
+                                    throw new System.ArgumentException("field must have either TokenStream, String or Reader value");
+                                perThread.stringReader.Init(stringValue);
+                                reader = perThread.stringReader;
+                            }
+                            
+                            // Tokenize field and add to postingTable
+                            stream = docState.analyzer.ReusableTokenStream(fieldInfo.name, reader);
+                        }
+                        
+                        // reset the TokenStream to the first token
+                        stream.Reset();
+                        
+                        int startLength = fieldState.length;
+                        
+                        try
+                        {
+                            int offsetEnd = fieldState.offset - 1;
+                            
+                            bool hasMoreTokens = stream.IncrementToken();
+                            
+                            fieldState.attributeSource = stream;
 
                             IOffsetAttribute offsetAttribute = fieldState.attributeSource.AddAttribute<IOffsetAttribute>();
-							IPositionIncrementAttribute posIncrAttribute = fieldState.attributeSource.AddAttribute<IPositionIncrementAttribute>();
-							
-							consumer.Start(field);
-							
-							for (; ; )
-							{
-								
-								// If we hit an exception in stream.next below
-								// (which is fairly common, eg if analyzer
-								// chokes on a given document), then it's
-								// non-aborting and (above) this one document
-								// will be marked as deleted, but still
-								// consume a docID
-								
-								if (!hasMoreTokens)
-									break;
-								
-								int posIncr = posIncrAttribute.PositionIncrement;
-								fieldState.position += posIncr;
-								if (fieldState.position > 0)
-								{
-									fieldState.position--;
-								}
-								
-								if (posIncr == 0)
-									fieldState.numOverlap++;
-								
-								bool success = false;
-								try
-								{
-									// If we hit an exception in here, we abort
-									// all buffered documents since the last
-									// flush, on the likelihood that the
-									// internal state of the consumer is now
-									// corrupt and should not be flushed to a
-									// new segment:
-									consumer.Add();
-									success = true;
-								}
-								finally
-								{
-									if (!success)
-										docState.docWriter.SetAborting();
-								}
-								fieldState.position++;
-								offsetEnd = fieldState.offset + offsetAttribute.EndOffset;
-								if (++fieldState.length >= maxFieldLength)
-								{
-									if (docState.infoStream != null)
-										docState.infoStream.WriteLine("maxFieldLength " + maxFieldLength + " reached for field " + fieldInfo.name + ", ignoring following tokens");
-									break;
-								}
-								
-								hasMoreTokens = stream.IncrementToken();
-							}
-							// trigger streams to perform end-of-stream operations
-							stream.End();
-							
-							fieldState.offset += offsetAttribute.EndOffset;
-							anyToken = fieldState.length > startLength;
-						}
-						finally
-						{
-							stream.Close();
-						}
-					}
-					
-					if (anyToken)
-						fieldState.offset += docState.analyzer.GetOffsetGap(field);
-					fieldState.boost *= field.Boost;
-				}
+                            IPositionIncrementAttribute posIncrAttribute = fieldState.attributeSource.AddAttribute<IPositionIncrementAttribute>();
+                            
+                            consumer.Start(field);
+                            
+                            for (; ; )
+                            {
+                                
+                                // If we hit an exception in stream.next below
+                                // (which is fairly common, eg if analyzer
+                                // chokes on a given document), then it's
+                                // non-aborting and (above) this one document
+                                // will be marked as deleted, but still
+                                // consume a docID
+                                
+                                if (!hasMoreTokens)
+                                    break;
+                                
+                                int posIncr = posIncrAttribute.PositionIncrement;
+                                fieldState.position += posIncr;
+                                if (fieldState.position > 0)
+                                {
+                                    fieldState.position--;
+                                }
+                                
+                                if (posIncr == 0)
+                                    fieldState.numOverlap++;
+                                
+                                bool success = false;
+                                try
+                                {
+                                    // If we hit an exception in here, we abort
+                                    // all buffered documents since the last
+                                    // flush, on the likelihood that the
+                                    // internal state of the consumer is now
+                                    // corrupt and should not be flushed to a
+                                    // new segment:
+                                    consumer.Add();
+                                    success = true;
+                                }
+                                finally
+                                {
+                                    if (!success)
+                                        docState.docWriter.SetAborting();
+                                }
+                                fieldState.position++;
+                                offsetEnd = fieldState.offset + offsetAttribute.EndOffset;
+                                if (++fieldState.length >= maxFieldLength)
+                                {
+                                    if (docState.infoStream != null)
+                                        docState.infoStream.WriteLine("maxFieldLength " + maxFieldLength + " reached for field " + fieldInfo.name + ", ignoring following tokens");
+                                    break;
+                                }
+                                
+                                hasMoreTokens = stream.IncrementToken();
+                            }
+                            // trigger streams to perform end-of-stream operations
+                            stream.End();
+                            
+                            fieldState.offset += offsetAttribute.EndOffset;
+                            anyToken = fieldState.length > startLength;
+                        }
+                        finally
+                        {
+                            stream.Close();
+                        }
+                    }
+                    
+                    if (anyToken)
+                        fieldState.offset += docState.analyzer.GetOffsetGap(field);
+                    fieldState.boost *= field.Boost;
+                }
                 
                 // LUCENE-2387: don't hang onto the field, so GC can
                 // reclaim
                 fields[i] = null;
-			}
-			
-			consumer.Finish();
-			endConsumer.Finish();
-		}
-	}
+            }
+            
+            consumer.Finish();
+            endConsumer.Finish();
+        }
+    }
 }
\ No newline at end of file
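
Two details of the loop above are worth pulling out: positions accumulate GetPositionIncrementGap between multiple values of the same field, and inversion stops once fieldState.length reaches maxFieldLength. The truncation limit is set by the caller; a minimal sketch, assuming a Lucene.Net 3.x IndexWriter and a Directory named dir:

    var analyzer = new Lucene.Net.Analysis.Standard.StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_30);
    using (var writer = new IndexWriter(dir, analyzer, new IndexWriter.MaxFieldLength(5)))
    {
        var doc = new Document();
        doc.Add(new Field("body", "one two three four five six seven",
                          Field.Store.NO, Field.Index.ANALYZED));
        writer.AddDocument(doc);   // tokens after the fifth are dropped by ProcessFields
    }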

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocInverterPerThread.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocInverterPerThread.cs b/src/core/Index/DocInverterPerThread.cs
index c38ed35..afa6d14 100644
--- a/src/core/Index/DocInverterPerThread.cs
+++ b/src/core/Index/DocInverterPerThread.cs
@@ -22,86 +22,86 @@ using TokenStream = Lucene.Net.Analysis.TokenStream;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>This is a DocFieldConsumer that inverts each field,
-	/// separately, from a Document, and accepts a
-	/// separately, from a Document, and accepts an
-	/// </summary>
-	
-	sealed class DocInverterPerThread : DocFieldConsumerPerThread
-	{
-		private void  InitBlock()
-		{
-			singleToken = new SingleTokenAttributeSource();
-		}
-		internal DocInverter docInverter;
-		internal InvertedDocConsumerPerThread consumer;
-		internal InvertedDocEndConsumerPerThread endConsumer;
-		internal SingleTokenAttributeSource singleToken;
-		
-		internal class SingleTokenAttributeSource : AttributeSource
-		{
-			internal ITermAttribute termAttribute;
-			internal IOffsetAttribute offsetAttribute;
+    
+    /// <summary>This is a DocFieldConsumer that inverts each field,
+    /// separately, from a Document, and accepts a
+    /// InvertedTermsConsumer to process those terms. 
+    /// </summary>
+    
+    sealed class DocInverterPerThread : DocFieldConsumerPerThread
+    {
+        private void  InitBlock()
+        {
+            singleToken = new SingleTokenAttributeSource();
+        }
+        internal DocInverter docInverter;
+        internal InvertedDocConsumerPerThread consumer;
+        internal InvertedDocEndConsumerPerThread endConsumer;
+        internal SingleTokenAttributeSource singleToken;
+        
+        internal class SingleTokenAttributeSource : AttributeSource
+        {
+            internal ITermAttribute termAttribute;
+            internal IOffsetAttribute offsetAttribute;
 
             internal SingleTokenAttributeSource()
-			{
+            {
                 termAttribute = AddAttribute<ITermAttribute>();
-				offsetAttribute = AddAttribute<IOffsetAttribute>();
-			}
-			
-			public void  Reinit(System.String stringValue, int startOffset, int endOffset)
-			{
-				termAttribute.SetTermBuffer(stringValue);
-				offsetAttribute.SetOffset(startOffset, endOffset);
-			}
-		}
-		
-		internal DocumentsWriter.DocState docState;
-		
-		internal FieldInvertState fieldState = new FieldInvertState();
-		
-		// Used to read a string value for a field
-		internal ReusableStringReader stringReader = new ReusableStringReader();
-		
-		public DocInverterPerThread(DocFieldProcessorPerThread docFieldProcessorPerThread, DocInverter docInverter)
-		{
-			InitBlock();
-			this.docInverter = docInverter;
-			docState = docFieldProcessorPerThread.docState;
-			consumer = docInverter.consumer.AddThread(this);
-			endConsumer = docInverter.endConsumer.AddThread(this);
-		}
-		
-		public override void  StartDocument()
-		{
-			consumer.StartDocument();
-			endConsumer.StartDocument();
-		}
-		
-		public override DocumentsWriter.DocWriter FinishDocument()
-		{
-			// TODO: allow endConsumer.finishDocument to also return
-			// a DocWriter
-			endConsumer.FinishDocument();
-			return consumer.FinishDocument();
-		}
-		
-		public override void  Abort()
-		{
-			try
-			{
-				consumer.Abort();
-			}
-			finally
-			{
-				endConsumer.Abort();
-			}
-		}
-		
-		public override DocFieldConsumerPerField AddField(FieldInfo fi)
-		{
-			return new DocInverterPerField(this, fi);
-		}
-	}
+                offsetAttribute = AddAttribute<IOffsetAttribute>();
+            }
+            
+            public void  Reinit(System.String stringValue, int startOffset, int endOffset)
+            {
+                termAttribute.SetTermBuffer(stringValue);
+                offsetAttribute.SetOffset(startOffset, endOffset);
+            }
+        }
+        
+        internal DocumentsWriter.DocState docState;
+        
+        internal FieldInvertState fieldState = new FieldInvertState();
+        
+        // Used to read a string value for a field
+        internal ReusableStringReader stringReader = new ReusableStringReader();
+        
+        public DocInverterPerThread(DocFieldProcessorPerThread docFieldProcessorPerThread, DocInverter docInverter)
+        {
+            InitBlock();
+            this.docInverter = docInverter;
+            docState = docFieldProcessorPerThread.docState;
+            consumer = docInverter.consumer.AddThread(this);
+            endConsumer = docInverter.endConsumer.AddThread(this);
+        }
+        
+        public override void  StartDocument()
+        {
+            consumer.StartDocument();
+            endConsumer.StartDocument();
+        }
+        
+        public override DocumentsWriter.DocWriter FinishDocument()
+        {
+            // TODO: allow endConsumer.finishDocument to also return
+            // a DocWriter
+            endConsumer.FinishDocument();
+            return consumer.FinishDocument();
+        }
+        
+        public override void  Abort()
+        {
+            try
+            {
+                consumer.Abort();
+            }
+            finally
+            {
+                endConsumer.Abort();
+            }
+        }
+        
+        public override DocFieldConsumerPerField AddField(FieldInfo fi)
+        {
+            return new DocInverterPerField(this, fi);
+        }
+    }
 }
\ No newline at end of file
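
SingleTokenAttributeSource exists so that un-tokenized fields can reuse one pre-built attribute pair instead of running an analyzer. A hedged sketch of the same idea against the public AttributeSource API (standalone code, not this internal class):

    var source = new Lucene.Net.Util.AttributeSource();
    var term = source.AddAttribute<Lucene.Net.Analysis.Tokenattributes.ITermAttribute>();
    var offset = source.AddAttribute<Lucene.Net.Analysis.Tokenattributes.IOffsetAttribute>();

    // Reinit-style reset: same attribute instances, new values, no reallocation.
    string value = "ISBN-0321349601";
    term.SetTermBuffer(value);
    offset.SetOffset(0, value.Length);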


[39/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/KpStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/KpStemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/KpStemmer.cs
index 6d667d8..c0a8c82 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/KpStemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/KpStemmer.cs
@@ -24,2595 +24,2595 @@ namespace SF.Snowball.Ext
 #pragma warning disable 162,164
 
     /// <summary> Generated class implementing code defined by a snowball script.</summary>
-	public class KpStemmer : SnowballProgram
-	{
-		public KpStemmer()
-		{
-			InitBlock();
-		}
-		private void  InitBlock()
-		{
-			a_0 = new Among[]{new Among("nde", - 1, 7, "", this), new Among("en", - 1, 6, "", this), new Among("s", - 1, 2, "", this), new Among("'s", 2, 1, "", this), new Among("es", 2, 4, "", this), new Among("ies", 4, 3, "", this), new Among("aus", 2, 5, "", this)};
-			a_1 = new Among[]{new Among("de", - 1, 5, "", this), new Among("ge", - 1, 2, "", this), new Among("ische", - 1, 4, "", this), new Among("je", - 1, 1, "", this), new Among("lijke", - 1, 3, "", this), new Among("le", - 1, 9, "", this), new Among("ene", - 1, 10, "", this), new Among("re", - 1, 8, "", this), new Among("se", - 1, 7, "", this), new Among("te", - 1, 6, "", this), new Among("ieve", - 1, 11, "", this)};
-			a_2 = new Among[]{new Among("heid", - 1, 3, "", this), new Among("fie", - 1, 7, "", this), new Among("gie", - 1, 8, "", this), new Among("atie", - 1, 1, "", this), new Among("isme", - 1, 5, "", this), new Among("ing", - 1, 5, "", this), new Among("arij", - 1, 6, "", this), new Among("erij", - 1, 5, "", this), new Among("sel", - 1, 3, "", this), new Among("rder", - 1, 4, "", this), new Among("ster", - 1, 3, "", this), new Among("iteit", - 1, 2, "", this), new Among("dst", - 1, 10, "", this), new Among("tst", - 1, 9, "", this)};
-			a_3 = new Among[]{new Among("end", - 1, 10, "", this), new Among("atief", - 1, 2, "", this), new Among("erig", - 1, 10, "", this), new Among("achtig", - 1, 9, "", this), new Among("ioneel", - 1, 1, "", this), new Among("baar", - 1, 3, "", this), new Among("laar", - 1, 5, "", this), new Among("naar", - 1, 4, "", this), new Among("raar", - 1, 6, "", this), new Among("eriger", - 1, 10, "", this), new Among("achtiger", - 1, 9, "", this), new Among("lijker", - 1, 8, "", this), new Among("tant", - 1, 7, "", this), new Among("erigst", - 1, 10, "", this), new Among("achtigst", - 1, 9, "", this), new Among("lijkst", - 1, 8, "", this)};
-			a_4 = new Among[]{new Among("ig", - 1, 1, "", this), new Among("iger", - 1, 1, "", this), new Among("igst", - 1, 1, "", this)};
-			a_5 = new Among[]{new Among("ft", - 1, 2, "", this), new Among("kt", - 1, 1, "", this), new Among("pt", - 1, 3, "", this)};
-			a_6 = new Among[]{new Among("bb", - 1, 1, "", this), new Among("cc", - 1, 2, "", this), new Among("dd", - 1, 3, "", this), new Among("ff", - 1, 4, "", this), new Among("gg", - 1, 5, "", this), new Among("hh", - 1, 6, "", this), new Among("jj", - 1, 7, "", this), new Among("kk", - 1, 8, "", this), new Among("ll", - 1, 9, "", this), new Among("mm", - 1, 10, "", this), new Among("nn", - 1, 11, "", this), new Among("pp", - 1, 12, "", this), new Among("qq", - 1, 13, "", this), new Among("rr", - 1, 14, "", this), new Among("ss", - 1, 15, "", this), new Among("tt", - 1, 16, "", this), new Among("v", - 1, 21, "", this), new Among("vv", 16, 17, "", this), new Among("ww", - 1, 18, "", this), new Among("xx", - 1, 19, "", this), new Among("z", - 1, 22, "", this), new Among("zz", 20, 20, "", this)};
-			a_7 = new Among[]{new Among("d", - 1, 1, "", this), new Among("t", - 1, 2, "", this)};
-		}
-		
-		private Among[] a_0;
-		private Among[] a_1;
-		private Among[] a_2;
-		private Among[] a_3;
-		private Among[] a_4;
-		private Among[] a_5;
-		private Among[] a_6;
-		private Among[] a_7;
-		private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1)};
-		private static readonly char[] g_v_WX = new char[]{(char) (17), (char) (65), (char) (208), (char) (1)};
-		private static readonly char[] g_AOU = new char[]{(char) (1), (char) (64), (char) (16)};
-		private static readonly char[] g_AIOU = new char[]{(char) (1), (char) (65), (char) (16)};
-		
-		private bool B_GE_removed;
-		private bool B_stemmed;
-		private bool B_Y_found;
-		private int I_p2;
-		private int I_p1;
-		private int I_x;
-		private System.Text.StringBuilder S_ch = new System.Text.StringBuilder();
-		
-		protected internal virtual void  copy_from(KpStemmer other)
-		{
-			B_GE_removed = other.B_GE_removed;
-			B_stemmed = other.B_stemmed;
-			B_Y_found = other.B_Y_found;
-			I_p2 = other.I_p2;
-			I_p1 = other.I_p1;
-			I_x = other.I_x;
-			S_ch = other.S_ch;
-			base.copy_from(other);
-		}
-		
-		private bool r_R1()
-		{
-			// (, line 32
-			// setmark x, line 32
-			I_x = cursor;
-			if (!(I_x >= I_p1))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_R2()
-		{
-			// (, line 33
-			// setmark x, line 33
-			I_x = cursor;
-			if (!(I_x >= I_p2))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_V()
-		{
-			int v_1;
-			int v_2;
-			// test, line 35
-			v_1 = limit - cursor;
-			// (, line 35
-			// or, line 35
-			do 
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					if (!(in_grouping_b(g_v, 97, 121)))
-					{
-						goto lab1_brk;
-					}
-					goto lab0_brk;
-				}
-				while (false);
+    public class KpStemmer : SnowballProgram
+    {
+        public KpStemmer()
+        {
+            InitBlock();
+        }
+        private void  InitBlock()
+        {
+            a_0 = new Among[]{new Among("nde", - 1, 7, "", this), new Among("en", - 1, 6, "", this), new Among("s", - 1, 2, "", this), new Among("'s", 2, 1, "", this), new Among("es", 2, 4, "", this), new Among("ies", 4, 3, "", this), new Among("aus", 2, 5, "", this)};
+            a_1 = new Among[]{new Among("de", - 1, 5, "", this), new Among("ge", - 1, 2, "", this), new Among("ische", - 1, 4, "", this), new Among("je", - 1, 1, "", this), new Among("lijke", - 1, 3, "", this), new Among("le", - 1, 9, "", this), new Among("ene", - 1, 10, "", this), new Among("re", - 1, 8, "", this), new Among("se", - 1, 7, "", this), new Among("te", - 1, 6, "", this), new Among("ieve", - 1, 11, "", this)};
+            a_2 = new Among[]{new Among("heid", - 1, 3, "", this), new Among("fie", - 1, 7, "", this), new Among("gie", - 1, 8, "", this), new Among("atie", - 1, 1, "", this), new Among("isme", - 1, 5, "", this), new Among("ing", - 1, 5, "", this), new Among("arij", - 1, 6, "", this), new Among("erij", - 1, 5, "", this), new Among("sel", - 1, 3, "", this), new Among("rder", - 1, 4, "", this), new Among("ster", - 1, 3, "", this), new Among("iteit", - 1, 2, "", this), new Among("dst", - 1, 10, "", this), new Among("tst", - 1, 9, "", this)};
+            a_3 = new Among[]{new Among("end", - 1, 10, "", this), new Among("atief", - 1, 2, "", this), new Among("erig", - 1, 10, "", this), new Among("achtig", - 1, 9, "", this), new Among("ioneel", - 1, 1, "", this), new Among("baar", - 1, 3, "", this), new Among("laar", - 1, 5, "", this), new Among("naar", - 1, 4, "", this), new Among("raar", - 1, 6, "", this), new Among("eriger", - 1, 10, "", this), new Among("achtiger", - 1, 9, "", this), new Among("lijker", - 1, 8, "", this), new Among("tant", - 1, 7, "", this), new Among("erigst", - 1, 10, "", this), new Among("achtigst", - 1, 9, "", this), new Among("lijkst", - 1, 8, "", this)};
+            a_4 = new Among[]{new Among("ig", - 1, 1, "", this), new Among("iger", - 1, 1, "", this), new Among("igst", - 1, 1, "", this)};
+            a_5 = new Among[]{new Among("ft", - 1, 2, "", this), new Among("kt", - 1, 1, "", this), new Among("pt", - 1, 3, "", this)};
+            a_6 = new Among[]{new Among("bb", - 1, 1, "", this), new Among("cc", - 1, 2, "", this), new Among("dd", - 1, 3, "", this), new Among("ff", - 1, 4, "", this), new Among("gg", - 1, 5, "", this), new Among("hh", - 1, 6, "", this), new Among("jj", - 1, 7, "", this), new Among("kk", - 1, 8, "", this), new Among("ll", - 1, 9, "", this), new Among("mm", - 1, 10, "", this), new Among("nn", - 1, 11, "", this), new Among("pp", - 1, 12, "", this), new Among("qq", - 1, 13, "", this), new Among("rr", - 1, 14, "", this), new Among("ss", - 1, 15, "", this), new Among("tt", - 1, 16, "", this), new Among("v", - 1, 21, "", this), new Among("vv", 16, 17, "", this), new Among("ww", - 1, 18, "", this), new Among("xx", - 1, 19, "", this), new Among("z", - 1, 22, "", this), new Among("zz", 20, 20, "", this)};
+            a_7 = new Among[]{new Among("d", - 1, 1, "", this), new Among("t", - 1, 2, "", this)};
+        }
+        
+        private Among[] a_0;
+        private Among[] a_1;
+        private Among[] a_2;
+        private Among[] a_3;
+        private Among[] a_4;
+        private Among[] a_5;
+        private Among[] a_6;
+        private Among[] a_7;
+        private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (1)};
+        private static readonly char[] g_v_WX = new char[]{(char) (17), (char) (65), (char) (208), (char) (1)};
+        private static readonly char[] g_AOU = new char[]{(char) (1), (char) (64), (char) (16)};
+        private static readonly char[] g_AIOU = new char[]{(char) (1), (char) (65), (char) (16)};
+        
+        private bool B_GE_removed;
+        private bool B_stemmed;
+        private bool B_Y_found;
+        private int I_p2;
+        private int I_p1;
+        private int I_x;
+        private System.Text.StringBuilder S_ch = new System.Text.StringBuilder();
+        
+        protected internal virtual void  copy_from(KpStemmer other)
+        {
+            B_GE_removed = other.B_GE_removed;
+            B_stemmed = other.B_stemmed;
+            B_Y_found = other.B_Y_found;
+            I_p2 = other.I_p2;
+            I_p1 = other.I_p1;
+            I_x = other.I_x;
+            S_ch = other.S_ch;
+            base.copy_from(other);
+        }
+        
+        private bool r_R1()
+        {
+            // (, line 32
+            // setmark x, line 32
+            I_x = cursor;
+            if (!(I_x >= I_p1))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_R2()
+        {
+            // (, line 33
+            // setmark x, line 33
+            I_x = cursor;
+            if (!(I_x >= I_p2))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_V()
+        {
+            int v_1;
+            int v_2;
+            // test, line 35
+            v_1 = limit - cursor;
+            // (, line 35
+            // or, line 35
+            do 
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    if (!(in_grouping_b(g_v, 97, 121)))
+                    {
+                        goto lab1_brk;
+                    }
+                    goto lab0_brk;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = limit - v_2;
-				// literal, line 35
-				if (!(eq_s_b(2, "ij")))
-				{
-					return false;
-				}
-			}
-			while (false);
+                
+                cursor = limit - v_2;
+                // literal, line 35
+                if (!(eq_s_b(2, "ij")))
+                {
+                    return false;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = limit - v_1;
-			return true;
-		}
-		
-		private bool r_VX()
-		{
-			int v_1;
-			int v_2;
-			// test, line 36
-			v_1 = limit - cursor;
-			// (, line 36
-			// next, line 36
-			if (cursor <= limit_backward)
-			{
-				return false;
-			}
-			cursor--;
-			// or, line 36
+            
+            cursor = limit - v_1;
+            return true;
+        }
+        
+        private bool r_VX()
+        {
+            int v_1;
+            int v_2;
+            // test, line 36
+            v_1 = limit - cursor;
+            // (, line 36
+            // next, line 36
+            if (cursor <= limit_backward)
+            {
+                return false;
+            }
+            cursor--;
+            // or, line 36
 lab2: 
-			do 
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					if (!(in_grouping_b(g_v, 97, 121)))
-					{
-						goto lab2_brk;
-					}
-					goto lab2_brk;
-				}
-				while (false);
+            do 
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    if (!(in_grouping_b(g_v, 97, 121)))
+                    {
+                        goto lab2_brk;
+                    }
+                    goto lab2_brk;
+                }
+                while (false);
 
 lab2_brk: ;
-				
-				cursor = limit - v_2;
-				// literal, line 36
-				if (!(eq_s_b(2, "ij")))
-				{
-					return false;
-				}
-			}
-			while (false);
-			cursor = limit - v_1;
-			return true;
-		}
-		
-		private bool r_C()
-		{
-			int v_1;
-			int v_2;
-			// test, line 37
-			v_1 = limit - cursor;
-			// (, line 37
-			// not, line 37
-			{
-				v_2 = limit - cursor;
-				do 
-				{
-					// literal, line 37
-					if (!(eq_s_b(2, "ij")))
-					{
-						goto lab2_brk;
-					}
-					return false;
-				}
-				while (false);
+                
+                cursor = limit - v_2;
+                // literal, line 36
+                if (!(eq_s_b(2, "ij")))
+                {
+                    return false;
+                }
+            }
+            while (false);
+            cursor = limit - v_1;
+            return true;
+        }
+        
+        private bool r_C()
+        {
+            int v_1;
+            int v_2;
+            // test, line 37
+            v_1 = limit - cursor;
+            // (, line 37
+            // not, line 37
+            {
+                v_2 = limit - cursor;
+                do 
+                {
+                    // literal, line 37
+                    if (!(eq_s_b(2, "ij")))
+                    {
+                        goto lab2_brk;
+                    }
+                    return false;
+                }
+                while (false);
 
 lab2_brk: ;
-				
-				cursor = limit - v_2;
-			}
-			if (!(out_grouping_b(g_v, 97, 121)))
-			{
-				return false;
-			}
-			cursor = limit - v_1;
-			return true;
-		}
-		
-		private bool r_lengthen_V()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			int v_6;
-			int v_7;
-			int v_8;
-			// do, line 39
-			v_1 = limit - cursor;
-			do 
-			{
-				// (, line 39
-				if (!(out_grouping_b(g_v_WX, 97, 121)))
-				{
-					goto lab0_brk;
-				}
-				// [, line 40
-				ket = cursor;
-				// or, line 40
-				do 
-				{
-					v_2 = limit - cursor;
-					do 
-					{
-						// (, line 40
-						if (!(in_grouping_b(g_AOU, 97, 117)))
-						{
-							goto lab2_brk;
-						}
-						// ], line 40
-						bra = cursor;
-						// test, line 40
-						v_3 = limit - cursor;
-						// (, line 40
-						// or, line 40
-						do 
-						{
-							v_4 = limit - cursor;
-							do 
-							{
-								if (!(out_grouping_b(g_v, 97, 121)))
-								{
-									goto lab4_brk;
-								}
-								goto lab3_brk;
-							}
-							while (false);
+                
+                cursor = limit - v_2;
+            }
+            if (!(out_grouping_b(g_v, 97, 121)))
+            {
+                return false;
+            }
+            cursor = limit - v_1;
+            return true;
+        }
+        
+        private bool r_lengthen_V()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            int v_6;
+            int v_7;
+            int v_8;
+            // do, line 39
+            v_1 = limit - cursor;
+            do 
+            {
+                // (, line 39
+                if (!(out_grouping_b(g_v_WX, 97, 121)))
+                {
+                    goto lab0_brk;
+                }
+                // [, line 40
+                ket = cursor;
+                // or, line 40
+                do 
+                {
+                    v_2 = limit - cursor;
+                    do 
+                    {
+                        // (, line 40
+                        if (!(in_grouping_b(g_AOU, 97, 117)))
+                        {
+                            goto lab2_brk;
+                        }
+                        // ], line 40
+                        bra = cursor;
+                        // test, line 40
+                        v_3 = limit - cursor;
+                        // (, line 40
+                        // or, line 40
+                        do 
+                        {
+                            v_4 = limit - cursor;
+                            do 
+                            {
+                                if (!(out_grouping_b(g_v, 97, 121)))
+                                {
+                                    goto lab4_brk;
+                                }
+                                goto lab3_brk;
+                            }
+                            while (false);
 
 lab4_brk: ;
-							
-							cursor = limit - v_4;
-							// atlimit, line 40
-							if (cursor > limit_backward)
-							{
-								goto lab2_brk;
-							}
-						}
-						while (false);
+                            
+                            cursor = limit - v_4;
+                            // atlimit, line 40
+                            if (cursor > limit_backward)
+                            {
+                                goto lab2_brk;
+                            }
+                        }
+                        while (false);
 
 lab3_brk: ;
-						
-						cursor = limit - v_3;
-						goto lab1_brk;
-					}
-					while (false);
+                        
+                        cursor = limit - v_3;
+                        goto lab1_brk;
+                    }
+                    while (false);
 
 lab2_brk: ;
-					
-					cursor = limit - v_2;
-					// (, line 41
-					// literal, line 41
-					if (!(eq_s_b(1, "e")))
-					{
-						goto lab0_brk;
-					}
-					// ], line 41
-					bra = cursor;
-					// test, line 41
-					v_5 = limit - cursor;
-					// (, line 41
-					// or, line 41
-					do 
-					{
-						v_6 = limit - cursor;
-						do 
-						{
-							if (!(out_grouping_b(g_v, 97, 121)))
-							{
-								goto lab6_brk;
-							}
-							goto lab5_brk;
-						}
-						while (false);
+                    
+                    cursor = limit - v_2;
+                    // (, line 41
+                    // literal, line 41
+                    if (!(eq_s_b(1, "e")))
+                    {
+                        goto lab0_brk;
+                    }
+                    // ], line 41
+                    bra = cursor;
+                    // test, line 41
+                    v_5 = limit - cursor;
+                    // (, line 41
+                    // or, line 41
+                    do 
+                    {
+                        v_6 = limit - cursor;
+                        do 
+                        {
+                            if (!(out_grouping_b(g_v, 97, 121)))
+                            {
+                                goto lab6_brk;
+                            }
+                            goto lab5_brk;
+                        }
+                        while (false);
 
 lab6_brk: ;
-						
-						cursor = limit - v_6;
-						// atlimit, line 41
-						if (cursor > limit_backward)
-						{
-							goto lab0_brk;
-						}
-					}
-					while (false);
+                        
+                        cursor = limit - v_6;
+                        // atlimit, line 41
+                        if (cursor > limit_backward)
+                        {
+                            goto lab0_brk;
+                        }
+                    }
+                    while (false);
 
 lab5_brk: ;
-					
-					// not, line 42
-					{
-						v_7 = limit - cursor;
-						do 
-						{
-							if (!(in_grouping_b(g_AIOU, 97, 117)))
-							{
-								goto lab7_brk;
-							}
-							goto lab0_brk;
-						}
-						while (false);
+                    
+                    // not, line 42
+                    {
+                        v_7 = limit - cursor;
+                        do 
+                        {
+                            if (!(in_grouping_b(g_AIOU, 97, 117)))
+                            {
+                                goto lab7_brk;
+                            }
+                            goto lab0_brk;
+                        }
+                        while (false);
 
 lab7_brk: ;
-						
-						cursor = limit - v_7;
-					}
-					// not, line 43
-					{
-						v_8 = limit - cursor;
-						do 
-						{
-							// (, line 43
-							// next, line 43
-							if (cursor <= limit_backward)
-							{
-								goto lab8_brk;
-							}
-							cursor--;
-							if (!(in_grouping_b(g_AIOU, 97, 117)))
-							{
-								goto lab8_brk;
-							}
-							if (!(out_grouping_b(g_v, 97, 121)))
-							{
-								goto lab8_brk;
-							}
-							goto lab0_brk;
-						}
-						while (false);
+                        
+                        cursor = limit - v_7;
+                    }
+                    // not, line 43
+                    {
+                        v_8 = limit - cursor;
+                        do 
+                        {
+                            // (, line 43
+                            // next, line 43
+                            if (cursor <= limit_backward)
+                            {
+                                goto lab8_brk;
+                            }
+                            cursor--;
+                            if (!(in_grouping_b(g_AIOU, 97, 117)))
+                            {
+                                goto lab8_brk;
+                            }
+                            if (!(out_grouping_b(g_v, 97, 121)))
+                            {
+                                goto lab8_brk;
+                            }
+                            goto lab0_brk;
+                        }
+                        while (false);
 
 lab8_brk: ;
-						
-						cursor = limit - v_8;
-					}
-					cursor = limit - v_5;
-				}
-				while (false);
+                        
+                        cursor = limit - v_8;
+                    }
+                    cursor = limit - v_5;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				// -> ch, line 44
-				S_ch = slice_to(S_ch);
-				// <+ ch, line 44
-				{
-					int c = cursor;
-					insert(cursor, cursor, S_ch);
-					cursor = c;
-				}
-			}
-			while (false);
+                
+                // -> ch, line 44
+                S_ch = slice_to(S_ch);
+                // <+ ch, line 44
+                {
+                    int c = cursor;
+                    insert(cursor, cursor, S_ch);
+                    cursor = c;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = limit - v_1;
-			return true;
-		}
-		
-		private bool r_Step_1()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			// (, line 48
-			// [, line 49
-			ket = cursor;
-			// among, line 49
-			among_var = find_among_b(a_0, 7);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// (, line 49
-			// ], line 49
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 51
-					// delete, line 51
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 52
-					// call R1, line 52
-					if (!r_R1())
-					{
-						return false;
-					}
-					// not, line 52
-					{
-						v_1 = limit - cursor;
-						do 
-						{
-							// (, line 52
-							// literal, line 52
-							if (!(eq_s_b(1, "t")))
-							{
-								goto lab0_brk;
-							}
-							// call R1, line 52
-							if (!r_R1())
-							{
-								goto lab0_brk;
-							}
-							return false;
-						}
-						while (false);
+            
+            cursor = limit - v_1;
+            return true;
+        }
+        
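A note for readers of these converted hunks: every `do { ... } while (false);` block paired with a `goto labN_brk` is an emulation of Java's labeled `break`. The Snowball compiler emits `lab0: do { ... break lab0; ... } while (false);` for its `or`/`not`/`test` constructs, and since C# has no labeled break, the port instead jumps to a label placed immediately after the dummy loop. A minimal self-contained sketch of the pattern, with illustrative names (nothing below is from this stemmer itself):

    using System;

    internal static class LabeledBreakSketch
    {
        // Emulates the Java shape:
        //   lab0: do { if (!cond) break lab0; /* body */ } while (false);
        //   /* code after the block */
        private static void Demo(bool cond)
        {
            do
            {
                if (!cond)
                {
                    goto lab0_brk;   // stands in for "break lab0"
                }
                Console.WriteLine("block body ran");
            }
            while (false);           // never iterates; exists only to delimit the block

    lab0_brk: ;                      // execution resumes here after the "break"
            Console.WriteLine("after the block");
        }

        private static void Main()
        {
            Demo(true);
            Demo(false);
        }
    }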
+        private bool r_Step_1()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            // (, line 48
+            // [, line 49
+            ket = cursor;
+            // among, line 49
+            among_var = find_among_b(a_0, 7);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // (, line 49
+            // ], line 49
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 51
+                    // delete, line 51
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 52
+                    // call R1, line 52
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // not, line 52
+                    {
+                        v_1 = limit - cursor;
+                        do 
+                        {
+                            // (, line 52
+                            // literal, line 52
+                            if (!(eq_s_b(1, "t")))
+                            {
+                                goto lab0_brk;
+                            }
+                            // call R1, line 52
+                            if (!r_R1())
+                            {
+                                goto lab0_brk;
+                            }
+                            return false;
+                        }
+                        while (false);
 
 lab0_brk: ;
-						
-						cursor = limit - v_1;
-					}
-					// call C, line 52
-					if (!r_C())
-					{
-						return false;
-					}
-					// delete, line 52
-					slice_del();
-					break;
-				
-				case 3: 
-					// (, line 53
-					// call R1, line 53
-					if (!r_R1())
-					{
-						return false;
-					}
-					// <-, line 53
-					slice_from("ie");
-					break;
-				
-				case 4: 
-					// (, line 55
-					// or, line 55
-					do 
-					{
-						v_2 = limit - cursor;
-						do 
-						{
-							// (, line 55
-							// literal, line 55
-							if (!(eq_s_b(2, "ar")))
-							{
-								goto lab2_brk;
-							}
-							// call R1, line 55
-							if (!r_R1())
-							{
-								goto lab2_brk;
-							}
-							// call C, line 55
-							if (!r_C())
-							{
-								goto lab2_brk;
-							}
-							// ], line 55
-							bra = cursor;
-							// delete, line 55
-							slice_del();
-							// call lengthen_V, line 55
-							if (!r_lengthen_V())
-							{
-								goto lab2_brk;
-							}
-							goto lab1_brk;
-						}
-						while (false);
+                        
+                        cursor = limit - v_1;
+                    }
+                    // call C, line 52
+                    if (!r_C())
+                    {
+                        return false;
+                    }
+                    // delete, line 52
+                    slice_del();
+                    break;
+                
+                case 3: 
+                    // (, line 53
+                    // call R1, line 53
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // <-, line 53
+                    slice_from("ie");
+                    break;
+                
+                case 4: 
+                    // (, line 55
+                    // or, line 55
+                    do 
+                    {
+                        v_2 = limit - cursor;
+                        do 
+                        {
+                            // (, line 55
+                            // literal, line 55
+                            if (!(eq_s_b(2, "ar")))
+                            {
+                                goto lab2_brk;
+                            }
+                            // call R1, line 55
+                            if (!r_R1())
+                            {
+                                goto lab2_brk;
+                            }
+                            // call C, line 55
+                            if (!r_C())
+                            {
+                                goto lab2_brk;
+                            }
+                            // ], line 55
+                            bra = cursor;
+                            // delete, line 55
+                            slice_del();
+                            // call lengthen_V, line 55
+                            if (!r_lengthen_V())
+                            {
+                                goto lab2_brk;
+                            }
+                            goto lab1_brk;
+                        }
+                        while (false);
 
 lab2_brk: ;
-						
-						cursor = limit - v_2;
-						do 
-						{
-							// (, line 56
-							// literal, line 56
-							if (!(eq_s_b(2, "er")))
-							{
-								goto lab3_brk;
-							}
-							// call R1, line 56
-							if (!r_R1())
-							{
-								goto lab3_brk;
-							}
-							// call C, line 56
-							if (!r_C())
-							{
-								goto lab3_brk;
-							}
-							// ], line 56
-							bra = cursor;
-							// delete, line 56
-							slice_del();
-							goto lab1_brk;
-						}
-						while (false);
+                        
+                        cursor = limit - v_2;
+                        do 
+                        {
+                            // (, line 56
+                            // literal, line 56
+                            if (!(eq_s_b(2, "er")))
+                            {
+                                goto lab3_brk;
+                            }
+                            // call R1, line 56
+                            if (!r_R1())
+                            {
+                                goto lab3_brk;
+                            }
+                            // call C, line 56
+                            if (!r_C())
+                            {
+                                goto lab3_brk;
+                            }
+                            // ], line 56
+                            bra = cursor;
+                            // delete, line 56
+                            slice_del();
+                            goto lab1_brk;
+                        }
+                        while (false);
 
 lab3_brk: ;
-						
-						cursor = limit - v_2;
-						// (, line 57
-						// call R1, line 57
-						if (!r_R1())
-						{
-							return false;
-						}
-						// call C, line 57
-						if (!r_C())
-						{
-							return false;
-						}
-						// <-, line 57
-						slice_from("e");
-					}
-					while (false);
+                        
+                        cursor = limit - v_2;
+                        // (, line 57
+                        // call R1, line 57
+                        if (!r_R1())
+                        {
+                            return false;
+                        }
+                        // call C, line 57
+                        if (!r_C())
+                        {
+                            return false;
+                        }
+                        // <-, line 57
+                        slice_from("e");
+                    }
+                    while (false);
 
 lab1_brk: ;
 
-					break;
-				
-				case 5: 
-					// (, line 59
-					// call R1, line 59
-					if (!r_R1())
-					{
-						return false;
-					}
-					// call V, line 59
-					if (!r_V())
-					{
-						return false;
-					}
-					// <-, line 59
-					slice_from("au");
-					break;
-				
-				case 6: 
-					// (, line 60
-					// or, line 60
-					do 
-					{
-						v_3 = limit - cursor;
-						do 
-						{
-							// (, line 60
-							// literal, line 60
-							if (!(eq_s_b(3, "hed")))
-							{
-								goto lab5_brk;
-							}
-							// call R1, line 60
-							if (!r_R1())
-							{
-								goto lab5_brk;
-							}
-							// ], line 60
-							bra = cursor;
-							// <-, line 60
-							slice_from("heid");
-							goto lab4_brk;
-						}
-						while (false);
+                    break;
+                
+                case 5: 
+                    // (, line 59
+                    // call R1, line 59
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // call V, line 59
+                    if (!r_V())
+                    {
+                        return false;
+                    }
+                    // <-, line 59
+                    slice_from("au");
+                    break;
+                
+                case 6: 
+                    // (, line 60
+                    // or, line 60
+                    do 
+                    {
+                        v_3 = limit - cursor;
+                        do 
+                        {
+                            // (, line 60
+                            // literal, line 60
+                            if (!(eq_s_b(3, "hed")))
+                            {
+                                goto lab5_brk;
+                            }
+                            // call R1, line 60
+                            if (!r_R1())
+                            {
+                                goto lab5_brk;
+                            }
+                            // ], line 60
+                            bra = cursor;
+                            // <-, line 60
+                            slice_from("heid");
+                            goto lab4_brk;
+                        }
+                        while (false);
 
 lab5_brk: ;
-						
-						cursor = limit - v_3;
-						do 
-						{
-							// (, line 61
-							// literal, line 61
-							if (!(eq_s_b(2, "nd")))
-							{
-								goto lab6_brk;
-							}
-							// delete, line 61
-							slice_del();
-							goto lab4_brk;
-						}
-						while (false);
+                        
+                        cursor = limit - v_3;
+                        do 
+                        {
+                            // (, line 61
+                            // literal, line 61
+                            if (!(eq_s_b(2, "nd")))
+                            {
+                                goto lab6_brk;
+                            }
+                            // delete, line 61
+                            slice_del();
+                            goto lab4_brk;
+                        }
+                        while (false);
 
 lab6_brk: ;
-						
-						cursor = limit - v_3;
-						do 
-						{
-							// (, line 62
-							// literal, line 62
-							if (!(eq_s_b(1, "d")))
-							{
-								goto lab7_brk;
-							}
-							// call R1, line 62
-							if (!r_R1())
-							{
-								goto lab7_brk;
-							}
-							// call C, line 62
-							if (!r_C())
-							{
-								goto lab7_brk;
-							}
-							// ], line 62
-							bra = cursor;
-							// delete, line 62
-							slice_del();
-							goto lab4_brk;
-						}
-						while (false);
+                        
+                        cursor = limit - v_3;
+                        do 
+                        {
+                            // (, line 62
+                            // literal, line 62
+                            if (!(eq_s_b(1, "d")))
+                            {
+                                goto lab7_brk;
+                            }
+                            // call R1, line 62
+                            if (!r_R1())
+                            {
+                                goto lab7_brk;
+                            }
+                            // call C, line 62
+                            if (!r_C())
+                            {
+                                goto lab7_brk;
+                            }
+                            // ], line 62
+                            bra = cursor;
+                            // delete, line 62
+                            slice_del();
+                            goto lab4_brk;
+                        }
+                        while (false);
 
 lab7_brk: ;
-						
-						cursor = limit - v_3;
-						do 
-						{
-							// (, line 63
-							// or, line 63
-							do 
-							{
-								v_4 = limit - cursor;
-								do 
-								{
-									// literal, line 63
-									if (!(eq_s_b(1, "i")))
-									{
-										goto lab10_brk;
-									}
-									goto lab9_brk;
-								}
-								while (false);
+                        
+                        cursor = limit - v_3;
+                        do 
+                        {
+                            // (, line 63
+                            // or, line 63
+                            do 
+                            {
+                                v_4 = limit - cursor;
+                                do 
+                                {
+                                    // literal, line 63
+                                    if (!(eq_s_b(1, "i")))
+                                    {
+                                        goto lab10_brk;
+                                    }
+                                    goto lab9_brk;
+                                }
+                                while (false);
 
 lab10_brk: ;
-								
-								cursor = limit - v_4;
-								// literal, line 63
-								if (!(eq_s_b(1, "j")))
-								{
-									goto lab8_brk;
-								}
-							}
-							while (false);
+                                
+                                cursor = limit - v_4;
+                                // literal, line 63
+                                if (!(eq_s_b(1, "j")))
+                                {
+                                    goto lab8_brk;
+                                }
+                            }
+                            while (false);
 
 lab9_brk: ;
-							
-							// call V, line 63
-							if (!r_V())
-							{
-								goto lab8_brk;
-							}
-							// delete, line 63
-							slice_del();
-							goto lab4_brk;
-						}
-						while (false);
+                            
+                            // call V, line 63
+                            if (!r_V())
+                            {
+                                goto lab8_brk;
+                            }
+                            // delete, line 63
+                            slice_del();
+                            goto lab4_brk;
+                        }
+                        while (false);
 
 lab8_brk: ;
-						
-						cursor = limit - v_3;
-						// (, line 64
-						// call R1, line 64
-						if (!r_R1())
-						{
-							return false;
-						}
-						// call C, line 64
-						if (!r_C())
-						{
-							return false;
-						}
-						// delete, line 64
-						slice_del();
-						// call lengthen_V, line 64
-						if (!r_lengthen_V())
-						{
-							return false;
-						}
-					}
-					while (false);
+                        
+                        cursor = limit - v_3;
+                        // (, line 64
+                        // call R1, line 64
+                        if (!r_R1())
+                        {
+                            return false;
+                        }
+                        // call C, line 64
+                        if (!r_C())
+                        {
+                            return false;
+                        }
+                        // delete, line 64
+                        slice_del();
+                        // call lengthen_V, line 64
+                        if (!r_lengthen_V())
+                        {
+                            return false;
+                        }
+                    }
+                    while (false);
 
 lab4_brk: ;
 
-					break;
-				
-				case 7: 
-					// (, line 65
-					// <-, line 65
-					slice_from("nd");
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_Step_2()
-		{
-			int among_var;
-			int v_1;
-			// (, line 70
-			// [, line 71
-			ket = cursor;
-			// among, line 71
-			among_var = find_among_b(a_1, 11);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// (, line 71
-			// ], line 71
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 72
-					// or, line 72
-					do 
-					{
-						v_1 = limit - cursor;
-						do 
-						{
-							// (, line 72
-							// literal, line 72
-							if (!(eq_s_b(2, "'t")))
-							{
-								goto lab1_brk;
-							}
-							// ], line 72
-							bra = cursor;
-							// delete, line 72
-							slice_del();
-							goto lab0_brk;
-						}
-						while (false);
+                    break;
+                
+                case 7: 
+                    // (, line 65
+                    // <-, line 65
+                    slice_from("nd");
+                    break;
+                }
+            return true;
+        }
+        
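For orientation: each r_Step_N routine above and below follows one template. `ket` and `bra` bracket a candidate suffix at the right end of the word, `find_among_b` looks it up (longest match wins) in a precomputed Among table such as a_0, and the `switch` on the returned index applies that suffix's action: `slice_del()` to drop it or `slice_from("...")` to rewrite it. A rough sketch of that dispatch using made-up table entries and plain strings rather than the real SnowballProgram buffer (a_0's actual contents are defined elsewhere in this file):

    using System;
    using System.Linq;

    internal static class AmongSketch
    {
        // Made-up entries; the real Among tables also carry substring links.
        private static readonly (string Suffix, int Case)[] Table =
        {
            ("s", 1), ("e", 1), ("ies", 2),
        };

        // Analogue of find_among_b: longest suffix match, scanning from the end.
        private static int FindAmongB(string word)
        {
            var hit = Table.Where(e => word.EndsWith(e.Suffix, StringComparison.Ordinal))
                           .OrderByDescending(e => e.Suffix.Length)
                           .FirstOrDefault();
            return hit.Suffix == null ? 0 : hit.Case;
        }

        private static string Step(string word)
        {
            switch (FindAmongB(word))
            {
                case 1:  return word.Substring(0, word.Length - 1);        // slice_del()
                case 2:  return word.Substring(0, word.Length - 3) + "ie"; // slice_from("ie")
                default: return word;                                       // case 0: no match
            }
        }

        private static void Main() => Console.WriteLine(Step("bodies"));    // -> "bodie"
    }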
+        private bool r_Step_2()
+        {
+            int among_var;
+            int v_1;
+            // (, line 70
+            // [, line 71
+            ket = cursor;
+            // among, line 71
+            among_var = find_among_b(a_1, 11);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // (, line 71
+            // ], line 71
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 72
+                    // or, line 72
+                    do 
+                    {
+                        v_1 = limit - cursor;
+                        do 
+                        {
+                            // (, line 72
+                            // literal, line 72
+                            if (!(eq_s_b(2, "'t")))
+                            {
+                                goto lab1_brk;
+                            }
+                            // ], line 72
+                            bra = cursor;
+                            // delete, line 72
+                            slice_del();
+                            goto lab0_brk;
+                        }
+                        while (false);
 
 lab1_brk: ;
-						
-						cursor = limit - v_1;
-						do 
-						{
-							// (, line 73
-							// literal, line 73
-							if (!(eq_s_b(2, "et")))
-							{
-								goto lab2_brk;
-							}
-							// ], line 73
-							bra = cursor;
-							// call R1, line 73
-							if (!r_R1())
-							{
-								goto lab2_brk;
-							}
-							// call C, line 73
-							if (!r_C())
-							{
-								goto lab2_brk;
-							}
-							// delete, line 73
-							slice_del();
-							goto lab0_brk;
-						}
-						while (false);
+                        
+                        cursor = limit - v_1;
+                        do 
+                        {
+                            // (, line 73
+                            // literal, line 73
+                            if (!(eq_s_b(2, "et")))
+                            {
+                                goto lab2_brk;
+                            }
+                            // ], line 73
+                            bra = cursor;
+                            // call R1, line 73
+                            if (!r_R1())
+                            {
+                                goto lab2_brk;
+                            }
+                            // call C, line 73
+                            if (!r_C())
+                            {
+                                goto lab2_brk;
+                            }
+                            // delete, line 73
+                            slice_del();
+                            goto lab0_brk;
+                        }
+                        while (false);
 
 lab2_brk: ;
-						
-						cursor = limit - v_1;
-						do 
-						{
-							// (, line 74
-							// literal, line 74
-							if (!(eq_s_b(3, "rnt")))
-							{
-								goto lab3_brk;
-							}
-							// ], line 74
-							bra = cursor;
-							// <-, line 74
-							slice_from("rn");
-							goto lab0_brk;
-						}
-						while (false);
+                        
+                        cursor = limit - v_1;
+                        do 
+                        {
+                            // (, line 74
+                            // literal, line 74
+                            if (!(eq_s_b(3, "rnt")))
+                            {
+                                goto lab3_brk;
+                            }
+                            // ], line 74
+                            bra = cursor;
+                            // <-, line 74
+                            slice_from("rn");
+                            goto lab0_brk;
+                        }
+                        while (false);
 
 lab3_brk: ;
-						
-						cursor = limit - v_1;
-						do 
-						{
-							// (, line 75
-							// literal, line 75
-							if (!(eq_s_b(1, "t")))
-							{
-								goto lab4_brk;
-							}
-							// ], line 75
-							bra = cursor;
-							// call R1, line 75
-							if (!r_R1())
-							{
-								goto lab4_brk;
-							}
-							// call VX, line 75
-							if (!r_VX())
-							{
-								goto lab4_brk;
-							}
-							// delete, line 75
-							slice_del();
-							goto lab0_brk;
-						}
-						while (false);
+                        
+                        cursor = limit - v_1;
+                        do 
+                        {
+                            // (, line 75
+                            // literal, line 75
+                            if (!(eq_s_b(1, "t")))
+                            {
+                                goto lab4_brk;
+                            }
+                            // ], line 75
+                            bra = cursor;
+                            // call R1, line 75
+                            if (!r_R1())
+                            {
+                                goto lab4_brk;
+                            }
+                            // call VX, line 75
+                            if (!r_VX())
+                            {
+                                goto lab4_brk;
+                            }
+                            // delete, line 75
+                            slice_del();
+                            goto lab0_brk;
+                        }
+                        while (false);
 
 lab4_brk: ;
-						
-						cursor = limit - v_1;
-						do 
-						{
-							// (, line 76
-							// literal, line 76
-							if (!(eq_s_b(3, "ink")))
-							{
-								goto lab5_brk;
-							}
-							// ], line 76
-							bra = cursor;
-							// <-, line 76
-							slice_from("ing");
-							goto lab0_brk;
-						}
-						while (false);
+                        
+                        cursor = limit - v_1;
+                        do 
+                        {
+                            // (, line 76
+                            // literal, line 76
+                            if (!(eq_s_b(3, "ink")))
+                            {
+                                goto lab5_brk;
+                            }
+                            // ], line 76
+                            bra = cursor;
+                            // <-, line 76
+                            slice_from("ing");
+                            goto lab0_brk;
+                        }
+                        while (false);
 
 lab5_brk: ;
-						
-						cursor = limit - v_1;
-						do 
-						{
-							// (, line 77
-							// literal, line 77
-							if (!(eq_s_b(2, "mp")))
-							{
-								goto lab6_brk;
-							}
-							// ], line 77
-							bra = cursor;
-							// <-, line 77
-							slice_from("m");
-							goto lab0_brk;
-						}
-						while (false);
+                        
+                        cursor = limit - v_1;
+                        do 
+                        {
+                            // (, line 77
+                            // literal, line 77
+                            if (!(eq_s_b(2, "mp")))
+                            {
+                                goto lab6_brk;
+                            }
+                            // ], line 77
+                            bra = cursor;
+                            // <-, line 77
+                            slice_from("m");
+                            goto lab0_brk;
+                        }
+                        while (false);
 
 lab6_brk: ;
-						
-						cursor = limit - v_1;
-						do 
-						{
-							// (, line 78
-							// literal, line 78
-							if (!(eq_s_b(1, "'")))
-							{
-								goto lab7_brk;
-							}
-							// ], line 78
-							bra = cursor;
-							// call R1, line 78
-							if (!r_R1())
-							{
-								goto lab7_brk;
-							}
-							// delete, line 78
-							slice_del();
-							goto lab0_brk;
-						}
-						while (false);
+                        
+                        cursor = limit - v_1;
+                        do 
+                        {
+                            // (, line 78
+                            // literal, line 78
+                            if (!(eq_s_b(1, "'")))
+                            {
+                                goto lab7_brk;
+                            }
+                            // ], line 78
+                            bra = cursor;
+                            // call R1, line 78
+                            if (!r_R1())
+                            {
+                                goto lab7_brk;
+                            }
+                            // delete, line 78
+                            slice_del();
+                            goto lab0_brk;
+                        }
+                        while (false);
 
 lab7_brk: ;
-						
-						cursor = limit - v_1;
-						// (, line 79
-						// ], line 79
-						bra = cursor;
-						// call R1, line 79
-						if (!r_R1())
-						{
-							return false;
-						}
-						// call C, line 79
-						if (!r_C())
-						{
-							return false;
-						}
-						// delete, line 79
-						slice_del();
-					}
-					while (false);
+                        
+                        cursor = limit - v_1;
+                        // (, line 79
+                        // ], line 79
+                        bra = cursor;
+                        // call R1, line 79
+                        if (!r_R1())
+                        {
+                            return false;
+                        }
+                        // call C, line 79
+                        if (!r_C())
+                        {
+                            return false;
+                        }
+                        // delete, line 79
+                        slice_del();
+                    }
+                    while (false);
 
 lab0_brk: ;
 
-					break;
-				
-				case 2: 
-					// (, line 80
-					// call R1, line 80
-					if (!r_R1())
-					{
-						return false;
-					}
-					// <-, line 80
-					slice_from("g");
-					break;
-				
-				case 3: 
-					// (, line 81
-					// call R1, line 81
-					if (!r_R1())
-					{
-						return false;
-					}
-					// <-, line 81
-					slice_from("lijk");
-					break;
-				
-				case 4: 
-					// (, line 82
-					// call R1, line 82
-					if (!r_R1())
-					{
-						return false;
-					}
-					// <-, line 82
-					slice_from("isch");
-					break;
-				
-				case 5: 
-					// (, line 83
-					// call R1, line 83
-					if (!r_R1())
-					{
-						return false;
-					}
-					// call C, line 83
-					if (!r_C())
-					{
-						return false;
-					}
-					// delete, line 83
-					slice_del();
-					break;
-				
-				case 6: 
-					// (, line 84
-					// call R1, line 84
-					if (!r_R1())
-					{
-						return false;
-					}
-					// <-, line 84
-					slice_from("t");
-					break;
-				
-				case 7: 
-					// (, line 85
-					// call R1, line 85
-					if (!r_R1())
-					{
-						return false;
-					}
-					// <-, line 85
-					slice_from("s");
-					break;
-				
-				case 8: 
-					// (, line 86
-					// call R1, line 86
-					if (!r_R1())
-					{
-						return false;
-					}
-					// <-, line 86
-					slice_from("r");
-					break;
-				
-				case 9: 
-					// (, line 87
-					// call R1, line 87
-					if (!r_R1())
-					{
-						return false;
-					}
-					// delete, line 87
-					slice_del();
-					// attach, line 87
-					insert(cursor, cursor, "l");
-					// call lengthen_V, line 87
-					if (!r_lengthen_V())
-					{
-						return false;
-					}
-					break;
-				
-				case 10: 
-					// (, line 88
-					// call R1, line 88
-					if (!r_R1())
-					{
-						return false;
-					}
-					// call C, line 88
-					if (!r_C())
-					{
-						return false;
-					}
-					// delete, line 88
-					slice_del();
-					// attach, line 88
-					insert(cursor, cursor, "en");
-					// call lengthen_V, line 88
-					if (!r_lengthen_V())
-					{
-						return false;
-					}
-					break;
-				
-				case 11: 
-					// (, line 89
-					// call R1, line 89
-					if (!r_R1())
-					{
-						return false;
-					}
-					// call C, line 89
-					if (!r_C())
-					{
-						return false;
-					}
-					// <-, line 89
-					slice_from("ief");
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_Step_3()
-		{
-			int among_var;
-			// (, line 94
-			// [, line 95
-			ket = cursor;
-			// among, line 95
-			among_var = find_among_b(a_2, 14);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// (, line 95
-			// ], line 95
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 96
-					// call R1, line 96
-					if (!r_R1())
-					{
-						return false;
-					}
-					// <-, line 96
-					slice_from("eer");
-					break;
-				
-				case 2: 
-					// (, line 97
-					// call R1, line 97
-					if (!r_R1())
-					{
-						return false;
-					}
-					// delete, line 97
-					slice_del();
-					// call lengthen_V, line 97
-					if (!r_lengthen_V())
-					{
-						return false;
-					}
-					break;
-				
-				case 3: 
-					// (, line 100
-					// call R1, line 100
-					if (!r_R1())
-					{
-						return false;
-					}
-					// delete, line 100
-					slice_del();
-					break;
-				
-				case 4: 
-					// (, line 101
-					// <-, line 101
-					slice_from("r");
-					break;
-				
-				case 5: 
-					// (, line 104
-					// call R1, line 104
-					if (!r_R1())
-					{
-						return false;
-					}
-					// delete, line 104
-					slice_del();
-					// call lengthen_V, line 104
-					if (!r_lengthen_V())
-					{
-						return false;
-					}
-					break;
-				
-				case 6: 
-					// (, line 105
-					// call R1, line 105
-					if (!r_R1())
-					{
-						return false;
-					}
-					// call C, line 105
-					if (!r_C())
-					{
-						return false;
-					}
-					// <-, line 105
-					slice_from("aar");
-					break;
-				
-				case 7: 
-					// (, line 106
-					// call R2, line 106
-					if (!r_R2())
-					{
-						return false;
-					}
-					// delete, line 106
-					slice_del();
-					// attach, line 106
-					insert(cursor, cursor, "f");
-					// call lengthen_V, line 106
-					if (!r_lengthen_V())
-					{
-						return false;
-					}
-					break;
-				
-				case 8: 
-					// (, line 107
-					// call R2, line 107
-					if (!r_R2())
-					{
-						return false;
-					}
-					// delete, line 107
-					slice_del();
-					// attach, line 107
-					insert(cursor, cursor, "g");
-					// call lengthen_V, line 107
-					if (!r_lengthen_V())
-					{
-						return false;
-					}
-					break;
-				
-				case 9: 
-					// (, line 108
-					// call R1, line 108
-					if (!r_R1())
-					{
-						return false;
-					}
-					// call C, line 108
-					if (!r_C())
-					{
-						return false;
-					}
-					// <-, line 108
-					slice_from("t");
-					break;
-				
-				case 10: 
-					// (, line 109
-					// call R1, line 109
-					if (!r_R1())
-					{
-						return false;
-					}
-					// call C, line 109
-					if (!r_C())
-					{
-						return false;
-					}
-					// <-, line 109
-					slice_from("d");
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_Step_4()
-		{
-			int among_var;
-			int v_1;
-			// (, line 114
-			// or, line 134
+                    break;
+                
+                case 2: 
+                    // (, line 80
+                    // call R1, line 80
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // <-, line 80
+                    slice_from("g");
+                    break;
+                
+                case 3: 
+                    // (, line 81
+                    // call R1, line 81
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // <-, line 81
+                    slice_from("lijk");
+                    break;
+                
+                case 4: 
+                    // (, line 82
+                    // call R1, line 82
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // <-, line 82
+                    slice_from("isch");
+                    break;
+                
+                case 5: 
+                    // (, line 83
+                    // call R1, line 83
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // call C, line 83
+                    if (!r_C())
+                    {
+                        return false;
+                    }
+                    // delete, line 83
+                    slice_del();
+                    break;
+                
+                case 6: 
+                    // (, line 84
+                    // call R1, line 84
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // <-, line 84
+                    slice_from("t");
+                    break;
+                
+                case 7: 
+                    // (, line 85
+                    // call R1, line 85
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // <-, line 85
+                    slice_from("s");
+                    break;
+                
+                case 8: 
+                    // (, line 86
+                    // call R1, line 86
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // <-, line 86
+                    slice_from("r");
+                    break;
+                
+                case 9: 
+                    // (, line 87
+                    // call R1, line 87
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // delete, line 87
+                    slice_del();
+                    // attach, line 87
+                    insert(cursor, cursor, "l");
+                    // call lengthen_V, line 87
+                    if (!r_lengthen_V())
+                    {
+                        return false;
+                    }
+                    break;
+                
+                case 10: 
+                    // (, line 88
+                    // call R1, line 88
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // call C, line 88
+                    if (!r_C())
+                    {
+                        return false;
+                    }
+                    // delete, line 88
+                    slice_del();
+                    // attach, line 88
+                    insert(cursor, cursor, "en");
+                    // call lengthen_V, line 88
+                    if (!r_lengthen_V())
+                    {
+                        return false;
+                    }
+                    break;
+                
+                case 11: 
+                    // (, line 89
+                    // call R1, line 89
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // call C, line 89
+                    if (!r_C())
+                    {
+                        return false;
+                    }
+                    // <-, line 89
+                    slice_from("ief");
+                    break;
+                }
+            return true;
+        }
+        
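The other idiom that saturates these hunks is the checkpoint pair `v_n = limit - cursor; ... cursor = limit - v_n;`. In Snowball's backward mode the cursor walks right-to-left from `limit`, and positions are saved as distances from the right edge so that a failed alternative can be rolled back cleanly, and so that a saved position keeps its meaning when a slice edits the buffer and `limit` shifts. A compact sketch of the idea, with assumed field names mirroring the generated code:

    using System;

    internal sealed class BackwardCursorSketch
    {
        private int cursor;   // scans right-to-left in backward mode
        private int limit;    // right edge of the region being stemmed

        public BackwardCursorSketch(int length)
        {
            limit = length;
            cursor = length;  // backward mode starts at the right edge
        }

        // Snowball "or": try first(); if it fails, restore the cursor and try second().
        public bool Or(Func<bool> first, Func<bool> second)
        {
            int v = limit - cursor;      // checkpoint as an offset from the right edge
            if (first())
            {
                return true;             // first may have moved the cursor; keep it
            }
            cursor = limit - v;          // roll the cursor back
            return second();
        }

        private static void Main()
        {
            var s = new BackwardCursorSketch(5);
            bool ok = s.Or(() => false, () => true);
            Console.WriteLine(ok);       // True: second alternative ran after rollback
        }
    }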
+        private bool r_Step_3()
+        {
+            int among_var;
+            // (, line 94
+            // [, line 95
+            ket = cursor;
+            // among, line 95
+            among_var = find_among_b(a_2, 14);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // (, line 95
+            // ], line 95
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 96
+                    // call R1, line 96
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // <-, line 96
+                    slice_from("eer");
+                    break;
+                
+                case 2: 
+                    // (, line 97
+                    // call R1, line 97
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // delete, line 97
+                    slice_del();
+                    // call lengthen_V, line 97
+                    if (!r_lengthen_V())
+                    {
+                        return false;
+                    }
+                    break;
+                
+                case 3: 
+                    // (, line 100
+                    // call R1, line 100
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // delete, line 100
+                    slice_del();
+                    break;
+                
+                case 4: 
+                    // (, line 101
+                    // <-, line 101
+                    slice_from("r");
+                    break;
+                
+                case 5: 
+                    // (, line 104
+                    // call R1, line 104
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // delete, line 104
+                    slice_del();
+                    // call lengthen_V, line 104
+                    if (!r_lengthen_V())
+                    {
+                        return false;
+                    }
+                    break;
+                
+                case 6: 
+                    // (, line 105
+                    // call R1, line 105
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // call C, line 105
+                    if (!r_C())
+                    {
+                        return false;
+                    }
+                    // <-, line 105
+                    slice_from("aar");
+                    break;
+                
+                case 7: 
+                    // (, line 106
+                    // call R2, line 106
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // delete, line 106
+                    slice_del();
+                    // attach, line 106
+                    insert(cursor, cursor, "f");
+                    // call lengthen_V, line 106
+                    if (!r_lengthen_V())
+                    {
+                        return false;
+                    }
+                    break;
+                
+                case 8: 
+                    // (, line 107
+                    // call R2, line 107
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // delete, line 107
+                    slice_del();
+                    // attach, line 107
+                    insert(cursor, cursor, "g");
+                    // call lengthen_V, line 107
+                    if (!r_lengthen_V())
+                    {
+                        return false;
+                    }
+                    break;
+                
+                case 9: 
+                    // (, line 108
+                    // call R1, line 108
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // call C, line 108
+                    if (!r_C())
+                    {
+                        return false;
+                    }
+                    // <-, line 108
+                    slice_from("t");
+                    break;
+                
+                case 10: 
+                    // (, line 109
+                    // call R1, line 109
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // call C, line 109
+                    if (!r_C())
+                    {
+                        return false;
+                    }
+                    // <-, line 109
+                    slice_from("d");
+                    break;
+                }
+            return true;
+        }
+        
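r_Step_4, next, nests the pattern two levels deep for Snowball's `or` over two whole alternatives (source lines 114-135). In the Java original the first alternative ends with `break lab0` (skip the second alternative entirely), while its failure paths use `break lab1` (fall through and try the second). Worth noticing while reading the hunk: in this port both the failure gotos and the final success goto target the same `lab11_brk` label, so as written the second alternative runs even when the first succeeded; that appears to be a quirk of the original hand conversion rather than anything this whitespace-only commit touches. A sketch of the faithful two-label shape, with illustrative names:

    using System;

    internal static class TwoLevelBreakSketch
    {
        // Faithful rendering of Java's
        //   lab0: do { lab1: do { ... break lab1; ... break lab0; } while (false);
        //              /* second alternative */ } while (false);
        private static string Choose(bool firstApplies)
        {
            string result = "neither";
            do
            {
                do
                {
                    if (!firstApplies)
                    {
                        goto lab1_brk;   // "break lab1": go try the second alternative
                    }
                    result = "first";
                    goto lab0_brk;       // "break lab0": skip the second alternative
                }
                while (false);

    lab1_brk: ;
                result = "second";
            }
            while (false);

    lab0_brk: ;
            return result;
        }

        private static void Main()
        {
            Console.WriteLine(Choose(true));   // first
            Console.WriteLine(Choose(false));  // second
        }
    }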
+        private bool r_Step_4()
+        {
+            int among_var;
+            int v_1;
+            // (, line 114
+            // or, line 134
 lab11: 
-			do 
-			{
-				v_1 = limit - cursor;
-				do 
-				{
-					// (, line 115
-					// [, line 115
-					ket = cursor;
-					// among, line 115
-					among_var = find_among_b(a_3, 16);
-					if (among_var == 0)
-					{
-						goto lab11_brk;
-					}
-					// (, line 115
-					// ], line 115
-					bra = cursor;
-					switch (among_var)
-					{
-						
-						case 0: 
-							goto lab11_brk;
-						
-						case 1: 
-							// (, line 116
-							// call R1, line 116
-							if (!r_R1())
-							{
-								goto lab11_brk;
-							}
-							// <-, line 116
-							slice_from("ie");
-							break;
-						
-						case 2: 
-							// (, line 117
-							// call R1, line 117
-							if (!r_R1())
-							{
-								goto lab11_brk;
-							}
-							// <-, line 117
-							slice_from("eer");
-							break;
-						
-						case 3: 
-							// (, line 118
-							// call R1, line 118
-							if (!r_R1())
-							{
-								goto lab11_brk;
-							}
-							// delete, line 118
-							slice_del();
-							break;
-						
-						case 4: 
-							// (, line 119
-							// call R1, line 119
-							if (!r_R1())
-							{
-								goto lab11_brk;
-							}
-							// call V, line 119
-							if (!r_V())
-							{
-								goto lab11_brk;
-							}
-							// <-, line 119
-							slice_from("n");
-							break;
-						
-						case 5: 
-							// (, line 120
-							// call R1, line 120
-							if (!r_R1())
-							{
-								goto lab11_brk;
-							}
-							// call V, line 120
-							if (!r_V())
-							{
-								goto lab11_brk;
-							}
-							// <-, line 120
-							slice_from("l");
-							break;
-						
-						case 6: 
-							// (, line 121
-							// call R1, line 121
-							if (!r_R1())
-							{
-								goto lab11_brk;
-							}
-							// call V, line 121
-							if (!r_V())
-							{
-								goto lab11_brk;
-							}
-							// <-, line 121
-							slice_from("r");
-							break;
-						
-						case 7: 
-							// (, line 122
-							// call R1, line 122
-							if (!r_R1())
-							{
-								goto lab11_brk;
-							}
-							// <-, line 122
-							slice_from("teer");
-							break;
-						
-						case 8: 
-							// (, line 124
-							// call R1, line 124
-							if (!r_R1())
-							{
-								goto lab11_brk;
-							}
-							// <-, line 124
-							slice_from("lijk");
-							break;
-						
-						case 9: 
-							// (, line 127
-							// call R1, line 127
-							if (!r_R1())
-							{
-								goto lab11_brk;
-							}
-							// delete, line 127
-							slice_del();
-							break;
-						
-						case 10: 
-							// (, line 131
-							// call R1, line 131
-							if (!r_R1())
-							{
-								goto lab11_brk;
-							}
-							// call C, line 131
-							if (!r_C())
-							{
-								goto lab11_brk;
-							}
-							// delete, line 131
-							slice_del();
-							// call lengthen_V, line 131
-							if (!r_lengthen_V())
-							{
-								goto lab11_brk;
-							}
-							break;
-						}
-					goto lab11_brk;
-				}
-				while (false);
+            do 
+            {
+                v_1 = limit - cursor;
+                do 
+                {
+                    // (, line 115
+                    // [, line 115
+                    ket = cursor;
+                    // among, line 115
+                    among_var = find_among_b(a_3, 16);
+                    if (among_var == 0)
+                    {
+                        goto lab11_brk;
+                    }
+                    // (, line 115
+                    // ], line 115
+                    bra = cursor;
+                    switch (among_var)
+                    {
+                        
+                        case 0: 
+                            goto lab11_brk;
+                        
+                        case 1: 
+                            // (, line 116
+                            // call R1, line 116
+                            if (!r_R1())
+                            {
+                                goto lab11_brk;
+                            }
+                            // <-, line 116
+                            slice_from("ie");
+                            break;
+                        
+                        case 2: 
+                            // (, line 117
+                            // call R1, line 117
+                            if (!r_R1())
+                            {
+                                goto lab11_brk;
+                            }
+                            // <-, line 117
+                            slice_from("eer");
+                            break;
+                        
+                        case 3: 
+                            // (, line 118
+                            // call R1, line 118
+                            if (!r_R1())
+                            {
+                                goto lab11_brk;
+                            }
+                            // delete, line 118
+                            slice_del();
+                            break;
+                        
+                        case 4: 
+                            // (, line 119
+                            // call R1, line 119
+                            if (!r_R1())
+                            {
+                                goto lab11_brk;
+                            }
+                            // call V, line 119
+                            if (!r_V())
+                            {
+                                goto lab11_brk;
+                            }
+                            // <-, line 119
+                            slice_from("n");
+                            break;
+                        
+                        case 5: 
+                            // (, line 120
+                            // call R1, line 120
+                            if (!r_R1())
+                            {
+                                goto lab11_brk;
+                            }
+                            // call V, line 120
+                            if (!r_V())
+                            {
+                                goto lab11_brk;
+                            }
+                            // <-, line 120
+                            slice_from("l");
+                            break;
+                        
+                        case 6: 
+                            // (, line 121
+                            // call R1, line 121
+                            if (!r_R1())
+                            {
+                                goto lab11_brk;
+                            }
+                            // call V, line 121
+                            if (!r_V())
+                            {
+                                goto lab11_brk;
+                            }
+                            // <-, line 121
+                            slice_from("r");
+                            break;
+                        
+                        case 7: 
+                            // (, line 122
+                            // call R1, line 122
+                            if (!r_R1())
+                            {
+                                goto lab11_brk;
+                            }
+                            // <-, line 122
+                            slice_from("teer");
+                            break;
+                        
+                        case 8: 
+                            // (, line 124
+                            // call R1, line 124
+                            if (!r_R1())
+                            {
+                                goto lab11_brk;
+                            }
+                            // <-, line 124
+                            slice_from("lijk");
+                            break;
+                        
+                        case 9: 
+                            // (, line 127
+                            // call R1, line 127
+                            if (!r_R1())
+                            {
+                                goto lab11_brk;
+                            }
+                            // delete, line 127
+                            slice_del();
+                            break;
+                        
+                        case 10: 
+                            // (, line 131
+                            // call R1, line 131
+                            if (!r_R1())
+                            {
+                                goto lab11_brk;
+                            }
+                            // call C, line 131
+                            if (!r_C())
+                            {
+                                goto lab11_brk;
+                            }
+                            // delete, line 131
+                            slice_del();
+                            // call lengthen_V, line 131
+                            if (!r_lengthen_V())
+                            {
+                                goto lab11_brk;
+                            }
+                            break;
+                        }
+                    goto lab11_brk;
+                }
+                while (false);
 
 lab11_brk: ;
-				
-				cursor = limit - v_1;
-				// (, line 135
-				// [, line 135
-				ket = cursor;
-				// among, line 135
-				among_var = find_among_b(a_4, 3);
-				if (among_var == 0)
-				{
-					return false;
-				}
-				// (, line 135
-				// ], line 135
-				bra = cursor;
-				switch (among_var)
-				{
-					
-					case 0: 
-						return false;
-					
-					case 1: 
-						// (, line 138
-						// call R1, line 138
-						if (!r_R1())
-						{
-							return false;
-						}
-						// call C, line 138
-						if (!r_C())
-						{
-							return false;
-						}
-						// delete, line 138
-						slice_del();
-						// call lengthen_V, line 138
-						if (!r_lengthen_V())
-						{
-							return false;
-						}
-						break;
-					}
-			}
-			while (false);
-			return true;
-		}
-		
-		private bool r_Step_7()
-		{
-			int among_var;
-			// (, line 144
-			// [, line 145
-			ket = cursor;
-			// among, line 145
-			among_var = find_among_b(a_5, 3);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// (, line 145
-			// ], line 145
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 146
-					// <-, line 146
-					slice_from("k");
-					break;
-				
-				case 2: 
-					// (, line 147
-					// <-, line 147
-					slice_from("f");
-					break;
-				
-				case 3: 
-					// (, line 148
-					// <-, line 148
-					slice_from("p");
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_Step_6()
-		{
-			int among_var;
-			// (, line 153
-			// [, line 154
-			ket = cursor;
-			// among, line 154
-			among_var = find_among_b(a_6, 22);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// (, line 154
-			// ], line 154
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 155
-					// <-, line 155
-					slice_from("b");
-					break;
-				
-				case 2: 
-					// (, line 156
-					// <-, line 156
-					slice_from("c");
-					break;
-				
-				case 3: 
-					// (, line 157
-					// <-, line 157
-					slice_from("d");
-					break;
-				
-				case 4: 
-					// (, line 158
-					// <-, line 158
-					slice_from("f");
-					break;
-				
-				case 5: 
-					// (, line 159
-					// <-, line 159
-					slice_from("g");
-					break;
-				
-				case 6: 
-					// (, line 160
-					// <-, line 160
-					slice_from("h");
-					break;
-				
-				case 7: 
-					// (, line 161
-					// <-, line 161
-					slice_from("j");
-					break;
-				
-				case 8: 
-					// (, line 162
-					// <-, line 162
-					slice_from("k");
-					break;
-				
-				case 9: 
-					// (, line 163
-					// <-, line 163
-					slice_from("l");
-					break;
-				
-				case 10: 
-					// (, line 164
-					// <-, line 164
-					slice_from("m");
-					break;
-				
-				case 11: 
-					// (, line 165
-					// <-, line 165
-					slice_from("n");
-					break;
-				
-				case 12: 
-					// (, line 166
-					// <-, line 166
-					slice_from("p");
-					break;
-				
-				case 13: 
-					// (, line 167
-					// <-, line 167
-					slice_from("q");
-					break;
-				
-				case 14: 
-					// (, line 168
-					// <-, line 168
-					slice_from("r");
-					break;
-				
-				case 15: 
-					// (, line 169
-					// <-, line 169
-					slice_from("s");
-					break;
-				
-				case 16: 
-					// (, line 170
-					// <-, line 170
-					slice_from("t");
-					break;
-				
-				case 17: 
-					// (, line 171
-					// <-, line 171
-					slice_from("v");
-					break;
-				
-				case 18: 
-					// (, line 172
-					// <-, line 172
-					slice_from("w");
-					break;
-				
-				case 19: 
-					// (, line 173
-					// <-, line 173
-					slice_from("x");
-					break;
-				
-				case 20: 
-					// (, line 174
-					// <-, line 174
-					slice_from("z");
-					break;
-				
-				case 21: 
-					// (, line 175
-					// <-, line 175
-					slice_from("f");
-					break;
-				
-				case 22: 
-					// (, line 176
-					// <-, line 176
-					slice_from("s");
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_Step_1c()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			// (, line 181
-			// [, line 182
-			ket = cursor;
-			// among, line 182
-			among_var = find_among_b(a_7, 2);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// (, line 182
-			// ], line 182
-			bra = cursor;
-			// call R1, line 182
-			if (!r_R1())
-			{
-				return false;
-			}
-			// call C, line 182
-			if (!r_C())
-			{
-				return false;
-			}
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 183
-					// not, line 183
-					{
-						v_1 = limit - cursor;
-						do 
-						{
-							// (, line 183
-							// literal, line 183
-							if (!(eq_s_b(1, "n")))
-							{
-								goto lab11_brk;
-							}
-							// call R1, line 183
-							if (!r_R1())
-							{
-								goto lab11_brk;
-							}
-							return false;
-						}
-						while (false);
+                
+                cursor = limit - v_1;
+                // (, line 135
+                // [, line 135
+                ket = cursor;
+                // among, line 135
+                among_var = find_among_b(a_4, 3);
+                if (among_var == 0)
+                {
+                    return false;
+                }
+                // (, line 135
+                // ], line 135
+                bra = cursor;
+                switch (among_var)
+                {
+                    
+                    case 0: 
+                        return false;
+                    
+                    case 1: 
+                        // (, line 138
+                        // call R1, line 138
+                        if (!r_R1())
+                        {
+                            return false;
+                        }
+                        // call C, line 138
+                        if (!r_C())
+                        {
+                            return false;
+                        }
+                        // delete, line 138
+                        slice_del();
+                        // call lengthen_V, line 138
+                        if (!r_lengthen_V())
+                        {
+                            return false;
+                        }
+                        break;
+                    }
+            }
+            while (false);
+            return true;
+        }
+        
+        private bool r_Step_7()
+        {
+            int among_var;
+            // (, line 144
+            // [, line 145
+            ket = cursor;
+            // among, line 145
+            among_var = find_among_b(a_5, 3);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // (, line 145
+            // ], line 145
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 146
+                    // <-, line 146
+                    slice_from("k");
+                    break;
+                
+                case 2: 
+                    // (, line 147
+                    // <-, line 147
+                    slice_from("f");
+                    break;
+                
+                case 3: 
+                    // (, line 148
+                    // <-, line 148
+                    slice_from("p");
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_Step_6()
+        {
+            int among_var;
+            // (, line 153
+            // [, line 154
+            ket = cursor;
+            // among, line 154
+            among_var = find_among_b(a_6, 22);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // (, line 154
+            // ], line 154
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 155
+                    // <-, line 155
+                    slice_from("b");
+                    break;
+                
+                case 2: 
+                    // (, line 156
+                    // <-, line 156
+                    slice_from("c");
+                    break;
+                
+                case 3: 
+                    // (, line 157
+                    // <-, line 157
+                    slice_from("d");
+                    break;
+                
+                case 4: 
+                    // (, line 158
+                    // <-, line 158
+                    slice_from("f");
+                    break;
+                
+                case 5: 
+                    // (, line 159
+                    // <-, line 159
+                    slice_from("g");
+                    break;
+                
+                case 6: 
+                    // (, line 160
+                    // <-, line 160
+                    slice_from("h");
+                    break;
+                
+                case 7: 
+                    // (, line 161
+                    // <-, line 161
+                    slice_from("j");
+                    break;
+                
+                case 8: 
+                    // (, line 162
+                    // <-, line 162
+                    slice_from("k");
+                    break;
+                
+                case 9: 
+                    // (, line 163
+                    // <-, line 163
+                    slice_from("l");
+                    break;
+                
+                case 10: 
+                    // (, line 164
+                    // <-, line 164
+                    slice_from("m");
+                    break;
+                
+                case 11: 
+                    // (, line 165
+                    // <-, line 165
+                    slice_from("n");
+                    break;
+                
+                case 12: 
+                    // (, line 166
+                    // <-, line 166
+                    slice_from("p");
+                    break;
+                
+                case 13: 
+                    // (, line 167
+                    // <-, line 167
+                    slice_from("q");
+                    break;
+                
+                case 14: 
+                    // (, line 168
+                    // <-, line 168
+                    slice_from("r");
+                    break;
+                
+                case 15: 
+                    // (, line 169
+                    // <-, line 169
+                    slice_from("s");
+                    break;
+                
+                case 16: 
+                    // (, line 170
+                    // <-, line 170
+                    slice_from("t");
+                    break;
+                
+                case 17: 
+                    // (, line 171
+                    // <-, line 171
+                    slice_from("v");
+                    break;
+                
+                case 18: 
+                    // (, line 172
+                    // <-, line 172
+                    slice_from("w");
+                    break;
+                
+                case 19: 
+                    // (, line 173
+                    // <-, line 173
+                    slice_from("x");
+                    break;
+                
+                case 20: 
+                    // (, line 174
+                    // <-, line 174
+                    slice_from("z");
+                    break;
+                
+                case 21: 
+                    // (, line 175
+                    // <-, line 175
+                    slice_from("f");
+                    break;
+                
+                case 22: 
+                    // (, line 176
+                    // <-, line 176
+                    slice_from("s");
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_Step_1c()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            // (, line 181
+            // [, line 182
+            ket = cursor;
+            // among, line 182
+            among_var = find_among_b(a_7, 2);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // (, line 182
+            // ], line 182
+            bra = cursor;
+            // call R1, line 182
+            if (!r_R1())
+            {
+                return false;
+            }
+            // call C, line 182
+            if (!r_C())
+            {
+                return false;
+            }
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 183
+                    // not, line 183
+                    {
+                        v_1 = limit - cursor;
+                        do 
+                        {
+                            // (, line 183
+                            // literal, line 183
+                            if (!(eq_s_b(1, "n")))
+                            

<TRUNCATED>
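
The r_Step_* routines above are machine-generated by the Snowball compiler from the
Kraaij-Pohlmann Dutch stemmer definition; the "// (, line N" comments point at lines
of the original Snowball source, not this file. A minimal driver sketch, for
orientation only (the class name KpStemmer and the SF.Snowball.Ext namespace are
assumptions based on the contrib Snowball layout, so check them against the
generated sources):

    // Stem one Dutch word; SetCurrent/Stem/GetCurrent are the
    // SnowballProgram driver methods wrapped by every generated stemmer.
    var stemmer = new SF.Snowball.Ext.KpStemmer();
    stemmer.SetCurrent("lichamelijkheden");
    if (stemmer.Stem())
    {
        System.Console.WriteLine(stemmer.GetCurrent());
    }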

[24/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Token.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Token.cs b/src/core/Analysis/Token.cs
index 3357f34..ec5e29b 100644
--- a/src/core/Analysis/Token.cs
+++ b/src/core/Analysis/Token.cs
@@ -26,780 +26,780 @@ using Attribute = Lucene.Net.Util.Attribute;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary>A Token is an occurrence of a term from the text of a field.  It consists of
-	/// a term's text, the start and end offset of the term in the text of the field,
-	/// and a type string.
-	/// <p/>
-	/// The start and end offsets permit applications to re-associate a token with
-	/// its source text, e.g., to display highlighted query terms in a document
-	/// browser, or to show matching text fragments in a <abbr
-	/// title="KeyWord In Context">KWIC</abbr> display, etc.
-	/// <p/>
-	/// The type is a string, assigned by a lexical analyzer
-	/// (a.k.a. tokenizer), naming the lexical or syntactic class that the token
-	/// belongs to.  For example an end of sentence marker token might be implemented
-	/// with type "eos".  The default token type is "word".  
-	/// <p/>
-	/// A Token can optionally have metadata (a.k.a. Payload) in the form of a variable
-	/// length byte array. Use <see cref="TermPositions.PayloadLength" /> and 
-	/// <see cref="TermPositions.GetPayload(byte[], int)" /> to retrieve the payloads from the index.
-	/// </summary>
-	/// <summary><br/><br/>
-	/// </summary>
-	/// <summary><p/><b>NOTE:</b> As of 2.9, Token implements all <see cref="IAttribute" /> interfaces
-	/// that are part of core Lucene and can be found in the <see cref="Lucene.Net.Analysis.Tokenattributes"/> namespace.
-	/// Even though it is not necessary to use Token anymore, with the new TokenStream API it can
-	/// be used as convenience class that implements all <see cref="IAttribute" />s, which is especially useful
-	/// to easily switch from the old to the new TokenStream API.
-	/// <br/><br/>
-	/// <p/>Tokenizers and TokenFilters should try to re-use a Token instance when
-	/// possible for best performance, by implementing the
-	/// <see cref="TokenStream.IncrementToken()" /> API.
-	/// Failing that, to create a new Token you should first use
-	/// one of the constructors that starts with null text.  To load
-	/// the token from a char[] use <see cref="SetTermBuffer(char[], int, int)" />.
-	/// To load from a String use <see cref="SetTermBuffer(String)" /> or <see cref="SetTermBuffer(String, int, int)" />.
-	/// Alternatively you can get the Token's termBuffer by calling either <see cref="TermBuffer()" />,
-	/// if you know that your text is shorter than the capacity of the termBuffer
-	/// or <see cref="ResizeTermBuffer(int)" />, if there is any possibility
-	/// that you may need to grow the buffer. Fill in the characters of your term into this
+    
+    /// <summary>A Token is an occurrence of a term from the text of a field.  It consists of
+    /// a term's text, the start and end offset of the term in the text of the field,
+    /// and a type string.
+    /// <p/>
+    /// The start and end offsets permit applications to re-associate a token with
+    /// its source text, e.g., to display highlighted query terms in a document
+    /// browser, or to show matching text fragments in a <abbr
+    /// title="KeyWord In Context">KWIC</abbr> display, etc.
+    /// <p/>
+    /// The type is a string, assigned by a lexical analyzer
+    /// (a.k.a. tokenizer), naming the lexical or syntactic class that the token
+    /// belongs to.  For example an end of sentence marker token might be implemented
+    /// with type "eos".  The default token type is "word".  
+    /// <p/>
+    /// A Token can optionally have metadata (a.k.a. Payload) in the form of a variable
+    /// length byte array. Use <see cref="TermPositions.PayloadLength" /> and 
+    /// <see cref="TermPositions.GetPayload(byte[], int)" /> to retrieve the payloads from the index.
+    /// </summary>
+    /// <summary><br/><br/>
+    /// </summary>
+    /// <summary><p/><b>NOTE:</b> As of 2.9, Token implements all <see cref="IAttribute" /> interfaces
+    /// that are part of core Lucene and can be found in the <see cref="Lucene.Net.Analysis.Tokenattributes"/> namespace.
+    /// Even though it is not necessary to use Token anymore, with the new TokenStream API it can
+    /// be used as a convenience class that implements all <see cref="IAttribute" />s, which is especially useful
+    /// to easily switch from the old to the new TokenStream API.
+    /// <br/><br/>
+    /// <p/>Tokenizers and TokenFilters should try to re-use a Token instance when
+    /// possible for best performance, by implementing the
+    /// <see cref="TokenStream.IncrementToken()" /> API.
+    /// Failing that, to create a new Token you should first use
+    /// one of the constructors that start with null text.  To load
+    /// the token from a char[] use <see cref="SetTermBuffer(char[], int, int)" />.
+    /// To load from a String use <see cref="SetTermBuffer(String)" /> or <see cref="SetTermBuffer(String, int, int)" />.
+    /// Alternatively you can get the Token's termBuffer by calling either <see cref="TermBuffer()" />,
+    /// if you know that your text is shorter than the capacity of the termBuffer
+    /// or <see cref="ResizeTermBuffer(int)" />, if there is any possibility
+    /// that you may need to grow the buffer. Fill in the characters of your term into this
     /// buffer, with <see cref="string.ToCharArray(int, int)" /> if loading from a string,
-	/// or with <see cref="Array.Copy(Array, long, Array, long, long)" />, and finally call <see cref="SetTermLength(int)" /> to
-	/// set the length of the term text.  See <a target="_top"
-	/// href="https://issues.apache.org/jira/browse/LUCENE-969">LUCENE-969</a>
-	/// for details.<p/>
-	/// <p/>Typical Token reuse patterns:
-	/// <list type="bullet">
-	/// <item> Copying text from a string (type is reset to <see cref="DEFAULT_TYPE" /> if not
-	/// specified):<br/>
-	/// <code>
-	/// return reusableToken.reinit(string, startOffset, endOffset[, type]);
-	/// </code>
-	/// </item>
-	/// <item> Copying some text from a string (type is reset to <see cref="DEFAULT_TYPE" />
-	/// if not specified):<br/>
+    /// or with <see cref="Array.Copy(Array, long, Array, long, long)" />, and finally call <see cref="SetTermLength(int)" /> to
+    /// set the length of the term text.  See <a target="_top"
+    /// href="https://issues.apache.org/jira/browse/LUCENE-969">LUCENE-969</a>
+    /// for details.<p/>
+    /// <p/>Typical Token reuse patterns:
+    /// <list type="bullet">
+    /// <item> Copying text from a string (type is reset to <see cref="DEFAULT_TYPE" /> if not
+    /// specified):<br/>
     /// <code>
-	/// return reusableToken.reinit(string, 0, string.length(), startOffset, endOffset[, type]);
+    /// return reusableToken.reinit(string, startOffset, endOffset[, type]);
     /// </code>
-	/// </item>
-	/// <item> Copying text from char[] buffer (type is reset to <see cref="DEFAULT_TYPE" />
-	/// if not specified):<br/>
+    /// </item>
+    /// <item> Copying some text from a string (type is reset to <see cref="DEFAULT_TYPE" />
+    /// if not specified):<br/>
     /// <code>
-	/// return reusableToken.reinit(buffer, 0, buffer.length, startOffset, endOffset[, type]);
+    /// return reusableToken.reinit(string, 0, string.length(), startOffset, endOffset[, type]);
     /// </code>
-	/// </item>
-	/// <item> Copying some text from a char[] buffer (type is reset to
-	/// <see cref="DEFAULT_TYPE" /> if not specified):<br/>
+    /// </item>
+    /// <item> Copying text from char[] buffer (type is reset to <see cref="DEFAULT_TYPE" />
+    /// if not specified):<br/>
     /// <code>
-	/// return reusableToken.reinit(buffer, start, end - start, startOffset, endOffset[, type]);
+    /// return reusableToken.reinit(buffer, 0, buffer.length, startOffset, endOffset[, type]);
     /// </code>
-	/// </item>
-	/// <item> Copying from one one Token to another (type is reset to
-	/// <see cref="DEFAULT_TYPE" /> if not specified):<br/>
+    /// </item>
+    /// <item> Copying some text from a char[] buffer (type is reset to
+    /// <see cref="DEFAULT_TYPE" /> if not specified):<br/>
     /// <code>
-	/// return reusableToken.reinit(source.termBuffer(), 0, source.termLength(), source.startOffset(), source.endOffset()[, source.type()]);
+    /// return reusableToken.reinit(buffer, start, end - start, startOffset, endOffset[, type]);
     /// </code>
-	/// </item>
-	/// </list>
-	/// A few things to note:
-	/// <list type="bullet">
-	/// <item>clear() initializes all of the fields to default values. This was changed in contrast to Lucene 2.4, but should affect no one.</item>
-	/// <item>Because <c>TokenStreams</c> can be chained, one cannot assume that the <c>Token's</c> current type is correct.</item>
-	/// <item>The startOffset and endOffset represent the start and offset in the
-	/// source text, so be careful in adjusting them.</item>
-	/// <item>When caching a reusable token, clone it. When injecting a cached token into a stream that can be reset, clone it again.</item>
-	/// </list>
-	/// <p/>
-	/// </summary>
-	/// <seealso cref="Lucene.Net.Index.Payload">
-	/// </seealso>
-	[Serializable]
-	public class Token : Attribute, ITermAttribute, ITypeAttribute, IPositionIncrementAttribute, IFlagsAttribute, IOffsetAttribute, IPayloadAttribute
-	{
-		public const String DEFAULT_TYPE = "word";
+    /// </item>
+    /// <item> Copying from one Token to another (type is reset to
+    /// <see cref="DEFAULT_TYPE" /> if not specified):<br/>
+    /// <code>
+    /// return reusableToken.reinit(source.termBuffer(), 0, source.termLength(), source.startOffset(), source.endOffset()[, source.type()]);
+    /// </code>
+    /// </item>
+    /// </list>
+    /// A few things to note:
+    /// <list type="bullet">
+    /// <item>clear() initializes all of the fields to default values. This was changed in contrast to Lucene 2.4, but should affect no one.</item>
+    /// <item>Because <c>TokenStreams</c> can be chained, one cannot assume that the <c>Token's</c> current type is correct.</item>
+    /// <item>The startOffset and endOffset represent the start and end offsets in the
+    /// source text, so be careful in adjusting them.</item>
+    /// <item>When caching a reusable token, clone it. When injecting a cached token into a stream that can be reset, clone it again.</item>
+    /// </list>
+    /// <p/>
+    /// </summary>
+    /// <seealso cref="Lucene.Net.Index.Payload">
+    /// </seealso>
+    [Serializable]
+    public class Token : Attribute, ITermAttribute, ITypeAttribute, IPositionIncrementAttribute, IFlagsAttribute, IOffsetAttribute, IPayloadAttribute
+    {
+        public const String DEFAULT_TYPE = "word";
 
-		private const int MIN_BUFFER_SIZE = 10;
+        private const int MIN_BUFFER_SIZE = 10;
 
-		private char[] termBuffer;
-		private int termLength;
-		private int startOffset, endOffset;
-		private string type = DEFAULT_TYPE;
-		private int flags;
-		private Payload payload;
-		private int positionIncrement = 1;
-		
-		/// <summary>Constructs a Token will null text. </summary>
-		public Token()
-		{
-		}
-		
-		/// <summary>Constructs a Token with null text and start &amp; end
-		/// offsets.
-		/// </summary>
-		/// <param name="start">start offset in the source text</param>
-		/// <param name="end">end offset in the source text</param>
-		public Token(int start, int end)
-		{
-			startOffset = start;
-			endOffset = end;
-		}
-		
-		/// <summary>Constructs a Token with null text and start &amp; end
-		/// offsets plus the Token type.
-		/// </summary>
-		/// <param name="start">start offset in the source text</param>
-		/// <param name="end">end offset in the source text</param>
-		/// <param name="typ">the lexical type of this Token</param>
-		public Token(int start, int end, String typ)
-		{
-			startOffset = start;
-			endOffset = end;
-			type = typ;
-		}
-		
-		/// <summary> Constructs a Token with null text and start &amp; end
-		/// offsets plus flags. NOTE: flags is EXPERIMENTAL.
-		/// </summary>
-		/// <param name="start">start offset in the source text</param>
-		/// <param name="end">end offset in the source text</param>
-		/// <param name="flags">The bits to set for this token</param>
-		public Token(int start, int end, int flags)
-		{
-			startOffset = start;
-			endOffset = end;
-			this.flags = flags;
-		}
-		
-		/// <summary>Constructs a Token with the given term text, and start
-		/// &amp; end offsets.  The type defaults to "word."
-		/// <b>NOTE:</b> for better indexing speed you should
-		/// instead use the char[] termBuffer methods to set the
-		/// term text.
-		/// </summary>
-		/// <param name="text">term text</param>
-		/// <param name="start">start offset</param>
-		/// <param name="end">end offset</param>
-		public Token(String text, int start, int end)
-		{
-		    SetTermBuffer(text);
-			startOffset = start;
-			endOffset = end;
-		}
-		
-		/// <summary>Constructs a Token with the given text, start and end
-		/// offsets, &amp; type.  <b>NOTE:</b> for better indexing
-		/// speed you should instead use the char[] termBuffer
-		/// methods to set the term text.
-		/// </summary>
-		/// <param name="text">term text</param>
-		/// <param name="start">start offset</param>
-		/// <param name="end">end offset</param>
-		/// <param name="typ">token type</param>
-		public Token(System.String text, int start, int end, System.String typ)
-		{
-		    SetTermBuffer(text);
-			startOffset = start;
-			endOffset = end;
-			type = typ;
-		}
-		
-		/// <summary>  Constructs a Token with the given text, start and end
-		/// offsets, &amp; type.  <b>NOTE:</b> for better indexing
-		/// speed you should instead use the char[] termBuffer
-		/// methods to set the term text.
-		/// </summary>
-		/// <param name="text"></param>
-		/// <param name="start"></param>
-		/// <param name="end"></param>
-		/// <param name="flags">token type bits</param>
-		public Token(System.String text, int start, int end, int flags)
-		{
-		    SetTermBuffer(text);
-			startOffset = start;
-			endOffset = end;
-			this.flags = flags;
-		}
-		
-		/// <summary>  Constructs a Token with the given term buffer (offset
-		/// &amp; length), start and end
-		/// offsets
-		/// </summary>
-		/// <param name="startTermBuffer"></param>
-		/// <param name="termBufferOffset"></param>
-		/// <param name="termBufferLength"></param>
-		/// <param name="start"></param>
-		/// <param name="end"></param>
-		public Token(char[] startTermBuffer, int termBufferOffset, int termBufferLength, int start, int end)
-		{
-			SetTermBuffer(startTermBuffer, termBufferOffset, termBufferLength);
-			startOffset = start;
-			endOffset = end;
-		}
+        private char[] termBuffer;
+        private int termLength;
+        private int startOffset, endOffset;
+        private string type = DEFAULT_TYPE;
+        private int flags;
+        private Payload payload;
+        private int positionIncrement = 1;
+        
+        /// <summary>Constructs a Token with null text. </summary>
+        public Token()
+        {
+        }
+        
+        /// <summary>Constructs a Token with null text and start &amp; end
+        /// offsets.
+        /// </summary>
+        /// <param name="start">start offset in the source text</param>
+        /// <param name="end">end offset in the source text</param>
+        public Token(int start, int end)
+        {
+            startOffset = start;
+            endOffset = end;
+        }
+        
+        /// <summary>Constructs a Token with null text and start &amp; end
+        /// offsets plus the Token type.
+        /// </summary>
+        /// <param name="start">start offset in the source text</param>
+        /// <param name="end">end offset in the source text</param>
+        /// <param name="typ">the lexical type of this Token</param>
+        public Token(int start, int end, String typ)
+        {
+            startOffset = start;
+            endOffset = end;
+            type = typ;
+        }
+        
+        /// <summary> Constructs a Token with null text and start &amp; end
+        /// offsets plus flags. NOTE: flags is EXPERIMENTAL.
+        /// </summary>
+        /// <param name="start">start offset in the source text</param>
+        /// <param name="end">end offset in the source text</param>
+        /// <param name="flags">The bits to set for this token</param>
+        public Token(int start, int end, int flags)
+        {
+            startOffset = start;
+            endOffset = end;
+            this.flags = flags;
+        }
+        
+        /// <summary>Constructs a Token with the given term text, and start
+        /// &amp; end offsets.  The type defaults to "word."
+        /// <b>NOTE:</b> for better indexing speed you should
+        /// instead use the char[] termBuffer methods to set the
+        /// term text.
+        /// </summary>
+        /// <param name="text">term text</param>
+        /// <param name="start">start offset</param>
+        /// <param name="end">end offset</param>
+        public Token(String text, int start, int end)
+        {
+            SetTermBuffer(text);
+            startOffset = start;
+            endOffset = end;
+        }
+        
+        /// <summary>Constructs a Token with the given text, start and end
+        /// offsets, &amp; type.  <b>NOTE:</b> for better indexing
+        /// speed you should instead use the char[] termBuffer
+        /// methods to set the term text.
+        /// </summary>
+        /// <param name="text">term text</param>
+        /// <param name="start">start offset</param>
+        /// <param name="end">end offset</param>
+        /// <param name="typ">token type</param>
+        public Token(System.String text, int start, int end, System.String typ)
+        {
+            SetTermBuffer(text);
+            startOffset = start;
+            endOffset = end;
+            type = typ;
+        }
+        
+        /// <summary>  Constructs a Token with the given text, start and end
+        /// offsets, &amp; flags.  <b>NOTE:</b> for better indexing
+        /// speed you should instead use the char[] termBuffer
+        /// methods to set the term text.
+        /// </summary>
+        /// <param name="text"></param>
+        /// <param name="start"></param>
+        /// <param name="end"></param>
+        /// <param name="flags">token type bits</param>
+        public Token(System.String text, int start, int end, int flags)
+        {
+            SetTermBuffer(text);
+            startOffset = start;
+            endOffset = end;
+            this.flags = flags;
+        }
+        
+        /// <summary>  Constructs a Token with the given term buffer (offset
+        /// &amp; length), start and end
+        /// offsets
+        /// </summary>
+        /// <param name="startTermBuffer"></param>
+        /// <param name="termBufferOffset"></param>
+        /// <param name="termBufferLength"></param>
+        /// <param name="start"></param>
+        /// <param name="end"></param>
+        public Token(char[] startTermBuffer, int termBufferOffset, int termBufferLength, int start, int end)
+        {
+            SetTermBuffer(startTermBuffer, termBufferOffset, termBufferLength);
+            startOffset = start;
+            endOffset = end;
+        }
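
        // Illustration, not part of the patch: the constructors above cover the
        // common creation paths. A minimal sketch using only the API shown here:
        //
        //     Token plain    = new Token(0, 5);                     // null text, offsets only
        //     Token typed    = new Token(0, 5, "acronym");          // offsets plus lexical type
        //     Token fromText = new Token("quick", 0, 5);            // term text plus offsets
        //     char[] buf     = "quick".ToCharArray();
        //     Token fromBuf  = new Token(buf, 0, buf.Length, 0, 5); // buffer slice plus offsets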
 
-	    /// <summary>Set the position increment.  This determines the position of this token
-	    /// relative to the previous Token in a <see cref="TokenStream" />, used in phrase
-	    /// searching.
-	    /// 
-	    /// <p/>The default value is one.
-	    /// 
-	    /// <p/>Some common uses for this are:<list>
-	    /// 
-	    /// <item>Set it to zero to put multiple terms in the same position.  This is
-	    /// useful if, e.g., a word has multiple stems.  Searches for phrases
-	    /// including either stem will match.  In this case, all but the first stem's
-	    /// increment should be set to zero: the increment of the first instance
-	    /// should be one.  Repeating a token with an increment of zero can also be
-	    /// used to boost the scores of matches on that token.</item>
-	    /// 
-	    /// <item>Set it to values greater than one to inhibit exact phrase matches.
-	    /// If, for example, one does not want phrases to match across removed stop
-	    /// words, then one could build a stop word filter that removes stop words and
-	    /// also sets the increment to the number of stop words removed before each
-	    /// non-stop word.  Then exact phrase queries will only match when the terms
-	    /// occur with no intervening stop words.</item>
-	    /// 
-	    /// </list>
-	    /// </summary>
-	    /// <value> the distance from the prior term </value>
-	    /// <seealso cref="Lucene.Net.Index.TermPositions">
-	    /// </seealso>
-	    public virtual int PositionIncrement
-	    {
-	        set
-	        {
-	            if (value < 0)
-	                throw new System.ArgumentException("Increment must be zero or greater: " + value);
-	            this.positionIncrement = value;
-	        }
-	        get { return positionIncrement; }
-	    }
+        /// <summary>Set the position increment.  This determines the position of this token
+        /// relative to the previous Token in a <see cref="TokenStream" />, used in phrase
+        /// searching.
+        /// 
+        /// <p/>The default value is one.
+        /// 
+        /// <p/>Some common uses for this are:<list>
+        /// 
+        /// <item>Set it to zero to put multiple terms in the same position.  This is
+        /// useful if, e.g., a word has multiple stems.  Searches for phrases
+        /// including either stem will match.  In this case, all but the first stem's
+        /// increment should be set to zero: the increment of the first instance
+        /// should be one.  Repeating a token with an increment of zero can also be
+        /// used to boost the scores of matches on that token.</item>
+        /// 
+        /// <item>Set it to values greater than one to inhibit exact phrase matches.
+        /// If, for example, one does not want phrases to match across removed stop
+        /// words, then one could build a stop word filter that removes stop words and
+        /// also sets the increment to the number of stop words removed before each
+        /// non-stop word.  Then exact phrase queries will only match when the terms
+        /// occur with no intervening stop words.</item>
+        /// 
+        /// </list>
+        /// </summary>
+        /// <value> the distance from the prior term </value>
+        /// <seealso cref="Lucene.Net.Index.TermPositions">
+        /// </seealso>
+        public virtual int PositionIncrement
+        {
+            set
+            {
+                if (value < 0)
+                    throw new System.ArgumentException("Increment must be zero or greater: " + value);
+                this.positionIncrement = value;
+            }
+            get { return positionIncrement; }
+        }
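
        // Illustration, not part of the patch: stacking a synonym in the same
        // position as the original term by zeroing its increment, as described above:
        //
        //     Token original = new Token("quick", 0, 5);
        //     Token synonym  = new Token("fast", 0, 5);
        //     synonym.PositionIncrement = 0; // same position: phrase queries match either term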
 
-	    /// <summary>Returns the Token's term text.
-	    /// 
-	    /// This method has a performance penalty
-	    /// because the text is stored internally in a char[].  If
-	    /// possible, use <see cref="TermBuffer()" /> and <see cref="TermLength()"/>
-	    /// directly instead.  If you really need a
-	    /// String, use this method, which is nothing more than
-	    /// a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
-	    /// </summary>
-	    public string Term
-	    {
-	        get
-	        {
-	            InitTermBuffer();
-	            return new System.String(termBuffer, 0, termLength);
-	        }
-	    }
+        /// <summary>Returns the Token's term text.
+        /// 
+        /// This method has a performance penalty
+        /// because the text is stored internally in a char[].  If
+        /// possible, use <see cref="TermBuffer()" /> and <see cref="TermLength()"/>
+        /// directly instead.  If you really need a
+        /// String, use this method, which is nothing more than
+        /// a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
+        /// </summary>
+        public string Term
+        {
+            get
+            {
+                InitTermBuffer();
+                return new System.String(termBuffer, 0, termLength);
+            }
+        }
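
        // Illustration, not part of the patch: on hot paths prefer the buffer
        // accessors; the Term property allocates a fresh string on every call:
        //
        //     char[] buf = token.TermBuffer();   // no allocation
        //     int len    = token.TermLength();
        //     string s   = token.Term;           // allocates new String(buf, 0, len)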
 
-	    /// <summary>Copies the contents of buffer, starting at offset for
-		/// length characters, into the termBuffer array.
-		/// </summary>
-		/// <param name="buffer">the buffer to copy</param>
-		/// <param name="offset">the index in the buffer of the first character to copy</param>
-		/// <param name="length">the number of characters to copy</param>
-		public void  SetTermBuffer(char[] buffer, int offset, int length)
-		{
-			GrowTermBuffer(length);
-			Array.Copy(buffer, offset, termBuffer, 0, length);
-			termLength = length;
-		}
-		
-		/// <summary>Copies the contents of buffer into the termBuffer array.</summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		public void  SetTermBuffer(System.String buffer)
-		{
-			int length = buffer.Length;
-			GrowTermBuffer(length);
-			TextSupport.GetCharsFromString(buffer, 0, length, termBuffer, 0);
-			termLength = length;
-		}
-		
-		/// <summary>Copies the contents of buffer, starting at offset and continuing
-		/// for length characters, into the termBuffer array.
-		/// </summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		/// <param name="offset">the index in the buffer of the first character to copy
-		/// </param>
-		/// <param name="length">the number of characters to copy
-		/// </param>
-		public void  SetTermBuffer(System.String buffer, int offset, int length)
-		{
-			System.Diagnostics.Debug.Assert(offset <= buffer.Length);
-			System.Diagnostics.Debug.Assert(offset + length <= buffer.Length);
-			GrowTermBuffer(length);
-			TextSupport.GetCharsFromString(buffer, offset, offset + length, termBuffer, 0);
-			termLength = length;
-		}
-		
-		/// <summary>Returns the internal termBuffer character array which
-		/// you can then directly alter.  If the array is too
-		/// small for your token, use <see cref="ResizeTermBuffer(int)" />
-		/// to increase it.  After
-		/// altering the buffer be sure to call <see cref="SetTermLength" />
-		/// to record the number of valid
-		/// characters that were placed into the termBuffer. 
-		/// </summary>
-		public char[] TermBuffer()
-		{
-			InitTermBuffer();
-			return termBuffer;
-		}
-		
-		/// <summary>Grows the termBuffer to at least size newSize, preserving the
-		/// existing content. Note: If the next operation is to change
-		/// the contents of the term buffer use
-		/// <see cref="SetTermBuffer(char[], int, int)" />,
-		/// <see cref="SetTermBuffer(String)" />, or
-		/// <see cref="SetTermBuffer(String, int, int)" />
-		/// to optimally combine the resize with the setting of the termBuffer.
-		/// </summary>
-		/// <param name="newSize">minimum size of the new termBuffer
-		/// </param>
-		/// <returns> newly created termBuffer with length >= newSize
-		/// </returns>
-		public virtual char[] ResizeTermBuffer(int newSize)
-		{
-			if (termBuffer == null)
-			{
+        /// <summary>Copies the contents of buffer, starting at offset for
+        /// length characters, into the termBuffer array.
+        /// </summary>
+        /// <param name="buffer">the buffer to copy</param>
+        /// <param name="offset">the index in the buffer of the first character to copy</param>
+        /// <param name="length">the number of characters to copy</param>
+        public void  SetTermBuffer(char[] buffer, int offset, int length)
+        {
+            GrowTermBuffer(length);
+            Array.Copy(buffer, offset, termBuffer, 0, length);
+            termLength = length;
+        }
+        
+        /// <summary>Copies the contents of buffer into the termBuffer array.</summary>
+        /// <param name="buffer">the buffer to copy
+        /// </param>
+        public void  SetTermBuffer(System.String buffer)
+        {
+            int length = buffer.Length;
+            GrowTermBuffer(length);
+            TextSupport.GetCharsFromString(buffer, 0, length, termBuffer, 0);
+            termLength = length;
+        }
+        
+        /// <summary>Copies the contents of buffer, starting at offset and continuing
+        /// for length characters, into the termBuffer array.
+        /// </summary>
+        /// <param name="buffer">the buffer to copy
+        /// </param>
+        /// <param name="offset">the index in the buffer of the first character to copy
+        /// </param>
+        /// <param name="length">the number of characters to copy
+        /// </param>
+        public void  SetTermBuffer(System.String buffer, int offset, int length)
+        {
+            System.Diagnostics.Debug.Assert(offset <= buffer.Length);
+            System.Diagnostics.Debug.Assert(offset + length <= buffer.Length);
+            GrowTermBuffer(length);
+            TextSupport.GetCharsFromString(buffer, offset, offset + length, termBuffer, 0);
+            termLength = length;
+        }
+        
+        /// <summary>Returns the internal termBuffer character array which
+        /// you can then directly alter.  If the array is too
+        /// small for your token, use <see cref="ResizeTermBuffer(int)" />
+        /// to increase it.  After
+        /// altering the buffer be sure to call <see cref="SetTermLength" />
+        /// to record the number of valid
+        /// characters that were placed into the termBuffer. 
+        /// </summary>
+        public char[] TermBuffer()
+        {
+            InitTermBuffer();
+            return termBuffer;
+        }
+        
+        /// <summary>Grows the termBuffer to at least size newSize, preserving the
+        /// existing content. Note: If the next operation is to change
+        /// the contents of the term buffer use
+        /// <see cref="SetTermBuffer(char[], int, int)" />,
+        /// <see cref="SetTermBuffer(String)" />, or
+        /// <see cref="SetTermBuffer(String, int, int)" />
+        /// to optimally combine the resize with the setting of the termBuffer.
+        /// </summary>
+        /// <param name="newSize">minimum size of the new termBuffer
+        /// </param>
+        /// <returns> newly created termBuffer with length >= newSize
+        /// </returns>
+        public virtual char[] ResizeTermBuffer(int newSize)
+        {
+            if (termBuffer == null)
+            {
                 termBuffer = new char[ArrayUtil.GetNextSize(newSize < MIN_BUFFER_SIZE ? MIN_BUFFER_SIZE : newSize)];
-			}
-			else
-			{
-				if (termBuffer.Length < newSize)
-				{
-					// Not big enough; create a new array with slight
-					// over allocation and preserve content
-					var newCharBuffer = new char[ArrayUtil.GetNextSize(newSize)];
-					Array.Copy(termBuffer, 0, newCharBuffer, 0, termBuffer.Length);
-					termBuffer = newCharBuffer;
-				}
-			}
-			return termBuffer;
-		}
-		
-		/// <summary>Allocates a buffer char[] of at least newSize, without preserving the existing content.
-		/// its always used in places that set the content 
-		/// </summary>
-		/// <param name="newSize">minimum size of the buffer
-		/// </param>
-		private void  GrowTermBuffer(int newSize)
-		{
-			if (termBuffer == null)
-			{
-				// The buffer is always at least MIN_BUFFER_SIZE    
-				termBuffer = new char[ArrayUtil.GetNextSize(newSize < MIN_BUFFER_SIZE?MIN_BUFFER_SIZE:newSize)];
-			}
-			else
-			{
-				if (termBuffer.Length < newSize)
-				{
-					// Not big enough; create a new array with slight
-					// over allocation:
-					termBuffer = new char[ArrayUtil.GetNextSize(newSize)];
-				}
-			}
-		}
-		
-		private void  InitTermBuffer()
-		{
-			if (termBuffer == null)
-			{
+            }
+            else
+            {
+                if (termBuffer.Length < newSize)
+                {
+                    // Not big enough; create a new array with slight
+                    // over allocation and preserve content
+                    var newCharBuffer = new char[ArrayUtil.GetNextSize(newSize)];
+                    Array.Copy(termBuffer, 0, newCharBuffer, 0, termBuffer.Length);
+                    termBuffer = newCharBuffer;
+                }
+            }
+            return termBuffer;
+        }
+        
+        /// <summary>Allocates a buffer char[] of at least newSize, without preserving the existing content.
+        /// It is always used in places that set the content.
+        /// </summary>
+        /// <param name="newSize">minimum size of the buffer
+        /// </param>
+        private void  GrowTermBuffer(int newSize)
+        {
+            if (termBuffer == null)
+            {
+                // The buffer is always at least MIN_BUFFER_SIZE    
+                termBuffer = new char[ArrayUtil.GetNextSize(newSize < MIN_BUFFER_SIZE?MIN_BUFFER_SIZE:newSize)];
+            }
+            else
+            {
+                if (termBuffer.Length < newSize)
+                {
+                    // Not big enough; create a new array with slight
+                    // over allocation:
+                    termBuffer = new char[ArrayUtil.GetNextSize(newSize)];
+                }
+            }
+        }
+        
+        private void  InitTermBuffer()
+        {
+            if (termBuffer == null)
+            {
                 termBuffer = new char[ArrayUtil.GetNextSize(MIN_BUFFER_SIZE)];
                 termLength = 0;
-			}
-		}
-		
-		/// <summary>Return number of valid characters (length of the term)
-		/// in the termBuffer array. 
-		/// </summary>
-		public int TermLength()
-		{
-			InitTermBuffer();
-			return termLength;
-		}
-		
-		/// <summary>Set number of valid characters (length of the term) in
-		/// the termBuffer array. Use this to truncate the termBuffer
-		/// or to synchronize with external manipulation of the termBuffer.
-		/// Note: to grow the size of the array,
-		/// use <see cref="ResizeTermBuffer(int)" /> first.
-		/// </summary>
-		/// <param name="length">the truncated length
-		/// </param>
-		public void  SetTermLength(int length)
-		{
-			InitTermBuffer();
-			if (length > termBuffer.Length)
-				throw new System.ArgumentException("length " + length + " exceeds the size of the termBuffer (" + termBuffer.Length + ")");
-			termLength = length;
-		}
+            }
+        }
+        
+        /// <summary>Return number of valid characters (length of the term)
+        /// in the termBuffer array. 
+        /// </summary>
+        public int TermLength()
+        {
+            InitTermBuffer();
+            return termLength;
+        }
+        
+        /// <summary>Set number of valid characters (length of the term) in
+        /// the termBuffer array. Use this to truncate the termBuffer
+        /// or to synchronize with external manipulation of the termBuffer.
+        /// Note: to grow the size of the array,
+        /// use <see cref="ResizeTermBuffer(int)" /> first.
+        /// </summary>
+        /// <param name="length">the truncated length
+        /// </param>
+        public void  SetTermLength(int length)
+        {
+            InitTermBuffer();
+            if (length > termBuffer.Length)
+                throw new System.ArgumentException("length " + length + " exceeds the size of the termBuffer (" + termBuffer.Length + ")");
+            termLength = length;
+        }
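
        // Illustration, not part of the patch: the intended workflow for writing
        // term text directly is resize, fill, then record the valid length:
        //
        //     string stem = "run";
        //     char[] buf = token.ResizeTermBuffer(stem.Length); // buffer length >= stem.Length
        //     for (int i = 0; i < stem.Length; i++)
        //         buf[i] = stem[i];                             // fill in the characters
        //     token.SetTermLength(stem.Length);                 // record the valid length
        //
        // SetTermBuffer(string) performs the same three steps in one call.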
 
-	    /// <summary>Gets or sets this Token's starting offset, the position of the first character
-	    /// corresponding to this token in the source text.
-	    /// Note that the difference between endOffset() and startOffset() may not be
-	    /// equal to <see cref="TermLength"/>, as the term text may have been altered by a
-	    /// stemmer or some other filter. 
-	    /// </summary>
-	    public virtual int StartOffset
-	    {
-	        get { return startOffset; }
-	        set { this.startOffset = value; }
-	    }
+        /// <summary>Gets or sets this Token's starting offset, the position of the first character
+        /// corresponding to this token in the source text.
+        /// Note that the difference between endOffset() and startOffset() may not be
+        /// equal to <see cref="TermLength"/>, as the term text may have been altered by a
+        /// stemmer or some other filter. 
+        /// </summary>
+        public virtual int StartOffset
+        {
+            get { return startOffset; }
+            set { this.startOffset = value; }
+        }
 
-	    /// <summary>Gets or sets this Token's ending offset, one greater than the position of the
-	    /// last character corresponding to this token in the source text. The length
-	    /// of the token in the source text is (endOffset - startOffset). 
-	    /// </summary>
-	    public virtual int EndOffset
-	    {
-	        get { return endOffset; }
-	        set { this.endOffset = value; }
-	    }
+        /// <summary>Gets or sets this Token's ending offset, one greater than the position of the
+        /// last character corresponding to this token in the source text. The length
+        /// of the token in the source text is (endOffset - startOffset). 
+        /// </summary>
+        public virtual int EndOffset
+        {
+            get { return endOffset; }
+            set { this.endOffset = value; }
+        }
 
-	    /// <summary>Set the starting and ending offset.
-		/// See StartOffset() and EndOffset()
-		/// </summary>
-		public virtual void  SetOffset(int startOffset, int endOffset)
-		{
-			this.startOffset = startOffset;
-			this.endOffset = endOffset;
-		}
+        /// <summary>Set the starting and ending offset.
+        /// See StartOffset() and EndOffset()
+        /// </summary>
+        public virtual void  SetOffset(int startOffset, int endOffset)
+        {
+            this.startOffset = startOffset;
+            this.endOffset = endOffset;
+        }
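
        // Illustration, not part of the patch: offsets always index the original
        // source text, so they can disagree with the (possibly stemmed) term length:
        //
        //     Token t = new Token("run", 10, 17); // source text held "running" at [10, 17)
        //     // t.EndOffset - t.StartOffset == 7, while t.TermLength() == 3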
 
-	    /// <summary>Returns this Token's lexical type.  Defaults to "word". </summary>
-	    public string Type
-	    {
-	        get { return type; }
-	        set { this.type = value; }
-	    }
+        /// <summary>Returns this Token's lexical type.  Defaults to "word". </summary>
+        public string Type
+        {
+            get { return type; }
+            set { this.type = value; }
+        }
 
-	    /// <summary> EXPERIMENTAL:  While we think this is here to stay, we may want to change it to be a long.
-	    /// <p/>
-	    /// 
-	    /// Get the bitset for any bits that have been set.  This is completely distinct from <see cref="Type()" />, although they do share similar purposes.
-	    /// The flags can be used to encode information about the token for use by other <see cref="TokenFilter"/>s.
-	    /// 
-	    /// 
-	    /// </summary>
-	    /// <value> The bits </value>
-	    public virtual int Flags
-	    {
-	        get { return flags; }
-	        set { flags = value; }
-	    }
+        /// <summary> EXPERIMENTAL:  While we think this is here to stay, we may want to change it to be a long.
+        /// <p/>
+        /// 
+        /// Get the bitset for any bits that have been set.  This is completely distinct from <see cref="Type()" />, although they do share similar purposes.
+        /// The flags can be used to encode information about the token for use by other <see cref="TokenFilter"/>s.
+        /// 
+        /// 
+        /// </summary>
+        /// <value> The bits </value>
+        public virtual int Flags
+        {
+            get { return flags; }
+            set { flags = value; }
+        }
 
-	    /// <summary> Returns this Token's payload.</summary>
-	    public virtual Payload Payload
-	    {
-	        get { return payload; }
-	        set { payload = value; }
-	    }
+        /// <summary> Returns this Token's payload.</summary>
+        public virtual Payload Payload
+        {
+            get { return payload; }
+            set { payload = value; }
+        }
 
-	    public override String ToString()
-		{
-			var sb = new System.Text.StringBuilder();
-			sb.Append('(');
-			InitTermBuffer();
-			if (termBuffer == null)
-				sb.Append("null");
-			else
-				sb.Append(termBuffer, 0, termLength);
-			sb.Append(',').Append(startOffset).Append(',').Append(endOffset);
-			if (!type.Equals("word"))
-				sb.Append(",type=").Append(type);
-			if (positionIncrement != 1)
-				sb.Append(",posIncr=").Append(positionIncrement);
-			sb.Append(')');
-			return sb.ToString();
-		}
-		
-		/// <summary>Resets the term text, payload, flags, and positionIncrement,
-		/// startOffset, endOffset and token type to default.
-		/// </summary>
-		public override void  Clear()
-		{
-			payload = null;
-			// Leave termBuffer to allow re-use
-			termLength = 0;
-			positionIncrement = 1;
-			flags = 0;
-			startOffset = endOffset = 0;
-			type = DEFAULT_TYPE;
-		}
-		
-		public override System.Object Clone()
-		{
-			var t = (Token) base.Clone();
-			// Do a deep clone
-			if (termBuffer != null)
-			{
-				t.termBuffer = new char[termBuffer.Length];
-				termBuffer.CopyTo(t.termBuffer, 0);
-			}
-			if (payload != null)
-			{
-				t.payload = (Payload) payload.Clone();
-			}
-			return t;
-		}
-		
-		/// <summary>Makes a clone, but replaces the term buffer &amp;
-		/// start/end offset in the process.  This is more
-		/// efficient than doing a full clone (and then calling
-		/// setTermBuffer) because it saves a wasted copy of the old
-		/// termBuffer. 
-		/// </summary>
-		public virtual Token Clone(char[] newTermBuffer, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset)
-		{
-			var t = new Token(newTermBuffer, newTermOffset, newTermLength, newStartOffset, newEndOffset)
-			        	{positionIncrement = positionIncrement, flags = flags, type = type};
-			if (payload != null)
-				t.payload = (Payload) payload.Clone();
-			return t;
-		}
-		
-		public  override bool Equals(Object obj)
-		{
-			if (obj == this)
-				return true;
+        public override String ToString()
+        {
+            var sb = new System.Text.StringBuilder();
+            sb.Append('(');
+            InitTermBuffer();
+            if (termBuffer == null)
+                sb.Append("null");
+            else
+                sb.Append(termBuffer, 0, termLength);
+            sb.Append(',').Append(startOffset).Append(',').Append(endOffset);
+            if (!type.Equals("word"))
+                sb.Append(",type=").Append(type);
+            if (positionIncrement != 1)
+                sb.Append(",posIncr=").Append(positionIncrement);
+            sb.Append(')');
+            return sb.ToString();
+        }
+        
+        /// <summary>Resets the term text, payload, flags, and positionIncrement,
+        /// startOffset, endOffset and token type to default.
+        /// </summary>
+        public override void  Clear()
+        {
+            payload = null;
+            // Leave termBuffer to allow re-use
+            termLength = 0;
+            positionIncrement = 1;
+            flags = 0;
+            startOffset = endOffset = 0;
+            type = DEFAULT_TYPE;
+        }
+        
+        public override System.Object Clone()
+        {
+            var t = (Token) base.Clone();
+            // Do a deep clone
+            if (termBuffer != null)
+            {
+                t.termBuffer = new char[termBuffer.Length];
+                termBuffer.CopyTo(t.termBuffer, 0);
+            }
+            if (payload != null)
+            {
+                t.payload = (Payload) payload.Clone();
+            }
+            return t;
+        }
+        
+        /// <summary>Makes a clone, but replaces the term buffer &amp;
+        /// start/end offset in the process.  This is more
+        /// efficient than doing a full clone (and then calling
+        /// setTermBuffer) because it saves a wasted copy of the old
+        /// termBuffer. 
+        /// </summary>
+        public virtual Token Clone(char[] newTermBuffer, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset)
+        {
+            var t = new Token(newTermBuffer, newTermOffset, newTermLength, newStartOffset, newEndOffset)
+                        {positionIncrement = positionIncrement, flags = flags, type = type};
+            if (payload != null)
+                t.payload = (Payload) payload.Clone();
+            return t;
+        }
+        
+        public  override bool Equals(Object obj)
+        {
+            if (obj == this)
+                return true;
 
-			var other = obj as Token;
-			if (other == null)
-				return false;
-			
-			InitTermBuffer();
-			other.InitTermBuffer();
+            var other = obj as Token;
+            if (other == null)
+                return false;
+            
+            InitTermBuffer();
+            other.InitTermBuffer();
 
-			if (termLength == other.termLength && startOffset == other.startOffset && endOffset == other.endOffset &&
-			    flags == other.flags && positionIncrement == other.positionIncrement && SubEqual(type, other.type) &&
-			    SubEqual(payload, other.payload))
-			{
-				for (int i = 0; i < termLength; i++)
-					if (termBuffer[i] != other.termBuffer[i])
-						return false;
-				return true;
-			}
-			return false;
-		}
-		
-		private bool SubEqual(System.Object o1, System.Object o2)
-		{
-			if (o1 == null)
-				return o2 == null;
-			return o1.Equals(o2);
-		}
+            if (termLength == other.termLength && startOffset == other.startOffset && endOffset == other.endOffset &&
+                flags == other.flags && positionIncrement == other.positionIncrement && SubEqual(type, other.type) &&
+                SubEqual(payload, other.payload))
+            {
+                for (int i = 0; i < termLength; i++)
+                    if (termBuffer[i] != other.termBuffer[i])
+                        return false;
+                return true;
+            }
+            return false;
+        }
+        
+        private bool SubEqual(System.Object o1, System.Object o2)
+        {
+            if (o1 == null)
+                return o2 == null;
+            return o1.Equals(o2);
+        }
 
-		public override int GetHashCode()
-		{
-			InitTermBuffer();
-			int code = termLength;
-			code = code * 31 + startOffset;
-			code = code * 31 + endOffset;
-			code = code * 31 + flags;
-			code = code * 31 + positionIncrement;
-			code = code * 31 + type.GetHashCode();
-			code = (payload == null?code:code * 31 + payload.GetHashCode());
-			code = code * 31 + ArrayUtil.HashCode(termBuffer, 0, termLength);
-			return code;
-		}
-		
-		// like clear() but doesn't clear termBuffer/text
-		private void  ClearNoTermBuffer()
-		{
-			payload = null;
-			positionIncrement = 1;
-			flags = 0;
-			startOffset = endOffset = 0;
-			type = DEFAULT_TYPE;
-		}
-		
-		/// <summary>Shorthand for calling <see cref="Clear" />,
-		/// <see cref="SetTermBuffer(char[], int, int)" />,
-		/// <see cref="StartOffset" />,
-		/// <see cref="EndOffset" />,
-		/// <see cref="Type" />
-		/// </summary>
-		/// <returns> this Token instance 
-		/// </returns>
-		public virtual Token Reinit(char[] newTermBuffer, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset, System.String newType)
-		{
-			ClearNoTermBuffer();
-			payload = null;
-			positionIncrement = 1;
-			SetTermBuffer(newTermBuffer, newTermOffset, newTermLength);
-			startOffset = newStartOffset;
-			endOffset = newEndOffset;
-			type = newType;
-			return this;
-		}
-		
-		/// <summary>Shorthand for calling <see cref="Clear" />,
-		/// <see cref="SetTermBuffer(char[], int, int)" />,
-		/// <see cref="StartOffset" />,
-		/// <see cref="EndOffset" />
-		/// <see cref="Type" /> on Token.DEFAULT_TYPE
-		/// </summary>
-		/// <returns> this Token instance 
-		/// </returns>
-		public virtual Token Reinit(char[] newTermBuffer, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset)
-		{
-			ClearNoTermBuffer();
-			SetTermBuffer(newTermBuffer, newTermOffset, newTermLength);
-			startOffset = newStartOffset;
-			endOffset = newEndOffset;
-			type = DEFAULT_TYPE;
-			return this;
-		}
-		
-		/// <summary>Shorthand for calling <see cref="Clear" />,
-		/// <see cref="SetTermBuffer(String)" />,
-		/// <see cref="StartOffset" />,
-		/// <see cref="EndOffset" />
-		/// <see cref="Type" />
-		/// </summary>
-		/// <returns> this Token instance 
-		/// </returns>
-		public virtual Token Reinit(System.String newTerm, int newStartOffset, int newEndOffset, System.String newType)
-		{
-			ClearNoTermBuffer();
-			SetTermBuffer(newTerm);
-			startOffset = newStartOffset;
-			endOffset = newEndOffset;
-			type = newType;
-			return this;
-		}
-		
-		/// <summary>Shorthand for calling <see cref="Clear" />,
-		/// <see cref="SetTermBuffer(String, int, int)" />,
-		/// <see cref="StartOffset" />,
-		/// <see cref="EndOffset" />
-		/// <see cref="Type" />
-		/// </summary>
-		/// <returns> this Token instance 
-		/// </returns>
-		public virtual Token Reinit(System.String newTerm, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset, System.String newType)
-		{
-			ClearNoTermBuffer();
-			SetTermBuffer(newTerm, newTermOffset, newTermLength);
-			startOffset = newStartOffset;
-			endOffset = newEndOffset;
-			type = newType;
-			return this;
-		}
-		
-		/// <summary>Shorthand for calling <see cref="Clear" />,
-		/// <see cref="SetTermBuffer(String)" />,
-		/// <see cref="StartOffset" />,
-		/// <see cref="EndOffset" />
-		/// <see cref="Type" /> on Token.DEFAULT_TYPE
-		/// </summary>
-		/// <returns> this Token instance 
-		/// </returns>
-		public virtual Token Reinit(System.String newTerm, int newStartOffset, int newEndOffset)
-		{
-			ClearNoTermBuffer();
-			SetTermBuffer(newTerm);
-			startOffset = newStartOffset;
-			endOffset = newEndOffset;
-			type = DEFAULT_TYPE;
-			return this;
-		}
-		
-		/// <summary>Shorthand for calling <see cref="Clear" />,
-		/// <see cref="SetTermBuffer(String, int, int)" />,
-		/// <see cref="StartOffset" />,
-		/// <see cref="EndOffset" />
-		/// <see cref="Type" /> on Token.DEFAULT_TYPE
-		/// </summary>
-		/// <returns> this Token instance 
-		/// </returns>
-		public virtual Token Reinit(System.String newTerm, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset)
-		{
-			ClearNoTermBuffer();
-			SetTermBuffer(newTerm, newTermOffset, newTermLength);
-			startOffset = newStartOffset;
-			endOffset = newEndOffset;
-			type = DEFAULT_TYPE;
-			return this;
-		}
-		
-		/// <summary> Copy the prototype token's fields into this one. Note: Payloads are shared.</summary>
-		/// <param name="prototype">
-		/// </param>
-		public virtual void  Reinit(Token prototype)
-		{
-			prototype.InitTermBuffer();
-			SetTermBuffer(prototype.termBuffer, 0, prototype.termLength);
-			positionIncrement = prototype.positionIncrement;
-			flags = prototype.flags;
-			startOffset = prototype.startOffset;
-			endOffset = prototype.endOffset;
-			type = prototype.type;
-			payload = prototype.payload;
-		}
-		
-		/// <summary> Copy the prototype token's fields into this one, with a different term. Note: Payloads are shared.</summary>
-		/// <param name="prototype">
-		/// </param>
-		/// <param name="newTerm">
-		/// </param>
-		public virtual void  Reinit(Token prototype, System.String newTerm)
-		{
-			SetTermBuffer(newTerm);
-			positionIncrement = prototype.positionIncrement;
-			flags = prototype.flags;
-			startOffset = prototype.startOffset;
-			endOffset = prototype.endOffset;
-			type = prototype.type;
-			payload = prototype.payload;
-		}
-		
-		/// <summary> Copy the prototype token's fields into this one, with a different term. Note: Payloads are shared.</summary>
-		/// <param name="prototype">
-		/// </param>
-		/// <param name="newTermBuffer">
-		/// </param>
-		/// <param name="offset">
-		/// </param>
-		/// <param name="length">
-		/// </param>
-		public virtual void  Reinit(Token prototype, char[] newTermBuffer, int offset, int length)
-		{
-			SetTermBuffer(newTermBuffer, offset, length);
-			positionIncrement = prototype.positionIncrement;
-			flags = prototype.flags;
-			startOffset = prototype.startOffset;
-			endOffset = prototype.endOffset;
-			type = prototype.type;
-			payload = prototype.payload;
-		}
-		
-		public override void  CopyTo(Attribute target)
-		{
-			if (target is Token)
-			{
-				var to = (Token) target;
-				to.Reinit(this);
-				// reinit shares the payload, so clone it:
-				if (payload != null)
-				{
-					to.payload = (Payload) payload.Clone();
-				}
-			}
-			else
-			{
-				InitTermBuffer();
-				((ITermAttribute) target).SetTermBuffer(termBuffer, 0, termLength);
-				((IOffsetAttribute) target).SetOffset(startOffset, endOffset);
-				((IPositionIncrementAttribute) target).PositionIncrement = positionIncrement;
-				((IPayloadAttribute) target).Payload = (payload == null)?null:(Payload) payload.Clone();
-				((IFlagsAttribute) target).Flags = flags;
-				((ITypeAttribute) target).Type = type;
-			}
-		}
+        public override int GetHashCode()
+        {
+            InitTermBuffer();
+            int code = termLength;
+            code = code * 31 + startOffset;
+            code = code * 31 + endOffset;
+            code = code * 31 + flags;
+            code = code * 31 + positionIncrement;
+            code = code * 31 + type.GetHashCode();
+            code = (payload == null?code:code * 31 + payload.GetHashCode());
+            code = code * 31 + ArrayUtil.HashCode(termBuffer, 0, termLength);
+            return code;
+        }
+        
+        // like clear() but doesn't clear termBuffer/text
+        private void  ClearNoTermBuffer()
+        {
+            payload = null;
+            positionIncrement = 1;
+            flags = 0;
+            startOffset = endOffset = 0;
+            type = DEFAULT_TYPE;
+        }
+        
+        /// <summary>Shorthand for calling <see cref="Clear" />,
+        /// <see cref="SetTermBuffer(char[], int, int)" />,
+        /// <see cref="StartOffset" />,
+        /// <see cref="EndOffset" />,
+        /// <see cref="Type" />
+        /// </summary>
+        /// <returns> this Token instance 
+        /// </returns>
+        public virtual Token Reinit(char[] newTermBuffer, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset, System.String newType)
+        {
+            ClearNoTermBuffer();
+            payload = null;
+            positionIncrement = 1;
+            SetTermBuffer(newTermBuffer, newTermOffset, newTermLength);
+            startOffset = newStartOffset;
+            endOffset = newEndOffset;
+            type = newType;
+            return this;
+        }
+        
+        /// <summary>Shorthand for calling <see cref="Clear" />,
+        /// <see cref="SetTermBuffer(char[], int, int)" />,
+        /// <see cref="StartOffset" />,
+        /// <see cref="EndOffset" />
+        /// <see cref="Type" /> on Token.DEFAULT_TYPE
+        /// </summary>
+        /// <returns> this Token instance 
+        /// </returns>
+        public virtual Token Reinit(char[] newTermBuffer, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset)
+        {
+            ClearNoTermBuffer();
+            SetTermBuffer(newTermBuffer, newTermOffset, newTermLength);
+            startOffset = newStartOffset;
+            endOffset = newEndOffset;
+            type = DEFAULT_TYPE;
+            return this;
+        }
+        
+        /// <summary>Shorthand for calling <see cref="Clear" />,
+        /// <see cref="SetTermBuffer(String)" />,
+        /// <see cref="StartOffset" />,
+        /// <see cref="EndOffset" />
+        /// <see cref="Type" />
+        /// </summary>
+        /// <returns> this Token instance 
+        /// </returns>
+        public virtual Token Reinit(System.String newTerm, int newStartOffset, int newEndOffset, System.String newType)
+        {
+            ClearNoTermBuffer();
+            SetTermBuffer(newTerm);
+            startOffset = newStartOffset;
+            endOffset = newEndOffset;
+            type = newType;
+            return this;
+        }
+        
+        /// <summary>Shorthand for calling <see cref="Clear" />,
+        /// <see cref="SetTermBuffer(String, int, int)" />,
+        /// <see cref="StartOffset" />,
+        /// <see cref="EndOffset" />
+        /// <see cref="Type" />
+        /// </summary>
+        /// <returns> this Token instance 
+        /// </returns>
+        public virtual Token Reinit(System.String newTerm, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset, System.String newType)
+        {
+            ClearNoTermBuffer();
+            SetTermBuffer(newTerm, newTermOffset, newTermLength);
+            startOffset = newStartOffset;
+            endOffset = newEndOffset;
+            type = newType;
+            return this;
+        }
+        
+        /// <summary>Shorthand for calling <see cref="Clear" />,
+        /// <see cref="SetTermBuffer(String)" />,
+        /// <see cref="StartOffset" />,
+        /// <see cref="EndOffset" />
+        /// <see cref="Type" /> on Token.DEFAULT_TYPE
+        /// </summary>
+        /// <returns> this Token instance 
+        /// </returns>
+        public virtual Token Reinit(System.String newTerm, int newStartOffset, int newEndOffset)
+        {
+            ClearNoTermBuffer();
+            SetTermBuffer(newTerm);
+            startOffset = newStartOffset;
+            endOffset = newEndOffset;
+            type = DEFAULT_TYPE;
+            return this;
+        }
+        
+        /// <summary>Shorthand for calling <see cref="Clear" />,
+        /// <see cref="SetTermBuffer(String, int, int)" />,
+        /// <see cref="StartOffset" />,
+        /// <see cref="EndOffset" />
+        /// <see cref="Type" /> on Token.DEFAULT_TYPE
+        /// </summary>
+        /// <returns> this Token instance 
+        /// </returns>
+        public virtual Token Reinit(System.String newTerm, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset)
+        {
+            ClearNoTermBuffer();
+            SetTermBuffer(newTerm, newTermOffset, newTermLength);
+            startOffset = newStartOffset;
+            endOffset = newEndOffset;
+            type = DEFAULT_TYPE;
+            return this;
+        }
+        
+        /// <summary> Copy the prototype token's fields into this one. Note: Payloads are shared.</summary>
+        /// <param name="prototype">
+        /// </param>
+        public virtual void  Reinit(Token prototype)
+        {
+            prototype.InitTermBuffer();
+            SetTermBuffer(prototype.termBuffer, 0, prototype.termLength);
+            positionIncrement = prototype.positionIncrement;
+            flags = prototype.flags;
+            startOffset = prototype.startOffset;
+            endOffset = prototype.endOffset;
+            type = prototype.type;
+            payload = prototype.payload;
+        }
+        
+        /// <summary> Copy the prototype token's fields into this one, with a different term. Note: Payloads are shared.</summary>
+        /// <param name="prototype">
+        /// </param>
+        /// <param name="newTerm">
+        /// </param>
+        public virtual void  Reinit(Token prototype, System.String newTerm)
+        {
+            SetTermBuffer(newTerm);
+            positionIncrement = prototype.positionIncrement;
+            flags = prototype.flags;
+            startOffset = prototype.startOffset;
+            endOffset = prototype.endOffset;
+            type = prototype.type;
+            payload = prototype.payload;
+        }
+        
+        /// <summary> Copy the prototype token's fields into this one, with a different term. Note: Payloads are shared.</summary>
+        /// <param name="prototype">
+        /// </param>
+        /// <param name="newTermBuffer">
+        /// </param>
+        /// <param name="offset">
+        /// </param>
+        /// <param name="length">
+        /// </param>
+        public virtual void  Reinit(Token prototype, char[] newTermBuffer, int offset, int length)
+        {
+            SetTermBuffer(newTermBuffer, offset, length);
+            positionIncrement = prototype.positionIncrement;
+            flags = prototype.flags;
+            startOffset = prototype.startOffset;
+            endOffset = prototype.endOffset;
+            type = prototype.type;
+            payload = prototype.payload;
+        }
+        
+        public override void  CopyTo(Attribute target)
+        {
+            if (target is Token)
+            {
+                var to = (Token) target;
+                to.Reinit(this);
+                // reinit shares the payload, so clone it:
+                if (payload != null)
+                {
+                    to.payload = (Payload) payload.Clone();
+                }
+            }
+            else
+            {
+                InitTermBuffer();
+                ((ITermAttribute) target).SetTermBuffer(termBuffer, 0, termLength);
+                ((IOffsetAttribute) target).SetOffset(startOffset, endOffset);
+                ((IPositionIncrementAttribute) target).PositionIncrement = positionIncrement;
+                ((IPayloadAttribute) target).Payload = (payload == null)?null:(Payload) payload.Clone();
+                ((IFlagsAttribute) target).Flags = flags;
+                ((ITypeAttribute) target).Type = type;
+            }
+        }
        
         ///<summary>
         /// Convenience factory that returns <c>Token</c> as implementation for the basic
@@ -807,8 +807,8 @@ namespace Lucene.Net.Analysis
         /// attributes.
         /// @since 3.0
         /// </summary>
-	    public static AttributeSource.AttributeFactory TOKEN_ATTRIBUTE_FACTORY =
-	        new TokenAttributeFactory(AttributeSource.AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY);
+        public static AttributeSource.AttributeFactory TOKEN_ATTRIBUTE_FACTORY =
+            new TokenAttributeFactory(AttributeSource.AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY);
   
         /// <summary>
         /// <b>Expert</b>: Creates an AttributeFactory returning {@link Token} as instance for the basic attributes
@@ -839,8 +839,8 @@ namespace Lucene.Net.Analysis
             {
                 if (this == other) return true;
 
-            	var af = other as TokenAttributeFactory;
-            	return af != null && _delegateFactory.Equals(af._delegateFactory);
+                var af = other as TokenAttributeFactory;
+                return af != null && _delegateFactory.Equals(af._delegateFactory);
             }
 
             public override int GetHashCode()

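As context for the Token hunk above, a minimal C# sketch of the reuse-oriented API it converts; the driver class, term text, offsets and payload bytes are all invented for illustration. It shows the asymmetry the code documents: Clone() deep-copies the term buffer and payload, while Reinit(Token) shares the prototype's payload.

    using Lucene.Net.Analysis;
    using Lucene.Net.Index; // Payload

    internal static class TokenReuseDemo // hypothetical driver
    {
        private static void Main()
        {
            var token = new Token();
            token.Reinit("quick", 0, 5);                   // term "quick", offsets [0,5), DEFAULT_TYPE
            token.Payload = new Payload(new byte[] { 1 }); // hypothetical payload

            var copy = (Token) token.Clone();              // deep copy: termBuffer and payload are cloned

            var reused = new Token();
            reused.Reinit(token);                          // field copy: the Payload object is shared
        }
    }
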
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/TokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/TokenFilter.cs b/src/core/Analysis/TokenFilter.cs
index 7483c82..2cd1093 100644
--- a/src/core/Analysis/TokenFilter.cs
+++ b/src/core/Analysis/TokenFilter.cs
@@ -17,35 +17,35 @@
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> A TokenFilter is a TokenStream whose input is another TokenStream.
-	/// <p/>
+    
+    /// <summary> A TokenFilter is a TokenStream whose input is another TokenStream.
+    /// <p/>
     /// This is an abstract class; subclasses must override <see cref="TokenStream.IncrementToken()" />.
-	/// 
-	/// </summary>
-	/// <seealso cref="TokenStream">
-	/// </seealso>
-	public abstract class TokenFilter:TokenStream
-	{
-		/// <summary>The source of tokens for this filter. </summary>
-		protected internal TokenStream input;
+    /// 
+    /// </summary>
+    /// <seealso cref="TokenStream">
+    /// </seealso>
+    public abstract class TokenFilter:TokenStream
+    {
+        /// <summary>The source of tokens for this filter. </summary>
+        protected internal TokenStream input;
 
-	    private bool isDisposed;
+        private bool isDisposed;
 
-	    /// <summary>Construct a token stream filtering the given input. </summary>
-		protected internal TokenFilter(TokenStream input):base(input)
-		{
-			this.input = input;
-		}
-		
-		/// <summary>Performs end-of-stream operations, if any, and then calls <c>end()</c> on the
-		/// input TokenStream.<p/> 
-		/// <b>NOTE:</b> Be sure to call <c>super.end()</c> first when overriding this method.
-		/// </summary>
-		public override void  End()
-		{
-			input.End();
-		}
+        /// <summary>Construct a token stream filtering the given input. </summary>
+        protected internal TokenFilter(TokenStream input):base(input)
+        {
+            this.input = input;
+        }
+        
+        /// <summary>Performs end-of-stream operations, if any, and then calls <c>end()</c> on the
+        /// input TokenStream.<p/> 
+        /// <b>NOTE:</b> Be sure to call <c>super.end()</c> first when overriding this method.
+        /// </summary>
+        public override void  End()
+        {
+            input.End();
+        }
 
         protected override void Dispose(bool disposing)
         {
@@ -62,11 +62,11 @@ namespace Lucene.Net.Analysis
             //input = null;
             isDisposed = true;
         }
-		
-		/// <summary>Reset the filter as well as the input TokenStream. </summary>
-		public override void  Reset()
-		{
-			input.Reset();
-		}
-	}
+        
+        /// <summary>Reset the filter as well as the input TokenStream. </summary>
+        public override void  Reset()
+        {
+            input.Reset();
+        }
+    }
 }
\ No newline at end of file

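To make the TokenFilter contract above concrete, a minimal sketch of a subclass; the class name is invented. TokenFilter already supplies End(), Reset() and Dispose() that delegate to the wrapped input, so a pass-through filter only needs to override IncrementToken():

    using Lucene.Net.Analysis;

    // Minimal pass-through filter: a starting point for real filters,
    // which would modify attributes before returning true.
    internal sealed class PassThroughFilter : TokenFilter
    {
        public PassThroughFilter(TokenStream input) : base(input) { }

        public override bool IncrementToken()
        {
            return input.IncrementToken(); // consume and forward the next token, if any
        }
    }
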
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/TokenStream.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/TokenStream.cs b/src/core/Analysis/TokenStream.cs
index c624696..4a44609 100644
--- a/src/core/Analysis/TokenStream.cs
+++ b/src/core/Analysis/TokenStream.cs
@@ -24,139 +24,139 @@ using AttributeSource = Lucene.Net.Util.AttributeSource;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> A <c>TokenStream</c> enumerates the sequence of tokens, either from
-	/// <see cref="Field" />s of a <see cref="Document" /> or from query text.
-	/// <p/>
-	/// This is an abstract class. Concrete subclasses are:
-	/// <list type="bullet">
-	/// <item><see cref="Tokenizer" />, a <c>TokenStream</c> whose input is a Reader; and</item>
-	/// <item><see cref="TokenFilter" />, a <c>TokenStream</c> whose input is another
-	/// <c>TokenStream</c>.</item>
-	/// </list>
-	/// A new <c>TokenStream</c> API has been introduced with Lucene 2.9. This API
-	/// has moved from being <see cref="Token" /> based to <see cref="IAttribute" /> based. While
-	/// <see cref="Token" /> still exists in 2.9 as a convenience class, the preferred way
-	/// to store the information of a <see cref="Token" /> is to use <see cref="Util.Attribute" />s.
-	/// <p/>
-	/// <c>TokenStream</c> now extends <see cref="AttributeSource" />, which provides
-	/// access to all of the token <see cref="IAttribute" />s for the <c>TokenStream</c>.
-	/// Note that only one instance per <see cref="Util.Attribute" /> is created and reused
-	/// for every token. This approach reduces object creation and allows local
-	/// caching of references to the <see cref="Util.Attribute" />s. See
-	/// <see cref="IncrementToken()" /> for further details.
-	/// <p/>
-	/// <b>The workflow of the new <c>TokenStream</c> API is as follows:</b>
-	/// <list type="bullet">
-	/// <item>Instantiation of <c>TokenStream</c>/<see cref="TokenFilter" />s which add/get
-	/// attributes to/from the <see cref="AttributeSource" />.</item>
-	/// <item>The consumer calls <see cref="TokenStream.Reset()" />.</item>
-	/// <item>The consumer retrieves attributes from the stream and stores local
-	/// references to all attributes it wants to access</item>
-	/// <item>The consumer calls <see cref="IncrementToken()" /> until it returns false and
-	/// consumes the attributes after each call.</item>
-	/// <item>The consumer calls <see cref="End()" /> so that any end-of-stream operations
-	/// can be performed.</item>
-	/// <item>The consumer calls <see cref="Close()" /> to release any resource when finished
-	/// using the <c>TokenStream</c></item>
-	/// </list>
-	/// To make sure that filters and consumers know which attributes are available,
-	/// the attributes must be added during instantiation. Filters and consumers are
-	/// not required to check for availability of attributes in
-	/// <see cref="IncrementToken()" />.
-	/// <p/>
-	/// You can find some example code for the new API in the analysis package level
-	/// Javadoc.
-	/// <p/>
-	/// Sometimes it is desirable to capture the current state of a <c>TokenStream</c>,
-	/// e.g. for buffering purposes (see <see cref="CachingTokenFilter" />,
-	/// <see cref="TeeSinkTokenFilter" />). For this use case,
-	/// <see cref="AttributeSource.CaptureState" /> and <see cref="AttributeSource.RestoreState" />
-	/// can be used.
-	/// </summary>
-	public abstract class TokenStream : AttributeSource, IDisposable
-	{
-		/// <summary> A TokenStream using the default attribute factory.</summary>
-		protected internal TokenStream()
-		{ }
-		
-		/// <summary> A TokenStream that uses the same attributes as the supplied one.</summary>
+    
+    /// <summary> A <c>TokenStream</c> enumerates the sequence of tokens, either from
+    /// <see cref="Field" />s of a <see cref="Document" /> or from query text.
+    /// <p/>
+    /// This is an abstract class. Concrete subclasses are:
+    /// <list type="bullet">
+    /// <item><see cref="Tokenizer" />, a <c>TokenStream</c> whose input is a Reader; and</item>
+    /// <item><see cref="TokenFilter" />, a <c>TokenStream</c> whose input is another
+    /// <c>TokenStream</c>.</item>
+    /// </list>
+    /// A new <c>TokenStream</c> API has been introduced with Lucene 2.9. This API
+    /// has moved from being <see cref="Token" /> based to <see cref="IAttribute" /> based. While
+    /// <see cref="Token" /> still exists in 2.9 as a convenience class, the preferred way
+    /// to store the information of a <see cref="Token" /> is to use <see cref="Util.Attribute" />s.
+    /// <p/>
+    /// <c>TokenStream</c> now extends <see cref="AttributeSource" />, which provides
+    /// access to all of the token <see cref="IAttribute" />s for the <c>TokenStream</c>.
+    /// Note that only one instance per <see cref="Util.Attribute" /> is created and reused
+    /// for every token. This approach reduces object creation and allows local
+    /// caching of references to the <see cref="Util.Attribute" />s. See
+    /// <see cref="IncrementToken()" /> for further details.
+    /// <p/>
+    /// <b>The workflow of the new <c>TokenStream</c> API is as follows:</b>
+    /// <list type="bullet">
+    /// <item>Instantiation of <c>TokenStream</c>/<see cref="TokenFilter" />s which add/get
+    /// attributes to/from the <see cref="AttributeSource" />.</item>
+    /// <item>The consumer calls <see cref="TokenStream.Reset()" />.</item>
+    /// <item>The consumer retrieves attributes from the stream and stores local
+    /// references to all attributes it wants to access</item>
+    /// <item>The consumer calls <see cref="IncrementToken()" /> until it returns false and
+    /// consumes the attributes after each call.</item>
+    /// <item>The consumer calls <see cref="End()" /> so that any end-of-stream operations
+    /// can be performed.</item>
+    /// <item>The consumer calls <see cref="Close()" /> to release any resource when finished
+    /// using the <c>TokenStream</c></item>
+    /// </list>
+    /// To make sure that filters and consumers know which attributes are available,
+    /// the attributes must be added during instantiation. Filters and consumers are
+    /// not required to check for availability of attributes in
+    /// <see cref="IncrementToken()" />.
+    /// <p/>
+    /// You can find some example code for the new API in the analysis package level
+    /// Javadoc.
+    /// <p/>
+    /// Sometimes it is desirable to capture the current state of a <c>TokenStream</c>,
+    /// e.g. for buffering purposes (see <see cref="CachingTokenFilter" />,
+    /// <see cref="TeeSinkTokenFilter" />). For this use case,
+    /// <see cref="AttributeSource.CaptureState" /> and <see cref="AttributeSource.RestoreState" />
+    /// can be used.
+    /// </summary>
+    public abstract class TokenStream : AttributeSource, IDisposable
+    {
+        /// <summary> A TokenStream using the default attribute factory.</summary>
+        protected internal TokenStream()
+        { }
+        
+        /// <summary> A TokenStream that uses the same attributes as the supplied one.</summary>
         protected internal TokenStream(AttributeSource input)
             : base(input)
-		{ }
-		
-		/// <summary> A TokenStream using the supplied AttributeFactory for creating new <see cref="IAttribute" /> instances.</summary>
+        { }
+        
+        /// <summary> A TokenStream using the supplied AttributeFactory for creating new <see cref="IAttribute" /> instances.</summary>
         protected internal TokenStream(AttributeFactory factory)
             : base(factory)
-		{ }
+        { }
 
-	    /// <summary> Consumers (e.g., <see cref="IndexWriter" />) use this method to advance the stream to
-	    /// the next token. Implementing classes must implement this method and update
-	    /// the appropriate <see cref="Util.Attribute" />s with the attributes of the next
-	    /// token.
-	    /// 
-	    /// The producer must make no assumptions about the attributes after the
-	    /// method has returned: the caller may arbitrarily change them. If the
-	    /// producer needs to preserve the state for subsequent calls, it can use
-	    /// <see cref="AttributeSource.CaptureState" /> to create a copy of the current attribute state.
-	    /// 
-	    /// This method is called for every token of a document, so an efficient
-	    /// implementation is crucial for good performance. To avoid calls to
-	    /// <see cref="AttributeSource.AddAttribute{T}()" /> and <see cref="AttributeSource.GetAttribute{T}()" />,
-	    /// references to all <see cref="Util.Attribute" />s that this stream uses should be
-	    /// retrieved during instantiation.
-	    /// 
-	    /// To ensure that filters and consumers know which attributes are available,
-	    /// the attributes must be added during instantiation. Filters and consumers
-	    /// are not required to check for availability of attributes in
-	    /// <see cref="IncrementToken()" />.
-	    /// 
-	    /// </summary>
-	    /// <returns> false for end of stream; true otherwise</returns>
-	    public abstract bool IncrementToken();
-		
-		/// <summary> This method is called by the consumer after the last token has been
-		/// consumed, after <see cref="IncrementToken" /> returned <c>false</c>
-		/// (using the new <c>TokenStream</c> API). Streams implementing the old API
-		/// should upgrade to use this feature.
-		/// <p/>
-		/// This method can be used to perform any end-of-stream operations, such as
-		/// setting the final offset of a stream. The final offset of a stream might
-		/// differ from the offset of the last token, e.g. when one or more whitespace
-		/// characters followed the last token but a <see cref="WhitespaceTokenizer" /> was used.
-		/// 
-		/// </summary>
-		/// <throws>  IOException </throws>
-		public virtual void  End()
-		{
-			// do nothing by default
-		}
-		
-		/// <summary> Resets this stream to the beginning. This is an optional operation, so
-		/// subclasses may or may not implement this method. <see cref="Reset()" /> is not needed for
-		/// the standard indexing process. However, if the tokens of a
-		/// <c>TokenStream</c> are intended to be consumed more than once, it is
-		/// necessary to implement <see cref="Reset()" />. Note that if your TokenStream
-		/// caches tokens and feeds them back again after a reset, it is imperative
-		/// that you clone the tokens when you store them away (on the first pass) as
-		/// well as when you return them (on future passes after <see cref="Reset()" />).
-		/// </summary>
-		public virtual void  Reset()
-		{
-		}
-		
-		/// <summary>Releases resources associated with this stream. </summary>
-		[Obsolete("Use Dispose() instead")]
-		public void  Close()
-		{
+        /// <summary> Consumers (e.g., <see cref="IndexWriter" />) use this method to advance the stream to
+        /// the next token. Implementing classes must implement this method and update
+        /// the appropriate <see cref="Util.Attribute" />s with the attributes of the next
+        /// token.
+        /// 
+        /// The producer must make no assumptions about the attributes after the
+        /// method has returned: the caller may arbitrarily change them. If the
+        /// producer needs to preserve the state for subsequent calls, it can use
+        /// <see cref="AttributeSource.CaptureState" /> to create a copy of the current attribute state.
+        /// 
+        /// This method is called for every token of a document, so an efficient
+        /// implementation is crucial for good performance. To avoid calls to
+        /// <see cref="AttributeSource.AddAttribute{T}()" /> and <see cref="AttributeSource.GetAttribute{T}()" />,
+        /// references to all <see cref="Util.Attribute" />s that this stream uses should be
+        /// retrieved during instantiation.
+        /// 
+        /// To ensure that filters and consumers know which attributes are available,
+        /// the attributes must be added during instantiation. Filters and consumers
+        /// are not required to check for availability of attributes in
+        /// <see cref="IncrementToken()" />.
+        /// 
+        /// </summary>
+        /// <returns> false for end of stream; true otherwise</returns>
+        public abstract bool IncrementToken();
+        
+        /// <summary> This method is called by the consumer after the last token has been
+        /// consumed, after <see cref="IncrementToken" /> returned <c>false</c>
+        /// (using the new <c>TokenStream</c> API). Streams implementing the old API
+        /// should upgrade to use this feature.
+        /// <p/>
+        /// This method can be used to perform any end-of-stream operations, such as
+        /// setting the final offset of a stream. The final offset of a stream might
+        /// differ from the offset of the last token, e.g. when one or more whitespace
+        /// characters followed the last token but a <see cref="WhitespaceTokenizer" /> was used.
+        /// 
+        /// </summary>
+        /// <throws>  IOException </throws>
+        public virtual void  End()
+        {
+            // do nothing by default
+        }
+        
+        /// <summary> Resets this stream to the beginning. This is an optional operation, so
+        /// subclasses may or may not implement this method. <see cref="Reset()" /> is not needed for
+        /// the standard indexing process. However, if the tokens of a
+        /// <c>TokenStream</c> are intended to be consumed more than once, it is
+        /// necessary to implement <see cref="Reset()" />. Note that if your TokenStream
+        /// caches tokens and feeds them back again after a reset, it is imperative
+        /// that you clone the tokens when you store them away (on the first pass) as
+        /// well as when you return them (on future passes after <see cref="Reset()" />).
+        /// </summary>
+        public virtual void  Reset()
+        {
+        }
+        
+        /// <summary>Releases resources associated with this stream. </summary>
+        [Obsolete("Use Dispose() instead")]
+        public void  Close()
+        {
             Dispose();
-		}
+        }
 
         public void Dispose()
         {
             Dispose(true);
         }
 
-	    protected abstract void Dispose(bool disposing);
-	}
+        protected abstract void Dispose(bool disposing);
+    }
 }
\ No newline at end of file

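The consumer workflow listed in the TokenStream summary above maps onto code roughly as follows; a sketch with an invented driver class and sample input:

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;

    internal static class TokenStreamDemo // hypothetical driver
    {
        private static void Main()
        {
            Analyzer analyzer = new SimpleAnalyzer(); // any analyzer would do
            TokenStream stream = analyzer.TokenStream("body", new StringReader("The quick brown fox"));

            // Retrieve attribute references once, before consuming.
            ITermAttribute termAtt = stream.AddAttribute<ITermAttribute>();
            IOffsetAttribute offsetAtt = stream.AddAttribute<IOffsetAttribute>();

            stream.Reset();
            while (stream.IncrementToken()) // false signals end of stream
                Console.WriteLine("{0} [{1},{2})", termAtt.Term, offsetAtt.StartOffset, offsetAtt.EndOffset);
            stream.End();                   // end-of-stream operations, e.g. final offset
            stream.Dispose();               // release resources
        }
    }
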
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/FlagsAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/FlagsAttribute.cs b/src/core/Analysis/Tokenattributes/FlagsAttribute.cs
index b5c4b7b..afff36d 100644
--- a/src/core/Analysis/Tokenattributes/FlagsAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/FlagsAttribute.cs
@@ -20,66 +20,66 @@ using Attribute = Lucene.Net.Util.Attribute;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary> This attribute can be used to pass different flags down the tokenizer chain,
-	/// e.g. from one TokenFilter to another.
-	/// </summary>
-	[Serializable]
-	public class FlagsAttribute:Util.Attribute, IFlagsAttribute, System.ICloneable
-	{
-		private int flags = 0;
+    
+    /// <summary> This attribute can be used to pass different flags down the tokenizer chain,
+    /// e.g. from one TokenFilter to another.
+    /// </summary>
+    [Serializable]
+    public class FlagsAttribute:Util.Attribute, IFlagsAttribute, System.ICloneable
+    {
+        private int flags = 0;
 
-	    /// <summary> EXPERIMENTAL:  While we think this is here to stay, we may want to change it to be a long.
-	    /// <p/>
-	    /// 
-	    /// Get the bitset for any bits that have been set.  This is completely distinct from <see cref="ITypeAttribute.Type" />, although they do share similar purposes.
-	    /// The flags can be used to encode information about the token for use by other <see cref="Lucene.Net.Analysis.TokenFilter" />s.
-	    /// 
-	    /// 
-	    /// </summary>
-	    /// <value> The bits </value>
-	    public virtual int Flags
-	    {
-	        get { return flags; }
-	        set { this.flags = value; }
-	    }
+        /// <summary> EXPERIMENTAL:  While we think this is here to stay, we may want to change it to be a long.
+        /// <p/>
+        /// 
+        /// Get the bitset for any bits that have been set.  This is completely distinct from <see cref="ITypeAttribute.Type" />, although they do share similar purposes.
+        /// The flags can be used to encode information about the token for use by other <see cref="Lucene.Net.Analysis.TokenFilter" />s.
+        /// 
+        /// 
+        /// </summary>
+        /// <value> The bits </value>
+        public virtual int Flags
+        {
+            get { return flags; }
+            set { this.flags = value; }
+        }
 
-	    public override void  Clear()
-		{
-			flags = 0;
-		}
-		
-		public  override bool Equals(System.Object other)
-		{
-			if (this == other)
-			{
-				return true;
-			}
-			
-			if (other is FlagsAttribute)
-			{
-				return ((FlagsAttribute) other).flags == flags;
-			}
-			
-			return false;
-		}
-		
-		public override int GetHashCode()
-		{
-			return flags;
-		}
-		
-		public override void  CopyTo(Attribute target)
-		{
-			IFlagsAttribute t = (IFlagsAttribute) target;
-			t.Flags = flags;
-		}
-		
-		override public System.Object Clone()
-		{
+        public override void  Clear()
+        {
+            flags = 0;
+        }
+        
+        public  override bool Equals(System.Object other)
+        {
+            if (this == other)
+            {
+                return true;
+            }
+            
+            if (other is FlagsAttribute)
+            {
+                return ((FlagsAttribute) other).flags == flags;
+            }
+            
+            return false;
+        }
+        
+        public override int GetHashCode()
+        {
+            return flags;
+        }
+        
+        public override void  CopyTo(Attribute target)
+        {
+            IFlagsAttribute t = (IFlagsAttribute) target;
+            t.Flags = flags;
+        }
+        
+        override public System.Object Clone()
+        {
             FlagsAttribute impl = new FlagsAttribute();
             impl.flags = this.flags;
             return impl;
-		}
-	}
+        }
+    }
 }
\ No newline at end of file

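A sketch of how two cooperating stages might use this attribute; the filter name, the keyword test and the bit value are all invented for the example. A downstream filter would test the same bit with (Flags & KeywordFlag) != 0.

    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;

    // Hypothetical filter that tags tokens for a later stage via the flags bitset.
    internal sealed class KeywordMarkerFilter : TokenFilter
    {
        private const int KeywordFlag = 1; // bit value agreed by convention between stages

        private readonly IFlagsAttribute flagsAtt;
        private readonly ITermAttribute termAtt;

        public KeywordMarkerFilter(TokenStream input) : base(input)
        {
            flagsAtt = AddAttribute<IFlagsAttribute>();
            termAtt = AddAttribute<ITermAttribute>();
        }

        public override bool IncrementToken()
        {
            if (!input.IncrementToken())
                return false;
            if (termAtt.Term == "lucene")      // hypothetical keyword test
                flagsAtt.Flags |= KeywordFlag; // mark this token for downstream filters
            return true;
        }
    }
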
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/IFlagsAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/IFlagsAttribute.cs b/src/core/Analysis/Tokenattributes/IFlagsAttribute.cs
index 24b2bea..325038e 100644
--- a/src/core/Analysis/Tokenattributes/IFlagsAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/IFlagsAttribute.cs
@@ -21,21 +21,21 @@ using Tokenizer = Lucene.Net.Analysis.Tokenizer;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary> This attribute can be used to pass different flags down the <see cref="Tokenizer" /> chain,
-	/// e.g. from one TokenFilter to another.
-	/// </summary>
-	public interface IFlagsAttribute:IAttribute
-	{
-	    /// <summary> EXPERIMENTAL:  While we think this is here to stay, we may want to change it to be a long.
-	    /// <p/>
-	    /// 
-	    /// Get the bitset for any bits that have been set.  This is completely distinct from <see cref="ITypeAttribute.Type" />, although they do share similar purposes.
-	    /// The flags can be used to encode information about the token for use by other <see cref="Lucene.Net.Analysis.TokenFilter" />s.
-	    /// 
-	    /// 
-	    /// </summary>
-	    /// <value> The bits </value>
-	    int Flags { get; set; }
-	}
+    
+    /// <summary> This attribute can be used to pass different flags down the <see cref="Tokenizer" /> chain,
+    /// e.g. from one TokenFilter to another.
+    /// </summary>
+    public interface IFlagsAttribute:IAttribute
+    {
+        /// <summary> EXPERIMENTAL:  While we think this is here to stay, we may want to change it to be a long.
+        /// <p/>
+        /// 
+        /// Get the bitset for any bits that have been set.  This is completely distinct from <see cref="ITypeAttribute.Type" />, although they do share similar purposes.
+        /// The flags can be used to encode information about the token for use by other <see cref="Lucene.Net.Analysis.TokenFilter" />s.
+        /// 
+        /// 
+        /// </summary>
+        /// <value> The bits </value>
+        int Flags { get; set; }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Tokenattributes/IOffsetAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/IOffsetAttribute.cs b/src/core/Analysis/Tokenattributes/IOffsetAttribute.cs
index ffbbe02..3b00369 100644
--- a/src/core/Analysis/Tokenattributes/IOffsetAttribute.cs
+++ b/src/core/Analysis/Tokenattributes/IOffsetAttribute.cs
@@ -20,29 +20,29 @@ using Lucene.Net.Util;
 
 namespace Lucene.Net.Analysis.Tokenattributes
 {
-	
-	/// <summary> The start and end character offset of a Token. </summary>
-	public interface IOffsetAttribute : IAttribute
-	{
-	    /// <summary>Returns this Token's starting offset, the position of the first character
-	    /// corresponding to this token in the source text.
-	    /// Note that the difference between endOffset() and startOffset() may not be
-	    /// equal to termText.length(), as the term text may have been altered by a
-	    /// stemmer or some other filter. 
-	    /// </summary>
-	    int StartOffset { get; }
+    
+    /// <summary> The start and end character offset of a Token. </summary>
+    public interface IOffsetAttribute : IAttribute
+    {
+        /// <summary>Returns this Token's starting offset, the position of the first character
+        /// corresponding to this token in the source text.
+        /// Note that the difference between endOffset() and startOffset() may not be
+        /// equal to termText.length(), as the term text may have been altered by a
+        /// stemmer or some other filter. 
+        /// </summary>
+        int StartOffset { get; }
 
 
-	    /// <summary>Set the starting and ending offset.
+        /// <summary>Set the starting and ending offset.
         /// See StartOffset() and EndOffset()
         /// </summary>
-		void  SetOffset(int startOffset, int endOffset);
+        void  SetOffset(int startOffset, int endOffset);
 
 
-	    /// <summary>Returns this Token's ending offset, one greater than the position of the
-	    /// last character corresponding to this token in the source text. The length
-	    /// of the token in the source text is (endOffset - startOffset). 
-	    /// </summary>
-	    int EndOffset { get; }
-	}
+        /// <summary>Returns this Token's ending offset, one greater than the position of the
+        /// last character corresponding to this token in the source text. The length
+        /// of the token in the source text is (endOffset - startOffset). 
+        /// </summary>
+        int EndOffset { get; }
+    }
 }
\ No newline at end of file

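Since EndOffset is one past the last character, the token's span in the source text has length EndOffset - StartOffset; a small sketch (helper name invented) that excerpts that span:

    using Lucene.Net.Analysis.Tokenattributes;

    internal static class OffsetHelpers // hypothetical helper
    {
        // Excerpt the raw source span of the current token. As noted above, the raw
        // text may differ from the term text, e.g. after stemming.
        internal static string RawSpan(string sourceText, IOffsetAttribute offsetAtt)
        {
            int start = offsetAtt.StartOffset;
            int length = offsetAtt.EndOffset - offsetAtt.StartOffset; // end is exclusive
            return sourceText.Substring(start, length);
        }
    }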

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/StopAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/StopAnalyzer.cs b/src/core/Analysis/StopAnalyzer.cs
index aabe197..96a673d 100644
--- a/src/core/Analysis/StopAnalyzer.cs
+++ b/src/core/Analysis/StopAnalyzer.cs
@@ -20,57 +20,57 @@ using Version = Lucene.Net.Util.Version;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> Filters <see cref="LetterTokenizer" /> with <see cref="LowerCaseFilter" /> and
-	/// <see cref="StopFilter" />.
-	/// 
-	/// <a name="version"/>
-	/// <p/>
-	/// You must specify the required <see cref="Version" /> compatibility when creating
-	/// StopAnalyzer:
-	/// <list type="bullet">
-	/// <item>As of 2.9, position increments are preserved</item>
-	/// </list>
-	/// </summary>
-	
-	public sealed class StopAnalyzer:Analyzer
-	{
-		private readonly ISet<string> stopWords;
-		private readonly bool enablePositionIncrements;
+    
+    /// <summary> Filters <see cref="LetterTokenizer" /> with <see cref="LowerCaseFilter" /> and
+    /// <see cref="StopFilter" />.
+    /// 
+    /// <a name="version"/>
+    /// <p/>
+    /// You must specify the required <see cref="Version" /> compatibility when creating
+    /// StopAnalyzer:
+    /// <list type="bullet">
+    /// <item>As of 2.9, position increments are preserved</item>
+    /// </list>
+    /// </summary>
+    
+    public sealed class StopAnalyzer:Analyzer
+    {
+        private readonly ISet<string> stopWords;
+        private readonly bool enablePositionIncrements;
 
-		/// <summary>An unmodifiable set containing some common English words that are not usually useful
-		/// for searching.
-		/// </summary>
+        /// <summary>An unmodifiable set containing some common English words that are not usually useful
+        /// for searching.
+        /// </summary>
         public static ISet<string> ENGLISH_STOP_WORDS_SET;
-		
-		/// <summary> Builds an analyzer which removes words in ENGLISH_STOP_WORDS.</summary>
-		public StopAnalyzer(Version matchVersion)
-		{
-			stopWords = ENGLISH_STOP_WORDS_SET;
-			enablePositionIncrements = StopFilter.GetEnablePositionIncrementsVersionDefault(matchVersion);
-		}
+        
+        /// <summary> Builds an analyzer which removes words in ENGLISH_STOP_WORDS.</summary>
+        public StopAnalyzer(Version matchVersion)
+        {
+            stopWords = ENGLISH_STOP_WORDS_SET;
+            enablePositionIncrements = StopFilter.GetEnablePositionIncrementsVersionDefault(matchVersion);
+        }
 
-		/// <summary>Builds an analyzer with the stop words from the given set.</summary>
-		public StopAnalyzer(Version matchVersion, ISet<string> stopWords)
-		{
-			this.stopWords = stopWords;
-			enablePositionIncrements = StopFilter.GetEnablePositionIncrementsVersionDefault(matchVersion);
-		}
-		
-		/// <summary> Builds an analyzer with the stop words from the given file.
-		/// 
-		/// </summary>
-		/// <seealso cref="WordlistLoader.GetWordSet(System.IO.FileInfo)">
-		/// </seealso>
-		/// <param name="matchVersion">See <a href="#version">above</a>
-		/// </param>
-		/// <param name="stopwordsFile">File to load stop words from
-		/// </param>
-		public StopAnalyzer(Version matchVersion, System.IO.FileInfo stopwordsFile)
-		{
-			stopWords = WordlistLoader.GetWordSet(stopwordsFile);
-			enablePositionIncrements = StopFilter.GetEnablePositionIncrementsVersionDefault(matchVersion);
-		}
+        /// <summary>Builds an analyzer with the stop words from the given set.</summary>
+        public StopAnalyzer(Version matchVersion, ISet<string> stopWords)
+        {
+            this.stopWords = stopWords;
+            enablePositionIncrements = StopFilter.GetEnablePositionIncrementsVersionDefault(matchVersion);
+        }
+        
+        /// <summary> Builds an analyzer with the stop words from the given file.
+        /// 
+        /// </summary>
+        /// <seealso cref="WordlistLoader.GetWordSet(System.IO.FileInfo)">
+        /// </seealso>
+        /// <param name="matchVersion">See <a href="#version">above</a>
+        /// </param>
+        /// <param name="stopwordsFile">File to load stop words from
+        /// </param>
+        public StopAnalyzer(Version matchVersion, System.IO.FileInfo stopwordsFile)
+        {
+            stopWords = WordlistLoader.GetWordSet(stopwordsFile);
+            enablePositionIncrements = StopFilter.GetEnablePositionIncrementsVersionDefault(matchVersion);
+        }
 
         /// <summary>Builds an analyzer with the stop words from the given reader. </summary>
         /// <seealso cref="WordlistLoader.GetWordSet(System.IO.TextReader)">
@@ -86,56 +86,56 @@ namespace Lucene.Net.Analysis
         }
 
         /// <summary>Filters LowerCaseTokenizer with StopFilter. </summary>
-		public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
-		{
-			return new StopFilter(enablePositionIncrements, new LowerCaseTokenizer(reader), stopWords);
-		}
-		
-		/// <summary>Filters LowerCaseTokenizer with StopFilter. </summary>
-		private class SavedStreams
-		{
-			public SavedStreams(StopAnalyzer enclosingInstance)
-			{
-				InitBlock(enclosingInstance);
-			}
-			private void  InitBlock(StopAnalyzer enclosingInstance)
-			{
-				this.enclosingInstance = enclosingInstance;
-			}
-			private StopAnalyzer enclosingInstance;
-			public StopAnalyzer Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			internal Tokenizer source;
-			internal TokenStream result;
-		}
-		
-		public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
-		{
-			var streams = (SavedStreams) PreviousTokenStream;
-			if (streams == null)
-			{
-				streams = new SavedStreams(this) {source = new LowerCaseTokenizer(reader)};
-				streams.result = new StopFilter(enablePositionIncrements, streams.source, stopWords);
-				PreviousTokenStream = streams;
-			}
-			else
-				streams.source.Reset(reader);
-			return streams.result;
-		}
-		static StopAnalyzer()
-		{
-			{
-				var stopWords = new System.String[]{"a", "an", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"};
-				var stopSet = new CharArraySet(stopWords.Length, false);
-				stopSet.AddAll(stopWords);
-				ENGLISH_STOP_WORDS_SET = CharArraySet.UnmodifiableSet(stopSet);
-			}
-		}
-	}
+        public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
+        {
+            return new StopFilter(enablePositionIncrements, new LowerCaseTokenizer(reader), stopWords);
+        }
+        
+        /// <summary>Filters LowerCaseTokenizer with StopFilter. </summary>
+        private class SavedStreams
+        {
+            public SavedStreams(StopAnalyzer enclosingInstance)
+            {
+                InitBlock(enclosingInstance);
+            }
+            private void  InitBlock(StopAnalyzer enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
+            private StopAnalyzer enclosingInstance;
+            public StopAnalyzer Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+                
+            }
+            internal Tokenizer source;
+            internal TokenStream result;
+        }
+        
+        public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
+        {
+            var streams = (SavedStreams) PreviousTokenStream;
+            if (streams == null)
+            {
+                streams = new SavedStreams(this) {source = new LowerCaseTokenizer(reader)};
+                streams.result = new StopFilter(enablePositionIncrements, streams.source, stopWords);
+                PreviousTokenStream = streams;
+            }
+            else
+                streams.source.Reset(reader);
+            return streams.result;
+        }
+        static StopAnalyzer()
+        {
+            {
+                var stopWords = new System.String[]{"a", "an", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"};
+                var stopSet = new CharArraySet(stopWords.Length, false);
+                stopSet.AddAll(stopWords);
+                ENGLISH_STOP_WORDS_SET = CharArraySet.UnmodifiableSet(stopSet);
+            }
+        }
+    }
 }
\ No newline at end of file

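A minimal usage sketch for the analyzer above; the driver class, field name and input text are invented. With the default ENGLISH_STOP_WORDS_SET, "the" is filtered out while "fox" and "jumps" survive.

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;
    using Version = Lucene.Net.Util.Version;

    internal static class StopAnalyzerDemo // hypothetical driver
    {
        private static void Main()
        {
            var analyzer = new StopAnalyzer(Version.LUCENE_30);
            TokenStream ts = analyzer.ReusableTokenStream("body", new StringReader("the fox jumps"));
            ITermAttribute termAtt = ts.AddAttribute<ITermAttribute>();

            ts.Reset();
            while (ts.IncrementToken())
                Console.WriteLine(termAtt.Term); // prints "fox" then "jumps"; "the" is dropped
            ts.End();
            ts.Dispose();
        }
    }
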
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/StopFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/StopFilter.cs b/src/core/Analysis/StopFilter.cs
index 81b7dd0..722faaf 100644
--- a/src/core/Analysis/StopFilter.cs
+++ b/src/core/Analysis/StopFilter.cs
@@ -24,155 +24,155 @@ using Version = Lucene.Net.Util.Version;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> Removes stop words from a token stream.</summary>
-	
-	public sealed class StopFilter:TokenFilter
-	{
-		private readonly CharArraySet stopWords;
-		private bool enablePositionIncrements = false;
-		
-		private readonly ITermAttribute termAtt;
-		private readonly IPositionIncrementAttribute posIncrAtt;
-		
-		/// <summary> Construct a token stream filtering the given input.
-		/// If <c>stopWords</c> is an instance of <see cref="CharArraySet" /> (true if
-		/// <c>makeStopSet()</c> was used to construct the set) it will be directly used
-		/// and <c>ignoreCase</c> will be ignored since <c>CharArraySet</c>
-		/// directly controls case sensitivity.
-		/// <p/>
-		/// If <c>stopWords</c> is not an instance of <see cref="CharArraySet" />,
-		/// a new CharArraySet will be constructed and <c>ignoreCase</c> will be
-		/// used to specify the case sensitivity of that set.
-		/// </summary>
-		/// <param name="enablePositionIncrements">true if token positions should record the removed stop words</param>
-		/// <param name="input">Input TokenStream</param>
-		/// <param name="stopWords">A Set of strings or char[] or any other ToString()-able set representing the stopwords</param>
+    
+    /// <summary> Removes stop words from a token stream.</summary>
+    
+    public sealed class StopFilter:TokenFilter
+    {
+        private readonly CharArraySet stopWords;
+        private bool enablePositionIncrements = false;
+        
+        private readonly ITermAttribute termAtt;
+        private readonly IPositionIncrementAttribute posIncrAtt;
+        
+        /// <summary> Construct a token stream filtering the given input.
+        /// If <c>stopWords</c> is an instance of <see cref="CharArraySet" /> (true if
+        /// <c>makeStopSet()</c> was used to construct the set) it will be directly used
+        /// and <c>ignoreCase</c> will be ignored since <c>CharArraySet</c>
+        /// directly controls case sensitivity.
+        /// <p/>
+        /// If <c>stopWords</c> is not an instance of <see cref="CharArraySet" />,
+        /// a new CharArraySet will be constructed and <c>ignoreCase</c> will be
+        /// used to specify the case sensitivity of that set.
+        /// </summary>
+        /// <param name="enablePositionIncrements">true if token positions should record the removed stop words</param>
+        /// <param name="input">Input TokenStream</param>
+        /// <param name="stopWords">A Set of strings or strings or char[] or any other ToString()-able set representing the stopwords</param>
         /// <param name="ignoreCase">if true, all words are lower cased first</param>
         public StopFilter(bool enablePositionIncrements, TokenStream input, ISet<string> stopWords, bool ignoreCase)
             : base(input)
-		{
-		    if (stopWords is CharArraySet)
-		    {
-		        this.stopWords = (CharArraySet) stopWords;
-		    }
-		    else
-		    {
-		        this.stopWords = new CharArraySet(stopWords.Count, ignoreCase);
-		        this.stopWords.AddAll(stopWords);
-		    }
-		    this.enablePositionIncrements = enablePositionIncrements;
-		    termAtt = AddAttribute<ITermAttribute>();
+        {
+            if (stopWords is CharArraySet)
+            {
+                this.stopWords = (CharArraySet) stopWords;
+            }
+            else
+            {
+                this.stopWords = new CharArraySet(stopWords.Count, ignoreCase);
+                this.stopWords.AddAll(stopWords);
+            }
+            this.enablePositionIncrements = enablePositionIncrements;
+            termAtt = AddAttribute<ITermAttribute>();
             posIncrAtt = AddAttribute<IPositionIncrementAttribute>();
-		}
+        }
 
-	    /// <summary> Constructs a filter which removes words from the input
-		/// TokenStream that are named in the Set.
-		/// </summary>
-		/// <param name="enablePositionIncrements">true if token positions should record the removed stop words</param>
-		///  <param name="in">Input stream</param>
-		/// <param name="stopWords">A Set of strings or char[] or any other ToString()-able set representing the stopwords</param>
-		/// <seealso cref="MakeStopSet(String[])"/>
-		public StopFilter(bool enablePositionIncrements, TokenStream @in, ISet<string> stopWords)
-			: this(enablePositionIncrements, @in, stopWords, false)
-		{ }
-		
-		/// <summary> Builds a Set from an array of stop words,
-		/// appropriate for passing into the StopFilter constructor.
-		/// This permits this stopWords construction to be cached once when
-		/// an Analyzer is constructed.
-		/// 
-		/// </summary>
-		/// <seealso cref="MakeStopSet(String[], bool)">passing false to ignoreCase</seealso>
-		public static ISet<string> MakeStopSet(params string[] stopWords)
-		{
-			return MakeStopSet(stopWords, false);
-		}
-		
-		/// <summary> Builds a Set from an array of stop words,
-		/// appropriate for passing into the StopFilter constructor.
-		/// This permits this stopWords construction to be cached once when
-		/// an Analyzer is constructed.
-		/// </summary>
-		/// <param name="stopWords">A list of strings or char[] or any other ToString()-able list representing the stop words</param>
-		/// <seealso cref="MakeStopSet(String[], bool)">passing false to ignoreCase</seealso>
-		public static ISet<string> MakeStopSet(IList<object> stopWords)
-		{
-			return MakeStopSet(stopWords, false);
-		}
-		
-		/// <summary></summary>
-		/// <param name="stopWords">An array of stopwords</param>
-		/// <param name="ignoreCase">If true, all words are lower cased first.</param>
-		/// <returns> a Set containing the words</returns>
-		public static ISet<string> MakeStopSet(string[] stopWords, bool ignoreCase)
-		{
-			var stopSet = new CharArraySet(stopWords.Length, ignoreCase);
-		    stopSet.AddAll(stopWords);
-			return stopSet;
-		}
-		
-		/// <summary> </summary>
+        /// <summary> Constructs a filter which removes words from the input
+        /// TokenStream that are named in the Set.
+        /// </summary>
+        /// <param name="enablePositionIncrements">true if token positions should record the removed stop words</param>
+        ///  <param name="in">Input stream</param>
+        /// <param name="stopWords">A Set of strings or char[] or any other ToString()-able set representing the stopwords</param>
+        /// <seealso cref="MakeStopSet(String[])"/>
+        public StopFilter(bool enablePositionIncrements, TokenStream @in, ISet<string> stopWords)
+            : this(enablePositionIncrements, @in, stopWords, false)
+        { }
+        
+        /// <summary> Builds a Set from an array of stop words,
+        /// appropriate for passing into the StopFilter constructor.
+        /// This permits the stop-word set to be built once and cached when
+        /// an Analyzer is constructed.
+        /// 
+        /// </summary>
+        /// <seealso cref="MakeStopSet(String[], bool)">passing false to ignoreCase</seealso>
+        public static ISet<string> MakeStopSet(params string[] stopWords)
+        {
+            return MakeStopSet(stopWords, false);
+        }
+        
+        /// <summary> Builds a Set from an array of stop words,
+        /// appropriate for passing into the StopFilter constructor.
+        /// This permits the stop-word set to be built once and cached when
+        /// an Analyzer is constructed.
+        /// </summary>
+        /// <param name="stopWords">A list of strings or char[] or any other ToString()-able list representing the stop words</param>
+        /// <seealso cref="MakeStopSet(String[], bool)">passing false to ignoreCase</seealso>
+        public static ISet<string> MakeStopSet(IList<object> stopWords)
+        {
+            return MakeStopSet(stopWords, false);
+        }
+        
+        /// <summary>Builds a Set from an array of stop words.</summary>
+        /// <param name="stopWords">An array of stopwords</param>
+        /// <param name="ignoreCase">If true, all words are lower cased first.</param>
+        /// <returns> a Set containing the words</returns>
+        public static ISet<string> MakeStopSet(string[] stopWords, bool ignoreCase)
+        {
+            var stopSet = new CharArraySet(stopWords.Length, ignoreCase);
+            stopSet.AddAll(stopWords);
+            return stopSet;
+        }
+        
+        /// <summary>Builds a Set from a list of stop words.</summary>
         /// <param name="stopWords">A List of Strings or char[] or any other toString()-able list representing the stopwords </param>
-		/// <param name="ignoreCase">if true, all words are lower cased first</param>
-		/// <returns>A Set (<see cref="CharArraySet"/>)containing the words</returns>
-		public static ISet<string> MakeStopSet(IList<object> stopWords, bool ignoreCase)
-		{
-			var stopSet = new CharArraySet(stopWords.Count, ignoreCase);
+        /// <param name="ignoreCase">if true, all words are lower cased first</param>
+        /// <returns>A Set (<see cref="CharArraySet"/>)containing the words</returns>
+        public static ISet<string> MakeStopSet(IList<object> stopWords, bool ignoreCase)
+        {
+            var stopSet = new CharArraySet(stopWords.Count, ignoreCase);
             foreach(var word in stopWords)
                 stopSet.Add(word.ToString());
-			return stopSet;
-		}
-		
-		/// <summary> Returns the next input Token whose term() is not a stop word.</summary>
-		public override bool IncrementToken()
-		{
-			// return the first non-stop word found
-			int skippedPositions = 0;
-			while (input.IncrementToken())
-			{
-				if (!stopWords.Contains(termAtt.TermBuffer(), 0, termAtt.TermLength()))
-				{
-					if (enablePositionIncrements)
-					{
-						posIncrAtt.PositionIncrement = posIncrAtt.PositionIncrement + skippedPositions;
-					}
-					return true;
-				}
-				skippedPositions += posIncrAtt.PositionIncrement;
-			}
-			// reached EOS -- return false
-			return false;
-		}
-		
-		/// <summary> Returns version-dependent default for enablePositionIncrements. Analyzers
-		/// that embed StopFilter use this method when creating the StopFilter. Prior
-		/// to 2.9, this returns false. On 2.9 or later, it returns true.
-		/// </summary>
-		public static bool GetEnablePositionIncrementsVersionDefault(Version matchVersion)
-		{
+            return stopSet;
+        }
+        
+        /// <summary> Returns the next input Token whose term() is not a stop word.</summary>
+        public override bool IncrementToken()
+        {
+            // return the first non-stop word found
+            int skippedPositions = 0;
+            while (input.IncrementToken())
+            {
+                if (!stopWords.Contains(termAtt.TermBuffer(), 0, termAtt.TermLength()))
+                {
+                    if (enablePositionIncrements)
+                    {
+                        posIncrAtt.PositionIncrement = posIncrAtt.PositionIncrement + skippedPositions;
+                    }
+                    return true;
+                }
+                skippedPositions += posIncrAtt.PositionIncrement;
+            }
+            // reached EOS -- return false
+            return false;
+        }
+        
+        /// <summary> Returns version-dependent default for enablePositionIncrements. Analyzers
+        /// that embed StopFilter use this method when creating the StopFilter. Prior
+        /// to 2.9, this returns false. On 2.9 or later, it returns true.
+        /// </summary>
+        public static bool GetEnablePositionIncrementsVersionDefault(Version matchVersion)
+        {
             return matchVersion.OnOrAfter(Version.LUCENE_29);
-		}
+        }
 
-	    /// <summary> If <c>true</c>, this StopFilter will preserve
-	    /// positions of the incoming tokens (ie, accumulate and
-	    /// set position increments of the removed stop tokens).
-	    /// Generally, <c>true</c> is best as it does not
-	    /// lose information (positions of the original tokens)
-	    /// during indexing.
-	    /// 
-	    /// <p/> When set, when a token is stopped
-	    /// (omitted), the position increment of the following
-	    /// token is incremented.
-	    /// 
-	    /// <p/> <b>NOTE</b>: be sure to also
-	    /// set <see cref="QueryParser.EnablePositionIncrements" /> if
-	    /// you use QueryParser to create queries.
-	    /// </summary>
-	    public bool EnablePositionIncrements
-	    {
-	        get { return enablePositionIncrements; }
-	        set { enablePositionIncrements = value; }
-	    }
-	}
+        /// <summary> If <c>true</c>, this StopFilter will preserve
+        /// positions of the incoming tokens (i.e., accumulate and
+        /// set position increments of the removed stop tokens).
+        /// Generally, <c>true</c> is best as it does not
+        /// lose information (positions of the original tokens)
+        /// during indexing.
+        /// 
+        /// <p/> When set, when a token is stopped
+        /// (omitted), the position increment of the following
+        /// token is incremented.
+        /// 
+        /// <p/> <b>NOTE</b>: be sure to also
+        /// set <see cref="QueryParser.EnablePositionIncrements" /> if
+        /// you use QueryParser to create queries.
+        /// </summary>
+        public bool EnablePositionIncrements
+        {
+            get { return enablePositionIncrements; }
+            set { enablePositionIncrements = value; }
+        }
+    }
 }
\ No newline at end of file

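The members above (MakeStopSet, GetEnablePositionIncrementsVersionDefault and the
four-argument constructor) compose as follows; a minimal sketch, assuming a
WhitespaceTokenizer as the source and illustrative input text:

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Tokenattributes;
    using Version = Lucene.Net.Util.Version;

    class StopFilterDemo
    {
        static void Main()
        {
            // Build the stop set once; it can be cached across Analyzer instances.
            var stopWords = StopFilter.MakeStopSet(new[] { "the", "over" }, true);

            TokenStream ts = new WhitespaceTokenizer(
                new StringReader("the fox jumps over the dog"));
            ts = new StopFilter(
                StopFilter.GetEnablePositionIncrementsVersionDefault(Version.LUCENE_29),
                ts, stopWords);   // position increments enabled for 2.9+

            var term = ts.AddAttribute<ITermAttribute>();
            var posIncr = ts.AddAttribute<IPositionIncrementAttribute>();
            while (ts.IncrementToken())
                Console.WriteLine("{0} (+{1})", term.Term, posIncr.PositionIncrement);
            // Output: fox (+2), jumps (+1), dog (+3) -- the increments account
            // for the removed stop words, preserving original token positions.
        }
    }
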
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/TeeSinkTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/TeeSinkTokenFilter.cs b/src/core/Analysis/TeeSinkTokenFilter.cs
index bec605e..6eb217f 100644
--- a/src/core/Analysis/TeeSinkTokenFilter.cs
+++ b/src/core/Analysis/TeeSinkTokenFilter.cs
@@ -22,245 +22,245 @@ using AttributeSource = Lucene.Net.Util.AttributeSource;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> This TokenFilter provides the ability to set aside attribute states
-	/// that have already been analyzed.  This is useful in situations where multiple fields share
-	/// many common analysis steps and then go their separate ways.
-	/// <p/>
-	/// It is also useful for doing things like entity extraction or proper noun analysis as
-	/// part of the analysis workflow and saving off those tokens for use in another field.
-	/// 
-	/// <code>
-	/// TeeSinkTokenFilter source1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(reader1));
-	/// TeeSinkTokenFilter.SinkTokenStream sink1 = source1.newSinkTokenStream();
-	/// TeeSinkTokenFilter.SinkTokenStream sink2 = source1.newSinkTokenStream();
-	/// TeeSinkTokenFilter source2 = new TeeSinkTokenFilter(new WhitespaceTokenizer(reader2));
-	/// source2.addSinkTokenStream(sink1);
-	/// source2.addSinkTokenStream(sink2);
-	/// TokenStream final1 = new LowerCaseFilter(source1);
-	/// TokenStream final2 = source2;
-	/// TokenStream final3 = new EntityDetect(sink1);
-	/// TokenStream final4 = new URLDetect(sink2);
-	/// d.add(new Field("f1", final1));
-	/// d.add(new Field("f2", final2));
-	/// d.add(new Field("f3", final3));
-	/// d.add(new Field("f4", final4));
-	/// </code>
-	/// In this example, <c>sink1</c> and <c>sink2</c> will both get tokens from both
-	/// <c>reader1</c> and <c>reader2</c> after whitespace tokenizer
-	/// and now we can further wrap any of these in extra analysis, and more "sources" can be inserted if desired.
-	/// It is important, that tees are consumed before sinks (in the above example, the field names must be
-	/// less the sink's field names). If you are not sure, which stream is consumed first, you can simply
-	/// add another sink and then pass all tokens to the sinks at once using <see cref="ConsumeAllTokens" />.
-	/// This TokenFilter is exhausted after this. In the above example, change
-	/// the example above to:
-	/// <code>
-	/// ...
-	/// TokenStream final1 = new LowerCaseFilter(source1.newSinkTokenStream());
-	/// TokenStream final2 = source2.newSinkTokenStream();
-	/// sink1.consumeAllTokens();
-	/// sink2.consumeAllTokens();
-	/// ...
-	/// </code>
-	/// In this case, the fields can be added in any order, because the sources are not used anymore and all sinks are ready.
-	/// <p/>Note, the EntityDetect and URLDetect TokenStreams are for the example and do not currently exist in Lucene.
-	/// </summary>
-	public sealed class TeeSinkTokenFilter:TokenFilter
-	{
-		public class AnonymousClassSinkFilter:SinkFilter
-		{
-			public override bool Accept(AttributeSource source)
-			{
-				return true;
-			}
-		}
-		private readonly LinkedList<WeakReference> sinks = new LinkedList<WeakReference>();
-		
-		/// <summary> Instantiates a new TeeSinkTokenFilter.</summary>
-		public TeeSinkTokenFilter(TokenStream input):base(input)
-		{
-		}
-		
-		/// <summary> Returns a new <see cref="SinkTokenStream" /> that receives all tokens consumed by this stream.</summary>
-		public SinkTokenStream NewSinkTokenStream()
-		{
-			return NewSinkTokenStream(ACCEPT_ALL_FILTER);
-		}
-		
-		/// <summary> Returns a new <see cref="SinkTokenStream" /> that receives all tokens consumed by this stream
-		/// that pass the supplied filter.
-		/// </summary>
-		/// <seealso cref="SinkFilter">
-		/// </seealso>
-		public SinkTokenStream NewSinkTokenStream(SinkFilter filter)
-		{
-			var sink = new SinkTokenStream(this.CloneAttributes(), filter);
-			sinks.AddLast(new WeakReference(sink));
-			return sink;
-		}
-		
-		/// <summary> Adds a <see cref="SinkTokenStream" /> created by another <c>TeeSinkTokenFilter</c>
-		/// to this one. The supplied stream will also receive all consumed tokens.
-		/// This method can be used to pass tokens from two different tees to one sink.
-		/// </summary>
-		public void  AddSinkTokenStream(SinkTokenStream sink)
-		{
-			// check that sink has correct factory
-			if (!this.Factory.Equals(sink.Factory))
-			{
-				throw new System.ArgumentException("The supplied sink is not compatible to this tee");
-			}
-			// add eventually missing attribute impls to the existing sink
+    
+    /// <summary> This TokenFilter provides the ability to set aside attribute states
+    /// that have already been analyzed.  This is useful in situations where multiple fields share
+    /// many common analysis steps and then go their separate ways.
+    /// <p/>
+    /// It is also useful for doing things like entity extraction or proper noun analysis as
+    /// part of the analysis workflow and saving off those tokens for use in another field.
+    /// 
+    /// <code>
+    /// TeeSinkTokenFilter source1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(reader1));
+    /// TeeSinkTokenFilter.SinkTokenStream sink1 = source1.newSinkTokenStream();
+    /// TeeSinkTokenFilter.SinkTokenStream sink2 = source1.newSinkTokenStream();
+    /// TeeSinkTokenFilter source2 = new TeeSinkTokenFilter(new WhitespaceTokenizer(reader2));
+    /// source2.addSinkTokenStream(sink1);
+    /// source2.addSinkTokenStream(sink2);
+    /// TokenStream final1 = new LowerCaseFilter(source1);
+    /// TokenStream final2 = source2;
+    /// TokenStream final3 = new EntityDetect(sink1);
+    /// TokenStream final4 = new URLDetect(sink2);
+    /// d.add(new Field("f1", final1));
+    /// d.add(new Field("f2", final2));
+    /// d.add(new Field("f3", final3));
+    /// d.add(new Field("f4", final4));
+    /// </code>
+    /// In this example, <c>sink1</c> and <c>sink2</c> will both get tokens from both
+    /// <c>reader1</c> and <c>reader2</c> after the whitespace tokenizer has run,
+    /// and we can further wrap any of these in extra analysis; more "sources" can be inserted if desired.
+    /// It is important that tees are consumed before sinks (in the above example, the tee field names must
+    /// sort before the sink field names). If you are not sure which stream is consumed first, you can simply
+    /// add another sink and then pass all tokens to the sinks at once using <see cref="ConsumeAllTokens" />.
+    /// This TokenFilter is exhausted after that. To do so, change
+    /// the example above to:
+    /// <code>
+    /// ...
+    /// TokenStream final1 = new LowerCaseFilter(source1.newSinkTokenStream());
+    /// TokenStream final2 = source2.newSinkTokenStream();
+    /// sink1.consumeAllTokens();
+    /// sink2.consumeAllTokens();
+    /// ...
+    /// </code>
+    /// In this case, the fields can be added in any order, because the sources are no longer used and all sinks are ready.
+    /// <p/>Note, the EntityDetect and URLDetect TokenStreams are for the example and do not currently exist in Lucene.
+    /// </summary>
+    public sealed class TeeSinkTokenFilter:TokenFilter
+    {
+        public class AnonymousClassSinkFilter:SinkFilter
+        {
+            public override bool Accept(AttributeSource source)
+            {
+                return true;
+            }
+        }
+        private readonly LinkedList<WeakReference> sinks = new LinkedList<WeakReference>();
+        
+        /// <summary> Instantiates a new TeeSinkTokenFilter.</summary>
+        public TeeSinkTokenFilter(TokenStream input):base(input)
+        {
+        }
+        
+        /// <summary> Returns a new <see cref="SinkTokenStream" /> that receives all tokens consumed by this stream.</summary>
+        public SinkTokenStream NewSinkTokenStream()
+        {
+            return NewSinkTokenStream(ACCEPT_ALL_FILTER);
+        }
+        
+        /// <summary> Returns a new <see cref="SinkTokenStream" /> that receives all tokens consumed by this stream
+        /// that pass the supplied filter.
+        /// </summary>
+        /// <seealso cref="SinkFilter">
+        /// </seealso>
+        public SinkTokenStream NewSinkTokenStream(SinkFilter filter)
+        {
+            var sink = new SinkTokenStream(this.CloneAttributes(), filter);
+            sinks.AddLast(new WeakReference(sink));
+            return sink;
+        }
+        
+        /// <summary> Adds a <see cref="SinkTokenStream" /> created by another <c>TeeSinkTokenFilter</c>
+        /// to this one. The supplied stream will also receive all consumed tokens.
+        /// This method can be used to pass tokens from two different tees to one sink.
+        /// </summary>
+        public void  AddSinkTokenStream(SinkTokenStream sink)
+        {
+            // check that sink has correct factory
+            if (!this.Factory.Equals(sink.Factory))
+            {
+                throw new System.ArgumentException("The supplied sink is not compatible with this tee");
+            }
+            // add any missing attribute impls to the existing sink
             foreach (var impl in this.CloneAttributes().GetAttributeImplsIterator())
             {
                 sink.AddAttributeImpl(impl);
             }
-			sinks.AddLast(new WeakReference(sink));
-		}
-		
-		/// <summary> <c>TeeSinkTokenFilter</c> passes all tokens to the added sinks
-		/// when itself is consumed. To be sure, that all tokens from the input
-		/// stream are passed to the sinks, you can call this methods.
-		/// This instance is exhausted after this, but all sinks are instant available.
-		/// </summary>
-		public void  ConsumeAllTokens()
-		{
+            sinks.AddLast(new WeakReference(sink));
+        }
+        
+        /// <summary> <c>TeeSinkTokenFilter</c> passes all tokens to the added sinks
+        /// when it is itself consumed. To be sure that all tokens from the input
+        /// stream are passed to the sinks, you can call this method.
+        /// This instance is exhausted afterwards, but all sinks are immediately available.
+        /// </summary>
+        public void  ConsumeAllTokens()
+        {
             while (IncrementToken())
             {
             }
-		}
-		
-		public override bool IncrementToken()
-		{
-			if (input.IncrementToken())
-			{
-				// capture state lazily - maybe no SinkFilter accepts this state
-				State state = null;
-				foreach(WeakReference wr in sinks)
-				{
-				    var sink = (SinkTokenStream)wr.Target;
-					if (sink != null)
-					{
-						if (sink.Accept(this))
-						{
-							if (state == null)
-							{
-								state = this.CaptureState();
-							}
-							sink.AddState(state);
-						}
-					}
-				}
-				return true;
-			}
-			
-			return false;
-		}
-		
-		public override void  End()
-		{
-			base.End();
-			State finalState = CaptureState();
-			foreach(WeakReference wr in sinks)
-			{
+        }
+        
+        public override bool IncrementToken()
+        {
+            if (input.IncrementToken())
+            {
+                // capture state lazily - maybe no SinkFilter accepts this state
+                State state = null;
+                foreach(WeakReference wr in sinks)
+                {
+                    var sink = (SinkTokenStream)wr.Target;
+                    if (sink != null)
+                    {
+                        if (sink.Accept(this))
+                        {
+                            if (state == null)
+                            {
+                                state = this.CaptureState();
+                            }
+                            sink.AddState(state);
+                        }
+                    }
+                }
+                return true;
+            }
+            
+            return false;
+        }
+        
+        public override void  End()
+        {
+            base.End();
+            State finalState = CaptureState();
+            foreach(WeakReference wr in sinks)
+            {
                 var sink = (SinkTokenStream)wr.Target;
-				if (sink != null)
-				{
-					sink.SetFinalState(finalState);
-				}
-			}
-		}
-		
-		/// <summary> A filter that decides which <see cref="AttributeSource" /> states to store in the sink.</summary>
-		public abstract class SinkFilter
-		{
-			/// <summary> Returns true, iff the current state of the passed-in <see cref="AttributeSource" /> shall be stored
-			/// in the sink. 
-			/// </summary>
-			public abstract bool Accept(AttributeSource source);
-			
-			/// <summary> Called by <see cref="SinkTokenStream.Reset()" />. This method does nothing by default
-			/// and can optionally be overridden.
-			/// </summary>
-			public virtual void Reset()
-			{
-				// nothing to do; can be overridden
-			}
-		}
-		
-		public sealed class SinkTokenStream : TokenStream
-		{
+                if (sink != null)
+                {
+                    sink.SetFinalState(finalState);
+                }
+            }
+        }
+        
+        /// <summary> A filter that decides which <see cref="AttributeSource" /> states to store in the sink.</summary>
+        public abstract class SinkFilter
+        {
+            /// <summary> Returns true iff the current state of the passed-in <see cref="AttributeSource" /> should be stored
+            /// in the sink. 
+            /// </summary>
+            public abstract bool Accept(AttributeSource source);
+            
+            /// <summary> Called by <see cref="SinkTokenStream.Reset()" />. This method does nothing by default
+            /// and can optionally be overridden.
+            /// </summary>
+            public virtual void Reset()
+            {
+                // nothing to do; can be overridden
+            }
+        }
+        
+        public sealed class SinkTokenStream : TokenStream
+        {
             private readonly LinkedList<State> cachedStates = new LinkedList<State>();
-			private State finalState;
-			private IEnumerator<AttributeSource.State> it = null;
-			private readonly SinkFilter filter;
+            private State finalState;
+            private IEnumerator<AttributeSource.State> it = null;
+            private readonly SinkFilter filter;
 
-			internal SinkTokenStream(AttributeSource source, SinkFilter filter)
+            internal SinkTokenStream(AttributeSource source, SinkFilter filter)
                 : base(source)
-			{
-				this.filter = filter;
-			}
-			
-			internal /*private*/ bool Accept(AttributeSource source)
-			{
-				return filter.Accept(source);
-			}
-			
-			internal /*private*/ void  AddState(AttributeSource.State state)
-			{
-				if (it != null)
-				{
-					throw new System.SystemException("The tee must be consumed before sinks are consumed.");
-				}
-				cachedStates.AddLast(state);
-			}
-			
-			internal /*private*/ void  SetFinalState(AttributeSource.State finalState)
-			{
-				this.finalState = finalState;
-			}
-			
-			public override bool IncrementToken()
-			{
-				// lazy init the iterator
-				if (it == null)
-				{
-					it = cachedStates.GetEnumerator();
-				}
-				
-				if (!it.MoveNext())
-				{
-					return false;
-				}
-				
-				State state = it.Current;
-				RestoreState(state);
-				return true;
-			}
-			
-			public override void  End()
-			{
-				if (finalState != null)
-				{
-					RestoreState(finalState);
-				}
-			}
-			
-			public override void  Reset()
-			{
-				it = cachedStates.GetEnumerator();
-			}
+            {
+                this.filter = filter;
+            }
+            
+            internal /*private*/ bool Accept(AttributeSource source)
+            {
+                return filter.Accept(source);
+            }
+            
+            internal /*private*/ void  AddState(AttributeSource.State state)
+            {
+                if (it != null)
+                {
+                    throw new System.SystemException("The tee must be consumed before sinks are consumed.");
+                }
+                cachedStates.AddLast(state);
+            }
+            
+            internal /*private*/ void  SetFinalState(AttributeSource.State finalState)
+            {
+                this.finalState = finalState;
+            }
+            
+            public override bool IncrementToken()
+            {
+                // lazy init the iterator
+                if (it == null)
+                {
+                    it = cachedStates.GetEnumerator();
+                }
+                
+                if (!it.MoveNext())
+                {
+                    return false;
+                }
+                
+                State state = it.Current;
+                RestoreState(state);
+                return true;
+            }
+            
+            public override void  End()
+            {
+                if (finalState != null)
+                {
+                    RestoreState(finalState);
+                }
+            }
+            
+            public override void  Reset()
+            {
+                it = cachedStates.GetEnumerator();
+            }
 
-		    protected override void Dispose(bool disposing)
-		    {
-		        // Do nothing.
-		    }
-		}
-		
-		private static readonly SinkFilter ACCEPT_ALL_FILTER;
-		static TeeSinkTokenFilter()
-		{
-			ACCEPT_ALL_FILTER = new AnonymousClassSinkFilter();
-		}
-	}
+            protected override void Dispose(bool disposing)
+            {
+                // Do nothing.
+            }
+        }
+        
+        private static readonly SinkFilter ACCEPT_ALL_FILTER;
+        static TeeSinkTokenFilter()
+        {
+            ACCEPT_ALL_FILTER = new AnonymousClassSinkFilter();
+        }
+    }
 }
\ No newline at end of file


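The class-level example above keeps the Java-style method names; against the C#
members in this diff it reads as follows -- a minimal sketch, with reader1/reader2
and the input text as illustrative stand-ins:

    using System.IO;
    using Lucene.Net.Analysis;

    class TeeSinkDemo
    {
        static void Main()
        {
            TextReader reader1 = new StringReader("first field text");
            TextReader reader2 = new StringReader("second field text");

            var source1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(reader1));
            TeeSinkTokenFilter.SinkTokenStream sink1 = source1.NewSinkTokenStream();
            TeeSinkTokenFilter.SinkTokenStream sink2 = source1.NewSinkTokenStream();

            var source2 = new TeeSinkTokenFilter(new WhitespaceTokenizer(reader2));
            source2.AddSinkTokenStream(sink1);  // sink1/sink2 now receive tokens from both tees
            source2.AddSinkTokenStream(sink2);

            // Exhaust the tees first so the sinks may be consumed in any order.
            source1.ConsumeAllTokens();
            source2.ConsumeAllTokens();

            // Each sink now replays the captured states, e.g. wrapped in further filters.
            while (sink1.IncrementToken()) { /* use sink1's attributes here */ }
        }
    }
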
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/CheckIndex.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/CheckIndex.cs b/src/core/Index/CheckIndex.cs
index 8917903..be3dc8e 100644
--- a/src/core/Index/CheckIndex.cs
+++ b/src/core/Index/CheckIndex.cs
@@ -27,991 +27,991 @@ using IndexInput = Lucene.Net.Store.IndexInput;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> Basic tool and API to check the health of an index and
-	/// write a new segments file that removes reference to
-	/// problematic segments.
-	/// 
-	/// <p/>As this tool checks every byte in the index, on a large
-	/// index it can take quite a long time to run.
-	/// 
-	/// <p/><b>WARNING</b>: this tool and API is new and
-	/// experimental and is subject to suddenly change in the
-	/// next release.  Please make a complete backup of your
-	/// index before using this to fix your index!
-	/// </summary>
-	public class CheckIndex
-	{
-		private StreamWriter infoStream;
-		private readonly Directory dir;
-		
-		/// <summary> Returned from <see cref="CheckIndex_Renamed_Method()" /> detailing the health and status of the index.
-		/// 
-		/// <p/><b>WARNING</b>: this API is new and experimental and is
-		/// subject to suddenly change in the next release.
-		/// 
-		/// </summary>
-		
-		public class Status
-		{
-			
-			/// <summary>True if no problems were found with the index. </summary>
-			public bool clean;
-			
-			/// <summary>True if we were unable to locate and load the segments_N file. </summary>
-			public bool missingSegments;
-			
-			/// <summary>True if we were unable to open the segments_N file. </summary>
-			public bool cantOpenSegments;
-			
-			/// <summary>True if we were unable to read the version number from segments_N file. </summary>
-			public bool missingSegmentVersion;
-			
-			/// <summary>Name of latest segments_N file in the index. </summary>
-			public System.String segmentsFileName;
-			
-			/// <summary>Number of segments in the index. </summary>
-			public int numSegments;
-			
-			/// <summary>String description of the version of the index. </summary>
-			public System.String segmentFormat;
+    
+    /// <summary> Basic tool and API to check the health of an index and
+    /// write a new segments file that removes references to
+    /// problematic segments.
+    /// 
+    /// <p/>As this tool checks every byte in the index, on a large
+    /// index it can take quite a long time to run.
+    /// 
+    /// <p/><b>WARNING</b>: this tool and API are new and
+    /// experimental and are subject to sudden change in the
+    /// next release.  Please make a complete backup of your
+    /// index before using this to fix your index!
+    /// </summary>
+    public class CheckIndex
+    {
+        private StreamWriter infoStream;
+        private readonly Directory dir;
+        
+        /// <summary> Returned from <see cref="CheckIndex_Renamed_Method()" /> detailing the health and status of the index.
+        /// 
+        /// <p/><b>WARNING</b>: this API is new and experimental and is
+        /// subject to sudden change in the next release.
+        /// 
+        /// </summary>
+        
+        public class Status
+        {
+            
+            /// <summary>True if no problems were found with the index. </summary>
+            public bool clean;
+            
+            /// <summary>True if we were unable to locate and load the segments_N file. </summary>
+            public bool missingSegments;
+            
+            /// <summary>True if we were unable to open the segments_N file. </summary>
+            public bool cantOpenSegments;
+            
+            /// <summary>True if we were unable to read the version number from segments_N file. </summary>
+            public bool missingSegmentVersion;
+            
+            /// <summary>Name of latest segments_N file in the index. </summary>
+            public System.String segmentsFileName;
+            
+            /// <summary>Number of segments in the index. </summary>
+            public int numSegments;
+            
+            /// <summary>String description of the version of the index. </summary>
+            public System.String segmentFormat;
 
-			/// <summary>Empty unless you passed specific segments list to check as optional 3rd argument.</summary>
-			/// <seealso>
-			///   <cref>CheckIndex.CheckIndex_Renamed_Method(System.Collections.IList)</cref>
-			/// </seealso>
-			public List<string> segmentsChecked = new List<string>();
-			
-			/// <summary>True if the index was created with a newer version of Lucene than the CheckIndex tool. </summary>
-			public bool toolOutOfDate;
-			
-			/// <summary>List of <see cref="SegmentInfoStatus" /> instances, detailing status of each segment. </summary>
-			public IList<SegmentInfoStatus> segmentInfos = new List<SegmentInfoStatus>();
-			
-			/// <summary>Directory index is in. </summary>
-			public Directory dir;
-			
-			/// <summary> SegmentInfos instance containing only segments that
-			/// had no problems (this is used with the <see cref="CheckIndex.FixIndex" /> 
-			/// method to repair the index. 
-			/// </summary>
-			internal SegmentInfos newSegments;
-			
-			/// <summary>How many documents will be lost to bad segments. </summary>
-			public int totLoseDocCount;
-			
-			/// <summary>How many bad segments were found. </summary>
-			public int numBadSegments;
-			
-			/// <summary>True if we checked only specific segments (<see cref="CheckIndex.CheckIndex_Renamed_Method(List{string})" />)
-			/// was called with non-null
-			/// argument). 
-			/// </summary>
-			public bool partial;
-			
-			/// <summary>Holds the userData of the last commit in the index </summary>
+            /// <summary>Empty unless you passed a specific list of segments to check as the optional third argument.</summary>
+            /// <seealso>
+            ///   <cref>CheckIndex.CheckIndex_Renamed_Method(System.Collections.IList)</cref>
+            /// </seealso>
+            public List<string> segmentsChecked = new List<string>();
+            
+            /// <summary>True if the index was created with a newer version of Lucene than the CheckIndex tool. </summary>
+            public bool toolOutOfDate;
+            
+            /// <summary>List of <see cref="SegmentInfoStatus" /> instances, detailing status of each segment. </summary>
+            public IList<SegmentInfoStatus> segmentInfos = new List<SegmentInfoStatus>();
+            
+            /// <summary>Directory index is in. </summary>
+            public Directory dir;
+            
+            /// <summary> SegmentInfos instance containing only segments that
+            /// had no problems (this is used with the <see cref="CheckIndex.FixIndex" /> 
+            /// method to repair the index. 
+            /// </summary>
+            internal SegmentInfos newSegments;
+            
+            /// <summary>How many documents will be lost to bad segments. </summary>
+            public int totLoseDocCount;
+            
+            /// <summary>How many bad segments were found. </summary>
+            public int numBadSegments;
+            
+            /// <summary>True if we checked only specific segments (<see cref="CheckIndex.CheckIndex_Renamed_Method(List{string})" />
+            /// was called with a non-null
+            /// argument). 
+            /// </summary>
+            public bool partial;
+            
+            /// <summary>Holds the userData of the last commit in the index </summary>
             public IDictionary<string, string> userData;
-			
-			/// <summary>Holds the status of each segment in the index.
-			/// See <see cref="SegmentInfos" />.
-			/// 
-			/// <p/><b>WARNING</b>: this API is new and experimental and is
-			/// subject to suddenly change in the next release.
-			/// </summary>
-			public class SegmentInfoStatus
-			{
-				/// <summary>Name of the segment. </summary>
-				public System.String name;
-				
-				/// <summary>Document count (does not take deletions into account). </summary>
-				public int docCount;
-				
-				/// <summary>True if segment is compound file format. </summary>
-				public bool compound;
-				
-				/// <summary>Number of files referenced by this segment. </summary>
-				public int numFiles;
-				
-				/// <summary>Net size (MB) of the files referenced by this
-				/// segment. 
-				/// </summary>
-				public double sizeMB;
-				
-				/// <summary>Doc store offset, if this segment shares the doc
-				/// store files (stored fields and term vectors) with
-				/// other segments.  This is -1 if it does not share. 
-				/// </summary>
-				public int docStoreOffset = - 1;
-				
-				/// <summary>String of the shared doc store segment, or null if
-				/// this segment does not share the doc store files. 
-				/// </summary>
-				public System.String docStoreSegment;
-				
-				/// <summary>True if the shared doc store files are compound file
-				/// format. 
-				/// </summary>
-				public bool docStoreCompoundFile;
-				
-				/// <summary>True if this segment has pending deletions. </summary>
-				public bool hasDeletions;
-				
-				/// <summary>Name of the current deletions file name. </summary>
-				public System.String deletionsFileName;
-				
-				/// <summary>Number of deleted documents. </summary>
-				public int numDeleted;
-				
-				/// <summary>True if we were able to open a SegmentReader on this
-				/// segment. 
-				/// </summary>
-				public bool openReaderPassed;
-				
-				/// <summary>Number of fields in this segment. </summary>
-				internal int numFields;
-				
-				/// <summary>True if at least one of the fields in this segment
-				/// does not omitTermFreqAndPositions.
-				/// </summary>
-				/// <seealso cref="AbstractField.OmitTermFreqAndPositions">
-				/// </seealso>
-				public bool hasProx;
+            
+            /// <summary>Holds the status of each segment in the index.
+            /// See <see cref="SegmentInfos" />.
+            /// 
+            /// <p/><b>WARNING</b>: this API is new and experimental and is
+            /// subject to sudden change in the next release.
+            /// </summary>
+            public class SegmentInfoStatus
+            {
+                /// <summary>Name of the segment. </summary>
+                public System.String name;
+                
+                /// <summary>Document count (does not take deletions into account). </summary>
+                public int docCount;
+                
+                /// <summary>True if segment is compound file format. </summary>
+                public bool compound;
+                
+                /// <summary>Number of files referenced by this segment. </summary>
+                public int numFiles;
+                
+                /// <summary>Net size (MB) of the files referenced by this
+                /// segment. 
+                /// </summary>
+                public double sizeMB;
+                
+                /// <summary>Doc store offset, if this segment shares the doc
+                /// store files (stored fields and term vectors) with
+                /// other segments.  This is -1 if it does not share. 
+                /// </summary>
+                public int docStoreOffset = - 1;
+                
+                /// <summary>String of the shared doc store segment, or null if
+                /// this segment does not share the doc store files. 
+                /// </summary>
+                public System.String docStoreSegment;
+                
+                /// <summary>True if the shared doc store files are compound file
+                /// format. 
+                /// </summary>
+                public bool docStoreCompoundFile;
+                
+                /// <summary>True if this segment has pending deletions. </summary>
+                public bool hasDeletions;
+                
+                /// <summary>Name of the current deletions file name. </summary>
+                public System.String deletionsFileName;
+                
+                /// <summary>Number of deleted documents. </summary>
+                public int numDeleted;
+                
+                /// <summary>True if we were able to open a SegmentReader on this
+                /// segment. 
+                /// </summary>
+                public bool openReaderPassed;
+                
+                /// <summary>Number of fields in this segment. </summary>
+                internal int numFields;
+                
+                /// <summary>True if at least one of the fields in this segment
+                /// does not omitTermFreqAndPositions.
+                /// </summary>
+                /// <seealso cref="AbstractField.OmitTermFreqAndPositions">
+                /// </seealso>
+                public bool hasProx;
 
                 /// <summary>Map&lt;String, String&gt; that includes certain
-				/// debugging details that IndexWriter records into
-				/// each segment it creates 
-				/// </summary>
+                /// debugging details that IndexWriter records into
+                /// each segment it creates 
+                /// </summary>
                 public IDictionary<string, string> diagnostics;
-				
-				/// <summary>Status for testing of field norms (null if field norms could not be tested). </summary>
-				public FieldNormStatus fieldNormStatus;
-				
-				/// <summary>Status for testing of indexed terms (null if indexed terms could not be tested). </summary>
-				public TermIndexStatus termIndexStatus;
-				
-				/// <summary>Status for testing of stored fields (null if stored fields could not be tested). </summary>
-				public StoredFieldStatus storedFieldStatus;
-				
-				/// <summary>Status for testing of term vectors (null if term vectors could not be tested). </summary>
-				public TermVectorStatus termVectorStatus;
-			}
-			
-			/// <summary> Status from testing field norms.</summary>
-			public sealed class FieldNormStatus
-			{
-				/// <summary>Number of fields successfully tested </summary>
-				public long totFields = 0L;
-				
-				/// <summary>Exception thrown during term index test (null on success) </summary>
-				public System.Exception error = null;
-			}
-			
-			/// <summary> Status from testing term index.</summary>
-			public sealed class TermIndexStatus
-			{
-				/// <summary>Total term count </summary>
-				public long termCount = 0L;
-				
-				/// <summary>Total frequency across all terms. </summary>
-				public long totFreq = 0L;
-				
-				/// <summary>Total number of positions. </summary>
-				public long totPos = 0L;
-				
-				/// <summary>Exception thrown during term index test (null on success) </summary>
-				public System.Exception error = null;
-			}
-			
-			/// <summary> Status from testing stored fields.</summary>
-			public sealed class StoredFieldStatus
-			{
-				
-				/// <summary>Number of documents tested. </summary>
-				public int docCount = 0;
-				
-				/// <summary>Total number of stored fields tested. </summary>
-				public long totFields = 0;
-				
-				/// <summary>Exception thrown during stored fields test (null on success) </summary>
-				public System.Exception error = null;
-			}
-			
-			/// <summary> Status from testing stored fields.</summary>
-			public sealed class TermVectorStatus
-			{
-				
-				/// <summary>Number of documents tested. </summary>
-				public int docCount = 0;
-				
-				/// <summary>Total number of term vectors tested. </summary>
-				public long totVectors = 0;
-				
-				/// <summary>Exception thrown during term vector test (null on success) </summary>
-				public System.Exception error = null;
-			}
-		}
-		
-		/// <summary>Create a new CheckIndex on the directory. </summary>
-		public CheckIndex(Directory dir)
-		{
-			this.dir = dir;
-			infoStream = null;
-		}
-		
-		/// <summary>Set infoStream where messages should go.  If null, no
-		/// messages are printed 
-		/// </summary>
-		public virtual void  SetInfoStream(StreamWriter @out)
-		{
-			infoStream = @out;
-		}
-		
-		private void  Msg(System.String msg)
-		{
-			if (infoStream != null)
-				infoStream.WriteLine(msg);
-		}
-		
-		private class MySegmentTermDocs:SegmentTermDocs
-		{
-			
-			internal int delCount;
-			
-			internal MySegmentTermDocs(SegmentReader p):base(p)
-			{
-			}
-			
-			public override void  Seek(Term term)
-			{
-				base.Seek(term);
-				delCount = 0;
-			}
-			
-			protected internal override void  SkippingDoc()
-			{
-				delCount++;
-			}
-		}
-		
-		/// <summary>Returns a <see cref="Status" /> instance detailing
-		/// the state of the index.
-		/// 
-		/// <p/>As this method checks every byte in the index, on a large
-		/// index it can take quite a long time to run.
-		/// 
-		/// <p/><b>WARNING</b>: make sure
-		/// you only call this when the index is not opened by any
-		/// writer. 
-		/// </summary>
-		public virtual Status CheckIndex_Renamed_Method()
-		{
-			return CheckIndex_Renamed_Method(null);
-		}
-		
-		/// <summary>Returns a <see cref="Status" /> instance detailing
-		/// the state of the index.
-		/// 
-		/// </summary>
-		/// <param name="onlySegments">list of specific segment names to check
-		/// 
-		/// <p/>As this method checks every byte in the specified
-		/// segments, on a large index it can take quite a long
-		/// time to run.
-		/// 
-		/// <p/><b>WARNING</b>: make sure
-		/// you only call this when the index is not opened by any
-		/// writer. 
-		/// </param>
-		public virtual Status CheckIndex_Renamed_Method(List<string> onlySegments)
-		{
+                
+                /// <summary>Status for testing of field norms (null if field norms could not be tested). </summary>
+                public FieldNormStatus fieldNormStatus;
+                
+                /// <summary>Status for testing of indexed terms (null if indexed terms could not be tested). </summary>
+                public TermIndexStatus termIndexStatus;
+                
+                /// <summary>Status for testing of stored fields (null if stored fields could not be tested). </summary>
+                public StoredFieldStatus storedFieldStatus;
+                
+                /// <summary>Status for testing of term vectors (null if term vectors could not be tested). </summary>
+                public TermVectorStatus termVectorStatus;
+            }
+            
+            /// <summary> Status from testing field norms.</summary>
+            public sealed class FieldNormStatus
+            {
+                /// <summary>Number of fields successfully tested </summary>
+                public long totFields = 0L;
+                
+                /// <summary>Exception thrown during field norm test (null on success) </summary>
+                public System.Exception error = null;
+            }
+            
+            /// <summary> Status from testing term index.</summary>
+            public sealed class TermIndexStatus
+            {
+                /// <summary>Total term count </summary>
+                public long termCount = 0L;
+                
+                /// <summary>Total frequency across all terms. </summary>
+                public long totFreq = 0L;
+                
+                /// <summary>Total number of positions. </summary>
+                public long totPos = 0L;
+                
+                /// <summary>Exception thrown during term index test (null on success) </summary>
+                public System.Exception error = null;
+            }
+            
+            /// <summary> Status from testing stored fields.</summary>
+            public sealed class StoredFieldStatus
+            {
+                
+                /// <summary>Number of documents tested. </summary>
+                public int docCount = 0;
+                
+                /// <summary>Total number of stored fields tested. </summary>
+                public long totFields = 0;
+                
+                /// <summary>Exception thrown during stored fields test (null on success) </summary>
+                public System.Exception error = null;
+            }
+            
+            /// <summary> Status from testing term vectors.</summary>
+            public sealed class TermVectorStatus
+            {
+                
+                /// <summary>Number of documents tested. </summary>
+                public int docCount = 0;
+                
+                /// <summary>Total number of term vectors tested. </summary>
+                public long totVectors = 0;
+                
+                /// <summary>Exception thrown during term vector test (null on success) </summary>
+                public System.Exception error = null;
+            }
+        }
+        
+        /// <summary>Create a new CheckIndex on the directory. </summary>
+        public CheckIndex(Directory dir)
+        {
+            this.dir = dir;
+            infoStream = null;
+        }
+        
+        /// <summary>Set infoStream where messages should go.  If null, no
+        /// messages are printed. 
+        /// </summary>
+        public virtual void  SetInfoStream(StreamWriter @out)
+        {
+            infoStream = @out;
+        }
+        
+        private void  Msg(System.String msg)
+        {
+            if (infoStream != null)
+                infoStream.WriteLine(msg);
+        }
+        
+        private class MySegmentTermDocs:SegmentTermDocs
+        {
+            
+            internal int delCount;
+            
+            internal MySegmentTermDocs(SegmentReader p):base(p)
+            {
+            }
+            
+            public override void  Seek(Term term)
+            {
+                base.Seek(term);
+                delCount = 0;
+            }
+            
+            protected internal override void  SkippingDoc()
+            {
+                delCount++;
+            }
+        }
+        
+        /// <summary>Returns a <see cref="Status" /> instance detailing
+        /// the state of the index.
+        /// 
+        /// <p/>As this method checks every byte in the index, on a large
+        /// index it can take quite a long time to run.
+        /// 
+        /// <p/><b>WARNING</b>: make sure
+        /// you only call this when the index is not opened by any
+        /// writer. 
+        /// </summary>
+        public virtual Status CheckIndex_Renamed_Method()
+        {
+            return CheckIndex_Renamed_Method(null);
+        }
+        
+        /// <summary>Returns a <see cref="Status" /> instance detailing
+        /// the state of the index.
+        /// 
+        /// </summary>
+        /// <param name="onlySegments">list of specific segment names to check
+        /// 
+        /// <p/>As this method checks every byte in the specified
+        /// segments, on a large index it can take quite a long
+        /// time to run.
+        /// 
+        /// <p/><b>WARNING</b>: make sure
+        /// you only call this when the index is not opened by any
+        /// writer. 
+        /// </param>
+        public virtual Status CheckIndex_Renamed_Method(List<string> onlySegments)
+        {
             System.Globalization.NumberFormatInfo nf = System.Globalization.CultureInfo.CurrentCulture.NumberFormat;
-			SegmentInfos sis = new SegmentInfos();
-			Status result = new Status();
-			result.dir = dir;
-			try
-			{
-				sis.Read(dir);
-			}
-			catch (System.Exception t)
-			{
-				Msg("ERROR: could not read any segments file in directory");
-				result.missingSegments = true;
-				if (infoStream != null)
-					infoStream.WriteLine(t.StackTrace);
-				return result;
-			}
-			
-			int numSegments = sis.Count;
-			var segmentsFileName = sis.GetCurrentSegmentFileName();
-			IndexInput input = null;
-			try
-			{
-				input = dir.OpenInput(segmentsFileName);
-			}
-			catch (System.Exception t)
-			{
-				Msg("ERROR: could not open segments file in directory");
-				if (infoStream != null)
-					infoStream.WriteLine(t.StackTrace);
-				result.cantOpenSegments = true;
-				return result;
-			}
-			int format = 0;
-			try
-			{
-				format = input.ReadInt();
-			}
-			catch (System.Exception t)
-			{
-				Msg("ERROR: could not read segment file version in directory");
-				if (infoStream != null)
-					infoStream.WriteLine(t.StackTrace);
-				result.missingSegmentVersion = true;
-				return result;
-			}
-			finally
-			{
-				if (input != null)
-					input.Close();
-			}
-			
-			System.String sFormat = "";
-			bool skip = false;
-			
-			if (format == SegmentInfos.FORMAT)
-				sFormat = "FORMAT [Lucene Pre-2.1]";
-			if (format == SegmentInfos.FORMAT_LOCKLESS)
-				sFormat = "FORMAT_LOCKLESS [Lucene 2.1]";
-			else if (format == SegmentInfos.FORMAT_SINGLE_NORM_FILE)
-				sFormat = "FORMAT_SINGLE_NORM_FILE [Lucene 2.2]";
-			else if (format == SegmentInfos.FORMAT_SHARED_DOC_STORE)
-				sFormat = "FORMAT_SHARED_DOC_STORE [Lucene 2.3]";
-			else
-			{
-				if (format == SegmentInfos.FORMAT_CHECKSUM)
-					sFormat = "FORMAT_CHECKSUM [Lucene 2.4]";
-				else if (format == SegmentInfos.FORMAT_DEL_COUNT)
-					sFormat = "FORMAT_DEL_COUNT [Lucene 2.4]";
-				else if (format == SegmentInfos.FORMAT_HAS_PROX)
-					sFormat = "FORMAT_HAS_PROX [Lucene 2.4]";
-				else if (format == SegmentInfos.FORMAT_USER_DATA)
-					sFormat = "FORMAT_USER_DATA [Lucene 2.9]";
-				else if (format == SegmentInfos.FORMAT_DIAGNOSTICS)
-					sFormat = "FORMAT_DIAGNOSTICS [Lucene 2.9]";
-				else if (format < SegmentInfos.CURRENT_FORMAT)
-				{
-					sFormat = "int=" + format + " [newer version of Lucene than this tool]";
-					skip = true;
-				}
-				else
-				{
-					sFormat = format + " [Lucene 1.3 or prior]";
-				}
-			}
-			
-			result.segmentsFileName = segmentsFileName;
-			result.numSegments = numSegments;
-			result.segmentFormat = sFormat;
-			result.userData = sis.UserData;
-			System.String userDataString;
-			if (sis.UserData.Count > 0)
-			{
-				userDataString = " userData=" + CollectionsHelper.CollectionToString(sis.UserData);
-			}
-			else
-			{
-				userDataString = "";
-			}
-			
-			Msg("Segments file=" + segmentsFileName + " numSegments=" + numSegments + " version=" + sFormat + userDataString);
-			
-			if (onlySegments != null)
-			{
-				result.partial = true;
-				if (infoStream != null)
-					infoStream.Write("\nChecking only these segments:");
+            SegmentInfos sis = new SegmentInfos();
+            Status result = new Status();
+            result.dir = dir;
+            try
+            {
+                sis.Read(dir);
+            }
+            catch (System.Exception t)
+            {
+                Msg("ERROR: could not read any segments file in directory");
+                result.missingSegments = true;
+                if (infoStream != null)
+                    infoStream.WriteLine(t.StackTrace);
+                return result;
+            }
+            
+            int numSegments = sis.Count;
+            var segmentsFileName = sis.GetCurrentSegmentFileName();
+            IndexInput input = null;
+            try
+            {
+                input = dir.OpenInput(segmentsFileName);
+            }
+            catch (System.Exception t)
+            {
+                Msg("ERROR: could not open segments file in directory");
+                if (infoStream != null)
+                    infoStream.WriteLine(t.StackTrace);
+                result.cantOpenSegments = true;
+                return result;
+            }
+            int format = 0;
+            try
+            {
+                format = input.ReadInt();
+            }
+            catch (System.Exception t)
+            {
+                Msg("ERROR: could not read segment file version in directory");
+                if (infoStream != null)
+                    infoStream.WriteLine(t.StackTrace);
+                result.missingSegmentVersion = true;
+                return result;
+            }
+            finally
+            {
+                if (input != null)
+                    input.Close();
+            }
+            
+            System.String sFormat = "";
+            bool skip = false;
+            
+            if (format == SegmentInfos.FORMAT)
+                sFormat = "FORMAT [Lucene Pre-2.1]";
+            else if (format == SegmentInfos.FORMAT_LOCKLESS)
+                sFormat = "FORMAT_LOCKLESS [Lucene 2.1]";
+            else if (format == SegmentInfos.FORMAT_SINGLE_NORM_FILE)
+                sFormat = "FORMAT_SINGLE_NORM_FILE [Lucene 2.2]";
+            else if (format == SegmentInfos.FORMAT_SHARED_DOC_STORE)
+                sFormat = "FORMAT_SHARED_DOC_STORE [Lucene 2.3]";
+            else
+            {
+                if (format == SegmentInfos.FORMAT_CHECKSUM)
+                    sFormat = "FORMAT_CHECKSUM [Lucene 2.4]";
+                else if (format == SegmentInfos.FORMAT_DEL_COUNT)
+                    sFormat = "FORMAT_DEL_COUNT [Lucene 2.4]";
+                else if (format == SegmentInfos.FORMAT_HAS_PROX)
+                    sFormat = "FORMAT_HAS_PROX [Lucene 2.4]";
+                else if (format == SegmentInfos.FORMAT_USER_DATA)
+                    sFormat = "FORMAT_USER_DATA [Lucene 2.9]";
+                else if (format == SegmentInfos.FORMAT_DIAGNOSTICS)
+                    sFormat = "FORMAT_DIAGNOSTICS [Lucene 2.9]";
+                else if (format < SegmentInfos.CURRENT_FORMAT)
+                {
+                    sFormat = "int=" + format + " [newer version of Lucene than this tool]";
+                    skip = true;
+                }
+                else
+                {
+                    sFormat = format + " [Lucene 1.3 or prior]";
+                }
+            }
+            
+            result.segmentsFileName = segmentsFileName;
+            result.numSegments = numSegments;
+            result.segmentFormat = sFormat;
+            result.userData = sis.UserData;
+            System.String userDataString;
+            if (sis.UserData.Count > 0)
+            {
+                userDataString = " userData=" + CollectionsHelper.CollectionToString(sis.UserData);
+            }
+            else
+            {
+                userDataString = "";
+            }
+            
+            Msg("Segments file=" + segmentsFileName + " numSegments=" + numSegments + " version=" + sFormat + userDataString);
+            
+            if (onlySegments != null)
+            {
+                result.partial = true;
+                if (infoStream != null)
+                    infoStream.Write("\nChecking only these segments:");
                 foreach(string s in onlySegments)
-				{
-					if (infoStream != null)
-					{
-						infoStream.Write(" " + s);
-					}
-				}
+                {
+                    if (infoStream != null)
+                    {
+                        infoStream.Write(" " + s);
+                    }
+                }
                 result.segmentsChecked.AddRange(onlySegments);
                 Msg(":");
-			}
-			
-			if (skip)
-			{
-				Msg("\nERROR: this index appears to be created by a newer version of Lucene than this tool was compiled on; please re-compile this tool on the matching version of Lucene; exiting");
-				result.toolOutOfDate = true;
-				return result;
-			}
-			
-			
-			result.newSegments = (SegmentInfos) sis.Clone();
-			result.newSegments.Clear();
-			
-			for (int i = 0; i < numSegments; i++)
-			{
-				SegmentInfo info = sis.Info(i);
-				if (onlySegments != null && !onlySegments.Contains(info.name))
-					continue;
-				var segInfoStat = new Status.SegmentInfoStatus();
-				result.segmentInfos.Add(segInfoStat);
-				Msg("  " + (1 + i) + " of " + numSegments + ": name=" + info.name + " docCount=" + info.docCount);
-				segInfoStat.name = info.name;
-				segInfoStat.docCount = info.docCount;
-				
-				int toLoseDocCount = info.docCount;
-				
-				SegmentReader reader = null;
-				
-				try
-				{
-					Msg("    compound=" + info.GetUseCompoundFile());
-					segInfoStat.compound = info.GetUseCompoundFile();
-					Msg("    hasProx=" + info.HasProx);
-					segInfoStat.hasProx = info.HasProx;
-					Msg("    numFiles=" + info.Files().Count);
-					segInfoStat.numFiles = info.Files().Count;
-					Msg(System.String.Format(nf, "    size (MB)={0:f}", new System.Object[] { (info.SizeInBytes() / (1024.0 * 1024.0)) }));
-					segInfoStat.sizeMB = info.SizeInBytes() / (1024.0 * 1024.0);
+            }
+            
+            if (skip)
+            {
+                Msg("\nERROR: this index appears to be created by a newer version of Lucene than this tool was compiled on; please re-compile this tool on the matching version of Lucene; exiting");
+                result.toolOutOfDate = true;
+                return result;
+            }
+            
+            
+            result.newSegments = (SegmentInfos) sis.Clone();
+            result.newSegments.Clear();
+            
+            for (int i = 0; i < numSegments; i++)
+            {
+                SegmentInfo info = sis.Info(i);
+                if (onlySegments != null && !onlySegments.Contains(info.name))
+                    continue;
+                var segInfoStat = new Status.SegmentInfoStatus();
+                result.segmentInfos.Add(segInfoStat);
+                Msg("  " + (1 + i) + " of " + numSegments + ": name=" + info.name + " docCount=" + info.docCount);
+                segInfoStat.name = info.name;
+                segInfoStat.docCount = info.docCount;
+                
+                int toLoseDocCount = info.docCount;
+                
+                SegmentReader reader = null;
+                
+                try
+                {
+                    Msg("    compound=" + info.GetUseCompoundFile());
+                    segInfoStat.compound = info.GetUseCompoundFile();
+                    Msg("    hasProx=" + info.HasProx);
+                    segInfoStat.hasProx = info.HasProx;
+                    Msg("    numFiles=" + info.Files().Count);
+                    segInfoStat.numFiles = info.Files().Count;
+                    Msg(System.String.Format(nf, "    size (MB)={0:f}", new System.Object[] { (info.SizeInBytes() / (1024.0 * 1024.0)) }));
+                    segInfoStat.sizeMB = info.SizeInBytes() / (1024.0 * 1024.0);
                     IDictionary<string, string> diagnostics = info.Diagnostics;
-					segInfoStat.diagnostics = diagnostics;
-					if (diagnostics.Count > 0)
-					{
-						Msg("    diagnostics = " + CollectionsHelper.CollectionToString(diagnostics));
-					}
-					
-					int docStoreOffset = info.DocStoreOffset;
-					if (docStoreOffset != - 1)
-					{
-						Msg("    docStoreOffset=" + docStoreOffset);
-						segInfoStat.docStoreOffset = docStoreOffset;
-						Msg("    docStoreSegment=" + info.DocStoreSegment);
-						segInfoStat.docStoreSegment = info.DocStoreSegment;
-						Msg("    docStoreIsCompoundFile=" + info.DocStoreIsCompoundFile);
-						segInfoStat.docStoreCompoundFile = info.DocStoreIsCompoundFile;
-					}
-					System.String delFileName = info.GetDelFileName();
-					if (delFileName == null)
-					{
-						Msg("    no deletions");
-						segInfoStat.hasDeletions = false;
-					}
-					else
-					{
-						Msg("    has deletions [delFileName=" + delFileName + "]");
-						segInfoStat.hasDeletions = true;
-						segInfoStat.deletionsFileName = delFileName;
-					}
-					if (infoStream != null)
-						infoStream.Write("    test: open reader.........");
-					reader = SegmentReader.Get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
-					
-					segInfoStat.openReaderPassed = true;
-					
-					int numDocs = reader.NumDocs();
-					toLoseDocCount = numDocs;
-					if (reader.HasDeletions)
-					{
-						if (reader.deletedDocs.Count() != info.GetDelCount())
-						{
-							throw new System.SystemException("delete count mismatch: info=" + info.GetDelCount() + " vs deletedDocs.count()=" + reader.deletedDocs.Count());
-						}
-						if (reader.deletedDocs.Count() > reader.MaxDoc)
-						{
-							throw new System.SystemException("too many deleted docs: MaxDoc=" + reader.MaxDoc + " vs deletedDocs.count()=" + reader.deletedDocs.Count());
-						}
-						if (info.docCount - numDocs != info.GetDelCount())
-						{
-							throw new System.SystemException("delete count mismatch: info=" + info.GetDelCount() + " vs reader=" + (info.docCount - numDocs));
-						}
-						segInfoStat.numDeleted = info.docCount - numDocs;
-						Msg("OK [" + (segInfoStat.numDeleted) + " deleted docs]");
-					}
-					else
-					{
-						if (info.GetDelCount() != 0)
-						{
-							throw new System.SystemException("delete count mismatch: info=" + info.GetDelCount() + " vs reader=" + (info.docCount - numDocs));
-						}
-						Msg("OK");
-					}
-					if (reader.MaxDoc != info.docCount)
-						throw new System.SystemException("SegmentReader.MaxDoc " + reader.MaxDoc + " != SegmentInfos.docCount " + info.docCount);
-					
-					// Test getFieldNames()
-					if (infoStream != null)
-					{
-						infoStream.Write("    test: fields..............");
-					}
+                    segInfoStat.diagnostics = diagnostics;
+                    if (diagnostics.Count > 0)
+                    {
+                        Msg("    diagnostics = " + CollectionsHelper.CollectionToString(diagnostics));
+                    }
+                    
+                    int docStoreOffset = info.DocStoreOffset;
+                    if (docStoreOffset != - 1)
+                    {
+                        Msg("    docStoreOffset=" + docStoreOffset);
+                        segInfoStat.docStoreOffset = docStoreOffset;
+                        Msg("    docStoreSegment=" + info.DocStoreSegment);
+                        segInfoStat.docStoreSegment = info.DocStoreSegment;
+                        Msg("    docStoreIsCompoundFile=" + info.DocStoreIsCompoundFile);
+                        segInfoStat.docStoreCompoundFile = info.DocStoreIsCompoundFile;
+                    }
+                    System.String delFileName = info.GetDelFileName();
+                    if (delFileName == null)
+                    {
+                        Msg("    no deletions");
+                        segInfoStat.hasDeletions = false;
+                    }
+                    else
+                    {
+                        Msg("    has deletions [delFileName=" + delFileName + "]");
+                        segInfoStat.hasDeletions = true;
+                        segInfoStat.deletionsFileName = delFileName;
+                    }
+                    if (infoStream != null)
+                        infoStream.Write("    test: open reader.........");
+                    reader = SegmentReader.Get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+                    
+                    segInfoStat.openReaderPassed = true;
+                    
+                    int numDocs = reader.NumDocs();
+                    toLoseDocCount = numDocs;
+                    if (reader.HasDeletions)
+                    {
+                        if (reader.deletedDocs.Count() != info.GetDelCount())
+                        {
+                            throw new System.SystemException("delete count mismatch: info=" + info.GetDelCount() + " vs deletedDocs.count()=" + reader.deletedDocs.Count());
+                        }
+                        if (reader.deletedDocs.Count() > reader.MaxDoc)
+                        {
+                            throw new System.SystemException("too many deleted docs: MaxDoc=" + reader.MaxDoc + " vs deletedDocs.count()=" + reader.deletedDocs.Count());
+                        }
+                        if (info.docCount - numDocs != info.GetDelCount())
+                        {
+                            throw new System.SystemException("delete count mismatch: info=" + info.GetDelCount() + " vs reader=" + (info.docCount - numDocs));
+                        }
+                        segInfoStat.numDeleted = info.docCount - numDocs;
+                        Msg("OK [" + (segInfoStat.numDeleted) + " deleted docs]");
+                    }
+                    else
+                    {
+                        if (info.GetDelCount() != 0)
+                        {
+                            throw new System.SystemException("delete count mismatch: info=" + info.GetDelCount() + " vs reader=" + (info.docCount - numDocs));
+                        }
+                        Msg("OK");
+                    }
+                    if (reader.MaxDoc != info.docCount)
+                        throw new System.SystemException("SegmentReader.MaxDoc " + reader.MaxDoc + " != SegmentInfos.docCount " + info.docCount);
+                    
+                    // Test getFieldNames()
+                    if (infoStream != null)
+                    {
+                        infoStream.Write("    test: fields..............");
+                    }
                     ICollection<string> fieldNames = reader.GetFieldNames(IndexReader.FieldOption.ALL);
-					Msg("OK [" + fieldNames.Count + " fields]");
-					segInfoStat.numFields = fieldNames.Count;
-					
-					// Test Field Norms
-					segInfoStat.fieldNormStatus = TestFieldNorms(fieldNames, reader);
-					
-					// Test the Term Index
-					segInfoStat.termIndexStatus = TestTermIndex(info, reader);
-					
-					// Test Stored Fields
-					segInfoStat.storedFieldStatus = TestStoredFields(info, reader, nf);
-					
-					// Test Term Vectors
-					segInfoStat.termVectorStatus = TestTermVectors(info, reader, nf);
-					
-					// Rethrow the first exception we encountered
-					//  This will cause stats for failed segments to be incremented properly
-					if (segInfoStat.fieldNormStatus.error != null)
-					{
-						throw new SystemException("Field Norm test failed");
-					}
-					else if (segInfoStat.termIndexStatus.error != null)
-					{
-						throw new SystemException("Term Index test failed");
-					}
-					else if (segInfoStat.storedFieldStatus.error != null)
-					{
-						throw new SystemException("Stored Field test failed");
-					}
-					else if (segInfoStat.termVectorStatus.error != null)
-					{
-						throw new System.SystemException("Term Vector test failed");
-					}
-					
-					Msg("");
-				}
-				catch (System.Exception t)
-				{
-					Msg("FAILED");
-					const string comment = "fixIndex() would remove reference to this segment";
-					Msg("    WARNING: " + comment + "; full exception:");
-					if (infoStream != null)
-						infoStream.WriteLine(t.StackTrace);
-					Msg("");
-					result.totLoseDocCount += toLoseDocCount;
-					result.numBadSegments++;
-					continue;
-				}
-				finally
-				{
-					if (reader != null)
-						reader.Close();
-				}
-				
-				// Keeper
-				result.newSegments.Add((SegmentInfo)info.Clone());
-			}
-			
-			if (0 == result.numBadSegments)
-			{
-				result.clean = true;
-				Msg("No problems were detected with this index.\n");
-			}
-			else
-				Msg("WARNING: " + result.numBadSegments + " broken segments (containing " + result.totLoseDocCount + " documents) detected");
-			
-			return result;
-		}
-		
-		/// <summary> Test field norms.</summary>
+                    Msg("OK [" + fieldNames.Count + " fields]");
+                    segInfoStat.numFields = fieldNames.Count;
+                    
+                    // Test Field Norms
+                    segInfoStat.fieldNormStatus = TestFieldNorms(fieldNames, reader);
+                    
+                    // Test the Term Index
+                    segInfoStat.termIndexStatus = TestTermIndex(info, reader);
+                    
+                    // Test Stored Fields
+                    segInfoStat.storedFieldStatus = TestStoredFields(info, reader, nf);
+                    
+                    // Test Term Vectors
+                    segInfoStat.termVectorStatus = TestTermVectors(info, reader, nf);
+                    
+                    // Rethrow the first exception we encountered
+                    //  This will cause stats for failed segments to be incremented properly
+                    if (segInfoStat.fieldNormStatus.error != null)
+                    {
+                        throw new SystemException("Field Norm test failed");
+                    }
+                    else if (segInfoStat.termIndexStatus.error != null)
+                    {
+                        throw new SystemException("Term Index test failed");
+                    }
+                    else if (segInfoStat.storedFieldStatus.error != null)
+                    {
+                        throw new SystemException("Stored Field test failed");
+                    }
+                    else if (segInfoStat.termVectorStatus.error != null)
+                    {
+                        throw new System.SystemException("Term Vector test failed");
+                    }
+                    
+                    Msg("");
+                }
+                catch (System.Exception t)
+                {
+                    Msg("FAILED");
+                    const string comment = "fixIndex() would remove reference to this segment";
+                    Msg("    WARNING: " + comment + "; full exception:");
+                    if (infoStream != null)
+                        infoStream.WriteLine(t.StackTrace);
+                    Msg("");
+                    result.totLoseDocCount += toLoseDocCount;
+                    result.numBadSegments++;
+                    continue;
+                }
+                finally
+                {
+                    if (reader != null)
+                        reader.Close();
+                }
+                
+                // Keeper
+                result.newSegments.Add((SegmentInfo)info.Clone());
+            }
+            
+            if (0 == result.numBadSegments)
+            {
+                result.clean = true;
+                Msg("No problems were detected with this index.\n");
+            }
+            else
+                Msg("WARNING: " + result.numBadSegments + " broken segments (containing " + result.totLoseDocCount + " documents) detected");
+            
+            return result;
+        }
+        
+        /// <summary> Test field norms.</summary>
         private Status.FieldNormStatus TestFieldNorms(IEnumerable<string> fieldNames, SegmentReader reader)
-		{
-			var status = new Status.FieldNormStatus();
-			
-			try
-			{
-				// Test Field Norms
-				if (infoStream != null)
-				{
-					infoStream.Write("    test: field norms.........");
-				}
+        {
+            var status = new Status.FieldNormStatus();
+            
+            try
+            {
+                // Test Field Norms
+                if (infoStream != null)
+                {
+                    infoStream.Write("    test: field norms.........");
+                }
 
-				var b = new byte[reader.MaxDoc];
-				foreach(string fieldName in fieldNames)
-				{
+                var b = new byte[reader.MaxDoc];
+                foreach(string fieldName in fieldNames)
+                {
                     if (reader.HasNorms(fieldName))
                     {
                         reader.Norms(fieldName, b, 0);
                         ++status.totFields;
                     }
-				}
-				
-				Msg("OK [" + status.totFields + " fields]");
-			}
-			catch (System.Exception e)
-			{
-				Msg("ERROR [" + System.Convert.ToString(e.Message) + "]");
-				status.error = e;
-				if (infoStream != null)
-				{
-					infoStream.WriteLine(e.StackTrace);
-				}
-			}
-			
-			return status;
-		}
-		
-		/// <summary> Test the term index.</summary>
-		private Status.TermIndexStatus TestTermIndex(SegmentInfo info, SegmentReader reader)
-		{
-			var status = new Status.TermIndexStatus();
-			
-			try
-			{
-				if (infoStream != null)
-				{
-					infoStream.Write("    test: terms, freq, prox...");
-				}
-				
-				TermEnum termEnum = reader.Terms();
-				TermPositions termPositions = reader.TermPositions();
-				
-				// Used only to count up # deleted docs for this term
-				var myTermDocs = new MySegmentTermDocs(reader);
-				
-				int maxDoc = reader.MaxDoc;
-				
-				while (termEnum.Next())
-				{
-					status.termCount++;
-					Term term = termEnum.Term;
-					int docFreq = termEnum.DocFreq();
-					termPositions.Seek(term);
-					int lastDoc = - 1;
-					int freq0 = 0;
-					status.totFreq += docFreq;
-					while (termPositions.Next())
-					{
-						freq0++;
-						int doc = termPositions.Doc;
-						int freq = termPositions.Freq;
-						if (doc <= lastDoc)
-						{
-							throw new System.SystemException("term " + term + ": doc " + doc + " <= lastDoc " + lastDoc);
-						}
-						if (doc >= maxDoc)
-						{
-							throw new System.SystemException("term " + term + ": doc " + doc + " >= maxDoc " + maxDoc);
-						}
-						
-						lastDoc = doc;
-						if (freq <= 0)
-						{
-							throw new System.SystemException("term " + term + ": doc " + doc + ": freq " + freq + " is out of bounds");
-						}
-						
-						int lastPos = - 1;
-						status.totPos += freq;
-						for (int j = 0; j < freq; j++)
-						{
-							int pos = termPositions.NextPosition();
-							if (pos < - 1)
-							{
-								throw new System.SystemException("term " + term + ": doc " + doc + ": pos " + pos + " is out of bounds");
-							}
-							if (pos < lastPos)
-							{
-								throw new System.SystemException("term " + term + ": doc " + doc + ": pos " + pos + " < lastPos " + lastPos);
-							}
-						    lastPos = pos;
-						}
-					}
-					
-					// Now count how many deleted docs occurred in
-					// this term:
-					int delCount;
-					if (reader.HasDeletions)
-					{
-						myTermDocs.Seek(term);
-						while (myTermDocs.Next())
-						{
-						}
-						delCount = myTermDocs.delCount;
-					}
-					else
-					{
-						delCount = 0;
-					}
-					
-					if (freq0 + delCount != docFreq)
-					{
-						throw new System.SystemException("term " + term + " docFreq=" + docFreq + " != num docs seen " + freq0 + " + num docs deleted " + delCount);
-					}
-				}
-				
-				Msg("OK [" + status.termCount + " terms; " + status.totFreq + " terms/docs pairs; " + status.totPos + " tokens]");
-			}
-			catch (System.Exception e)
-			{
-				Msg("ERROR [" + System.Convert.ToString(e.Message) + "]");
-				status.error = e;
-				if (infoStream != null)
-				{
-					infoStream.WriteLine(e.StackTrace);
-				}
-			}
-			
-			return status;
-		}
-		
-		/// <summary> Test stored fields for a segment.</summary>
-		private Status.StoredFieldStatus TestStoredFields(SegmentInfo info, SegmentReader reader, System.Globalization.NumberFormatInfo format)
-		{
-			var status = new Status.StoredFieldStatus();
-			
-			try
-			{
-				if (infoStream != null)
-				{
-					infoStream.Write("    test: stored fields.......");
-				}
-				
-				// Scan stored fields for all documents
-				for (int j = 0; j < info.docCount; ++j)
-				{
-					if (!reader.IsDeleted(j))
-					{
-						status.docCount++;
-						Document doc = reader.Document(j);
-						status.totFields += doc.GetFields().Count;
-					}
-				}
-				
-				// Validate docCount
-				if (status.docCount != reader.NumDocs())
-				{
-					throw new System.SystemException("docCount=" + reader.NumDocs() + " but saw " + status.docCount + " undeleted docs");
-				}
-				
+                }
+                
+                Msg("OK [" + status.totFields + " fields]");
+            }
+            catch (System.Exception e)
+            {
+                Msg("ERROR [" + System.Convert.ToString(e.Message) + "]");
+                status.error = e;
+                if (infoStream != null)
+                {
+                    infoStream.WriteLine(e.StackTrace);
+                }
+            }
+            
+            return status;
+        }
+        
+        /// <summary> Test the term index.</summary>
+        private Status.TermIndexStatus TestTermIndex(SegmentInfo info, SegmentReader reader)
+        {
+            var status = new Status.TermIndexStatus();
+            
+            try
+            {
+                if (infoStream != null)
+                {
+                    infoStream.Write("    test: terms, freq, prox...");
+                }
+                
+                TermEnum termEnum = reader.Terms();
+                TermPositions termPositions = reader.TermPositions();
+                
+                // Used only to count up # deleted docs for this term
+                var myTermDocs = new MySegmentTermDocs(reader);
+                
+                int maxDoc = reader.MaxDoc;
+                
+                while (termEnum.Next())
+                {
+                    status.termCount++;
+                    Term term = termEnum.Term;
+                    int docFreq = termEnum.DocFreq();
+                    termPositions.Seek(term);
+                    int lastDoc = - 1;
+                    int freq0 = 0;
+                    status.totFreq += docFreq;
+                    while (termPositions.Next())
+                    {
+                        freq0++;
+                        int doc = termPositions.Doc;
+                        int freq = termPositions.Freq;
+                        if (doc <= lastDoc)
+                        {
+                            throw new System.SystemException("term " + term + ": doc " + doc + " <= lastDoc " + lastDoc);
+                        }
+                        if (doc >= maxDoc)
+                        {
+                            throw new System.SystemException("term " + term + ": doc " + doc + " >= maxDoc " + maxDoc);
+                        }
+                        
+                        lastDoc = doc;
+                        if (freq <= 0)
+                        {
+                            throw new System.SystemException("term " + term + ": doc " + doc + ": freq " + freq + " is out of bounds");
+                        }
+                        
+                        int lastPos = - 1;
+                        status.totPos += freq;
+                        for (int j = 0; j < freq; j++)
+                        {
+                            int pos = termPositions.NextPosition();
+                            if (pos < - 1)
+                            {
+                                throw new System.SystemException("term " + term + ": doc " + doc + ": pos " + pos + " is out of bounds");
+                            }
+                            if (pos < lastPos)
+                            {
+                                throw new System.SystemException("term " + term + ": doc " + doc + ": pos " + pos + " < lastPos " + lastPos);
+                            }
+                            lastPos = pos;
+                        }
+                    }
+                    
+                    // Now count how many deleted docs occurred in
+                    // this term:
+                    int delCount;
+                    if (reader.HasDeletions)
+                    {
+                        myTermDocs.Seek(term);
+                        while (myTermDocs.Next())
+                        {
+                        }
+                        delCount = myTermDocs.delCount;
+                    }
+                    else
+                    {
+                        delCount = 0;
+                    }
+                    
+                    if (freq0 + delCount != docFreq)
+                    {
+                        throw new System.SystemException("term " + term + " docFreq=" + docFreq + " != num docs seen " + freq0 + " + num docs deleted " + delCount);
+                    }
+                }
+                
+                Msg("OK [" + status.termCount + " terms; " + status.totFreq + " terms/docs pairs; " + status.totPos + " tokens]");
+            }
+            catch (System.Exception e)
+            {
+                Msg("ERROR [" + System.Convert.ToString(e.Message) + "]");
+                status.error = e;
+                if (infoStream != null)
+                {
+                    infoStream.WriteLine(e.StackTrace);
+                }
+            }
+            
+            return status;
+        }
+        
+        /// <summary> Test stored fields for a segment.</summary>
+        private Status.StoredFieldStatus TestStoredFields(SegmentInfo info, SegmentReader reader, System.Globalization.NumberFormatInfo format)
+        {
+            var status = new Status.StoredFieldStatus();
+            
+            try
+            {
+                if (infoStream != null)
+                {
+                    infoStream.Write("    test: stored fields.......");
+                }
+                
+                // Scan stored fields for all documents
+                for (int j = 0; j < info.docCount; ++j)
+                {
+                    if (!reader.IsDeleted(j))
+                    {
+                        status.docCount++;
+                        Document doc = reader.Document(j);
+                        status.totFields += doc.GetFields().Count;
+                    }
+                }
+                
+                // Validate docCount
+                if (status.docCount != reader.NumDocs())
+                {
+                    throw new System.SystemException("docCount=" + reader.NumDocs() + " but saw " + status.docCount + " undeleted docs");
+                }
+                
                 Msg(string.Format(format, "OK [{0:d} total field count; avg {1:f} fields per doc]", new object[] { status.totFields, (((float) status.totFields) / status.docCount) }));
             }
-			catch (System.Exception e)
-			{
-				Msg("ERROR [" + System.Convert.ToString(e.Message) + "]");
-				status.error = e;
-				if (infoStream != null)
-				{
-					infoStream.WriteLine(e.StackTrace);
-				}
-			}
-			
-			return status;
-		}
-		
-		/// <summary> Test term vectors for a segment.</summary>
+            catch (System.Exception e)
+            {
+                Msg("ERROR [" + System.Convert.ToString(e.Message) + "]");
+                status.error = e;
+                if (infoStream != null)
+                {
+                    infoStream.WriteLine(e.StackTrace);
+                }
+            }
+            
+            return status;
+        }
+        
+        /// <summary> Test term vectors for a segment.</summary>
         private Status.TermVectorStatus TestTermVectors(SegmentInfo info, SegmentReader reader, System.Globalization.NumberFormatInfo format)
-		{
-			var status = new Status.TermVectorStatus();
-			
-			try
-			{
-				if (infoStream != null)
-				{
-					infoStream.Write("    test: term vectors........");
-				}
-				
-				for (int j = 0; j < info.docCount; ++j)
-				{
-					if (!reader.IsDeleted(j))
-					{
-						status.docCount++;
-						ITermFreqVector[] tfv = reader.GetTermFreqVectors(j);
-						if (tfv != null)
-						{
-							status.totVectors += tfv.Length;
-						}
-					}
-				}
-				
+        {
+            var status = new Status.TermVectorStatus();
+            
+            try
+            {
+                if (infoStream != null)
+                {
+                    infoStream.Write("    test: term vectors........");
+                }
+                
+                for (int j = 0; j < info.docCount; ++j)
+                {
+                    if (!reader.IsDeleted(j))
+                    {
+                        status.docCount++;
+                        ITermFreqVector[] tfv = reader.GetTermFreqVectors(j);
+                        if (tfv != null)
+                        {
+                            status.totVectors += tfv.Length;
+                        }
+                    }
+                }
+                
                 Msg(System.String.Format(format, "OK [{0:d} total vector count; avg {1:f} term/freq vector fields per doc]", new object[] { status.totVectors, (((float) status.totVectors) / status.docCount) }));
             }
-			catch (System.Exception e)
-			{
-				Msg("ERROR [" + System.Convert.ToString(e.Message) + "]");
-				status.error = e;
-				if (infoStream != null)
-				{
-					infoStream.WriteLine(e.StackTrace);
-				}
-			}
-			
-			return status;
-		}
-		
-		/// <summary>Repairs the index using previously returned result
-		/// from <see cref="CheckIndex" />.  Note that this does not
-		/// remove any of the unreferenced files after it's done;
-		/// you must separately open an <see cref="IndexWriter" />, which
-		/// deletes unreferenced files when it's created.
-		/// 
-		/// <p/><b>WARNING</b>: this writes a
-		/// new segments file into the index, effectively removing
-		/// all documents in broken segments from the index.
-		/// BE CAREFUL.
-		/// 
-		/// <p/><b>WARNING</b>: Make sure you only call this when the
-		/// index is not opened by any writer. 
-		/// </summary>
-		public virtual void  FixIndex(Status result)
-		{
-			if (result.partial)
-				throw new System.ArgumentException("can only fix an index that was fully checked (this status checked a subset of segments)");
-			result.newSegments.Commit(result.dir);
-		}
-		
-		private static bool assertsOn;
-		
-		private static bool TestAsserts()
-		{
-			assertsOn = true;
-			return true;
-		}
-		
-		private static bool AssertsOn()
-		{
-			System.Diagnostics.Debug.Assert(TestAsserts());
-			return assertsOn;
-		}
-		
-		/// <summary>Command-line interface to check and fix an index.
-		/// <p/>
-		/// Run it like this:
+            catch (System.Exception e)
+            {
+                Msg("ERROR [" + System.Convert.ToString(e.Message) + "]");
+                status.error = e;
+                if (infoStream != null)
+                {
+                    infoStream.WriteLine(e.StackTrace);
+                }
+            }
+            
+            return status;
+        }
+        
+        /// <summary>Repairs the index using previously returned result
+        /// from <see cref="CheckIndex" />.  Note that this does not
+        /// remove any of the unreferenced files after it's done;
+        /// you must separately open an <see cref="IndexWriter" />, which
+        /// deletes unreferenced files when it's created.
+        /// 
+        /// <p/><b>WARNING</b>: this writes a
+        /// new segments file into the index, effectively removing
+        /// all documents in broken segments from the index.
+        /// BE CAREFUL.
+        /// 
+        /// <p/><b>WARNING</b>: Make sure you only call this when the
+        /// index is not opened by any writer. 
+        /// </summary>
+        public virtual void  FixIndex(Status result)
+        {
+            if (result.partial)
+                throw new System.ArgumentException("can only fix an index that was fully checked (this status checked a subset of segments)");
+            result.newSegments.Commit(result.dir);
+        }
+        
+        private static bool assertsOn;
+        
+        private static bool TestAsserts()
+        {
+            assertsOn = true;
+            return true;
+        }
+        
+        private static bool AssertsOn()
+        {
+            System.Diagnostics.Debug.Assert(TestAsserts());
+            return assertsOn;
+        }
+        
+        /// <summary>Command-line interface to check and fix an index.
+        /// <p/>
+        /// Run it like this:
         /// <code>
-		/// java -ea:Lucene.Net... Lucene.Net.Index.CheckIndex pathToIndex [-fix] [-segment X] [-segment Y]
+        /// java -ea:Lucene.Net... Lucene.Net.Index.CheckIndex pathToIndex [-fix] [-segment X] [-segment Y]
         /// </code>
-		/// <list type="bullet">
-		/// <item><c>-fix</c>: actually write a new segments_N file, removing any problematic segments</item>
-		/// <item><c>-segment X</c>: only check the specified
-		/// segment(s).  This can be specified multiple times,
-		/// to check more than one segment, eg <c>-segment _2
-		/// -segment _a</c>.  You can't use this with the -fix
-		/// option.</item>
-		/// </list>
-		/// <p/><b>WARNING</b>: <c>-fix</c> should only be used on an emergency basis as it will cause
-		/// documents (perhaps many) to be permanently removed from the index.  Always make
-		/// a backup copy of your index before running this!  Do not run this tool on an index
-		/// that is actively being written to.  You have been warned!
-		/// <p/> Run without -fix, this tool will open the index, report version information
-		/// and report any exceptions it hits and what action it would take if -fix were
-		/// specified.  With -fix, this tool will remove any segments that have issues and
-		/// write a new segments_N file.  This means all documents contained in the affected
-		/// segments will be removed.
-		/// <p/>
-		/// This tool exits with exit code 1 if the index cannot be opened or has any
-		/// corruption, else 0.
-		/// </summary>
-		[STAThread]
-		public static void  Main(System.String[] args)
-		{
-			
-			bool doFix = false;
-			var onlySegments = new List<string>();
-			System.String indexPath = null;
-			int i = 0;
-			while (i < args.Length)
-			{
-				if (args[i].Equals("-fix"))
-				{
-					doFix = true;
-					i++;
-				}
-				else if (args[i].Equals("-segment"))
-				{
-					if (i == args.Length - 1)
-					{
-						System.Console.Out.WriteLine("ERROR: missing name for -segment option");
-						System.Environment.Exit(1);
-					}
-					onlySegments.Add(args[i + 1]);
-					i += 2;
-				}
-				else
-				{
-					if (indexPath != null)
-					{
-						System.Console.Out.WriteLine("ERROR: unexpected extra argument '" + args[i] + "'");
-						System.Environment.Exit(1);
-					}
-					indexPath = args[i];
-					i++;
-				}
-			}
-			
-			if (indexPath == null)
-			{
-				System.Console.Out.WriteLine("\nERROR: index path not specified");
-				System.Console.Out.WriteLine("\nUsage: java Lucene.Net.Index.CheckIndex pathToIndex [-fix] [-segment X] [-segment Y]\n" + "\n" + "  -fix: actually write a new segments_N file, removing any problematic segments\n" + "  -segment X: only check the specified segments.  This can be specified multiple\n" + "              times, to check more than one segment, eg '-segment _2 -segment _a'.\n" + "              You can't use this with the -fix option\n" + "\n" + "**WARNING**: -fix should only be used on an emergency basis as it will cause\n" + "documents (perhaps many) to be permanently removed from the index.  Always make\n" + "a backup copy of your index before running this!  Do not run this tool on an index\n" + "that is actively being written to.  You have been warned!\n" + "\n" + "Run without -fix, this tool will open the index, report version information\n" + "and report any exceptions it hits and what action it would take if -fix were\n" + "specified.  With -fix, this tool will remove any segments that have issues and\n" + "write a new segments_N file.  This means all documents contained in the affected\n" + "segments will be removed.\n" + "\n" + "This tool exits with exit code 1 if the index cannot be opened or has any\n" + "corruption, else 0.\n");
-				System.Environment.Exit(1);
-			}
-			
-			if (!AssertsOn())
-				System.Console.Out.WriteLine("\nNOTE: testing will be more thorough if you run java with '-ea:Lucene.Net...', so assertions are enabled");
-			
-			if (onlySegments.Count == 0)
-				onlySegments = null;
-			else if (doFix)
-			{
-				System.Console.Out.WriteLine("ERROR: cannot specify both -fix and -segment");
-				System.Environment.Exit(1);
-			}
-			
-			System.Console.Out.WriteLine("\nOpening index @ " + indexPath + "\n");
-			Directory dir = null;
-			try
-			{
-				dir = FSDirectory.Open(new System.IO.DirectoryInfo(indexPath));
-			}
-			catch (Exception t)
-			{
-				Console.Out.WriteLine("ERROR: could not open directory \"" + indexPath + "\"; exiting");
-				Console.Out.WriteLine(t.StackTrace);
-				Environment.Exit(1);
-			}
-			
-			var checker = new CheckIndex(dir);
-			var tempWriter = new System.IO.StreamWriter(System.Console.OpenStandardOutput(), System.Console.Out.Encoding)
-			                 	{AutoFlush = true};
-			checker.SetInfoStream(tempWriter);
-			
-			Status result = checker.CheckIndex_Renamed_Method(onlySegments);
-			if (result.missingSegments)
-			{
-				System.Environment.Exit(1);
-			}
-			
-			if (!result.clean)
-			{
-				if (!doFix)
-				{
-					System.Console.Out.WriteLine("WARNING: would write new segments file, and " + result.totLoseDocCount + " documents would be lost, if -fix were specified\n");
-				}
-				else
-				{
-					Console.Out.WriteLine("WARNING: " + result.totLoseDocCount + " documents will be lost\n");
-					Console.Out.WriteLine("NOTE: will write new segments file in 5 seconds; this will remove " + result.totLoseDocCount + " docs from the index. THIS IS YOUR LAST CHANCE TO CTRL+C!");
-					for (var s = 0; s < 5; s++)
-					{
-						System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * 1000));
-						System.Console.Out.WriteLine("  " + (5 - s) + "...");
-					}
-					Console.Out.WriteLine("Writing...");
-					checker.FixIndex(result);
-					Console.Out.WriteLine("OK");
-					Console.Out.WriteLine("Wrote new segments file \"" + result.newSegments.GetCurrentSegmentFileName() + "\"");
-				}
-			}
-			System.Console.Out.WriteLine("");
-			
-			int exitCode;
-			if (result.clean)
-				exitCode = 0;
-			else
-				exitCode = 1;
-			System.Environment.Exit(exitCode);
-		}
-	}
+        /// <list type="bullet">
+        /// <item><c>-fix</c>: actually write a new segments_N file, removing any problematic segments</item>
+        /// <item><c>-segment X</c>: only check the specified
+        /// segment(s).  This can be specified multiple times,
+        /// to check more than one segment, eg <c>-segment _2
+        /// -segment _a</c>.  You can't use this with the -fix
+        /// option.</item>
+        /// </list>
+        /// <p/><b>WARNING</b>: <c>-fix</c> should only be used on an emergency basis as it will cause
+        /// documents (perhaps many) to be permanently removed from the index.  Always make
+        /// a backup copy of your index before running this!  Do not run this tool on an index
+        /// that is actively being written to.  You have been warned!
+        /// <p/> Run without -fix, this tool will open the index, report version information
+        /// and report any exceptions it hits and what action it would take if -fix were
+        /// specified.  With -fix, this tool will remove any segments that have issues and
+        /// write a new segments_N file.  This means all documents contained in the affected
+        /// segments will be removed.
+        /// <p/>
+        /// This tool exits with exit code 1 if the index cannot be opened or has any
+        /// corruption, else 0.
+        /// </summary>
+        [STAThread]
+        public static void  Main(System.String[] args)
+        {
+            
+            bool doFix = false;
+            var onlySegments = new List<string>();
+            System.String indexPath = null;
+            int i = 0;
+            while (i < args.Length)
+            {
+                if (args[i].Equals("-fix"))
+                {
+                    doFix = true;
+                    i++;
+                }
+                else if (args[i].Equals("-segment"))
+                {
+                    if (i == args.Length - 1)
+                    {
+                        System.Console.Out.WriteLine("ERROR: missing name for -segment option");
+                        System.Environment.Exit(1);
+                    }
+                    onlySegments.Add(args[i + 1]);
+                    i += 2;
+                }
+                else
+                {
+                    if (indexPath != null)
+                    {
+                        System.Console.Out.WriteLine("ERROR: unexpected extra argument '" + args[i] + "'");
+                        System.Environment.Exit(1);
+                    }
+                    indexPath = args[i];
+                    i++;
+                }
+            }
+            
+            if (indexPath == null)
+            {
+                System.Console.Out.WriteLine("\nERROR: index path not specified");
+                System.Console.Out.WriteLine("\nUsage: java Lucene.Net.Index.CheckIndex pathToIndex [-fix] [-segment X] [-segment Y]\n" + "\n" + "  -fix: actually write a new segments_N file, removing any problematic segments\n" + "  -segment X: only check the specified segments.  This can be specified multiple\n" + "              times, to check more than one segment, eg '-segment _2 -segment _a'.\n" + "              You can't use this with the -fix option\n" + "\n" + "**WARNING**: -fix should only be used on an emergency basis as it will cause\n" + "documents (perhaps many) to be permanently removed from the index.  Always make\n" + "a backup copy of your index before running this!  Do not run this tool on an index\n" + "that is actively being written to.  You have been warned!\n" + "\n" + "Run without -fix, this tool will open the index, report version information\n" + "and report any exceptions it hits and what action it would take if -fix were\n" + "specified.  With -fix, this tool will remove any segments that have issues and\n" + "write a new segments_N file.  This means all documents contained in the affected\n" + "segments will be removed.\n" + "\n" + "This tool exits with exit code 1 if the index cannot be opened or has any\n" + "corruption, else 0.\n");
+                System.Environment.Exit(1);
+            }
+            
+            if (!AssertsOn())
+                System.Console.Out.WriteLine("\nNOTE: testing will be more thorough if you run java with '-ea:Lucene.Net...', so assertions are enabled");
+            
+            if (onlySegments.Count == 0)
+                onlySegments = null;
+            else if (doFix)
+            {
+                System.Console.Out.WriteLine("ERROR: cannot specify both -fix and -segment");
+                System.Environment.Exit(1);
+            }
+            
+            System.Console.Out.WriteLine("\nOpening index @ " + indexPath + "\n");
+            Directory dir = null;
+            try
+            {
+                dir = FSDirectory.Open(new System.IO.DirectoryInfo(indexPath));
+            }
+            catch (Exception t)
+            {
+                Console.Out.WriteLine("ERROR: could not open directory \"" + indexPath + "\"; exiting");
+                Console.Out.WriteLine(t.StackTrace);
+                Environment.Exit(1);
+            }
+            
+            var checker = new CheckIndex(dir);
+            var tempWriter = new System.IO.StreamWriter(System.Console.OpenStandardOutput(), System.Console.Out.Encoding)
+                                 {AutoFlush = true};
+            checker.SetInfoStream(tempWriter);
+            
+            Status result = checker.CheckIndex_Renamed_Method(onlySegments);
+            if (result.missingSegments)
+            {
+                System.Environment.Exit(1);
+            }
+            
+            if (!result.clean)
+            {
+                if (!doFix)
+                {
+                    System.Console.Out.WriteLine("WARNING: would write new segments file, and " + result.totLoseDocCount + " documents would be lost, if -fix were specified\n");
+                }
+                else
+                {
+                    Console.Out.WriteLine("WARNING: " + result.totLoseDocCount + " documents will be lost\n");
+                    Console.Out.WriteLine("NOTE: will write new segments file in 5 seconds; this will remove " + result.totLoseDocCount + " docs from the index. THIS IS YOUR LAST CHANCE TO CTRL+C!");
+                    for (var s = 0; s < 5; s++)
+                    {
+                        System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * 1000));
+                        System.Console.Out.WriteLine("  " + (5 - s) + "...");
+                    }
+                    Console.Out.WriteLine("Writing...");
+                    checker.FixIndex(result);
+                    Console.Out.WriteLine("OK");
+                    Console.Out.WriteLine("Wrote new segments file \"" + result.newSegments.GetCurrentSegmentFileName() + "\"");
+                }
+            }
+            System.Console.Out.WriteLine("");
+            
+            int exitCode;
+            if (result.clean)
+                exitCode = 0;
+            else
+                exitCode = 1;
+            System.Environment.Exit(exitCode);
+        }
+    }
 }
\ No newline at end of file
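For reference, the converted CheckIndex above can be driven from code as well
as from the command line. A minimal sketch, assuming the 2.9-era Lucene.Net
API used in this file; the index path and the class name here are placeholder
stand-ins of mine, not part of the patch:

    using System;
    using Lucene.Net.Index;
    using Lucene.Net.Store;

    class CheckIndexSketch
    {
        static void Main()
        {
            // Placeholder path; point this at a real index directory.
            Directory dir = FSDirectory.Open(new System.IO.DirectoryInfo("/path/to/index"));

            var checker = new CheckIndex(dir);

            // Mirror Main() above: report progress to stdout.
            var infoWriter = new System.IO.StreamWriter(
                Console.OpenStandardOutput(), Console.Out.Encoding) { AutoFlush = true };
            checker.SetInfoStream(infoWriter);

            // Passing null checks every segment, matching the tool's
            // behavior when no -segment option is given.
            var result = checker.CheckIndex_Renamed_Method(null);

            if (!result.clean)
            {
                // Same caveat as the -fix flag: this commits a new segments_N
                // file and permanently drops documents in broken segments.
                checker.FixIndex(result);
            }
        }
    }

As with -fix on the command line, FixIndex should only be run against a
backed-up index that no writer currently has open.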


[32/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Prefix/Tree/QuadPrefixTree.cs
----------------------------------------------------------------------
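A note on the file that follows: QuadPrefixTree encodes each grid cell as a
string token, one character per level, splitting the current cell into four
quadrants in Z-order; the labels A=NW, B=NE, C=SW, D=SE match the four
CheckBattenberg calls in Build below. A minimal self-contained sketch of that
token construction (the bounds, depth, and names here are illustrative
stand-ins, not the library's API):

    using System;
    using System.Text;

    static class QuadTokenSketch
    {
        // Builds a token such as "BACC" for a point inside the given bounds.
        public static string TokenFor(double x, double y,
                                      double xmin, double xmax,
                                      double ymin, double ymax,
                                      int levels)
        {
            var sb = new StringBuilder(levels);
            for (int level = 0; level < levels; level++)
            {
                double xmid = (xmin + xmax) / 2.0;
                double ymid = (ymin + ymax) / 2.0;
                bool east = x >= xmid;   // right half of the current cell
                bool north = y >= ymid;  // top half of the current cell
                // Z-order labels as in QuadPrefixTree: A=NW, B=NE, C=SW, D=SE.
                sb.Append(north ? (east ? 'B' : 'A') : (east ? 'D' : 'C'));
                // Narrow the bounds to the chosen quadrant for the next level.
                if (east) xmin = xmid; else xmax = xmid;
                if (north) ymin = ymid; else ymax = ymid;
            }
            return sb.ToString();
        }

        static void Main()
        {
            // World bounds -180..180 x -90..90, four levels deep.
            Console.WriteLine(TokenFor(10.0, 45.0, -180, 180, -90, 90, 4)); // BACC
        }
    }

Each extra level halves the cell width and height, which is why
GetLevelForDistance below walks the precomputed levelW/levelH arrays to find
the first level whose cells are smaller than the requested distance.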
diff --git a/src/contrib/Spatial/Prefix/Tree/QuadPrefixTree.cs b/src/contrib/Spatial/Prefix/Tree/QuadPrefixTree.cs
index d038fde..230fd4a 100644
--- a/src/contrib/Spatial/Prefix/Tree/QuadPrefixTree.cs
+++ b/src/contrib/Spatial/Prefix/Tree/QuadPrefixTree.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -28,296 +28,296 @@ namespace Lucene.Net.Spatial.Prefix.Tree
     /// Implementation of {@link SpatialPrefixTree} which uses a quad tree
     /// (http://en.wikipedia.org/wiki/Quadtree)
     /// </summary>
-	public class QuadPrefixTree : SpatialPrefixTree
-	{
+    public class QuadPrefixTree : SpatialPrefixTree
+    {
         /// <summary>
         /// Factory for creating {@link QuadPrefixTree} instances with useful defaults
         /// </summary>
-		public class Factory : SpatialPrefixTreeFactory
-		{
-			protected override int GetLevelForDistance(double degrees)
-			{
-				var grid = new QuadPrefixTree(ctx, MAX_LEVELS_POSSIBLE);
-				return grid.GetLevelForDistance(degrees);
-			}
-
-			protected override SpatialPrefixTree NewSPT()
-			{
-				return new QuadPrefixTree(ctx, maxLevels != null ? maxLevels.Value : MAX_LEVELS_POSSIBLE);
-			}
-		}
-
-		public static readonly int MAX_LEVELS_POSSIBLE = 50;//not really sure how big this should be
-
-		public static readonly int DEFAULT_MAX_LEVELS = 12;
-		private double xmin;
-		private double xmax;
-		private double ymin;
-		private double ymax;
-		private double xmid;
-		private double ymid;
-
-		private double gridW;
-		private double gridH;
-
-		double[] levelW;
-		double[] levelH;
-		int[] levelS; // side
-		int[] levelN; // number
-
-		public QuadPrefixTree(SpatialContext ctx, Rectangle bounds, int maxLevels)
-			: base(ctx, maxLevels)
-		{
-			Init(ctx, bounds, maxLevels);
-		}
-
-		public QuadPrefixTree(SpatialContext ctx)
-			: base(ctx, DEFAULT_MAX_LEVELS)
-		{
-			Init(ctx, ctx.GetWorldBounds(), DEFAULT_MAX_LEVELS);
-		}
-
-		public QuadPrefixTree(SpatialContext ctx, int maxLevels)
-			: base(ctx, maxLevels)
-		{
-			Init(ctx, ctx.GetWorldBounds(), maxLevels);
-		}
-
-		protected void Init(SpatialContext ctx, Rectangle bounds, int maxLevels)
-		{
-			this.xmin = bounds.GetMinX();
-			this.xmax = bounds.GetMaxX();
-			this.ymin = bounds.GetMinY();
-			this.ymax = bounds.GetMaxY();
-
-			levelW = new double[maxLevels];
-			levelH = new double[maxLevels];
-			levelS = new int[maxLevels];
-			levelN = new int[maxLevels];
-
-			gridW = xmax - xmin;
-			gridH = ymax - ymin;
-			xmid = xmin + gridW / 2.0;
-			ymid = ymin + gridH / 2.0;
-			levelW[0] = gridW / 2.0;
-			levelH[0] = gridH / 2.0;
-			levelS[0] = 2;
-			levelN[0] = 4;
-
-			for (int i = 1; i < levelW.Length; i++)
-			{
-				levelW[i] = levelW[i - 1] / 2.0;
-				levelH[i] = levelH[i - 1] / 2.0;
-				levelS[i] = levelS[i - 1] * 2;
-				levelN[i] = levelN[i - 1] * 4;
-			}
-
-		}
-
-		public override int GetLevelForDistance(double dist)
-		{
+        public class Factory : SpatialPrefixTreeFactory
+        {
+            protected override int GetLevelForDistance(double degrees)
+            {
+                var grid = new QuadPrefixTree(ctx, MAX_LEVELS_POSSIBLE);
+                return grid.GetLevelForDistance(degrees);
+            }
+
+            protected override SpatialPrefixTree NewSPT()
+            {
+                return new QuadPrefixTree(ctx, maxLevels != null ? maxLevels.Value : MAX_LEVELS_POSSIBLE);
+            }
+        }
+
+        public static readonly int MAX_LEVELS_POSSIBLE = 50;//not really sure how big this should be
+
+        public static readonly int DEFAULT_MAX_LEVELS = 12;
+        private double xmin;
+        private double xmax;
+        private double ymin;
+        private double ymax;
+        private double xmid;
+        private double ymid;
+
+        private double gridW;
+        private double gridH;
+
+        double[] levelW;
+        double[] levelH;
+        int[] levelS; // side
+        int[] levelN; // number
+
+        public QuadPrefixTree(SpatialContext ctx, Rectangle bounds, int maxLevels)
+            : base(ctx, maxLevels)
+        {
+            Init(ctx, bounds, maxLevels);
+        }
+
+        public QuadPrefixTree(SpatialContext ctx)
+            : base(ctx, DEFAULT_MAX_LEVELS)
+        {
+            Init(ctx, ctx.GetWorldBounds(), DEFAULT_MAX_LEVELS);
+        }
+
+        public QuadPrefixTree(SpatialContext ctx, int maxLevels)
+            : base(ctx, maxLevels)
+        {
+            Init(ctx, ctx.GetWorldBounds(), maxLevels);
+        }
+
+        protected void Init(SpatialContext ctx, Rectangle bounds, int maxLevels)
+        {
+            this.xmin = bounds.GetMinX();
+            this.xmax = bounds.GetMaxX();
+            this.ymin = bounds.GetMinY();
+            this.ymax = bounds.GetMaxY();
+
+            levelW = new double[maxLevels];
+            levelH = new double[maxLevels];
+            levelS = new int[maxLevels];
+            levelN = new int[maxLevels];
+
+            gridW = xmax - xmin;
+            gridH = ymax - ymin;
+            xmid = xmin + gridW / 2.0;
+            ymid = ymin + gridH / 2.0;
+            levelW[0] = gridW / 2.0;
+            levelH[0] = gridH / 2.0;
+            levelS[0] = 2;
+            levelN[0] = 4;
+
+            for (int i = 1; i < levelW.Length; i++)
+            {
+                levelW[i] = levelW[i - 1] / 2.0;
+                levelH[i] = levelH[i - 1] / 2.0;
+                levelS[i] = levelS[i - 1] * 2;
+                levelN[i] = levelN[i - 1] * 4;
+            }
+
+        }
+
+        public override int GetLevelForDistance(double dist)
+        {
             if (dist == 0)//short circuit
                 return maxLevels;
             for (int i = 0; i < maxLevels - 1; i++)
-			{
-				//note: level[i] is actually a lookup for level i+1
+            {
+                //note: level[i] is actually a lookup for level i+1
                 if (dist > levelW[i] && dist > levelH[i])
-				{
-					return i + 1;
-				}
-			}
-			return maxLevels;
-		}
-
-		protected override Node GetNode(Point p, int level)
-		{
-			var cells = new List<Node>(1);
+                {
+                    return i + 1;
+                }
+            }
+            return maxLevels;
+        }
+
+        protected override Node GetNode(Point p, int level)
+        {
+            var cells = new List<Node>(1);
             Build(xmid, ymid, 0, cells, new StringBuilder(), ctx.MakePoint(p.GetX(), p.GetY()), level);
-			return cells[0];//note cells could be longer if p on edge
-		}
-
-		public override Node GetNode(string token)
-		{
-			return new QuadCell(token, this);
-		}
-
-		public override Node GetNode(byte[] bytes, int offset, int len)
-		{
-			throw new System.NotImplementedException();
-		}
-
-		public override IList<Node> GetNodes(Shape shape, int detailLevel, bool inclParents)
-		{
-			var point = shape as Point;
-			if (point != null)
-				return base.GetNodesAltPoint(point, detailLevel, inclParents);
-			else
-				return base.GetNodes(shape, detailLevel, inclParents);
-		}
-
-		private void Build(double x, double y, int level, List<Node> matches, StringBuilder str, Shape shape, int maxLevel)
-		{
-			Debug.Assert(str.Length == level);
-			double w = levelW[level] / 2;
-			double h = levelH[level] / 2;
-
-			// Z-Order
-			// http://en.wikipedia.org/wiki/Z-order_%28curve%29
-			CheckBattenberg('A', x - w, y + h, level, matches, str, shape, maxLevel);
-			CheckBattenberg('B', x + w, y + h, level, matches, str, shape, maxLevel);
-			CheckBattenberg('C', x - w, y - h, level, matches, str, shape, maxLevel);
-			CheckBattenberg('D', x + w, y - h, level, matches, str, shape, maxLevel);
-
-			// possibly consider hilbert curve
-			// http://en.wikipedia.org/wiki/Hilbert_curve
-			// http://blog.notdot.net/2009/11/Damn-Cool-Algorithms-Spatial-indexing-with-Quadtrees-and-Hilbert-Curves
-			// if we actually use the range property in the query, this could be useful
-		}
-
-		private void CheckBattenberg(
-			char c,
-			double cx,
-			double cy,
-			int level,
-			List<Node> matches,
-			StringBuilder str,
-			Shape shape,
-			int maxLevel)
-		{
-			Debug.Assert(str.Length == level);
-			double w = levelW[level] / 2;
-			double h = levelH[level] / 2;
-
-			int strlen = str.Length;
+            return cells[0];//note cells could be longer if p on edge
+        }
+
+        public override Node GetNode(string token)
+        {
+            return new QuadCell(token, this);
+        }
+
+        public override Node GetNode(byte[] bytes, int offset, int len)
+        {
+            throw new System.NotImplementedException();
+        }
+
+        public override IList<Node> GetNodes(Shape shape, int detailLevel, bool inclParents)
+        {
+            var point = shape as Point;
+            if (point != null)
+                return base.GetNodesAltPoint(point, detailLevel, inclParents);
+            else
+                return base.GetNodes(shape, detailLevel, inclParents);
+        }
+
+        private void Build(double x, double y, int level, List<Node> matches, StringBuilder str, Shape shape, int maxLevel)
+        {
+            Debug.Assert(str.Length == level);
+            double w = levelW[level] / 2;
+            double h = levelH[level] / 2;
+
+            // Z-Order
+            // http://en.wikipedia.org/wiki/Z-order_%28curve%29
+            CheckBattenberg('A', x - w, y + h, level, matches, str, shape, maxLevel);
+            CheckBattenberg('B', x + w, y + h, level, matches, str, shape, maxLevel);
+            CheckBattenberg('C', x - w, y - h, level, matches, str, shape, maxLevel);
+            CheckBattenberg('D', x + w, y - h, level, matches, str, shape, maxLevel);
+
+            // possibly consider hilbert curve
+            // http://en.wikipedia.org/wiki/Hilbert_curve
+            // http://blog.notdot.net/2009/11/Damn-Cool-Algorithms-Spatial-indexing-with-Quadtrees-and-Hilbert-Curves
+            // if we actually use the range property in the query, this could be useful
+        }
+
+        private void CheckBattenberg(
+            char c,
+            double cx,
+            double cy,
+            int level,
+            List<Node> matches,
+            StringBuilder str,
+            Shape shape,
+            int maxLevel)
+        {
+            Debug.Assert(str.Length == level);
+            double w = levelW[level] / 2;
+            double h = levelH[level] / 2;
+
+            int strlen = str.Length;
             Rectangle rectangle = ctx.MakeRectangle(cx - w, cx + w, cy - h, cy + h);
             SpatialRelation v = shape.Relate(rectangle);
-			if (SpatialRelation.CONTAINS == v)
-			{
-				str.Append(c);
-				//str.append(SpatialPrefixGrid.COVER);
-				matches.Add(new QuadCell(str.ToString(), v.Transpose(), this));
-			}
-			else if (SpatialRelation.DISJOINT == v)
-			{
-				// nothing
-			}
-			else
-			{ // SpatialRelation.WITHIN, SpatialRelation.INTERSECTS
-				str.Append(c);
-
-				int nextLevel = level + 1;
-				if (nextLevel >= maxLevel)
-				{
-					//str.append(SpatialPrefixGrid.INTERSECTS);
-					matches.Add(new QuadCell(str.ToString(), v.Transpose(), this));
-				}
-				else
-				{
-					Build(cx, cy, nextLevel, matches, str, shape, maxLevel);
-				}
-			}
-			str.Length = strlen;
-		}
-
-		public class QuadCell : Node
-		{
-
-			public QuadCell(String token, QuadPrefixTree enclosingInstance)
-				: base(enclosingInstance, token)
-			{
-			}
-
-			public QuadCell(String token, SpatialRelation shapeRel, QuadPrefixTree enclosingInstance)
-				: base(enclosingInstance, token)
-			{
-				this.shapeRel = shapeRel;
-			}
-
-			public override void Reset(string newToken)
-			{
-				base.Reset(newToken);
-				shape = null;
-			}
-
-			public override IList<Node> GetSubCells()
-			{
-				var tree = (QuadPrefixTree)spatialPrefixTree;
-				var cells = new List<Node>(4)
-                  	{
-                  		new QuadCell(GetTokenString() + "A", tree),
-                  		new QuadCell(GetTokenString() + "B", tree),
-                  		new QuadCell(GetTokenString() + "C", tree),
-                  		new QuadCell(GetTokenString() + "D", tree)
-                  	};
-				return cells;
-			}
-
-			public override int GetSubCellsSize()
-			{
-				return 4;
-			}
-
-			public override Node GetSubCell(Point p)
-			{
-				return ((QuadPrefixTree)spatialPrefixTree).GetNode(p, GetLevel() + 1); //not performant!
-			}
-
-			private Shape shape;//cache
-
-			public override Shape GetShape()
-			{
-				if (shape == null)
-					shape = MakeShape();
-				return shape;
-			}
-
-			private Rectangle MakeShape()
-			{
-				String token = GetTokenString();
-				var tree = ((QuadPrefixTree)spatialPrefixTree);
-				double xmin = tree.xmin;
-				double ymin = tree.ymin;
-
-				for (int i = 0; i < token.Length; i++)
-				{
-					char c = token[i];
-					if ('A' == c || 'a' == c)
-					{
-						ymin += tree.levelH[i];
-					}
-					else if ('B' == c || 'b' == c)
-					{
-						xmin += tree.levelW[i];
-						ymin += tree.levelH[i];
-					}
-					else if ('C' == c || 'c' == c)
-					{
-						// nothing really
-					}
-					else if ('D' == c || 'd' == c)
-					{
-						xmin += tree.levelW[i];
-					}
-					else
-					{
-						throw new Exception("unexpected char: " + c);
-					}
-				}
-				int len = token.Length;
-				double width, height;
-				if (len > 0)
-				{
-					width = tree.levelW[len - 1];
-					height = tree.levelH[len - 1];
-				}
-				else
-				{
-					width = tree.gridW;
-					height = tree.gridH;
-				}
+            if (SpatialRelation.CONTAINS == v)
+            {
+                str.Append(c);
+                //str.append(SpatialPrefixGrid.COVER);
+                matches.Add(new QuadCell(str.ToString(), v.Transpose(), this));
+            }
+            else if (SpatialRelation.DISJOINT == v)
+            {
+                // nothing
+            }
+            else
+            { // SpatialRelation.WITHIN, SpatialRelation.INTERSECTS
+                str.Append(c);
+
+                int nextLevel = level + 1;
+                if (nextLevel >= maxLevel)
+                {
+                    //str.append(SpatialPrefixGrid.INTERSECTS);
+                    matches.Add(new QuadCell(str.ToString(), v.Transpose(), this));
+                }
+                else
+                {
+                    Build(cx, cy, nextLevel, matches, str, shape, maxLevel);
+                }
+            }
+            str.Length = strlen;
+        }
+
+        public class QuadCell : Node
+        {
+            public QuadCell(String token, QuadPrefixTree enclosingInstance)
+                : base(enclosingInstance, token)
+            {
+            }
+
+            public QuadCell(String token, SpatialRelation shapeRel, QuadPrefixTree enclosingInstance)
+                : base(enclosingInstance, token)
+            {
+                this.shapeRel = shapeRel;
+            }
+
+            public override void Reset(string newToken)
+            {
+                base.Reset(newToken);
+                shape = null;
+            }
+
+            public override IList<Node> GetSubCells()
+            {
+                var tree = (QuadPrefixTree)spatialPrefixTree;
+                var cells = new List<Node>(4)
+                      {
+                          new QuadCell(GetTokenString() + "A", tree),
+                          new QuadCell(GetTokenString() + "B", tree),
+                          new QuadCell(GetTokenString() + "C", tree),
+                          new QuadCell(GetTokenString() + "D", tree)
+                      };
+                return cells;
+            }
+
+            public override int GetSubCellsSize()
+            {
+                return 4;
+            }
+
+            public override Node GetSubCell(Point p)
+            {
+                return ((QuadPrefixTree)spatialPrefixTree).GetNode(p, GetLevel() + 1); //not performant!
+            }
+
+            private Shape shape;//cache
+
+            public override Shape GetShape()
+            {
+                if (shape == null)
+                    shape = MakeShape();
+                return shape;
+            }
+
+            private Rectangle MakeShape()
+            {
+                String token = GetTokenString();
+                var tree = ((QuadPrefixTree)spatialPrefixTree);
+                double xmin = tree.xmin;
+                double ymin = tree.ymin;
+
+                for (int i = 0; i < token.Length; i++)
+                {
+                    char c = token[i];
+                    if ('A' == c || 'a' == c)
+                    {
+                        ymin += tree.levelH[i];
+                    }
+                    else if ('B' == c || 'b' == c)
+                    {
+                        xmin += tree.levelW[i];
+                        ymin += tree.levelH[i];
+                    }
+                    else if ('C' == c || 'c' == c)
+                    {
+                        // nothing really
+                    }
+                    else if ('D' == c || 'd' == c)
+                    {
+                        xmin += tree.levelW[i];
+                    }
+                    else
+                    {
+                        throw new Exception("unexpected char: " + c);
+                    }
+                }
+                int len = token.Length;
+                double width, height;
+                if (len > 0)
+                {
+                    width = tree.levelW[len - 1];
+                    height = tree.levelH[len - 1];
+                }
+                else
+                {
+                    width = tree.gridW;
+                    height = tree.gridH;
+                }
                 return spatialPrefixTree.ctx.MakeRectangle(xmin, xmin + width, ymin, ymin + height);
-			}
-		}//QuadCell
+            }
+        }//QuadCell
 
-	}
+    }
 }
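
For reference, the grid sizing above halves each dimension per level. A
minimal standalone sketch of the Init/GetLevelForDistance arithmetic,
assuming a 360x180 degree world grid and an arbitrary depth of 12 (the
actual DEFAULT_MAX_LEVELS value is not shown in this diff):

    using System;

    class QuadLevelSketch
    {
        static void Main()
        {
            const int maxLevels = 12;            // assumed depth, not the real constant
            var levelW = new double[maxLevels];
            var levelH = new double[maxLevels];
            levelW[0] = 360.0 / 2.0;             // world width halves at level 1
            levelH[0] = 180.0 / 2.0;             // world height halves at level 1
            for (int i = 1; i < maxLevels; i++)
            {
                levelW[i] = levelW[i - 1] / 2.0;
                levelH[i] = levelH[i - 1] / 2.0;
            }

            // Same rule as GetLevelForDistance: the first level whose cell is
            // smaller than dist in both dimensions is precise enough.
            double dist = 1.0; // degrees of acceptable error
            int level = maxLevels;
            for (int i = 0; i < maxLevels - 1; i++)
            {
                if (dist > levelW[i] && dist > levelH[i]) { level = i + 1; break; }
            }
            Console.WriteLine("1 degree of error -> level " + level); // prints 9
        }
    }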

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTree.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTree.cs b/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTree.cs
index d539b1a..908fded 100644
--- a/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTree.cs
+++ b/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTree.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -26,251 +26,251 @@ using Spatial4n.Core.Shapes;
 
 namespace Lucene.Net.Spatial.Prefix.Tree
 {
-	/// <summary>
+    /// <summary>
     /// A spatial Prefix Tree, or Trie, which decomposes shapes into prefixed strings at variable lengths corresponding to
-	/// variable precision.  Each string corresponds to a spatial region.
-	/// 
-	/// Implementations of this class should be thread-safe and immutable once initialized. 
-	/// </summary>
-	public abstract class SpatialPrefixTree
-	{
-		protected readonly int maxLevels;
-		internal readonly SpatialContext ctx;// it's internal to allow Node to access it
+    /// variable precision.  Each string corresponds to a spatial region.
+    /// 
+    /// Implementations of this class should be thread-safe and immutable once initialized. 
+    /// </summary>
+    public abstract class SpatialPrefixTree
+    {
+        protected readonly int maxLevels;
+        internal readonly SpatialContext ctx;// it's internal to allow Node to access it
 
-		protected SpatialPrefixTree(SpatialContext ctx, int maxLevels)
-		{
-			Debug.Assert(maxLevels > 0);
-			this.ctx = ctx;
-			this.maxLevels = maxLevels;
-		}
+        protected SpatialPrefixTree(SpatialContext ctx, int maxLevels)
+        {
+            Debug.Assert(maxLevels > 0);
+            this.ctx = ctx;
+            this.maxLevels = maxLevels;
+        }
 
-		public SpatialContext GetSpatialContext()
-		{
-			return ctx;
-		}
+        public SpatialContext GetSpatialContext()
+        {
+            return ctx;
+        }
 
-		public int GetMaxLevels()
-		{
-			return maxLevels;
-		}
+        public int GetMaxLevels()
+        {
+            return maxLevels;
+        }
 
-		public override String ToString()
-		{
-			return GetType().Name + "(maxLevels:" + maxLevels + ",ctx:" + ctx + ")";
-		}
+        public override String ToString()
+        {
+            return GetType().Name + "(maxLevels:" + maxLevels + ",ctx:" + ctx + ")";
+        }
 
-	    /// <summary>
-	    /// Returns the level of the largest grid in which its longest side is less
-	    /// than or equal to the provided distance (in degrees). Consequently {@link
-	    /// dist} acts as an error epsilon declaring the amount of detail needed in the
-	    /// grid, such that you can get a grid with just the right amount of
-	    /// precision.
-	    /// </summary>
+        /// <summary>
+        /// Returns the level of the largest grid whose longest side is less
+        /// than or equal to the provided distance (in degrees). Consequently <c>dist</c>
+        /// acts as an error epsilon declaring the amount of detail needed in the
+        /// grid, such that you can get a grid with just the right amount of
+        /// precision.
+        /// </summary>
         /// <param name="dist">>= 0</param>
         /// <returns>level [1 to maxLevels]</returns>
-		public abstract int GetLevelForDistance(double dist);
+        public abstract int GetLevelForDistance(double dist);
 
-		//TODO double getDistanceForLevel(int level)
+        //TODO double getDistanceForLevel(int level)
 
-		//[NotSerialized]
-		private Node worldNode;//cached
+        //[NotSerialized]
+        private Node worldNode;//cached
 
-		/*
-		 * Returns the level 0 cell which encompasses all spatial data. Equivalent to {@link #getNode(String)} with "".
-		 * This cell is threadsafe, just like a spatial prefix grid is, although cells aren't
-		 * generally threadsafe.
-		 * TODO rename to getTopCell or is this fine?
-		 */
-		public Node GetWorldNode()
-		{
-			if (worldNode == null)
-			{
-				worldNode = GetNode("");
-			}
-			return worldNode;
-		}
+        /*
+         * Returns the level 0 cell which encompasses all spatial data. Equivalent to {@link #getNode(String)} with "".
+         * This cell is threadsafe, just like a spatial prefix grid is, although cells aren't
+         * generally threadsafe.
+         * TODO rename to getTopCell or is this fine?
+         */
+        public Node GetWorldNode()
+        {
+            if (worldNode == null)
+            {
+                worldNode = GetNode("");
+            }
+            return worldNode;
+        }
 
-		/*
-		 * The cell for the specified token. The empty string should be equal to {@link #getWorldNode()}.
-		 * Precondition: Never called when token length > maxLevel.
-		 */
-		public abstract Node GetNode(String token);
+        /*
+         * The cell for the specified token. The empty string should be equal to {@link #getWorldNode()}.
+         * Precondition: Never called when token length > maxLevel.
+         */
+        public abstract Node GetNode(String token);
 
-		public abstract Node GetNode(byte[] bytes, int offset, int len);
+        public abstract Node GetNode(byte[] bytes, int offset, int len);
 
-		//public Node GetNode(byte[] bytes, int offset, int len, Node target)
-		//{
-		//    if (target == null)
-		//    {
-		//        return GetNode(bytes, offset, len);
-		//    }
+        //public Node GetNode(byte[] bytes, int offset, int len, Node target)
+        //{
+        //    if (target == null)
+        //    {
+        //        return GetNode(bytes, offset, len);
+        //    }
 
-		//    target.Reset(bytes, offset, len);
-		//    return target;
-		//}
+        //    target.Reset(bytes, offset, len);
+        //    return target;
+        //}
 
-		public Node GetNode(string token, Node target)
-		{
-			if (target == null)
-			{
-				return GetNode(token);
-			}
+        public Node GetNode(string token, Node target)
+        {
+            if (target == null)
+            {
+                return GetNode(token);
+            }
 
-			target.Reset(token);
-			return target;
-		}
+            target.Reset(token);
+            return target;
+        }
 
-		protected virtual Node GetNode(Point p, int level)
-		{
-			return GetNodes(p, level, false).ElementAt(0);
-		}
+        protected virtual Node GetNode(Point p, int level)
+        {
+            return GetNodes(p, level, false).ElementAt(0);
+        }
 
-		/*
-		 * Gets the intersecting & including cells for the specified shape, without exceeding detail level.
-		 * The result is a set of cells (no dups), sorted. Unmodifiable.
-		 * <p/>
-		 * This implementation checks if shape is a Point and if so uses an implementation that
-		 * recursively calls {@link Node#getSubCell(com.spatial4j.core.shape.Point)}. Cell subclasses
-		 * ideally implement that method with a quick implementation, otherwise, subclasses should
-		 * override this method to invoke {@link #getNodesAltPoint(com.spatial4j.core.shape.Point, int, boolean)}.
-		 * TODO consider another approach returning an iterator -- won't build up all cells in memory.
-		 */
-		public virtual IList<Node> GetNodes(Shape shape, int detailLevel, bool inclParents)
-		{
-			if (detailLevel > maxLevels)
-			{
-				throw new ArgumentException("detailLevel > maxLevels", "detailLevel");
-			}
+        /*
+         * Gets the intersecting & including cells for the specified shape, without exceeding detail level.
+         * The result is a set of cells (no dups), sorted. Unmodifiable.
+         * <p/>
+         * This implementation checks if shape is a Point and if so uses an implementation that
+         * recursively calls {@link Node#getSubCell(com.spatial4j.core.shape.Point)}. Cell subclasses
+         * ideally implement that method with a quick implementation, otherwise, subclasses should
+         * override this method to invoke {@link #getNodesAltPoint(com.spatial4j.core.shape.Point, int, boolean)}.
+         * TODO consider another approach returning an iterator -- won't build up all cells in memory.
+         */
+        public virtual IList<Node> GetNodes(Shape shape, int detailLevel, bool inclParents)
+        {
+            if (detailLevel > maxLevels)
+            {
+                throw new ArgumentException("detailLevel > maxLevels", "detailLevel");
+            }
 
-			List<Node> cells;
-			if (shape is Point)
-			{
-				//optimized point algorithm
-				int initialCapacity = inclParents ? 1 + detailLevel : 1;
-				cells = new List<Node>(initialCapacity);
-				RecursiveGetNodes(GetWorldNode(), (Point)shape, detailLevel, true, cells);
-				Debug.Assert(cells.Count == initialCapacity);
-			}
-			else
-			{
-				cells = new List<Node>(inclParents ? 1024 : 512);
-				RecursiveGetNodes(GetWorldNode(), shape, detailLevel, inclParents, cells);
-			}
-			if (inclParents)
-			{
-				Debug.Assert(cells[0].GetLevel() == 0);
-				cells.RemoveAt(0);//remove getWorldNode()
-			}
-			return cells;
-		}
+            List<Node> cells;
+            if (shape is Point)
+            {
+                //optimized point algorithm
+                int initialCapacity = inclParents ? 1 + detailLevel : 1;
+                cells = new List<Node>(initialCapacity);
+                RecursiveGetNodes(GetWorldNode(), (Point)shape, detailLevel, true, cells);
+                Debug.Assert(cells.Count == initialCapacity);
+            }
+            else
+            {
+                cells = new List<Node>(inclParents ? 1024 : 512);
+                RecursiveGetNodes(GetWorldNode(), shape, detailLevel, inclParents, cells);
+            }
+            if (inclParents)
+            {
+                Debug.Assert(cells[0].GetLevel() == 0);
+                cells.RemoveAt(0);//remove getWorldNode()
+            }
+            return cells;
+        }
 
-		private void RecursiveGetNodes(Node node, Shape shape, int detailLevel, bool inclParents, IList<Node> result)
-		{
-			if (node.IsLeaf())
-			{//cell is within shape
-				result.Add(node);
-				return;
-			}
+        private void RecursiveGetNodes(Node node, Shape shape, int detailLevel, bool inclParents, IList<Node> result)
+        {
+            if (node.IsLeaf())
+            {//cell is within shape
+                result.Add(node);
+                return;
+            }
 
-			var subCells = node.GetSubCells(shape);
-			if (node.GetLevel() == detailLevel - 1)
-			{
-				if (subCells.Count < node.GetSubCellsSize())
-				{
-					if (inclParents)
-						result.Add(node);
-					foreach (var subCell in subCells)
-					{
-						subCell.SetLeaf();
-						result.Add(subCell);
-					}
-				}
-				else
-				{//a bottom level (i.e. detail level) optimization where all boxes intersect, so use parent cell.
-					node.SetLeaf();
-					result.Add(node);
-				}
-			}
-			else
-			{
-				if (inclParents)
-				{
-					result.Add(node);
-				}
-				foreach (var subCell in subCells)
-				{
-					RecursiveGetNodes(subCell, shape, detailLevel, inclParents, result);//tail call
-				}
-			}
-		}
+            var subCells = node.GetSubCells(shape);
+            if (node.GetLevel() == detailLevel - 1)
+            {
+                if (subCells.Count < node.GetSubCellsSize())
+                {
+                    if (inclParents)
+                        result.Add(node);
+                    foreach (var subCell in subCells)
+                    {
+                        subCell.SetLeaf();
+                        result.Add(subCell);
+                    }
+                }
+                else
+                {//a bottom level (i.e. detail level) optimization where all boxes intersect, so use parent cell.
+                    node.SetLeaf();
+                    result.Add(node);
+                }
+            }
+            else
+            {
+                if (inclParents)
+                {
+                    result.Add(node);
+                }
+                foreach (var subCell in subCells)
+                {
+                    RecursiveGetNodes(subCell, shape, detailLevel, inclParents, result);//tail call
+                }
+            }
+        }
 
-		private void RecursiveGetNodes(Node node, Point point, int detailLevel, bool inclParents, IList<Node> result)
-		{
-			if (inclParents)
-			{
-				result.Add(node);
-			}
-			Node pCell = node.GetSubCell(point);
-			if (node.GetLevel() == detailLevel - 1)
-			{
-				pCell.SetLeaf();
-				result.Add(pCell);
-			}
-			else
-			{
-				RecursiveGetNodes(pCell, point, detailLevel, inclParents, result);//tail call
-			}
-		}
+        private void RecursiveGetNodes(Node node, Point point, int detailLevel, bool inclParents, IList<Node> result)
+        {
+            if (inclParents)
+            {
+                result.Add(node);
+            }
+            Node pCell = node.GetSubCell(point);
+            if (node.GetLevel() == detailLevel - 1)
+            {
+                pCell.SetLeaf();
+                result.Add(pCell);
+            }
+            else
+            {
+                RecursiveGetNodes(pCell, point, detailLevel, inclParents, result);//tail call
+            }
+        }
 
-		/*
-		 * Subclasses might override {@link #getNodes(com.spatial4j.core.shape.Shape, int, boolean)}
-		 * and check if the argument is a shape and if so, delegate
-		 * to this implementation, which calls {@link #getNode(com.spatial4j.core.shape.Point, int)} and
-		 * then calls {@link #getNode(String)} repeatedly if inclParents is true.
-		 */
-		protected virtual IList<Node> GetNodesAltPoint(Point p, int detailLevel, bool inclParents)
-		{
-			Node cell = GetNode(p, detailLevel);
-			if (!inclParents)
-			{
+        /*
+         * Subclasses might override {@link #getNodes(com.spatial4j.core.shape.Shape, int, boolean)}
+         * and check if the argument is a shape and if so, delegate
+         * to this implementation, which calls {@link #getNode(com.spatial4j.core.shape.Point, int)} and
+         * then calls {@link #getNode(String)} repeatedly if inclParents is true.
+         */
+        protected virtual IList<Node> GetNodesAltPoint(Point p, int detailLevel, bool inclParents)
+        {
+            Node cell = GetNode(p, detailLevel);
+            if (!inclParents)
+            {
 #if !NET35
-				return new ReadOnlyCollectionBuilder<Node>(new[] { cell }).ToReadOnlyCollection();
+                return new ReadOnlyCollectionBuilder<Node>(new[] { cell }).ToReadOnlyCollection();
 #else
                 return new List<Node>(new[] { cell }).AsReadOnly();
 #endif
-			}
+            }
 
-			String endToken = cell.GetTokenString();
-			Debug.Assert(endToken.Length == detailLevel);
-			var cells = new List<Node>(detailLevel);
-			for (int i = 1; i < detailLevel; i++)
-			{
-				cells.Add(GetNode(endToken.Substring(0, i)));
-			}
-			cells.Add(cell);
-			return cells;
-		}
+            String endToken = cell.GetTokenString();
+            Debug.Assert(endToken.Length == detailLevel);
+            var cells = new List<Node>(detailLevel);
+            for (int i = 1; i < detailLevel; i++)
+            {
+                cells.Add(GetNode(endToken.Substring(0, i)));
+            }
+            cells.Add(cell);
+            return cells;
+        }
 
-		/*
-		 * Will add the trailing leaf byte for leaves. This isn't particularly efficient.
-		 */
-		public static List<String> NodesToTokenStrings(Collection<Node> nodes)
-		{
-			var tokens = new List<String>((nodes.Count));
-			foreach (Node node in nodes)
-			{
-				String token = node.GetTokenString();
-				if (node.IsLeaf())
-				{
-					tokens.Add(token + (char)Node.LEAF_BYTE);
-				}
-				else
-				{
-					tokens.Add(token);
-				}
-			}
-			return tokens;
-		}
+        /*
+         * Will add the trailing leaf byte for leaves. This isn't particularly efficient.
+         */
+        public static List<String> NodesToTokenStrings(Collection<Node> nodes)
+        {
+            var tokens = new List<String>((nodes.Count));
+            foreach (Node node in nodes)
+            {
+                String token = node.GetTokenString();
+                if (node.IsLeaf())
+                {
+                    tokens.Add(token + (char)Node.LEAF_BYTE);
+                }
+                else
+                {
+                    tokens.Add(token);
+                }
+            }
+            return tokens;
+        }
 
-	}
+    }
 }
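
A note on the token scheme GetNodesAltPoint relies on: every ancestor cell
is a prefix of the leaf cell's token, so parents can be enumerated with
plain substrings. A hedged sketch of that idea (the token "DAB" is
hypothetical):

    using System;
    using System.Collections.Generic;

    class PrefixSketch
    {
        static void Main()
        {
            string endToken = "DAB";                  // a level-3 quad cell
            var tokens = new List<string>();
            for (int i = 1; i < endToken.Length; i++)
                tokens.Add(endToken.Substring(0, i)); // "D", then "DA"
            tokens.Add(endToken);                     // the cell itself
            Console.WriteLine(string.Join(" -> ", tokens)); // D -> DA -> DAB
        }
    }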

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTreeFactory.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTreeFactory.cs b/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTreeFactory.cs
index 1d26f3a..e4e73c6 100644
--- a/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTreeFactory.cs
+++ b/src/contrib/Spatial/Prefix/Tree/SpatialPrefixTreeFactory.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -26,49 +26,49 @@ namespace Lucene.Net.Spatial.Prefix.Tree
     /// Abstract Factory for creating {@link SpatialPrefixTree} instances with useful
     /// defaults and passed-in configurations defined in a Map.
     /// </summary>
-	public abstract class SpatialPrefixTreeFactory
-	{
-		private const double DEFAULT_GEO_MAX_DETAIL_KM = 0.001; //1m
+    public abstract class SpatialPrefixTreeFactory
+    {
+        private const double DEFAULT_GEO_MAX_DETAIL_KM = 0.001; //1m
         public static readonly String PREFIX_TREE = "prefixTree";
         public static readonly String MAX_LEVELS = "maxLevels";
         public static readonly String MAX_DIST_ERR = "maxDistErr";
 
-		protected Dictionary<String, String> args;
-		protected SpatialContext ctx;
-		protected int? maxLevels;
+        protected Dictionary<String, String> args;
+        protected SpatialContext ctx;
+        protected int? maxLevels;
 
-		/// <summary>
-		/// The factory  is looked up via "prefixTree" in args, expecting "geohash" or "quad".
-		/// If its neither of these, then "geohash" is chosen for a geo context, otherwise "quad" is chosen.
-		/// </summary>
-		/// <param name="args"></param>
-		/// <param name="ctx"></param>
-		/// <returns></returns>
-		public static SpatialPrefixTree MakeSPT(Dictionary<String, String> args, SpatialContext ctx)
-		{
-			SpatialPrefixTreeFactory instance;
-			String cname;
+        /// <summary>
+        /// The factory is looked up via "prefixTree" in args, expecting "geohash" or "quad".
+        /// If it's neither of these, then "geohash" is chosen for a geo context, otherwise "quad" is chosen.
+        /// </summary>
+        /// <param name="args"></param>
+        /// <param name="ctx"></param>
+        /// <returns></returns>
+        public static SpatialPrefixTree MakeSPT(Dictionary<String, String> args, SpatialContext ctx)
+        {
+            SpatialPrefixTreeFactory instance;
+            String cname;
             if (!args.TryGetValue(PREFIX_TREE, out cname) || cname == null)
-				cname = ctx.IsGeo() ? "geohash" : "quad";
-			if ("geohash".Equals(cname, StringComparison.InvariantCultureIgnoreCase))
-				instance = new GeohashPrefixTree.Factory();
-			else if ("quad".Equals(cname, StringComparison.InvariantCultureIgnoreCase))
-				instance = new QuadPrefixTree.Factory();
-			else
-			{
-				Type t = Type.GetType(cname);
-				instance = (SpatialPrefixTreeFactory)Activator.CreateInstance(t);
-			}
-			instance.Init(args, ctx);
-			return instance.NewSPT();
-		}
+                cname = ctx.IsGeo() ? "geohash" : "quad";
+            if ("geohash".Equals(cname, StringComparison.InvariantCultureIgnoreCase))
+                instance = new GeohashPrefixTree.Factory();
+            else if ("quad".Equals(cname, StringComparison.InvariantCultureIgnoreCase))
+                instance = new QuadPrefixTree.Factory();
+            else
+            {
+                Type t = Type.GetType(cname);
+                instance = (SpatialPrefixTreeFactory)Activator.CreateInstance(t);
+            }
+            instance.Init(args, ctx);
+            return instance.NewSPT();
+        }
 
-		protected void Init(Dictionary<String, String> args, SpatialContext ctx)
-		{
-			this.args = args;
-			this.ctx = ctx;
-			InitMaxLevels();
-		}
+        protected void Init(Dictionary<String, String> args, SpatialContext ctx)
+        {
+            this.args = args;
+            this.ctx = ctx;
+            InitMaxLevels();
+        }
 
         protected void InitMaxLevels()
         {
@@ -93,10 +93,10 @@ namespace Lucene.Net.Spatial.Prefix.Tree
             maxLevels = GetLevelForDistance(degrees);
         }
 
-	    /* Calls {@link SpatialPrefixTree#getLevelForDistance(double)}. */
-		protected abstract int GetLevelForDistance(double degrees);
+        /* Calls {@link SpatialPrefixTree#getLevelForDistance(double)}. */
+        protected abstract int GetLevelForDistance(double degrees);
 
-		protected abstract SpatialPrefixTree NewSPT();
+        protected abstract SpatialPrefixTree NewSPT();
 
-	}
+    }
 }
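
Typical use of the factory, sketched under the assumption that Spatial4n's
static SpatialContext.GEO context exists as in spatial4j; the "maxLevels"
value is parsed by InitMaxLevels (only partially shown in this hunk):

    using System.Collections.Generic;
    using Lucene.Net.Spatial.Prefix.Tree;
    using Spatial4n.Core.Context;

    class FactorySketch
    {
        static void Main()
        {
            var args = new Dictionary<string, string>
            {
                { "prefixTree", "geohash" }, // or "quad"; omit to let MakeSPT pick via ctx.IsGeo()
                { "maxLevels", "11" }
            };
            SpatialPrefixTree tree = SpatialPrefixTreeFactory.MakeSPT(args, SpatialContext.GEO);
        }
    }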

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Properties/AssemblyInfo.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Properties/AssemblyInfo.cs b/src/contrib/Spatial/Properties/AssemblyInfo.cs
index dd324bd..a5b4d0a 100644
--- a/src/contrib/Spatial/Properties/AssemblyInfo.cs
+++ b/src/contrib/Spatial/Properties/AssemblyInfo.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Queries/SpatialArgs.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Queries/SpatialArgs.cs b/src/contrib/Spatial/Queries/SpatialArgs.cs
index 796e55a..9af4c7f 100644
--- a/src/contrib/Spatial/Queries/SpatialArgs.cs
+++ b/src/contrib/Spatial/Queries/SpatialArgs.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -23,31 +23,31 @@ using Spatial4n.Core.Shapes;
 
 namespace Spatial4n.Core.Exceptions
 {
-	[Serializable]
-	public class InvalidSpatialArgument : ArgumentException
-	{
-		public InvalidSpatialArgument(String reason)
-			: base(reason)
-		{
-		}
-	}
+    [Serializable]
+    public class InvalidSpatialArgument : ArgumentException
+    {
+        public InvalidSpatialArgument(String reason)
+            : base(reason)
+        {
+        }
+    }
 }
 
 namespace Lucene.Net.Spatial.Queries
 {
-	public class SpatialArgs
-	{
-		public static readonly double DEFAULT_DISTERRPCT = 0.025d;
+    public class SpatialArgs
+    {
+        public static readonly double DEFAULT_DISTERRPCT = 0.025d;
 
-		public SpatialOperation Operation { get; set; }
+        public SpatialOperation Operation { get; set; }
 
-	    public SpatialArgs(SpatialOperation operation, Shape shape)
-		{
+        public SpatialArgs(SpatialOperation operation, Shape shape)
+        {
             if (operation == null || shape == null)
                 throw new ArgumentException("operation and shape are required");
-			this.Operation = operation;
-			this.Shape = shape;
-		}
+            this.Operation = operation;
+            this.Shape = shape;
+        }
 
         /// <summary>
         /// Computes the distance given a shape and the {@code distErrPct}.  The
@@ -92,51 +92,51 @@ namespace Lucene.Net.Spatial.Queries
             return CalcDistanceFromErrPct(Shape, distErrPct.Value, ctx);
         }
 
-	    /// <summary>
-		/// Check if the arguments make sense -- throw an exception if not
-		/// </summary>
-		public void Validate()
-		{
-			if (Operation.IsTargetNeedsArea() && !Shape.HasArea())
-			{
+        /// <summary>
+        /// Check if the arguments make sense -- throw an exception if not
+        /// </summary>
+        public void Validate()
+        {
+            if (Operation.IsTargetNeedsArea() && !Shape.HasArea())
+            {
                 throw new ArgumentException(Operation + " only supports geometry with area");
-			}
-		}
+            }
+        }
 
-		public override String ToString()
-		{
+        public override String ToString()
+        {
             return SpatialArgsParser.WriteSpatialArgs(this);
-		}
+        }
 
-		//------------------------------------------------
-		// Getters & Setters
-		//------------------------------------------------
+        //------------------------------------------------
+        // Getters & Setters
+        //------------------------------------------------
 
-	    public Shape Shape { get; set; }
+        public Shape Shape { get; set; }
 
-	    /// <summary>
-	    /// A measure of acceptable error of the shape as a fraction. This effectively
-	    /// inflates the size of the shape but should not shrink it.
-	    /// <p/>
-	    /// The default is {@link #DEFAULT_DIST_PRECISION}
-	    /// </summary>
-	    /// <returns>0 to 0.5</returns>
-	    public double? DistErrPct
-	    {
-	        get { return distErrPct; }
-	        set
-	        {
-	            if (value != null)
-	                distErrPct = value.Value;
-	        }
-	    }
+        /// <summary>
+        /// A measure of acceptable error of the shape as a fraction. This effectively
+        /// inflates the size of the shape but should not shrink it.
+        /// <p/>
+        /// The default is {@link #DEFAULT_DISTERRPCT}
+        /// </summary>
+        /// <returns>0 to 0.5</returns>
+        public double? DistErrPct
+        {
+            get { return distErrPct; }
+            set
+            {
+                if (value != null)
+                    distErrPct = value.Value;
+            }
+        }
         private double? distErrPct;
 
-	    /// <summary>
+        /// <summary>
         /// The acceptable error of the shape.  This effectively inflates the
         /// size of the shape but should not shrink it.
         /// </summary>
         /// <returns>>= 0</returns>
-	    public double? DistErr { get; set; }
-	}
+        public double? DistErr { get; set; }
+    }
 }
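
A short construction sketch, again assuming SpatialContext.GEO; the
MakeRectangle(minX, maxX, minY, maxY) call matches the signature used in
QuadPrefixTree above:

    using Lucene.Net.Spatial.Queries;
    using Spatial4n.Core.Context;

    class ArgsSketch
    {
        static void Main()
        {
            var ctx = SpatialContext.GEO;
            var shape = ctx.MakeRectangle(-10, 10, -5, 5);
            var args = new SpatialArgs(SpatialOperation.Intersects, shape)
            {
                DistErrPct = SpatialArgs.DEFAULT_DISTERRPCT // 0.025 = 2.5% of the shape's size
            };
            args.Validate(); // throws only if the operation needs area and the shape has none
        }
    }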

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Queries/SpatialArgsParser.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Queries/SpatialArgsParser.cs b/src/contrib/Spatial/Queries/SpatialArgsParser.cs
index 78c1b5d..6335780 100644
--- a/src/contrib/Spatial/Queries/SpatialArgsParser.cs
+++ b/src/contrib/Spatial/Queries/SpatialArgsParser.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -24,8 +24,8 @@ using Spatial4n.Core.Shapes;
 
 namespace Lucene.Net.Spatial.Queries
 {
-	public class SpatialArgsParser
-	{
+    public class SpatialArgsParser
+    {
         public const String DIST_ERR_PCT = "distErrPct";
         public const String DIST_ERR = "distErr";
 
@@ -54,58 +54,58 @@ namespace Lucene.Net.Spatial.Queries
         /// <param name="v"></param>
         /// <param name="ctx"></param>
         /// <returns></returns>
-	    public SpatialArgs Parse(String v, SpatialContext ctx)
-		{
-			int idx = v.IndexOf('(');
-			int edx = v.LastIndexOf(')');
+        public SpatialArgs Parse(String v, SpatialContext ctx)
+        {
+            int idx = v.IndexOf('(');
+            int edx = v.LastIndexOf(')');
 
-			if (idx < 0 || idx > edx)
-			{
+            if (idx < 0 || idx > edx)
+            {
                 throw new ArgumentException("missing parens: " + v);
-			}
+            }
 
-			SpatialOperation op = SpatialOperation.Get(v.Substring(0, idx).Trim());
+            SpatialOperation op = SpatialOperation.Get(v.Substring(0, idx).Trim());
 
-			//Substring in .NET is (startPosn, length), But in Java it's (startPosn, endPosn)
-			//see http://docs.oracle.com/javase/1.4.2/docs/api/java/lang/String.html#substring(int, int)
-			String body = v.Substring(idx + 1, edx - (idx + 1)).Trim();
-			if (body.Length < 1)
-			{
-				throw new ArgumentException("missing body : " + v);
-			}
+            //Substring in .NET is (startPosn, length), But in Java it's (startPosn, endPosn)
+            //see http://docs.oracle.com/javase/1.4.2/docs/api/java/lang/String.html#substring(int, int)
+            String body = v.Substring(idx + 1, edx - (idx + 1)).Trim();
+            if (body.Length < 1)
+            {
+                throw new ArgumentException("missing body : " + v);
+            }
 
             var shape = ctx.ReadShape(body);
-			var args = new SpatialArgs(op, shape);
+            var args = new SpatialArgs(op, shape);
 
-			if (v.Length > (edx + 1))
-			{
-				body = v.Substring(edx + 1).Trim();
-				if (body.Length > 0)
-				{
-					Dictionary<String, String> aa = ParseMap(body);
+            if (v.Length > (edx + 1))
+            {
+                body = v.Substring(edx + 1).Trim();
+                if (body.Length > 0)
+                {
+                    Dictionary<String, String> aa = ParseMap(body);
                     // tolerate absent keys: TryGetValue yields null, and ReadDouble maps that to null
                     String distErrPctStr, distErrStr;
                     args.DistErrPct = ReadDouble(aa.TryGetValue(DIST_ERR_PCT, out distErrPctStr) ? distErrPctStr : null); aa.Remove(DIST_ERR_PCT);
                     args.DistErr = ReadDouble(aa.TryGetValue(DIST_ERR, out distErrStr) ? distErrStr : null); aa.Remove(DIST_ERR);
-					if (aa.Count != 0)
-					{
-						throw new ArgumentException("unused parameters: " + aa);
-					}
-				}
-			}
+                    if (aa.Count != 0)
+                    {
+                        throw new ArgumentException("unused parameters: " + aa);
+                    }
+                }
+            }
             args.Validate();
-			return args;
-		}
+            return args;
+        }
 
-		protected static double? ReadDouble(String v)
-		{
-			double val;
-			return double.TryParse(v, out val) ? val : (double?)null;
-		}
+        protected static double? ReadDouble(String v)
+        {
+            double val;
+            return double.TryParse(v, out val) ? val : (double?)null;
+        }
 
-		protected static bool ReadBool(String v, bool defaultValue)
-		{
-			bool ret;
-			return bool.TryParse(v, out ret) ? ret : defaultValue;
-		}
+        protected static bool ReadBool(String v, bool defaultValue)
+        {
+            bool ret;
+            return bool.TryParse(v, out ret) ? ret : defaultValue;
+        }
 
         /// <summary>
         /// Parses "a=b c=d f" (whitespace separated) into name-value pairs. If there
@@ -113,28 +113,28 @@ namespace Lucene.Net.Spatial.Queries
         /// </summary>
         /// <param name="body"></param>
         /// <returns></returns>
-		protected static Dictionary<String, String> ParseMap(String body)
-		{
-			var map = new Dictionary<String, String>();
-			int tokenPos = 0;
-			var st = body.Split(new[] {' ', '\n', '\t'}, StringSplitOptions.RemoveEmptyEntries);
-			while (tokenPos < st.Length)
-			{
-				String a = st[tokenPos++];
-				int idx = a.IndexOf('=');
-				if (idx > 0)
-				{
-					String k = a.Substring(0, idx);
-					String v = a.Substring(idx + 1);
-					map[k] = v;
-				}
-				else
-				{
-					map[a] = a;
-				}
-			}
-			return map;
-		}
+        protected static Dictionary<String, String> ParseMap(String body)
+        {
+            var map = new Dictionary<String, String>();
+            int tokenPos = 0;
+            var st = body.Split(new[] {' ', '\n', '\t'}, StringSplitOptions.RemoveEmptyEntries);
+            while (tokenPos < st.Length)
+            {
+                String a = st[tokenPos++];
+                int idx = a.IndexOf('=');
+                if (idx > 0)
+                {
+                    String k = a.Substring(0, idx);
+                    String v = a.Substring(idx + 1);
+                    map[k] = v;
+                }
+                else
+                {
+                    map[a] = a;
+                }
+            }
+            return map;
+        }
 
-	}
+    }
 }
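
The string format Parse expects is "Operation(shapeBody) key=value ...".
A hedged example; the ENVELOPE(minX, maxX, maxY, minY) body is assumed to
be one of the formats ctx.ReadShape accepts:

    using System;
    using Lucene.Net.Spatial.Queries;
    using Spatial4n.Core.Context;

    class ParserSketch
    {
        static void Main()
        {
            var parser = new SpatialArgsParser();
            SpatialArgs args = parser.Parse(
                "Intersects(ENVELOPE(-10, 10, 5, -5)) distErrPct=0.025",
                SpatialContext.GEO);
            Console.WriteLine(args); // round-trips via SpatialArgsParser.WriteSpatialArgs
        }
    }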

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Queries/SpatialOperation.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Queries/SpatialOperation.cs b/src/contrib/Spatial/Queries/SpatialOperation.cs
index 60a26d9..af82b7d 100644
--- a/src/contrib/Spatial/Queries/SpatialOperation.cs
+++ b/src/contrib/Spatial/Queries/SpatialOperation.cs
@@ -1,4 +1,4 @@
-/* See the NOTICE file distributed with
+/* See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
  * Esri Inc. licenses this file to You under the Apache License, Version 2.0
  * (the "License"); you may not use this file except in compliance with
@@ -21,96 +21,96 @@ using Spatial4n.Core.Exceptions;
 
 namespace Lucene.Net.Spatial.Queries
 {
-	public class SpatialOperation
-	{
-		// Private registry
-		private static readonly Dictionary<String, SpatialOperation> registry = new Dictionary<string, SpatialOperation>();
-		private static readonly IList<SpatialOperation> list = new List<SpatialOperation>();
+    public class SpatialOperation
+    {
+        // Private registry
+        private static readonly Dictionary<String, SpatialOperation> registry = new Dictionary<string, SpatialOperation>();
+        private static readonly IList<SpatialOperation> list = new List<SpatialOperation>();
 
-		// Geometry Operations
+        // Geometry Operations
 
         /// <summary>
         /// Bounding box of the *indexed* shape.
         /// </summary>
-		public static readonly SpatialOperation BBoxIntersects = new SpatialOperation("BBoxIntersects", true, false, false);
+        public static readonly SpatialOperation BBoxIntersects = new SpatialOperation("BBoxIntersects", true, false, false);
         
         /// <summary>
         /// Bounding box of the *indexed* shape.
         /// </summary>
-		public static readonly SpatialOperation BBoxWithin = new SpatialOperation("BBoxWithin", true, false, false);
-
-		public static readonly SpatialOperation Contains = new SpatialOperation("Contains", true, true, false);
-		public static readonly SpatialOperation Intersects = new SpatialOperation("Intersects", true, false, false);
-		public static readonly SpatialOperation IsEqualTo = new SpatialOperation("IsEqualTo", false, false, false);
-		public static readonly SpatialOperation IsDisjointTo = new SpatialOperation("IsDisjointTo", false, false, false);
-		public static readonly SpatialOperation IsWithin = new SpatialOperation("IsWithin", true, false, true);
-		public static readonly SpatialOperation Overlaps = new SpatialOperation("Overlaps", true, false, true);
-
-		// Member variables
-		private readonly bool scoreIsMeaningful;
-		private readonly bool sourceNeedsArea;
-		private readonly bool targetNeedsArea;
-		private readonly String name;
-
-		protected SpatialOperation(String name, bool scoreIsMeaningful, bool sourceNeedsArea, bool targetNeedsArea)
-		{
-			this.name = name;
-			this.scoreIsMeaningful = scoreIsMeaningful;
-			this.sourceNeedsArea = sourceNeedsArea;
-			this.targetNeedsArea = targetNeedsArea;
-			registry[name] = this;
-			registry[name.ToUpper(CultureInfo.CreateSpecificCulture("en-US"))] = this;
-			list.Add(this);
-		}
-
-		public static SpatialOperation Get(String v)
-		{
-			SpatialOperation op;
-			if (!registry.TryGetValue(v, out op) || op == null)
-			{
-				if (!registry.TryGetValue(v.ToUpper(CultureInfo.CreateSpecificCulture("en-US")), out op) || op == null)
-					throw new ArgumentException("Unknown Operation: " + v, "v");
-			}
-			return op;
-		}
-
-		public static IList<SpatialOperation> Values()
-		{
-			return list;
-		}
-
-		public static bool Is(SpatialOperation op, params SpatialOperation[] tst)
-		{
-			return tst.Any(t => op == t);
-		}
-
-
-		// ================================================= Getters / Setters =============================================
-
-		public bool IsScoreIsMeaningful()
-		{
-			return scoreIsMeaningful;
-		}
-
-		public bool IsSourceNeedsArea()
-		{
-			return sourceNeedsArea;
-		}
-
-		public bool IsTargetNeedsArea()
-		{
-			return targetNeedsArea;
-		}
-
-		public String GetName()
-		{
-			return name;
-		}
-
-		public override String ToString()
-		{
-			return name;
-		}
-
-	}
+        public static readonly SpatialOperation BBoxWithin = new SpatialOperation("BBoxWithin", true, false, false);
+
+        public static readonly SpatialOperation Contains = new SpatialOperation("Contains", true, true, false);
+        public static readonly SpatialOperation Intersects = new SpatialOperation("Intersects", true, false, false);
+        public static readonly SpatialOperation IsEqualTo = new SpatialOperation("IsEqualTo", false, false, false);
+        public static readonly SpatialOperation IsDisjointTo = new SpatialOperation("IsDisjointTo", false, false, false);
+        public static readonly SpatialOperation IsWithin = new SpatialOperation("IsWithin", true, false, true);
+        public static readonly SpatialOperation Overlaps = new SpatialOperation("Overlaps", true, false, true);
+
+        // Member variables
+        private readonly bool scoreIsMeaningful;
+        private readonly bool sourceNeedsArea;
+        private readonly bool targetNeedsArea;
+        private readonly String name;
+
+        protected SpatialOperation(String name, bool scoreIsMeaningful, bool sourceNeedsArea, bool targetNeedsArea)
+        {
+            this.name = name;
+            this.scoreIsMeaningful = scoreIsMeaningful;
+            this.sourceNeedsArea = sourceNeedsArea;
+            this.targetNeedsArea = targetNeedsArea;
+            registry[name] = this;
+            registry[name.ToUpper(CultureInfo.CreateSpecificCulture("en-US"))] = this;
+            list.Add(this);
+        }
+
+        public static SpatialOperation Get(String v)
+        {
+            SpatialOperation op;
+            if (!registry.TryGetValue(v, out op) || op == null)
+            {
+                if (!registry.TryGetValue(v.ToUpper(CultureInfo.CreateSpecificCulture("en-US")), out op) || op == null)
+                    throw new ArgumentException("Unknown Operation: " + v, "v");
+            }
+            return op;
+        }
+
+        public static IList<SpatialOperation> Values()
+        {
+            return list;
+        }
+
+        public static bool Is(SpatialOperation op, params SpatialOperation[] tst)
+        {
+            return tst.Any(t => op == t);
+        }
+
+
+        // ================================================= Getters / Setters =============================================
+
+        public bool IsScoreIsMeaningful()
+        {
+            return scoreIsMeaningful;
+        }
+
+        public bool IsSourceNeedsArea()
+        {
+            return sourceNeedsArea;
+        }
+
+        public bool IsTargetNeedsArea()
+        {
+            return targetNeedsArea;
+        }
+
+        public String GetName()
+        {
+            return name;
+        }
+
+        public override String ToString()
+        {
+            return name;
+        }
+
+    }
 }
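
Because the constructor registers each operation under both its exact name
and its en-US upper-case form, and Get falls back to an upper-case lookup,
the built-in names resolve case-insensitively:

    using System;
    using Lucene.Net.Spatial.Queries;

    class OpSketch
    {
        static void Main()
        {
            SpatialOperation a = SpatialOperation.Get("Intersects");
            SpatialOperation b = SpatialOperation.Get("INTERSECTS");
            Console.WriteLine(ReferenceEquals(a, b)); // True: same registered instance
            Console.WriteLine(SpatialOperation.Is(a, SpatialOperation.Contains,
                                                  SpatialOperation.Intersects)); // True
        }
    }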

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Queries/UnsupportedSpatialOperation.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Queries/UnsupportedSpatialOperation.cs b/src/contrib/Spatial/Queries/UnsupportedSpatialOperation.cs
index b542f3a..ad93734 100644
--- a/src/contrib/Spatial/Queries/UnsupportedSpatialOperation.cs
+++ b/src/contrib/Spatial/Queries/UnsupportedSpatialOperation.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -19,11 +19,11 @@ using System;
 
 namespace Lucene.Net.Spatial.Queries
 {
-	[Serializable]
-	public class UnsupportedSpatialOperation : InvalidOperationException
-	{
-		public UnsupportedSpatialOperation(SpatialOperation op) : base(op.GetName())
-		{
-		}
-	}
+    [Serializable]
+    public class UnsupportedSpatialOperation : InvalidOperationException
+    {
+        public UnsupportedSpatialOperation(SpatialOperation op) : base(op.GetName())
+        {
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/SpatialStrategy.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/SpatialStrategy.cs b/src/contrib/Spatial/SpatialStrategy.cs
index c4bcc10..f47deca 100644
--- a/src/contrib/Spatial/SpatialStrategy.cs
+++ b/src/contrib/Spatial/SpatialStrategy.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -26,101 +26,101 @@ using Spatial4n.Core.Shapes;
 
 namespace Lucene.Net.Spatial
 {
-	/// <summary>
-	/// The SpatialStrategy encapsulates an approach to indexing and searching based on shapes.
-	/// <p/>
-	/// Note that a SpatialStrategy is not involved with the Lucene stored field values of shapes, which is
-	/// immaterial to indexing and search.
-	/// <p/>
-	/// Thread-safe.
-	/// </summary>
-	public abstract class SpatialStrategy
-	{
-		protected readonly SpatialContext ctx;
-		protected readonly string fieldName;
+    /// <summary>
+    /// The SpatialStrategy encapsulates an approach to indexing and searching based on shapes.
+    /// <p/>
+    /// Note that a SpatialStrategy is not involved with the Lucene stored field values of shapes, which is
+    /// immaterial to indexing and search.
+    /// <p/>
+    /// Thread-safe.
+    /// </summary>
+    public abstract class SpatialStrategy
+    {
+        protected readonly SpatialContext ctx;
+        protected readonly string fieldName;
 
-	    /// <summary>
-	    /// Constructs the spatial strategy with its mandatory arguments.
-	    /// </summary>
-	    /// <param name="ctx"></param>
-	    /// <param name="fieldName"> </param>
-	    protected SpatialStrategy(SpatialContext ctx, string fieldName)
-		{
-			if (ctx == null)
-				throw new ArgumentException("ctx is required", "ctx");
-			this.ctx = ctx;
-			if (string.IsNullOrEmpty(fieldName))
-				throw new ArgumentException("fieldName is required", "fieldName");
-			this.fieldName = fieldName;
-		}
+        /// <summary>
+        /// Constructs the spatial strategy with its mandatory arguments.
+        /// </summary>
+        /// <param name="ctx"></param>
+        /// <param name="fieldName"> </param>
+        protected SpatialStrategy(SpatialContext ctx, string fieldName)
+        {
+            if (ctx == null)
+                throw new ArgumentException("ctx is required", "ctx");
+            this.ctx = ctx;
+            if (string.IsNullOrEmpty(fieldName))
+                throw new ArgumentException("fieldName is required", "fieldName");
+            this.fieldName = fieldName;
+        }
 
-		public SpatialContext GetSpatialContext()
-		{
-			return ctx;
-		}
+        public SpatialContext GetSpatialContext()
+        {
+            return ctx;
+        }
 
-		/// <summary>
-		/// The name of the field or the prefix of them if there are multiple
-		/// fields needed internally.
-		/// </summary>
-		/// <returns></returns>
-		public String GetFieldName()
-		{
-			return fieldName;
-		}
+        /// <summary>
+        /// The name of the field or the prefix of them if there are multiple
+        /// fields needed internally.
+        /// </summary>
+        /// <returns></returns>
+        public String GetFieldName()
+        {
+            return fieldName;
+        }
 
-		/// <summary>
-		/// Returns the IndexableField(s) from the <c>shape</c> that are to be
-		/// added to the {@link org.apache.lucene.document.Document}.  These fields
-		/// are expected to be marked as indexed and not stored.
-		/// <p/>
-		/// Note: If you want to <i>store</i> the shape as a string for retrieval in search
-		/// results, you could add it like this:
-		/// <pre>document.add(new StoredField(fieldName,ctx.toString(shape)));</pre>
-		/// The particular string representation used doesn't matter to the Strategy since it
-		/// doesn't use it.
-		/// </summary>
-		/// <param name="shape"></param>
-		/// <returns>Not null nor will it have null elements.</returns>
-		public abstract AbstractField[] CreateIndexableFields(Shape shape);
+        /// <summary>
+        /// Returns the IndexableField(s) from the <c>shape</c> that are to be
+        /// added to the {@link org.apache.lucene.document.Document}.  These fields
+        /// are expected to be marked as indexed and not stored.
+        /// <p/>
+        /// Note: If you want to <i>store</i> the shape as a string for retrieval in search
+        /// results, you could add it like this:
+        /// <pre>document.Add(CreateStoredField(shape));</pre>
+        /// The particular string representation used doesn't matter to the Strategy since it
+        /// doesn't use it.
+        /// </summary>
+        /// <param name="shape"></param>
+        /// <returns>Not null nor will it have null elements.</returns>
+        public abstract AbstractField[] CreateIndexableFields(Shape shape);
 
-		public AbstractField CreateStoredField(Shape shape)
-		{
-			return new Field(GetFieldName(), ctx.ToString(shape), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO);
-		}
+        public AbstractField CreateStoredField(Shape shape)
+        {
+            return new Field(GetFieldName(), ctx.ToString(shape), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO);
+        }
 
-		/// <summary>
-		/// Make a ValueSource returning the distance between the center of the
-		/// indexed shape and {@code queryPoint}.  If there are multiple indexed shapes
-		/// then the closest one is chosen.
-		/// </summary>
-		public abstract ValueSource MakeDistanceValueSource(Point queryPoint);
+        /// <summary>
+        /// Make a ValueSource returning the distance between the center of the
+        /// indexed shape and {@code queryPoint}.  If there are multiple indexed shapes
+        /// then the closest one is chosen.
+        /// </summary>
+        public abstract ValueSource MakeDistanceValueSource(Point queryPoint);
 
-	    /// <summary>
-	    /// Make a (ConstantScore) Query based principally on {@link org.apache.lucene.spatial.query.SpatialOperation}
-	    /// and {@link Shape} from the supplied {@code args}.
-	    /// The default implementation is
-	    /// <pre>return new ConstantScoreQuery(makeFilter(args));</pre>
-	    /// </summary>
-	    /// <param name="args"></param>
-	    /// <returns></returns>
-	    public virtual ConstantScoreQuery MakeQuery(SpatialArgs args)
-		{
+        /// <summary>
+        /// Make a (ConstantScore) Query based principally on {@link org.apache.lucene.spatial.query.SpatialOperation}
+        /// and {@link Shape} from the supplied {@code args}.
+        /// The default implementation is
+        /// <pre>return new ConstantScoreQuery(makeFilter(args));</pre>
+        /// </summary>
+        /// <param name="args"></param>
+        /// <returns></returns>
+        public virtual ConstantScoreQuery MakeQuery(SpatialArgs args)
+        {
             return new ConstantScoreQuery(MakeFilter(args));
-		}
+        }
 
-		/// <summary>
-		/// Make a Filter based principally on {@link org.apache.lucene.spatial.query.SpatialOperation}
-		/// and {@link Shape} from the supplied {@code args}.
-		/// <p />
-		/// If a subclass implements
-		/// {@link #makeQuery(org.apache.lucene.spatial.query.SpatialArgs)}
-		/// then this method could be simply:
-		/// <pre>return new QueryWrapperFilter(makeQuery(args).getQuery());</pre>
-		/// </summary>
-		/// <param name="args"></param>
-		/// <returns></returns>
-		public abstract Filter MakeFilter(SpatialArgs args);
+        /// <summary>
+        /// Make a Filter based principally on {@link org.apache.lucene.spatial.query.SpatialOperation}
+        /// and {@link Shape} from the supplied {@code args}.
+        /// <p />
+        /// If a subclass implements
+        /// {@link #makeQuery(org.apache.lucene.spatial.query.SpatialArgs)}
+        /// then this method could be simply:
+        /// <pre>return new QueryWrapperFilter(makeQuery(args).getQuery());</pre>
+        /// </summary>
+        /// <param name="args"></param>
+        /// <returns></returns>
+        public abstract Filter MakeFilter(SpatialArgs args);
 
         /// <summary>
         /// Returns a ValueSource with values ranging from 1 to 0, depending inversely
@@ -142,9 +142,9 @@ namespace Lucene.Net.Spatial
             return new ReciprocalFloatFunction(MakeDistanceValueSource(queryShape.GetCenter()), 1f, c, c);
         }
 
-	    public override string ToString()
-		{
-			return GetType().Name + " field:" + fieldName + " ctx=" + ctx;
-		}
-	}
+        public override string ToString()
+        {
+            return GetType().Name + " field:" + fieldName + " ctx=" + ctx;
+        }
+    }
 }
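
A minimal sketch of how the members above fit together, assuming a concrete
SpatialStrategy subclass and the Lucene.Net Document/IndexWriter API; the
names strategy, shape, and args are placeholders, not part of this class:

    var doc = new Document();
    foreach (AbstractField f in strategy.CreateIndexableFields(shape))
        doc.Add(f);                                  // indexed, not stored
    doc.Add(strategy.CreateStoredField(shape));      // stored via ctx.ToString(shape)
    // ... add doc through an IndexWriter, then search with:
    ConstantScoreQuery query = strategy.MakeQuery(args);  // wraps MakeFilter(args)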

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Util/Bits.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Util/Bits.cs b/src/contrib/Spatial/Util/Bits.cs
index d6c7bfa..f0040ee 100644
--- a/src/contrib/Spatial/Util/Bits.cs
+++ b/src/contrib/Spatial/Util/Bits.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -17,76 +17,76 @@
 
 namespace Lucene.Net.Spatial.Util
 {
-	/// <summary>
-	/// Interface for Bitset-like structures.
-	/// </summary>
-	public interface IBits
-	{
-		bool Get(int index);
-		int Length();
-	}
+    /// <summary>
+    /// Interface for Bitset-like structures.
+    /// </summary>
+    public interface IBits
+    {
+        bool Get(int index);
+        int Length();
+    }
 
-	/// <summary>
-	/// Empty implementation, basically just so we can provide EMPTY_ARRAY
-	/// </summary>
-	public abstract class Bits : IBits
-	{
-		public static readonly Bits[] EMPTY_ARRAY = new Bits[0];
+    /// <summary>
+    /// Empty implementation, basically just so we can provide EMPTY_ARRAY
+    /// </summary>
+    public abstract class Bits : IBits
+    {
+        public static readonly Bits[] EMPTY_ARRAY = new Bits[0];
 
-		public virtual bool Get(int index)
-		{
-			throw new System.NotImplementedException();
-		}
+        public virtual bool Get(int index)
+        {
+            throw new System.NotImplementedException();
+        }
 
-		public virtual int Length()
-		{
-			throw new System.NotImplementedException();
-		}
-	}
+        public virtual int Length()
+        {
+            throw new System.NotImplementedException();
+        }
+    }
 
-	/// <summary>
-	/// Bits impl of the specified length with all bits set.
-	/// </summary>
-	public class MatchAllBits : Bits
-	{
-		private readonly int len;
+    /// <summary>
+    /// Bits impl of the specified length with all bits set.
+    /// </summary>
+    public class MatchAllBits : Bits
+    {
+        private readonly int len;
 
-		public MatchAllBits(int len)
-		{
-			this.len = len;
-		}
+        public MatchAllBits(int len)
+        {
+            this.len = len;
+        }
 
-		public override bool Get(int index)
-		{
-			return true;
-		}
+        public override bool Get(int index)
+        {
+            return true;
+        }
 
-		public override int Length()
-		{
-			return len;
-		}
-	}
+        public override int Length()
+        {
+            return len;
+        }
+    }
 
-	/// <summary>
-	/// Bits impl of the specified length with no bits set. 
-	/// </summary>
-	public class MatchNoBits : Bits
-	{
-		private readonly int len;
+    /// <summary>
+    /// Bits impl of the specified length with no bits set. 
+    /// </summary>
+    public class MatchNoBits : Bits
+    {
+        private readonly int len;
 
-		public MatchNoBits(int len)
-		{
-			this.len = len;
-		}
+        public MatchNoBits(int len)
+        {
+            this.len = len;
+        }
 
-		public override bool Get(int index)
-		{
-			return false;
-		}
+        public override bool Get(int index)
+        {
+            return false;
+        }
 
-		public override int Length()
-		{
-			return len;
-		}
-	}
+        public override int Length()
+        {
+            return len;
+        }
+    }
 }
\ No newline at end of file
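
A quick sketch of the two degenerate implementations above; the expected
values follow directly from the code:

    int maxDoc = 8;
    IBits all  = new MatchAllBits(maxDoc);   // Get(i) is true for every index
    IBits none = new MatchNoBits(maxDoc);    // Get(i) is false for every index
    bool ok = all.Get(3) && !none.Get(3) && all.Length() == maxDoc;  // true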

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Util/CachingDoubleValueSource.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Util/CachingDoubleValueSource.cs b/src/contrib/Spatial/Util/CachingDoubleValueSource.cs
index 0a52b66..ef7a174 100644
--- a/src/contrib/Spatial/Util/CachingDoubleValueSource.cs
+++ b/src/contrib/Spatial/Util/CachingDoubleValueSource.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -21,81 +21,81 @@ using Lucene.Net.Search.Function;
 
 namespace Lucene.Net.Spatial.Util
 {
-	public class CachingDoubleValueSource : ValueSource
-	{
-		protected readonly ValueSource source;
-		protected readonly Dictionary<int, double> cache;
-
-		public CachingDoubleValueSource(ValueSource source)
-		{
-			this.source = source;
-			cache = new Dictionary<int, double>();
-		}
-
-		public class CachingDoubleDocValue : DocValues
-		{
-			private readonly int docBase;
-			private readonly DocValues values;
-			private readonly Dictionary<int, double> cache;
-
-			public CachingDoubleDocValue(int docBase, DocValues vals, Dictionary<int, double> cache)
-			{
-				this.docBase = docBase;
-				this.values = vals;
-				this.cache = cache;
-			}
-
-			public override double DoubleVal(int doc)
-			{
-				var key = docBase + doc;
-				double v;
-				if (!cache.TryGetValue(key, out v))
-				{
-					v = values.DoubleVal(doc);
-					cache[key] = v;
-				}
-				return v;
-			}
-
-			public override float FloatVal(int doc)
-			{
-				return (float)DoubleVal(doc);
-			}
-
-			public override string ToString(int doc)
-			{
-				return DoubleVal(doc) + string.Empty;
-			}
-		}
-
-		public override DocValues GetValues(IndexReader reader)
-		{
-			var @base = 0; //reader.DocBase;
-			var vals = source.GetValues(reader);
-			return new CachingDoubleDocValue(@base, vals, cache);
-
-		}
-
-		public override string Description()
-		{
-			return "Cached[" + source.Description() + "]";
-		}
-
-		public override bool Equals(object o)
-		{
-			if (this == o) return true;
-
-			var that = o as CachingDoubleValueSource;
-
-			if (that == null) return false;
-			if (source != null ? !source.Equals(that.source) : that.source != null) return false;
-
-			return true;
-		}
-
-		public override int GetHashCode()
-		{
-			return source != null ? source.GetHashCode() : 0;
-		}
-	}
+    public class CachingDoubleValueSource : ValueSource
+    {
+        protected readonly ValueSource source;
+        protected readonly Dictionary<int, double> cache;
+
+        public CachingDoubleValueSource(ValueSource source)
+        {
+            this.source = source;
+            cache = new Dictionary<int, double>();
+        }
+
+        public class CachingDoubleDocValue : DocValues
+        {
+            private readonly int docBase;
+            private readonly DocValues values;
+            private readonly Dictionary<int, double> cache;
+
+            public CachingDoubleDocValue(int docBase, DocValues vals, Dictionary<int, double> cache)
+            {
+                this.docBase = docBase;
+                this.values = vals;
+                this.cache = cache;
+            }
+
+            public override double DoubleVal(int doc)
+            {
+                var key = docBase + doc;
+                double v;
+                if (!cache.TryGetValue(key, out v))
+                {
+                    v = values.DoubleVal(doc);
+                    cache[key] = v;
+                }
+                return v;
+            }
+
+            public override float FloatVal(int doc)
+            {
+                return (float)DoubleVal(doc);
+            }
+
+            public override string ToString(int doc)
+            {
+                return DoubleVal(doc) + string.Empty;
+            }
+        }
+
+        public override DocValues GetValues(IndexReader reader)
+        {
+            var @base = 0; //reader.DocBase;
+            var vals = source.GetValues(reader);
+            return new CachingDoubleDocValue(@base, vals, cache);
+
+        }
+
+        public override string Description()
+        {
+            return "Cached[" + source.Description() + "]";
+        }
+
+        public override bool Equals(object o)
+        {
+            if (this == o) return true;
+
+            var that = o as CachingDoubleValueSource;
+
+            if (that == null) return false;
+            if (source != null ? !source.Equals(that.source) : that.source != null) return false;
+
+            return true;
+        }
+
+        public override int GetHashCode()
+        {
+            return source != null ? source.GetHashCode() : 0;
+        }
+    }
 }
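
A usage sketch, assuming "inner" is some existing ValueSource (for example a
distance source from a SpatialStrategy) and "reader" is an open IndexReader:

    ValueSource cached = new CachingDoubleValueSource(inner);
    DocValues vals = cached.GetValues(reader);
    double d1 = vals.DoubleVal(5);   // computed through inner, then stored
    double d2 = vals.DoubleVal(5);   // served from the Dictionary cache

Note that the Dictionary cache is shared across GetValues calls and is not
synchronized, so this wrapper suits single-threaded scoring passes.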

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Spatial/Util/CompatibilityExtensions.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Spatial/Util/CompatibilityExtensions.cs b/src/contrib/Spatial/Util/CompatibilityExtensions.cs
index 1a1bebd..e802662 100644
--- a/src/contrib/Spatial/Util/CompatibilityExtensions.cs
+++ b/src/contrib/Spatial/Util/CompatibilityExtensions.cs
@@ -1,4 +1,4 @@
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -28,24 +28,24 @@ using Lucene.Net.Search;
 
 namespace Lucene.Net.Spatial.Util
 {
-	public static class CompatibilityExtensions
-	{
-		public static void Append(this ITermAttribute termAtt, string str)
-		{
-			termAtt.SetTermBuffer(termAtt.Term + str); // TODO: Not optimal, but works
-		}
+    public static class CompatibilityExtensions
+    {
+        public static void Append(this ITermAttribute termAtt, string str)
+        {
+            termAtt.SetTermBuffer(termAtt.Term + str); // TODO: Not optimal, but works
+        }
 
-		public static void Append(this ITermAttribute termAtt, char ch)
-		{
-			termAtt.SetTermBuffer(termAtt.Term + new string(new[] { ch })); // TODO: Not optimal, but works
-		}
+        public static void Append(this ITermAttribute termAtt, char ch)
+        {
+            termAtt.SetTermBuffer(termAtt.Term + new string(new[] { ch })); // TODO: Not optimal, but works
+        }
 
-		private static readonly ConcurrentDictionary<string, IBits> _docsWithFieldCache = new ConcurrentDictionary<string, IBits>();
+        private static readonly ConcurrentDictionary<string, IBits> _docsWithFieldCache = new ConcurrentDictionary<string, IBits>();
 
-		internal static IBits GetDocsWithField(this FieldCache fc, IndexReader reader, String field)
-		{
-			return _docsWithFieldCache.GetOrAdd(field, f => DocsWithFieldCacheEntry_CreateValue(reader, new Entry(field, null), false));
-		}
+        internal static IBits GetDocsWithField(this FieldCache fc, IndexReader reader, String field)
+        {
+            return _docsWithFieldCache.GetOrAdd(field, f => DocsWithFieldCacheEntry_CreateValue(reader, new Entry(field, null), false));
+        }
 
         /// <summary> <p/>
         /// EXPERT: Instructs the FieldCache to forcibly expunge all entries 
@@ -67,149 +67,149 @@ namespace Lucene.Net.Spatial.Util
             _docsWithFieldCache.Clear();
         }
 
-		private static IBits DocsWithFieldCacheEntry_CreateValue(IndexReader reader, Entry entryKey, bool setDocsWithField /* ignored */)
-		{
-			var field = entryKey.field;
-			FixedBitSet res = null;
-			var terms = new TermsEnumCompatibility(reader, field);
-			var maxDoc = reader.MaxDoc;
-
-			var term = terms.Next();
-			if (term != null)
-			{
-				int termsDocCount = terms.GetDocCount();
-				Debug.Assert(termsDocCount <= maxDoc);
-				if (termsDocCount == maxDoc)
-				{
-					// Fast case: all docs have this field:
-					return new MatchAllBits(maxDoc);
-				}
-
-				while (true)
-				{
-					if (res == null)
-					{
-						// lazy init
-						res = new FixedBitSet(maxDoc);
-					}
-
-					var termDocs = reader.TermDocs(term);
-					while (termDocs.Next())
-					{
-						res.Set(termDocs.Doc);
-					}
-		
-					term = terms.Next();
-					if (term == null)
-					{
-						break;
-					}
-				}
-			}
-			if (res == null)
-			{
-				return new MatchNoBits(maxDoc);
-			}
-			int numSet = res.Cardinality();
-			if (numSet >= maxDoc)
-			{
-				// The cardinality of the BitSet is maxDoc if all documents have a value.
-				Debug.Assert(numSet == maxDoc);
-				return new MatchAllBits(maxDoc);
-			}
-			return res;
-		}
-
-		/* table of number of leading zeros in a byte */
-		public static readonly byte[] nlzTable = { 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-
-		/// <summary>
-		/// Returns the number of leading zero bits.
-		/// </summary>
-		/// <param name="x"></param>
-		/// <returns></returns>
-		public static int BitUtilNlz(long x)
-		{
-			int n = 0;
-			// do the first step as a long
-			var y = (int)((ulong)x >> 32);
-			if (y == 0) { n += 32; y = (int)(x); }
-			if ((y & 0xFFFF0000) == 0) { n += 16; y <<= 16; }
-			if ((y & 0xFF000000) == 0) { n += 8; y <<= 8; }
-			return n + nlzTable[(uint)y >> 24];
-			/* implementation without table:
-			  if ((y & 0xF0000000) == 0) { n+=4; y<<=4; }
-			  if ((y & 0xC0000000) == 0) { n+=2; y<<=2; }
-			  if ((y & 0x80000000) == 0) { n+=1; y<<=1; }
-			  if ((y & 0x80000000) == 0) { n+=1;}
-			  return n;
-			 */
-		}
-	}
-
-	public static class Arrays
-	{
-		public static void Fill<T>(T[] array, int fromIndex, int toIndex, T value)
-		{
-			if (array == null)
-			{
-				throw new ArgumentNullException("array");
-			}
-			if (fromIndex < 0 || fromIndex > toIndex)
-			{
-				throw new ArgumentOutOfRangeException("fromIndex");
-			}
-			if (toIndex > array.Length)
-			{
-				throw new ArgumentOutOfRangeException("toIndex");
-			}
-			for (var i = fromIndex; i < toIndex; i++)
-			{
-				array[i] = value;
-			}
-		}
-	}
-
-	/// <summary>
-	/// Expert: Every composite-key in the internal cache is of this type.
-	/// </summary>
-	internal class Entry
-	{
-		internal readonly String field;        // which Fieldable
-		internal readonly Object custom;       // which custom comparator or parser
-
-		/* Creates one of these objects for a custom comparator/parser. */
-		public Entry(String field, Object custom)
-		{
-			this.field = field;
-			this.custom = custom;
-		}
-
-		/* Two of these are equal iff they reference the same field and type. */
-		public override bool Equals(Object o)
-		{
-			var other = o as Entry;
-			if (other != null)
-			{
-				if (other.field.Equals(field))
-				{
-					if (other.custom == null)
-					{
-						if (custom == null) return true;
-					}
-					else if (other.custom.Equals(custom))
-					{
-						return true;
-					}
-				}
-			}
-			return false;
-		}
-
-		/* Composes a hashcode based on the field and type. */
-		public override int GetHashCode()
-		{
-			return field.GetHashCode() ^ (custom == null ? 0 : custom.GetHashCode());
-		}
-	}
+        private static IBits DocsWithFieldCacheEntry_CreateValue(IndexReader reader, Entry entryKey, bool setDocsWithField /* ignored */)
+        {
+            var field = entryKey.field;
+            FixedBitSet res = null;
+            var terms = new TermsEnumCompatibility(reader, field);
+            var maxDoc = reader.MaxDoc;
+
+            var term = terms.Next();
+            if (term != null)
+            {
+                int termsDocCount = terms.GetDocCount();
+                Debug.Assert(termsDocCount <= maxDoc);
+                if (termsDocCount == maxDoc)
+                {
+                    // Fast case: all docs have this field:
+                    return new MatchAllBits(maxDoc);
+                }
+
+                while (true)
+                {
+                    if (res == null)
+                    {
+                        // lazy init
+                        res = new FixedBitSet(maxDoc);
+                    }
+
+                    var termDocs = reader.TermDocs(term);
+                    while (termDocs.Next())
+                    {
+                        res.Set(termDocs.Doc);
+                    }
+        
+                    term = terms.Next();
+                    if (term == null)
+                    {
+                        break;
+                    }
+                }
+            }
+            if (res == null)
+            {
+                return new MatchNoBits(maxDoc);
+            }
+            int numSet = res.Cardinality();
+            if (numSet >= maxDoc)
+            {
+                // The cardinality of the BitSet is maxDoc if all documents have a value.
+                Debug.Assert(numSet == maxDoc);
+                return new MatchAllBits(maxDoc);
+            }
+            return res;
+        }
+
+        /* table of number of leading zeros in a byte */
+        public static readonly byte[] nlzTable = { 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+        /// <summary>
+        /// Returns the number of leading zero bits.
+        /// </summary>
+        /// <param name="x"></param>
+        /// <returns></returns>
+        public static int BitUtilNlz(long x)
+        {
+            int n = 0;
+            // do the first step as a long
+            var y = (int)((ulong)x >> 32);
+            if (y == 0) { n += 32; y = (int)(x); }
+            if ((y & 0xFFFF0000) == 0) { n += 16; y <<= 16; }
+            if ((y & 0xFF000000) == 0) { n += 8; y <<= 8; }
+            return n + nlzTable[(uint)y >> 24];
+            /* implementation without table:
+              if ((y & 0xF0000000) == 0) { n+=4; y<<=4; }
+              if ((y & 0xC0000000) == 0) { n+=2; y<<=2; }
+              if ((y & 0x80000000) == 0) { n+=1; y<<=1; }
+              if ((y & 0x80000000) == 0) { n+=1;}
+              return n;
+             */
+        }
+    }
+
+    public static class Arrays
+    {
+        public static void Fill<T>(T[] array, int fromIndex, int toIndex, T value)
+        {
+            if (array == null)
+            {
+                throw new ArgumentNullException("array");
+            }
+            if (fromIndex < 0 || fromIndex > toIndex)
+            {
+                throw new ArgumentOutOfRangeException("fromIndex");
+            }
+            if (toIndex > array.Length)
+            {
+                throw new ArgumentOutOfRangeException("toIndex");
+            }
+            for (var i = fromIndex; i < toIndex; i++)
+            {
+                array[i] = value;
+            }
+        }
+    }
+
+    /// <summary>
+    /// Expert: Every composite-key in the internal cache is of this type.
+    /// </summary>
+    internal class Entry
+    {
+        internal readonly String field;        // which Fieldable
+        internal readonly Object custom;       // which custom comparator or parser
+
+        /* Creates one of these objects for a custom comparator/parser. */
+        public Entry(String field, Object custom)
+        {
+            this.field = field;
+            this.custom = custom;
+        }
+
+        /* Two of these are equal iff they reference the same field and type. */
+        public override bool Equals(Object o)
+        {
+            var other = o as Entry;
+            if (other != null)
+            {
+                if (other.field.Equals(field))
+                {
+                    if (other.custom == null)
+                    {
+                        if (custom == null) return true;
+                    }
+                    else if (other.custom.Equals(custom))
+                    {
+                        return true;
+                    }
+                }
+            }
+            return false;
+        }
+
+        /* Composes a hashcode based on the field and type. */
+        public override int GetHashCode()
+        {
+            return field.GetHashCode() ^ (custom == null ? 0 : custom.GetHashCode());
+        }
+    }
 }
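
Two small checks of the helpers above; the expected values follow directly
from the code:

    var a = new int[8];
    Arrays.Fill(a, 2, 6, 7);   // a is now { 0, 0, 7, 7, 7, 7, 0, 0 }; toIndex is exclusive

    int n1 = CompatibilityExtensions.BitUtilNlz(1L);   // 63: only the lowest bit of a 64-bit word is set
    int n0 = CompatibilityExtensions.BitUtilNlz(0L);   // 64: no bit set at all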


[18/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/CompoundFileReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/CompoundFileReader.cs b/src/core/Index/CompoundFileReader.cs
index 74f4fb4..3b7f4f3 100644
--- a/src/core/Index/CompoundFileReader.cs
+++ b/src/core/Index/CompoundFileReader.cs
@@ -25,101 +25,101 @@ using Lock = Lucene.Net.Store.Lock;
 
 namespace Lucene.Net.Index
 {
-	
-	
-	/// <summary> Class for accessing a compound stream.
-	/// This class implements a directory, but is limited to only read operations.
-	/// Directory methods that would normally modify data throw an exception.
-	/// </summary>
-	public class CompoundFileReader : Directory
-	{
-		
-		private readonly int readBufferSize;
-		
-		private sealed class FileEntry
-		{
-			internal long offset;
-			internal long length;
-		}
+    
+    
+    /// <summary> Class for accessing a compound stream.
+    /// This class implements a directory, but is limited to only read operations.
+    /// Directory methods that would normally modify data throw an exception.
+    /// </summary>
+    public class CompoundFileReader : Directory
+    {
+        
+        private readonly int readBufferSize;
+        
+        private sealed class FileEntry
+        {
+            internal long offset;
+            internal long length;
+        }
 
-	    private bool isDisposed;
-		
-		// Base info
-		private readonly Directory directory;
-		private readonly System.String fileName;
-		
-		private IndexInput stream;
-		private HashMap<string, FileEntry> entries = new HashMap<string, FileEntry>();
-		
-		
-		public CompoundFileReader(Directory dir, System.String name):this(dir, name, BufferedIndexInput.BUFFER_SIZE)
-		{
-		}
-		
-		public CompoundFileReader(Directory dir, System.String name, int readBufferSize)
-		{
-			directory = dir;
-			fileName = name;
-			this.readBufferSize = readBufferSize;
-			
-			bool success = false;
-			
-			try
-			{
-				stream = dir.OpenInput(name, readBufferSize);
-				
-				// read the directory and init files
-				int count = stream.ReadVInt();
-				FileEntry entry = null;
-				for (int i = 0; i < count; i++)
-				{
-					long offset = stream.ReadLong();
-					System.String id = stream.ReadString();
-					
-					if (entry != null)
-					{
-						// set length of the previous entry
-						entry.length = offset - entry.offset;
-					}
+        private bool isDisposed;
+        
+        // Base info
+        private readonly Directory directory;
+        private readonly System.String fileName;
+        
+        private IndexInput stream;
+        private HashMap<string, FileEntry> entries = new HashMap<string, FileEntry>();
+        
+        
+        public CompoundFileReader(Directory dir, System.String name):this(dir, name, BufferedIndexInput.BUFFER_SIZE)
+        {
+        }
+        
+        public CompoundFileReader(Directory dir, System.String name, int readBufferSize)
+        {
+            directory = dir;
+            fileName = name;
+            this.readBufferSize = readBufferSize;
+            
+            bool success = false;
+            
+            try
+            {
+                stream = dir.OpenInput(name, readBufferSize);
+                
+                // read the directory and init files
+                int count = stream.ReadVInt();
+                FileEntry entry = null;
+                for (int i = 0; i < count; i++)
+                {
+                    long offset = stream.ReadLong();
+                    System.String id = stream.ReadString();
+                    
+                    if (entry != null)
+                    {
+                        // set length of the previous entry
+                        entry.length = offset - entry.offset;
+                    }
 
-					entry = new FileEntry {offset = offset};
-					entries[id] = entry;
-				}
-				
-				// set the length of the final entry
-				if (entry != null)
-				{
-					entry.length = stream.Length() - entry.offset;
-				}
-				
-				success = true;
-			}
-			finally
-			{
-				if (!success && (stream != null))
-				{
-					try
-					{
-						stream.Close();
-					}
-					catch (System.IO.IOException)
-					{
-					}
-				}
-			}
-		}
+                    entry = new FileEntry {offset = offset};
+                    entries[id] = entry;
+                }
+                
+                // set the length of the final entry
+                if (entry != null)
+                {
+                    entry.length = stream.Length() - entry.offset;
+                }
+                
+                success = true;
+            }
+            finally
+            {
+                if (!success && (stream != null))
+                {
+                    try
+                    {
+                        stream.Close();
+                    }
+                    catch (System.IO.IOException)
+                    {
+                    }
+                }
+            }
+        }
 
-	    public virtual Directory Directory
-	    {
-	        get { return directory; }
-	    }
+        public virtual Directory Directory
+        {
+            get { return directory; }
+        }
 
-	    public virtual string Name
-	    {
-	        get { return fileName; }
-	    }
+        public virtual string Name
+        {
+            get { return fileName; }
+        }
 
-	    protected override void Dispose(bool disposing)
+        protected override void Dispose(bool disposing)
         {
             lock (this)
             {
@@ -141,152 +141,152 @@ namespace Lucene.Net.Index
                 isDisposed = true;
             }
         }
-		
-		public override IndexInput OpenInput(System.String id)
-		{
-			lock (this)
-			{
-				// Default to readBufferSize passed in when we were opened
-				return OpenInput(id, readBufferSize);
-			}
-		}
-		
-		public override IndexInput OpenInput(System.String id, int readBufferSize)
-		{
-			lock (this)
-			{
-				if (stream == null)
-					throw new System.IO.IOException("Stream closed");
-				
-				FileEntry entry = entries[id];
-				if (entry == null)
-					throw new System.IO.IOException("No sub-file with id " + id + " found");
-				
-				return new CSIndexInput(stream, entry.offset, entry.length, readBufferSize);
-			}
-		}
-		
-		/// <summary>Returns an array of strings, one for each file in the directory. </summary>
-		public override System.String[] ListAll()
-		{
-		    return entries.Keys.ToArray();
-		}
-		
-		/// <summary>Returns true iff a file with the given name exists. </summary>
-		public override bool FileExists(System.String name)
-		{
-			return entries.ContainsKey(name);
-		}
-		
-		/// <summary>Returns the time the compound file was last modified. </summary>
-		public override long FileModified(System.String name)
-		{
-			return directory.FileModified(fileName);
-		}
-		
-		/// <summary>Set the modified time of the compound file to now. </summary>
-		public override void  TouchFile(System.String name)
-		{
-			directory.TouchFile(fileName);
-		}
-		
-		/// <summary>Not implemented</summary>
-		/// <throws>  UnsupportedOperationException  </throws>
-		public override void  DeleteFile(System.String name)
-		{
-			throw new System.NotSupportedException();
-		}
-		
-		/// <summary>Not implemented</summary>
-		/// <throws>  UnsupportedOperationException  </throws>
-		public void RenameFile(System.String from, System.String to)
-		{
-			throw new System.NotSupportedException();
-		}
-		
-		/// <summary>Returns the length of a file in the directory.</summary>
-		/// <throws>  IOException if the file does not exist  </throws>
-		public override long FileLength(System.String name)
-		{
-			FileEntry e = entries[name];
-			if (e == null)
-				throw new System.IO.IOException("File " + name + " does not exist");
-			return e.length;
-		}
-		
-		/// <summary>Not implemented</summary>
-		/// <throws>  UnsupportedOperationException  </throws>
-		public override IndexOutput CreateOutput(System.String name)
-		{
-			throw new System.NotSupportedException();
-		}
-		
-		/// <summary>Not implemented</summary>
-		/// <throws>  UnsupportedOperationException  </throws>
-		public override Lock MakeLock(System.String name)
-		{
-			throw new System.NotSupportedException();
-		}
-		
-		/// <summary>Implementation of an IndexInput that reads from a portion of the
-		/// compound file. The visibility is left as "package" *only* because
-		/// this helps with testing since JUnit test cases in a different class
-		/// can then access package fields of this class.
-		/// </summary>
-		public /*internal*/ sealed class CSIndexInput : BufferedIndexInput
-		{
-			internal IndexInput base_Renamed;
-			internal long fileOffset;
-			internal long length;
+        
+        public override IndexInput OpenInput(System.String id)
+        {
+            lock (this)
+            {
+                // Default to readBufferSize passed in when we were opened
+                return OpenInput(id, readBufferSize);
+            }
+        }
+        
+        public override IndexInput OpenInput(System.String id, int readBufferSize)
+        {
+            lock (this)
+            {
+                if (stream == null)
+                    throw new System.IO.IOException("Stream closed");
+                
+                FileEntry entry = entries[id];
+                if (entry == null)
+                    throw new System.IO.IOException("No sub-file with id " + id + " found");
+                
+                return new CSIndexInput(stream, entry.offset, entry.length, readBufferSize);
+            }
+        }
+        
+        /// <summary>Returns an array of strings, one for each file in the directory. </summary>
+        public override System.String[] ListAll()
+        {
+            return entries.Keys.ToArray();
+        }
+        
+        /// <summary>Returns true iff a file with the given name exists. </summary>
+        public override bool FileExists(System.String name)
+        {
+            return entries.ContainsKey(name);
+        }
+        
+        /// <summary>Returns the time the compound file was last modified. </summary>
+        public override long FileModified(System.String name)
+        {
+            return directory.FileModified(fileName);
+        }
+        
+        /// <summary>Set the modified time of the compound file to now. </summary>
+        public override void  TouchFile(System.String name)
+        {
+            directory.TouchFile(fileName);
+        }
+        
+        /// <summary>Not implemented</summary>
+        /// <throws>  UnsupportedOperationException  </throws>
+        public override void  DeleteFile(System.String name)
+        {
+            throw new System.NotSupportedException();
+        }
+        
+        /// <summary>Not implemented</summary>
+        /// <throws>  UnsupportedOperationException  </throws>
+        public void RenameFile(System.String from, System.String to)
+        {
+            throw new System.NotSupportedException();
+        }
+        
+        /// <summary>Returns the length of a file in the directory.</summary>
+        /// <throws>  IOException if the file does not exist  </throws>
+        public override long FileLength(System.String name)
+        {
+            FileEntry e = entries[name];
+            if (e == null)
+                throw new System.IO.IOException("File " + name + " does not exist");
+            return e.length;
+        }
+        
+        /// <summary>Not implemented</summary>
+        /// <throws>  UnsupportedOperationException  </throws>
+        public override IndexOutput CreateOutput(System.String name)
+        {
+            throw new System.NotSupportedException();
+        }
+        
+        /// <summary>Not implemented</summary>
+        /// <throws>  UnsupportedOperationException  </throws>
+        public override Lock MakeLock(System.String name)
+        {
+            throw new System.NotSupportedException();
+        }
+        
+        /// <summary>Implementation of an IndexInput that reads from a portion of the
+        /// compound file. The visibility is left as "package" *only* because
+        /// this helps with testing since JUnit test cases in a different class
+        /// can then access package fields of this class.
+        /// </summary>
+        public /*internal*/ sealed class CSIndexInput : BufferedIndexInput
+        {
+            internal IndexInput base_Renamed;
+            internal long fileOffset;
+            internal long length;
 
-		    private bool isDisposed;
-			
-			internal CSIndexInput(IndexInput @base, long fileOffset, long length):this(@base, fileOffset, length, BufferedIndexInput.BUFFER_SIZE)
-			{
-			}
-			
-			internal CSIndexInput(IndexInput @base, long fileOffset, long length, int readBufferSize):base(readBufferSize)
-			{
-				this.base_Renamed = (IndexInput) @base.Clone();
-				this.fileOffset = fileOffset;
-				this.length = length;
-			}
-			
-			public override System.Object Clone()
-			{
-				var clone = (CSIndexInput) base.Clone();
-				clone.base_Renamed = (IndexInput) base_Renamed.Clone();
-				clone.fileOffset = fileOffset;
-				clone.length = length;
-				return clone;
-			}
-			
-			/// <summary>Expert: implements buffer refill.  Reads bytes from the current
-			/// position in the input.
-			/// </summary>
-			/// <param name="b">the array to read bytes into
-			/// </param>
-			/// <param name="offset">the offset in the array to start storing bytes
-			/// </param>
-			/// <param name="len">the number of bytes to read
-			/// </param>
-			public override void  ReadInternal(byte[] b, int offset, int len)
-			{
-				long start = FilePointer;
-				if (start + len > length)
-					throw new System.IO.IOException("read past EOF");
-				base_Renamed.Seek(fileOffset + start);
-				base_Renamed.ReadBytes(b, offset, len, false);
-			}
-			
-			/// <summary>Expert: implements seek.  Sets current position in this file, where
-			/// the next <see cref="ReadInternal(byte[],int,int)" /> will occur.
-			/// </summary>
-			/// <seealso cref="ReadInternal(byte[],int,int)">
-			/// </seealso>
-			public override void  SeekInternal(long pos)
-			{
-			}
+            private bool isDisposed;
+            
+            internal CSIndexInput(IndexInput @base, long fileOffset, long length):this(@base, fileOffset, length, BufferedIndexInput.BUFFER_SIZE)
+            {
+            }
+            
+            internal CSIndexInput(IndexInput @base, long fileOffset, long length, int readBufferSize):base(readBufferSize)
+            {
+                this.base_Renamed = (IndexInput) @base.Clone();
+                this.fileOffset = fileOffset;
+                this.length = length;
+            }
+            
+            public override System.Object Clone()
+            {
+                var clone = (CSIndexInput) base.Clone();
+                clone.base_Renamed = (IndexInput) base_Renamed.Clone();
+                clone.fileOffset = fileOffset;
+                clone.length = length;
+                return clone;
+            }
+            
+            /// <summary>Expert: implements buffer refill.  Reads bytes from the current
+            /// position in the input.
+            /// </summary>
+            /// <param name="b">the array to read bytes into
+            /// </param>
+            /// <param name="offset">the offset in the array to start storing bytes
+            /// </param>
+            /// <param name="len">the number of bytes to read
+            /// </param>
+            public override void  ReadInternal(byte[] b, int offset, int len)
+            {
+                long start = FilePointer;
+                if (start + len > length)
+                    throw new System.IO.IOException("read past EOF");
+                base_Renamed.Seek(fileOffset + start);
+                base_Renamed.ReadBytes(b, offset, len, false);
+            }
+            
+            /// <summary>Expert: implements seek.  Sets current position in this file, where
+            /// the next <see cref="ReadInternal(byte[],int,int)" /> will occur.
+            /// </summary>
+            /// <seealso cref="ReadInternal(byte[],int,int)">
+            /// </seealso>
+            public override void  SeekInternal(long pos)
+            {
+            }
 
             protected override void Dispose(bool disposing)
             {
@@ -302,16 +302,16 @@ namespace Lucene.Net.Index
                 
                 isDisposed = true;
             }
-			
-			public override long Length()
-			{
-				return length;
-			}
+            
+            public override long Length()
+            {
+                return length;
+            }
 
             public IndexInput base_Renamed_ForNUnit
             {
                 get { return base_Renamed; }
             }
-		}
-	}
+        }
+    }
 }
\ No newline at end of file
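
A read-side sketch, assuming the usual FSDirectory entry point; the path and
file names ("_1.cfs" and its sub-files) are illustrative:

    Directory dir = FSDirectory.Open(new System.IO.DirectoryInfo("/path/to/index"));
    using (var cfr = new CompoundFileReader(dir, "_1.cfs"))
    {
        foreach (string id in cfr.ListAll())         // sub-files packed into the .cfs
            System.Console.WriteLine(id + " (" + cfr.FileLength(id) + " bytes)");
        IndexInput input = cfr.OpenInput("_1.fnm");  // IOException if the id is absent
        // input is a CSIndexInput slice over the shared compound stream
    }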

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/CompoundFileWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/CompoundFileWriter.cs b/src/core/Index/CompoundFileWriter.cs
index e2905e1..ff25487 100644
--- a/src/core/Index/CompoundFileWriter.cs
+++ b/src/core/Index/CompoundFileWriter.cs
@@ -23,118 +23,118 @@ using IndexOutput = Lucene.Net.Store.IndexOutput;
 
 namespace Lucene.Net.Index
 {
-	
-	
-	/// <summary> Combines multiple files into a single compound file.
-	/// The file format:<br/>
-	/// <list type="bullet">
-	/// <item>VInt fileCount</item>
-	/// <item>{Directory}
-	/// fileCount entries with the following structure:</item>
-	/// <list type="bullet">
-	/// <item>long dataOffset</item>
-	/// <item>String fileName</item>
-	/// </list>
-	/// <item>{File Data}
-	/// fileCount entries with the raw data of the corresponding file</item>
-	/// </list>
-	/// 
-	/// The fileCount integer indicates how many files are contained in this compound
-	/// file. The {directory} that follows has that many entries. Each directory entry
-	/// contains a long pointer to the start of this file's data section, and a String
-	/// with that file's name.
-	/// </summary>
-	public sealed class CompoundFileWriter : IDisposable
-	{
-		
-		private sealed class FileEntry
-		{
-			/// <summary>source file </summary>
-			internal System.String file;
-			
-			/// <summary>temporary holder for the start of directory entry for this file </summary>
-			internal long directoryOffset;
-			
-			/// <summary>temporary holder for the start of this file's data section </summary>
-			internal long dataOffset;
-		}
-		
-		
-		private readonly Directory directory;
-		private readonly String fileName;
+    
+    
+    /// <summary> Combines multiple files into a single compound file.
+    /// The file format:<br/>
+    /// <list type="bullet">
+    /// <item>VInt fileCount</item>
+    /// <item>{Directory}
+    /// fileCount entries with the following structure:</item>
+    /// <list type="bullet">
+    /// <item>long dataOffset</item>
+    /// <item>String fileName</item>
+    /// </list>
+    /// <item>{File Data}
+    /// fileCount entries with the raw data of the corresponding file</item>
+    /// </list>
+    /// 
+    /// The fileCount integer indicates how many files are contained in this compound
+    /// file. The {directory} that follows has that many entries. Each directory entry
+    /// contains a long pointer to the start of this file's data section, and a String
+    /// with that file's name.
+    /// </summary>
+    public sealed class CompoundFileWriter : IDisposable
+    {
+        
+        private sealed class FileEntry
+        {
+            /// <summary>source file </summary>
+            internal System.String file;
+            
+            /// <summary>temporary holder for the start of directory entry for this file </summary>
+            internal long directoryOffset;
+            
+            /// <summary>temporary holder for the start of this file's data section </summary>
+            internal long dataOffset;
+        }
+        
+        
+        private readonly Directory directory;
+        private readonly String fileName;
         private readonly HashSet<string> ids;
-		private readonly LinkedList<FileEntry> entries;
-		private bool merged = false;
-		private readonly SegmentMerger.CheckAbort checkAbort;
-		
-		/// <summary>Create the compound stream in the specified file. The file name is the
-		/// entire name (no extensions are added).
-		/// </summary>
-		/// <throws>  NullPointerException if <c>dir</c> or <c>name</c> is null </throws>
-		public CompoundFileWriter(Directory dir, System.String name):this(dir, name, null)
-		{
-		}
-		
-		internal CompoundFileWriter(Directory dir, System.String name, SegmentMerger.CheckAbort checkAbort)
-		{
-			if (dir == null)
-				throw new ArgumentNullException("dir");
-			if (name == null)
-				throw new ArgumentNullException("name");
-			this.checkAbort = checkAbort;
-			directory = dir;
-			fileName = name;
+        private readonly LinkedList<FileEntry> entries;
+        private bool merged = false;
+        private readonly SegmentMerger.CheckAbort checkAbort;
+        
+        /// <summary>Create the compound stream in the specified file. The file name is the
+        /// entire name (no extensions are added).
+        /// </summary>
+        /// <throws>  NullPointerException if <c>dir</c> or <c>name</c> is null </throws>
+        public CompoundFileWriter(Directory dir, System.String name):this(dir, name, null)
+        {
+        }
+        
+        internal CompoundFileWriter(Directory dir, System.String name, SegmentMerger.CheckAbort checkAbort)
+        {
+            if (dir == null)
+                throw new ArgumentNullException("dir");
+            if (name == null)
+                throw new ArgumentNullException("name");
+            this.checkAbort = checkAbort;
+            directory = dir;
+            fileName = name;
             ids = new HashSet<string>();
-			entries = new LinkedList<FileEntry>();
-		}
+            entries = new LinkedList<FileEntry>();
+        }
 
-	    /// <summary>Returns the directory of the compound file. </summary>
-	    public Directory Directory
-	    {
-	        get { return directory; }
-	    }
+        /// <summary>Returns the directory of the compound file. </summary>
+        public Directory Directory
+        {
+            get { return directory; }
+        }
 
-	    /// <summary>Returns the name of the compound file. </summary>
-	    public string Name
-	    {
-	        get { return fileName; }
-	    }
+        /// <summary>Returns the name of the compound file. </summary>
+        public string Name
+        {
+            get { return fileName; }
+        }
 
-	    /// <summary>Add a source stream. <c>file</c> is the string by which the 
-		/// sub-stream will be known in the compound stream.
-		/// 
-		/// </summary>
-		/// <throws>  IllegalStateException if this writer is closed </throws>
-		/// <throws>  NullPointerException if <c>file</c> is null </throws>
-		/// <throws>  IllegalArgumentException if a file with the same name
-		///   has been added already
-		/// </throws>
-		public void  AddFile(String file)
-		{
-			if (merged)
-				throw new InvalidOperationException("Can't add extensions after merge has been called");
-			
-			if (file == null)
-				throw new ArgumentNullException("file");
-			
+        /// <summary>Add a source stream. <c>file</c> is the string by which the 
+        /// sub-stream will be known in the compound stream.
+        /// 
+        /// </summary>
+        /// <throws>  IllegalStateException if this writer is closed </throws>
+        /// <throws>  NullPointerException if <c>file</c> is null </throws>
+        /// <throws>  IllegalArgumentException if a file with the same name
+        ///   has been added already
+        /// </throws>
+        public void  AddFile(String file)
+        {
+            if (merged)
+                throw new InvalidOperationException("Can't add extensions after merge has been called");
+            
+            if (file == null)
+                throw new ArgumentNullException("file");
+            
             try
             {
                 ids.Add(file);
             }
             catch (Exception)
             {
-				throw new ArgumentException("File " + file + " already added");
+                throw new ArgumentException("File " + file + " already added");
             }
 
-	    	var entry = new FileEntry {file = file};
-	    	entries.AddLast(entry);
-		}
-		
+            var entry = new FileEntry {file = file};
+            entries.AddLast(entry);
+        }
+        
         [Obsolete("Use Dispose() instead")]
-		public void  Close()
-		{
-		    Dispose();
-		}
+        public void  Close()
+        {
+            Dispose();
+        }
 
         /// <summary>Merge files with the extensions added up to now.
         /// All files with these extensions are combined sequentially into the
@@ -226,50 +226,50 @@ namespace Lucene.Net.Index
             }
         }
 
-		
-		/// <summary>Copy the contents of the file with the specified extension into the
-		/// provided output stream. Use the provided buffer for moving data
-		/// to reduce memory allocation.
-		/// </summary>
-		private void  CopyFile(FileEntry source, IndexOutput os, byte[] buffer)
-		{
-			IndexInput isRenamed = null;
-			try
-			{
-				long startPtr = os.FilePointer;
-				
-				isRenamed = directory.OpenInput(source.file);
-				long length = isRenamed.Length();
-				long remainder = length;
-				int chunk = buffer.Length;
-				
-				while (remainder > 0)
-				{
-					var len = (int) Math.Min(chunk, remainder);
-					isRenamed.ReadBytes(buffer, 0, len, false);
-					os.WriteBytes(buffer, len);
-					remainder -= len;
-					if (checkAbort != null)
-					// Roughly every 2 MB we will check if
-					// it's time to abort
-						checkAbort.Work(80);
-				}
-				
-				// Verify that remainder is 0
-				if (remainder != 0)
-					throw new System.IO.IOException("Non-zero remainder length after copying: " + remainder + " (id: " + source.file + ", length: " + length + ", buffer size: " + chunk + ")");
-				
-				// Verify that the output length diff is equal to the original file length
-				long endPtr = os.FilePointer;
-				long diff = endPtr - startPtr;
-				if (diff != length)
-					throw new System.IO.IOException("Difference in the output file offsets " + diff + " does not match the original file length " + length);
-			}
-			finally
-			{
-				if (isRenamed != null)
-					isRenamed.Close();
-			}
-		}
-	}
+        
+        /// <summary>Copy the contents of the file with the specified extension into the
+        /// provided output stream. Use the provided buffer for moving data
+        /// to reduce memory allocation.
+        /// </summary>
+        private void  CopyFile(FileEntry source, IndexOutput os, byte[] buffer)
+        {
+            IndexInput isRenamed = null;
+            try
+            {
+                long startPtr = os.FilePointer;
+                
+                isRenamed = directory.OpenInput(source.file);
+                long length = isRenamed.Length();
+                long remainder = length;
+                int chunk = buffer.Length;
+                
+                while (remainder > 0)
+                {
+                    var len = (int) Math.Min(chunk, remainder);
+                    isRenamed.ReadBytes(buffer, 0, len, false);
+                    os.WriteBytes(buffer, len);
+                    remainder -= len;
+                    if (checkAbort != null)
+                    // Roughly every 2 MB we will check if
+                    // it's time to abort
+                        checkAbort.Work(80);
+                }
+                
+                // Verify that remainder is 0
+                if (remainder != 0)
+                    throw new System.IO.IOException("Non-zero remainder length after copying: " + remainder + " (id: " + source.file + ", length: " + length + ", buffer size: " + chunk + ")");
+                
+                // Verify that the output length diff is equal to the original file length
+                long endPtr = os.FilePointer;
+                long diff = endPtr - startPtr;
+                if (diff != length)
+                    throw new System.IO.IOException("Difference in the output file offsets " + diff + " does not match the original file length " + length);
+            }
+            finally
+            {
+                if (isRenamed != null)
+                    isRenamed.Close();
+            }
+        }
+    }
 }
\ No newline at end of file
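
And the matching write-side sketch; the file names are illustrative and the
sub-files must already exist in "dir":

    var cfw = new CompoundFileWriter(dir, "_1.cfs");
    cfw.AddFile("_1.fnm");   // queue sub-files; adding the same name twice throws
    cfw.AddFile("_1.frq");
    cfw.Dispose();           // performs the actual merge; Close() is the obsolete alias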

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/ConcurrentMergeScheduler.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/ConcurrentMergeScheduler.cs b/src/core/Index/ConcurrentMergeScheduler.cs
index 8b8a300..79ea91f 100644
--- a/src/core/Index/ConcurrentMergeScheduler.cs
+++ b/src/core/Index/ConcurrentMergeScheduler.cs
@@ -21,118 +21,118 @@ using Directory = Lucene.Net.Store.Directory;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>A <see cref="MergeScheduler" /> that runs each merge using a
-	/// separate thread, up to a maximum number of threads
-	/// (<see cref="MaxThreadCount" />); once that limit is reached
-	/// and a merge is needed, the thread(s) that are updating the index will
-	/// pause until one or more merges complete.  This is a
-	/// simple way to use concurrency in the indexing process
-	/// without having to create and manage application level
-	/// threads. 
-	/// </summary>
-	
-	public class ConcurrentMergeScheduler:MergeScheduler
-	{
-		
-		private int mergeThreadPriority = - 1;
+    
+    /// <summary>A <see cref="MergeScheduler" /> that runs each merge using a
+    /// separate thread, up to a maximum number of threads
+    /// (<see cref="MaxThreadCount" />); once that limit is reached
+    /// and a merge is needed, the thread(s) that are updating the index will
+    /// pause until one or more merges complete.  This is a
+    /// simple way to use concurrency in the indexing process
+    /// without having to create and manage application level
+    /// threads. 
+    /// </summary>
+    
+    public class ConcurrentMergeScheduler:MergeScheduler
+    {
+        
+        private int mergeThreadPriority = - 1;
 
         protected internal IList<MergeThread> mergeThreads = new List<MergeThread>();
-		
-		// Max number of threads allowed to be merging at once
-		private int _maxThreadCount = 1;
-		
-		protected internal Directory dir;
-		
-		private bool closed;
-		protected internal IndexWriter writer;
-		protected internal int mergeThreadCount;
-		
-		public ConcurrentMergeScheduler()
-		{
-			if (allInstances != null)
-			{
-				// Only for testing
-				AddMyself();
-			}
-		}
+        
+        // Max number of threads allowed to be merging at once
+        private int _maxThreadCount = 1;
+        
+        protected internal Directory dir;
+        
+        private bool closed;
+        protected internal IndexWriter writer;
+        protected internal int mergeThreadCount;
+        
+        public ConcurrentMergeScheduler()
+        {
+            if (allInstances != null)
+            {
+                // Only for testing
+                AddMyself();
+            }
+        }
 
-	    /// <summary>Gets or sets the max # simultaneous threads that may be
-	    /// running.  If a merge is necessary yet we already have
-	    /// this many threads running, the incoming thread (that
-	    /// is calling add/updateDocument) will block until
-	    /// a merge thread has completed. 
-	    /// </summary>
-	    public virtual int MaxThreadCount
-	    {
-	        set
-	        {
-	            if (value < 1)
-	                throw new System.ArgumentException("count should be at least 1");
-	            _maxThreadCount = value;
-	        }
-	        get { return _maxThreadCount; }
+        /// <summary>Gets or sets the max # simultaneous threads that may be
+        /// running.  If a merge is necessary yet we already have
+        /// this many threads running, the incoming thread (that
+        /// is calling add/updateDocument) will block until
+        /// a merge thread has completed. 
+        /// </summary>
+        public virtual int MaxThreadCount
+        {
+            set
+            {
+                if (value < 1)
+                    throw new System.ArgumentException("count should be at least 1");
+                _maxThreadCount = value;
+            }
+            get { return _maxThreadCount; }
         }
 
-	    /// <summary>Return the priority that merge threads run at.  By
-		/// default the priority is 1 plus the priority of (ie,
-		/// slightly higher priority than) the first thread that
-		/// calls merge. 
-		/// </summary>
+        /// <summary>Return the priority that merge threads run at.  By
+        /// default the priority is 1 plus the priority of (ie,
+        /// slightly higher priority than) the first thread that
+        /// calls merge. 
+        /// </summary>
         [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1024:UsePropertiesWhereAppropriate")]
         public virtual int GetMergeThreadPriority()
-		{
-			lock (this)
-			{
-				InitMergeThreadPriority();
-				return mergeThreadPriority;
-			}
-		}
-		
-		/// <summary>Set the priority that merge threads run at. </summary>
-		public virtual void  SetMergeThreadPriority(int pri)
-		{
-			lock (this)
-			{
-				if (pri > (int) System.Threading.ThreadPriority.Highest || pri < (int) System.Threading.ThreadPriority.Lowest)
-					throw new System.ArgumentException("priority must be in range " + (int) System.Threading.ThreadPriority.Lowest + " .. " + (int) System.Threading.ThreadPriority.Highest + " inclusive");
-				mergeThreadPriority = pri;
-				
-				int numThreads = MergeThreadCount();
-				for (int i = 0; i < numThreads; i++)
-				{
-					MergeThread merge = mergeThreads[i];
-					merge.SetThreadPriority(pri);
-				}
-			}
-		}
-		
-		private bool Verbose()
-		{
-			return writer != null && writer.Verbose;
-		}
-		
-		private void  Message(System.String message)
-		{
-			if (Verbose())
-				writer.Message("CMS: " + message);
-		}
-		
-		private void  InitMergeThreadPriority()
-		{
-			lock (this)
-			{
-				if (mergeThreadPriority == - 1)
-				{
-					// Default to slightly higher priority than our
-					// calling thread
-					mergeThreadPriority = 1 + (System.Int32) ThreadClass.Current().Priority;
-					if (mergeThreadPriority > (int) System.Threading.ThreadPriority.Highest)
-						mergeThreadPriority = (int) System.Threading.ThreadPriority.Highest;
-				}
-			}
-		}
-		
+        {
+            lock (this)
+            {
+                InitMergeThreadPriority();
+                return mergeThreadPriority;
+            }
+        }
+        
+        /// <summary>Set the priority that merge threads run at. </summary>
+        public virtual void  SetMergeThreadPriority(int pri)
+        {
+            lock (this)
+            {
+                if (pri > (int) System.Threading.ThreadPriority.Highest || pri < (int) System.Threading.ThreadPriority.Lowest)
+                    throw new System.ArgumentException("priority must be in range " + (int) System.Threading.ThreadPriority.Lowest + " .. " + (int) System.Threading.ThreadPriority.Highest + " inclusive");
+                mergeThreadPriority = pri;
+                
+                int numThreads = MergeThreadCount();
+                for (int i = 0; i < numThreads; i++)
+                {
+                    MergeThread merge = mergeThreads[i];
+                    merge.SetThreadPriority(pri);
+                }
+            }
+        }
+        
+        private bool Verbose()
+        {
+            return writer != null && writer.Verbose;
+        }
+        
+        private void  Message(System.String message)
+        {
+            if (Verbose())
+                writer.Message("CMS: " + message);
+        }
+        
+        private void  InitMergeThreadPriority()
+        {
+            lock (this)
+            {
+                if (mergeThreadPriority == - 1)
+                {
+                    // Default to slightly higher priority than our
+                    // calling thread
+                    mergeThreadPriority = 1 + (System.Int32) ThreadClass.Current().Priority;
+                    if (mergeThreadPriority > (int) System.Threading.ThreadPriority.Highest)
+                        mergeThreadPriority = (int) System.Threading.ThreadPriority.Highest;
+                }
+            }
+        }
+        
         protected override void Dispose(bool disposing)
         {
             //if (disposing)
@@ -140,30 +140,30 @@ namespace Lucene.Net.Index
                 closed = true;
             //}
         }
-		
-		public virtual void  Sync()
-		{
-			lock (this)
-			{
-				while (MergeThreadCount() > 0)
-				{
-					if (Verbose())
-						Message("now wait for threads; currently " + mergeThreads.Count + " still running");
-					int count = mergeThreads.Count;
-					if (Verbose())
-					{
-						for (int i = 0; i < count; i++)
-							Message("    " + i + ": " + mergeThreads[i]);
-					}
-					
-					System.Threading.Monitor.Wait(this);
-					
-				}
-			}
-		}
-		
-		private int MergeThreadCount()
-		{
+        
+        public virtual void  Sync()
+        {
+            lock (this)
+            {
+                while (MergeThreadCount() > 0)
+                {
+                    if (Verbose())
+                        Message("now wait for threads; currently " + mergeThreads.Count + " still running");
+                    int count = mergeThreads.Count;
+                    if (Verbose())
+                    {
+                        for (int i = 0; i < count; i++)
+                            Message("    " + i + ": " + mergeThreads[i]);
+                    }
+                    
+                    System.Threading.Monitor.Wait(this);
+                    
+                }
+            }
+        }
+        
+        private int MergeThreadCount()
+        {
             lock (this)
             {
                 int count = 0;
@@ -178,327 +178,327 @@ namespace Lucene.Net.Index
                 return count;
             }
         }
-		
-		public override void  Merge(IndexWriter writer)
-		{
-			// TODO: .NET doesn't support this
-			// assert !Thread.holdsLock(writer);
-			
-			this.writer = writer;
-			
-			InitMergeThreadPriority();
-			
-			dir = writer.Directory;
-			
-			// First, quickly run through the newly proposed merges
-			// and add any orthogonal merges (ie a merge not
-			// involving segments already pending to be merged) to
-			// the queue.  If we are way behind on merging, many of
-			// these newly proposed merges will likely already be
-			// registered.
-			
-			if (Verbose())
-			{
-				Message("now merge");
-				Message("  index: " + writer.SegString());
-			}
-			
-			// Iterate, pulling from the IndexWriter's queue of
-			// pending merges, until it's empty:
-			while (true)
-			{
-				// TODO: we could be careful about which merges to do in
-				// the BG (eg maybe the "biggest" ones) vs FG, which
-				// merges to do first (the easiest ones?), etc.
-				
-				MergePolicy.OneMerge merge = writer.GetNextMerge();
-				if (merge == null)
-				{
-					if (Verbose())
-						Message("  no more merges pending; now return");
-					return ;
-				}
-				
-				// We do this w/ the primary thread to keep
-				// deterministic assignment of segment names
-				writer.MergeInit(merge);
-				
-				bool success = false;
-				try
-				{
-					lock (this)
-					{
-						while (MergeThreadCount() >= _maxThreadCount)
-						{
-							if (Verbose())
-								Message("    too many merge threads running; stalling...");
-							
+        
+        public override void  Merge(IndexWriter writer)
+        {
+            // TODO: .NET doesn't support this
+            // assert !Thread.holdsLock(writer);
+            
+            this.writer = writer;
+            
+            InitMergeThreadPriority();
+            
+            dir = writer.Directory;
+            
+            // First, quickly run through the newly proposed merges
+            // and add any orthogonal merges (ie a merge not
+            // involving segments already pending to be merged) to
+            // the queue.  If we are way behind on merging, many of
+            // these newly proposed merges will likely already be
+            // registered.
+            
+            if (Verbose())
+            {
+                Message("now merge");
+                Message("  index: " + writer.SegString());
+            }
+            
+            // Iterate, pulling from the IndexWriter's queue of
+            // pending merges, until it's empty:
+            while (true)
+            {
+                // TODO: we could be careful about which merges to do in
+                // the BG (eg maybe the "biggest" ones) vs FG, which
+                // merges to do first (the easiest ones?), etc.
+                
+                MergePolicy.OneMerge merge = writer.GetNextMerge();
+                if (merge == null)
+                {
+                    if (Verbose())
+                        Message("  no more merges pending; now return");
+                    return ;
+                }
+                
+                // We do this w/ the primary thread to keep
+                // deterministic assignment of segment names
+                writer.MergeInit(merge);
+                
+                bool success = false;
+                try
+                {
+                    lock (this)
+                    {
+                        while (MergeThreadCount() >= _maxThreadCount)
+                        {
+                            if (Verbose())
+                                Message("    too many merge threads running; stalling...");
+                            
                             System.Threading.Monitor.Wait(this);
-							
-							
-						}
-						
-						if (Verbose())
-							Message("  consider merge " + merge.SegString(dir));
+                            
+                            
+                        }
+                        
+                        if (Verbose())
+                            Message("  consider merge " + merge.SegString(dir));
 
-					    System.Diagnostics.Debug.Assert(MergeThreadCount() < _maxThreadCount);
-												
-						// OK to spawn a new merge thread to handle this
-						// merge:
-						MergeThread merger = GetMergeThread(writer, merge);
-						mergeThreads.Add(merger);
-						if (Verbose())
-							Message("    launch new thread [" + merger.Name + "]");
-						
-						merger.Start();
-						success = true;
-					}
-				}
-				finally
-				{
-					if (!success)
-					{
-						writer.MergeFinish(merge);
-					}
-				}
-			}
-		}
-		
-		/// <summary>Does the actual merge, by calling <see cref="IndexWriter.Merge" /> </summary>
-		protected internal virtual void  DoMerge(MergePolicy.OneMerge merge)
-		{
-			writer.Merge(merge);
-		}
-		
-		/// <summary>Create and return a new MergeThread </summary>
-		protected internal virtual MergeThread GetMergeThread(IndexWriter writer, MergePolicy.OneMerge merge)
-		{
-			lock (this)
-			{
-				var thread = new MergeThread(this, writer, merge);
-				thread.SetThreadPriority(mergeThreadPriority);
-				thread.IsBackground = true;
-				thread.Name = "Lucene Merge Thread #" + mergeThreadCount++;
-				return thread;
-			}
-		}
-		
-		public /*protected internal*/ class MergeThread:ThreadClass
-		{
-			private void  InitBlock(ConcurrentMergeScheduler enclosingInstance)
-			{
-				this.enclosingInstance = enclosingInstance;
-			}
-			private ConcurrentMergeScheduler enclosingInstance;
-			public ConcurrentMergeScheduler Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			
-			internal IndexWriter writer;
-			internal MergePolicy.OneMerge startMerge;
-			internal MergePolicy.OneMerge runningMerge;
-			
-			public MergeThread(ConcurrentMergeScheduler enclosingInstance, IndexWriter writer, MergePolicy.OneMerge startMerge)
-			{
-				InitBlock(enclosingInstance);
-				this.writer = writer;
-				this.startMerge = startMerge;
-			}
-			
-			public virtual void  SetRunningMerge(MergePolicy.OneMerge merge)
-			{
-				lock (this)
-				{
-					runningMerge = merge;
-				}
-			}
+                        System.Diagnostics.Debug.Assert(MergeThreadCount() < _maxThreadCount);
+                                                
+                        // OK to spawn a new merge thread to handle this
+                        // merge:
+                        MergeThread merger = GetMergeThread(writer, merge);
+                        mergeThreads.Add(merger);
+                        if (Verbose())
+                            Message("    launch new thread [" + merger.Name + "]");
+                        
+                        merger.Start();
+                        success = true;
+                    }
+                }
+                finally
+                {
+                    if (!success)
+                    {
+                        writer.MergeFinish(merge);
+                    }
+                }
+            }
+        }
+        
+        /// <summary>Does the actual merge, by calling <see cref="IndexWriter.Merge" /> </summary>
+        protected internal virtual void  DoMerge(MergePolicy.OneMerge merge)
+        {
+            writer.Merge(merge);
+        }
+        
+        /// <summary>Create and return a new MergeThread </summary>
+        protected internal virtual MergeThread GetMergeThread(IndexWriter writer, MergePolicy.OneMerge merge)
+        {
+            lock (this)
+            {
+                var thread = new MergeThread(this, writer, merge);
+                thread.SetThreadPriority(mergeThreadPriority);
+                thread.IsBackground = true;
+                thread.Name = "Lucene Merge Thread #" + mergeThreadCount++;
+                return thread;
+            }
+        }
+        
+        public /*protected internal*/ class MergeThread:ThreadClass
+        {
+            private void  InitBlock(ConcurrentMergeScheduler enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
+            private ConcurrentMergeScheduler enclosingInstance;
+            public ConcurrentMergeScheduler Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+                
+            }
+            
+            internal IndexWriter writer;
+            internal MergePolicy.OneMerge startMerge;
+            internal MergePolicy.OneMerge runningMerge;
+            
+            public MergeThread(ConcurrentMergeScheduler enclosingInstance, IndexWriter writer, MergePolicy.OneMerge startMerge)
+            {
+                InitBlock(enclosingInstance);
+                this.writer = writer;
+                this.startMerge = startMerge;
+            }
+            
+            public virtual void  SetRunningMerge(MergePolicy.OneMerge merge)
+            {
+                lock (this)
+                {
+                    runningMerge = merge;
+                }
+            }
 
-		    public virtual MergePolicy.OneMerge RunningMerge
-		    {
-		        get
-		        {
-		            lock (this)
-		            {
-		                return runningMerge;
-		            }
-		        }
-		    }
+            public virtual MergePolicy.OneMerge RunningMerge
+            {
+                get
+                {
+                    lock (this)
+                    {
+                        return runningMerge;
+                    }
+                }
+            }
 
-		    public virtual void  SetThreadPriority(int pri)
-			{
-				try
-				{
-					Priority = (System.Threading.ThreadPriority) pri;
-				}
-				catch (System.NullReferenceException)
-				{
-					// Strangely, Sun's JDK 1.5 on Linux sometimes
-					// throws NPE out of here...
-				}
-				catch (System.Security.SecurityException)
-				{
-					// Ignore this because we will still run fine with
-					// normal thread priority
-				}
-			}
-			
-			override public void  Run()
-			{
-				
-				// First time through the while loop we do the merge
-				// that we were started with:
-				MergePolicy.OneMerge merge = this.startMerge;
-				
-				try
-				{
-					
-					if (Enclosing_Instance.Verbose())
-						Enclosing_Instance.Message("  merge thread: start");
-					
-					while (true)
-					{
-						SetRunningMerge(merge);
-						Enclosing_Instance.DoMerge(merge);
-						
-						// Subsequent times through the loop we do any new
-						// merge that writer says is necessary:
-						merge = writer.GetNextMerge();
-						if (merge != null)
-						{
-							writer.MergeInit(merge);
-							if (Enclosing_Instance.Verbose())
-								Enclosing_Instance.Message("  merge thread: do another merge " + merge.SegString(Enclosing_Instance.dir));
-						}
-						else
-							break;
-					}
-					
-					if (Enclosing_Instance.Verbose())
-						Enclosing_Instance.Message("  merge thread: done");
-				}
-				catch (System.Exception exc)
-				{
-					// Ignore the exception if it was due to abort:
-					if (!(exc is MergePolicy.MergeAbortedException))
-					{
-						if (!Enclosing_Instance.suppressExceptions)
-						{
-							// suppressExceptions is normally only set during
-							// testing.
-							Lucene.Net.Index.ConcurrentMergeScheduler.anyExceptions = true;
-							Enclosing_Instance.HandleMergeException(exc);
-						}
-					}
-				}
-				finally
-				{
-					lock (Enclosing_Instance)
-					{
-						System.Threading.Monitor.PulseAll(Enclosing_Instance);
-						Enclosing_Instance.mergeThreads.Remove(this);
+            public virtual void  SetThreadPriority(int pri)
+            {
+                try
+                {
+                    Priority = (System.Threading.ThreadPriority) pri;
+                }
+                catch (System.NullReferenceException)
+                {
+                    // Strangely, Sun's JDK 1.5 on Linux sometimes
+                    // throws NPE out of here...
+                }
+                catch (System.Security.SecurityException)
+                {
+                    // Ignore this because we will still run fine with
+                    // normal thread priority
+                }
+            }
+            
+            override public void  Run()
+            {
+                
+                // First time through the while loop we do the merge
+                // that we were started with:
+                MergePolicy.OneMerge merge = this.startMerge;
+                
+                try
+                {
+                    
+                    if (Enclosing_Instance.Verbose())
+                        Enclosing_Instance.Message("  merge thread: start");
+                    
+                    while (true)
+                    {
+                        SetRunningMerge(merge);
+                        Enclosing_Instance.DoMerge(merge);
+                        
+                        // Subsequent times through the loop we do any new
+                        // merge that writer says is necessary:
+                        merge = writer.GetNextMerge();
+                        if (merge != null)
+                        {
+                            writer.MergeInit(merge);
+                            if (Enclosing_Instance.Verbose())
+                                Enclosing_Instance.Message("  merge thread: do another merge " + merge.SegString(Enclosing_Instance.dir));
+                        }
+                        else
+                            break;
+                    }
+                    
+                    if (Enclosing_Instance.Verbose())
+                        Enclosing_Instance.Message("  merge thread: done");
+                }
+                catch (System.Exception exc)
+                {
+                    // Ignore the exception if it was due to abort:
+                    if (!(exc is MergePolicy.MergeAbortedException))
+                    {
+                        if (!Enclosing_Instance.suppressExceptions)
+                        {
+                            // suppressExceptions is normally only set during
+                            // testing.
+                            Lucene.Net.Index.ConcurrentMergeScheduler.anyExceptions = true;
+                            Enclosing_Instance.HandleMergeException(exc);
+                        }
+                    }
+                }
+                finally
+                {
+                    lock (Enclosing_Instance)
+                    {
+                        System.Threading.Monitor.PulseAll(Enclosing_Instance);
+                        Enclosing_Instance.mergeThreads.Remove(this);
                         bool removed = !Enclosing_Instance.mergeThreads.Contains(this);
-						System.Diagnostics.Debug.Assert(removed);
-					}
-				}
-			}
-			
-			public override System.String ToString()
-			{
-				MergePolicy.OneMerge merge = RunningMerge ?? startMerge;
-				return "merge thread: " + merge.SegString(Enclosing_Instance.dir);
-			}
-		}
-		
-		/// <summary>Called when an exception is hit in a background merge
-		/// thread 
-		/// </summary>
-		protected internal virtual void  HandleMergeException(System.Exception exc)
-		{
-			// When an exception is hit during merge, IndexWriter
-			// removes any partial files and then allows another
-			// merge to run.  If whatever caused the error is not
-			// transient then the exception will keep happening,
-			// so, we sleep here to avoid saturating CPU in such
-			// cases:
-			System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * 250));
-			
+                        System.Diagnostics.Debug.Assert(removed);
+                    }
+                }
+            }
+            
+            public override System.String ToString()
+            {
+                MergePolicy.OneMerge merge = RunningMerge ?? startMerge;
+                return "merge thread: " + merge.SegString(Enclosing_Instance.dir);
+            }
+        }
+        
+        /// <summary>Called when an exception is hit in a background merge
+        /// thread 
+        /// </summary>
+        protected internal virtual void  HandleMergeException(System.Exception exc)
+        {
+            // When an exception is hit during merge, IndexWriter
+            // removes any partial files and then allows another
+            // merge to run.  If whatever caused the error is not
+            // transient then the exception will keep happening,
+            // so, we sleep here to avoid saturating CPU in such
+            // cases:
+            System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * 250));
+            
             throw new MergePolicy.MergeException(exc, dir);
-		}
-		
-		internal static bool anyExceptions = false;
-		
-		/// <summary>Used for testing </summary>
-		public static bool AnyUnhandledExceptions()
-		{
-			if (allInstances == null)
-			{
-				throw new System.SystemException("setTestMode() was not called; often this is because your test case's setUp method fails to call super.setUp in LuceneTestCase");
-			}
-			lock (allInstances)
-			{
-				int count = allInstances.Count;
-				// Make sure all outstanding threads are done so we see
-				// any exceptions they may produce:
-				for (int i = 0; i < count; i++)
-				    allInstances[i].Sync();
-				bool v = anyExceptions;
-				anyExceptions = false;
-				return v;
-			}
-		}
-		
-		public static void  ClearUnhandledExceptions()
-		{
-			lock (allInstances)
-			{
-				anyExceptions = false;
-			}
-		}
-		
-		/// <summary>Used for testing </summary>
-		private void  AddMyself()
-		{
-			lock (allInstances)
-			{
-				int size = allInstances.Count;
-				int upto = 0;
-				for (int i = 0; i < size; i++)
-				{
-					ConcurrentMergeScheduler other = allInstances[i];
-					if (!(other.closed && 0 == other.MergeThreadCount()))
-					// Keep this one for now: it still has threads or
-					// may spawn new threads
-						allInstances[upto++] = other;
-				}
-			    allInstances.RemoveRange(upto, allInstances.Count - upto);
-				allInstances.Add(this);
-			}
-		}
-		
-		private bool suppressExceptions;
-		
-		/// <summary>Used for testing </summary>
-		public /*internal*/ virtual void  SetSuppressExceptions()
-		{
-			suppressExceptions = true;
-		}
-		
-		/// <summary>Used for testing </summary>
-		public /*internal*/ virtual void  ClearSuppressExceptions()
-		{
-			suppressExceptions = false;
-		}
-		
-		/// <summary>Used for testing </summary>
-		private static List<ConcurrentMergeScheduler> allInstances;
-		public static void  SetTestMode()
-		{
-			allInstances = new List<ConcurrentMergeScheduler>();
-		}
-	}
+        }
+        
+        internal static bool anyExceptions = false;
+        
+        /// <summary>Used for testing </summary>
+        public static bool AnyUnhandledExceptions()
+        {
+            if (allInstances == null)
+            {
+                throw new System.SystemException("setTestMode() was not called; often this is because your test case's setUp method fails to call super.setUp in LuceneTestCase");
+            }
+            lock (allInstances)
+            {
+                int count = allInstances.Count;
+                // Make sure all outstanding threads are done so we see
+                // any exceptions they may produce:
+                for (int i = 0; i < count; i++)
+                    allInstances[i].Sync();
+                bool v = anyExceptions;
+                anyExceptions = false;
+                return v;
+            }
+        }
+        
+        public static void  ClearUnhandledExceptions()
+        {
+            lock (allInstances)
+            {
+                anyExceptions = false;
+            }
+        }
+        
+        /// <summary>Used for testing </summary>
+        private void  AddMyself()
+        {
+            lock (allInstances)
+            {
+                int size = allInstances.Count;
+                int upto = 0;
+                for (int i = 0; i < size; i++)
+                {
+                    ConcurrentMergeScheduler other = allInstances[i];
+                    if (!(other.closed && 0 == other.MergeThreadCount()))
+                    // Keep this one for now: it still has threads or
+                    // may spawn new threads
+                        allInstances[upto++] = other;
+                }
+                allInstances.RemoveRange(upto, allInstances.Count - upto);
+                allInstances.Add(this);
+            }
+        }
+        
+        private bool suppressExceptions;
+        
+        /// <summary>Used for testing </summary>
+        public /*internal*/ virtual void  SetSuppressExceptions()
+        {
+            suppressExceptions = true;
+        }
+        
+        /// <summary>Used for testing </summary>
+        public /*internal*/ virtual void  ClearSuppressExceptions()
+        {
+            suppressExceptions = false;
+        }
+        
+        /// <summary>Used for testing </summary>
+        private static List<ConcurrentMergeScheduler> allInstances;
+        public static void  SetTestMode()
+        {
+            allInstances = new List<ConcurrentMergeScheduler>();
+        }
+    }
 }
\ No newline at end of file

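For context on the class above: once MaxThreadCount merge threads are running,
the thread calling add/updateDocument blocks inside Merge() on Monitor.Wait
until a merge thread finishes and pulses the scheduler. Below is a minimal
sketch of wiring it into an IndexWriter, assuming the Lucene.Net 3.0.x API of
this branch (FSDirectory.Open, the legacy IndexWriter constructor, and
IndexWriter.SetMergeScheduler); the index path and field name are invented for
illustration, not taken from the commit:

    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Documents;
    using Lucene.Net.Index;
    using Lucene.Net.Store;
    using Version = Lucene.Net.Util.Version;

    static class MergeSchedulerDemo
    {
        static void Main()
        {
            var dir = FSDirectory.Open(new System.IO.DirectoryInfo("demo-index"));
            var writer = new IndexWriter(dir, new StandardAnalyzer(Version.LUCENE_30),
                                         true, IndexWriter.MaxFieldLength.UNLIMITED);

            // At most two concurrent background merges; AddDocument calls
            // stall inside Merge() once both merge threads are busy.
            var cms = new ConcurrentMergeScheduler { MaxThreadCount = 2 };
            writer.SetMergeScheduler(cms);

            for (int i = 0; i < 1000; i++)
            {
                var doc = new Document();
                doc.Add(new Field("id", i.ToString(), Field.Store.YES,
                                  Field.Index.NOT_ANALYZED));
                writer.AddDocument(doc); // merges run on background threads
            }

            writer.Dispose(); // also waits for any pending merges
        }
    }

ConcurrentMergeScheduler is already the default scheduler in this era of
Lucene, so the explicit SetMergeScheduler call mainly matters when adjusting
MaxThreadCount or plugging in a subclass.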
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DefaultSkipListReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DefaultSkipListReader.cs b/src/core/Index/DefaultSkipListReader.cs
index a1cddde..470df0d 100644
--- a/src/core/Index/DefaultSkipListReader.cs
+++ b/src/core/Index/DefaultSkipListReader.cs
@@ -20,109 +20,109 @@ using IndexInput = Lucene.Net.Store.IndexInput;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> Implements the skip list reader for the default posting list format
-	/// that stores positions and payloads.
-	/// 
-	/// </summary>
-	class DefaultSkipListReader:MultiLevelSkipListReader
-	{
-		private bool currentFieldStoresPayloads;
-		private readonly long[] freqPointer;
-		private readonly long[] proxPointer;
-		private readonly int[] payloadLength;
-		
-		private long lastFreqPointer;
-		private long lastProxPointer;
-		private int lastPayloadLength;
-		
-		
-		internal DefaultSkipListReader(IndexInput skipStream, int maxSkipLevels, int skipInterval):base(skipStream, maxSkipLevels, skipInterval)
-		{
-			freqPointer = new long[maxSkipLevels];
-			proxPointer = new long[maxSkipLevels];
-			payloadLength = new int[maxSkipLevels];
-		}
-		
-		internal virtual void  Init(long skipPointer, long freqBasePointer, long proxBasePointer, int df, bool storesPayloads)
-		{
-			base.Init(skipPointer, df);
-			this.currentFieldStoresPayloads = storesPayloads;
-			lastFreqPointer = freqBasePointer;
-			lastProxPointer = proxBasePointer;
+    
+    /// <summary> Implements the skip list reader for the default posting list format
+    /// that stores positions and payloads.
+    /// 
+    /// </summary>
+    class DefaultSkipListReader:MultiLevelSkipListReader
+    {
+        private bool currentFieldStoresPayloads;
+        private readonly long[] freqPointer;
+        private readonly long[] proxPointer;
+        private readonly int[] payloadLength;
+        
+        private long lastFreqPointer;
+        private long lastProxPointer;
+        private int lastPayloadLength;
+        
+        
+        internal DefaultSkipListReader(IndexInput skipStream, int maxSkipLevels, int skipInterval):base(skipStream, maxSkipLevels, skipInterval)
+        {
+            freqPointer = new long[maxSkipLevels];
+            proxPointer = new long[maxSkipLevels];
+            payloadLength = new int[maxSkipLevels];
+        }
+        
+        internal virtual void  Init(long skipPointer, long freqBasePointer, long proxBasePointer, int df, bool storesPayloads)
+        {
+            base.Init(skipPointer, df);
+            this.currentFieldStoresPayloads = storesPayloads;
+            lastFreqPointer = freqBasePointer;
+            lastProxPointer = proxBasePointer;
 
-			for (int i = 0; i < freqPointer.Length; i++) freqPointer[i] = freqBasePointer;
-			for (int i = 0; i < proxPointer.Length; i++) proxPointer[i] = proxBasePointer;
-			for (int i = 0; i < payloadLength.Length; i++) payloadLength[i] = 0;
+            for (int i = 0; i < freqPointer.Length; i++) freqPointer[i] = freqBasePointer;
+            for (int i = 0; i < proxPointer.Length; i++) proxPointer[i] = proxBasePointer;
+            for (int i = 0; i < payloadLength.Length; i++) payloadLength[i] = 0;
+        }
+        
+        /// <summary>Returns the freq pointer of the doc to which the last call of 
+        /// <see cref="MultiLevelSkipListReader.SkipTo(int)" /> has skipped.  
+        /// </summary>
+        internal virtual long GetFreqPointer()
+        {
+            return lastFreqPointer;
+        }
+        
+        /// <summary>Returns the prox pointer of the doc to which the last call of 
+        /// <see cref="MultiLevelSkipListReader.SkipTo(int)" /> has skipped.  
+        /// </summary>
+        internal virtual long GetProxPointer()
+        {
+            return lastProxPointer;
+        }
+        
+        /// <summary>Returns the payload length of the payload stored just before 
+        /// the doc to which the last call of <see cref="MultiLevelSkipListReader.SkipTo(int)" /> 
+        /// has skipped.  
+        /// </summary>
+        internal virtual int GetPayloadLength()
+        {
+            return lastPayloadLength;
+        }
+        
+        protected internal override void  SeekChild(int level)
+        {
+            base.SeekChild(level);
+            freqPointer[level] = lastFreqPointer;
+            proxPointer[level] = lastProxPointer;
+            payloadLength[level] = lastPayloadLength;
+        }
+        
+        protected internal override void  SetLastSkipData(int level)
+        {
+            base.SetLastSkipData(level);
+            lastFreqPointer = freqPointer[level];
+            lastProxPointer = proxPointer[level];
+            lastPayloadLength = payloadLength[level];
+        }
+        
+        
+        protected internal override int ReadSkipData(int level, IndexInput skipStream)
+        {
+            int delta;
+            if (currentFieldStoresPayloads)
+            {
+                // the current field stores payloads.
+                // if the doc delta is odd then we have
+                // to read the current payload length
+                // because it differs from the length of the
+                // previous payload
+                delta = skipStream.ReadVInt();
+                if ((delta & 1) != 0)
+                {
+                    payloadLength[level] = skipStream.ReadVInt();
+                }
+                delta = Number.URShift(delta, 1);
+            }
+            else
+            {
+                delta = skipStream.ReadVInt();
+            }
+            freqPointer[level] += skipStream.ReadVInt();
+            proxPointer[level] += skipStream.ReadVInt();
+            
+            return delta;
         }
-		
-		/// <summary>Returns the freq pointer of the doc to which the last call of 
-		/// <see cref="MultiLevelSkipListReader.SkipTo(int)" /> has skipped.  
-		/// </summary>
-		internal virtual long GetFreqPointer()
-		{
-			return lastFreqPointer;
-		}
-		
-		/// <summary>Returns the prox pointer of the doc to which the last call of 
-		/// <see cref="MultiLevelSkipListReader.SkipTo(int)" /> has skipped.  
-		/// </summary>
-		internal virtual long GetProxPointer()
-		{
-			return lastProxPointer;
-		}
-		
-		/// <summary>Returns the payload length of the payload stored just before 
-		/// the doc to which the last call of <see cref="MultiLevelSkipListReader.SkipTo(int)" /> 
-		/// has skipped.  
-		/// </summary>
-		internal virtual int GetPayloadLength()
-		{
-			return lastPayloadLength;
-		}
-		
-		protected internal override void  SeekChild(int level)
-		{
-			base.SeekChild(level);
-			freqPointer[level] = lastFreqPointer;
-			proxPointer[level] = lastProxPointer;
-			payloadLength[level] = lastPayloadLength;
-		}
-		
-		protected internal override void  SetLastSkipData(int level)
-		{
-			base.SetLastSkipData(level);
-			lastFreqPointer = freqPointer[level];
-			lastProxPointer = proxPointer[level];
-			lastPayloadLength = payloadLength[level];
-		}
-		
-		
-		protected internal override int ReadSkipData(int level, IndexInput skipStream)
-		{
-			int delta;
-			if (currentFieldStoresPayloads)
-			{
-				// the current field stores payloads.
-				// if the doc delta is odd then we have
-				// to read the current payload length
-				// because it differs from the length of the
-				// previous payload
-				delta = skipStream.ReadVInt();
-				if ((delta & 1) != 0)
-				{
-					payloadLength[level] = skipStream.ReadVInt();
-				}
-				delta = Number.URShift(delta, 1);
-			}
-			else
-			{
-				delta = skipStream.ReadVInt();
-			}
-			freqPointer[level] += skipStream.ReadVInt();
-			proxPointer[level] += skipStream.ReadVInt();
-			
-			return delta;
-		}
-	}
+    }
 }
\ No newline at end of file

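The ReadSkipData method above is the decode half of a small trick: for fields
that store payloads, the low bit of each doc delta flags whether a new payload
length follows, and the remaining bits carry the delta itself. Here is a
standalone sketch of just that step, with a plain delegate standing in for
skipStream.ReadVInt() (the helper and its name are invented for illustration):

    using System;

    static class SkipDeltaDecode
    {
        // Mirrors the doc-delta handling in DefaultSkipListReader.ReadSkipData.
        internal static int DecodeDocDelta(Func<int> readVInt, bool storesPayloads,
                                           ref int payloadLength)
        {
            int delta = readVInt();
            if (storesPayloads)
            {
                if ((delta & 1) != 0)
                    payloadLength = readVInt(); // length changed at this skip point
                delta = (int)((uint)delta >> 1); // unsigned shift, like Number.URShift
            }
            return delta;
        }
    }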
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DefaultSkipListWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DefaultSkipListWriter.cs b/src/core/Index/DefaultSkipListWriter.cs
index 77412af..1c2de1b 100644
--- a/src/core/Index/DefaultSkipListWriter.cs
+++ b/src/core/Index/DefaultSkipListWriter.cs
@@ -21,123 +21,123 @@ using IndexOutput = Lucene.Net.Store.IndexOutput;
 
 namespace Lucene.Net.Index
 {
-	
-	
-	/// <summary> Implements the skip list writer for the default posting list format
-	/// that stores positions and payloads.
-	/// 
-	/// </summary>
-	class DefaultSkipListWriter:MultiLevelSkipListWriter
-	{
-		private int[] lastSkipDoc;
-		private int[] lastSkipPayloadLength;
-		private long[] lastSkipFreqPointer;
-		private long[] lastSkipProxPointer;
-		
-		private IndexOutput freqOutput;
-		private IndexOutput proxOutput;
-		
-		private int curDoc;
-		private bool curStorePayloads;
-		private int curPayloadLength;
-		private long curFreqPointer;
-		private long curProxPointer;
-		
-		internal DefaultSkipListWriter(int skipInterval, int numberOfSkipLevels, int docCount, IndexOutput freqOutput, IndexOutput proxOutput):base(skipInterval, numberOfSkipLevels, docCount)
-		{
-			this.freqOutput = freqOutput;
-			this.proxOutput = proxOutput;
-			
-			lastSkipDoc = new int[numberOfSkipLevels];
-			lastSkipPayloadLength = new int[numberOfSkipLevels];
-			lastSkipFreqPointer = new long[numberOfSkipLevels];
-			lastSkipProxPointer = new long[numberOfSkipLevels];
-		}
-		
-		internal virtual void  SetFreqOutput(IndexOutput freqOutput)
-		{
-			this.freqOutput = freqOutput;
-		}
-		
-		internal virtual void  SetProxOutput(IndexOutput proxOutput)
-		{
-			this.proxOutput = proxOutput;
-		}
-		
-		/// <summary> Sets the values for the current skip data. </summary>
-		internal virtual void  SetSkipData(int doc, bool storePayloads, int payloadLength)
-		{
-			this.curDoc = doc;
-			this.curStorePayloads = storePayloads;
-			this.curPayloadLength = payloadLength;
-			this.curFreqPointer = freqOutput.FilePointer;
-			if (proxOutput != null)
-				this.curProxPointer = proxOutput.FilePointer;
-		}
-		
-		protected internal override void  ResetSkip()
-		{
-			base.ResetSkip();
-			for (int i = 0; i < lastSkipDoc.Length; i++) lastSkipDoc[i] = 0;
-			for (int i = 0; i < lastSkipPayloadLength.Length; i++) lastSkipPayloadLength[i] = -1; // we don't have to write the first length in the skip list
-			for (int i = 0; i < lastSkipFreqPointer.Length; i++) lastSkipFreqPointer[i] = freqOutput.FilePointer;
-			if (proxOutput != null)
-				for (int i = 0; i < lastSkipProxPointer.Length; i++) lastSkipProxPointer[i] = proxOutput.FilePointer;
-		}
-		
-		protected internal override void  WriteSkipData(int level, IndexOutput skipBuffer)
-		{
-			// To efficiently store payloads in the posting lists we do not store the length of
-			// every payload. Instead we omit the length for a payload if the previous payload had
-			// the same length.
-			// However, in order to support skipping the payload length at every skip point must be known.
-			// So we use the same length encoding that we use for the posting lists for the skip data as well:
-			// Case 1: current field does not store payloads
-			//           SkipDatum                 --> DocSkip, FreqSkip, ProxSkip
-			//           DocSkip,FreqSkip,ProxSkip --> VInt
-			//           DocSkip records the document number before every SkipInterval th  document in TermFreqs. 
-			//           Document numbers are represented as differences from the previous value in the sequence.
-			// Case 2: current field stores payloads
-			//           SkipDatum                 --> DocSkip, PayloadLength?, FreqSkip,ProxSkip
-			//           DocSkip,FreqSkip,ProxSkip --> VInt
-			//           PayloadLength             --> VInt    
-			//         In this case DocSkip/2 is the difference between
-			//         the current and the previous value. If DocSkip
-			//         is odd, then a PayloadLength encoded as VInt follows,
-			//         if DocSkip is even, then it is assumed that the
-			//         current payload length equals the length at the previous
-			//         skip point
-			if (curStorePayloads)
-			{
-				int delta = curDoc - lastSkipDoc[level];
-				if (curPayloadLength == lastSkipPayloadLength[level])
-				{
-					// the current payload length equals the length at the previous skip point,
-					// so we don't store the length again
-					skipBuffer.WriteVInt(delta * 2);
-				}
-				else
-				{
-					// the payload length is different from the previous one. We shift the DocSkip, 
-					// set the lowest bit and store the current payload length as VInt.
-					skipBuffer.WriteVInt(delta * 2 + 1);
-					skipBuffer.WriteVInt(curPayloadLength);
-					lastSkipPayloadLength[level] = curPayloadLength;
-				}
-			}
-			else
-			{
-				// current field does not store payloads
-				skipBuffer.WriteVInt(curDoc - lastSkipDoc[level]);
-			}
-			skipBuffer.WriteVInt((int) (curFreqPointer - lastSkipFreqPointer[level]));
-			skipBuffer.WriteVInt((int) (curProxPointer - lastSkipProxPointer[level]));
-			
-			lastSkipDoc[level] = curDoc;
-			//System.out.println("write doc at level " + level + ": " + curDoc);
-			
-			lastSkipFreqPointer[level] = curFreqPointer;
-			lastSkipProxPointer[level] = curProxPointer;
-		}
-	}
+    
+    
+    /// <summary> Implements the skip list writer for the default posting list format
+    /// that stores positions and payloads.
+    /// 
+    /// </summary>
+    class DefaultSkipListWriter:MultiLevelSkipListWriter
+    {
+        private int[] lastSkipDoc;
+        private int[] lastSkipPayloadLength;
+        private long[] lastSkipFreqPointer;
+        private long[] lastSkipProxPointer;
+        
+        private IndexOutput freqOutput;
+        private IndexOutput proxOutput;
+        
+        private int curDoc;
+        private bool curStorePayloads;
+        private int curPayloadLength;
+        private long curFreqPointer;
+        private long curProxPointer;
+        
+        internal DefaultSkipListWriter(int skipInterval, int numberOfSkipLevels, int docCount, IndexOutput freqOutput, IndexOutput proxOutput):base(skipInterval, numberOfSkipLevels, docCount)
+        {
+            this.freqOutput = freqOutput;
+            this.proxOutput = proxOutput;
+            
+            lastSkipDoc = new int[numberOfSkipLevels];
+            lastSkipPayloadLength = new int[numberOfSkipLevels];
+            lastSkipFreqPointer = new long[numberOfSkipLevels];
+            lastSkipProxPointer = new long[numberOfSkipLevels];
+        }
+        
+        internal virtual void  SetFreqOutput(IndexOutput freqOutput)
+        {
+            this.freqOutput = freqOutput;
+        }
+        
+        internal virtual void  SetProxOutput(IndexOutput proxOutput)
+        {
+            this.proxOutput = proxOutput;
+        }
+        
+        /// <summary> Sets the values for the current skip data. </summary>
+        internal virtual void  SetSkipData(int doc, bool storePayloads, int payloadLength)
+        {
+            this.curDoc = doc;
+            this.curStorePayloads = storePayloads;
+            this.curPayloadLength = payloadLength;
+            this.curFreqPointer = freqOutput.FilePointer;
+            if (proxOutput != null)
+                this.curProxPointer = proxOutput.FilePointer;
+        }
+        
+        protected internal override void  ResetSkip()
+        {
+            base.ResetSkip();
+            for (int i = 0; i < lastSkipDoc.Length; i++) lastSkipDoc[i] = 0;
+            for (int i = 0; i < lastSkipPayloadLength.Length; i++) lastSkipPayloadLength[i] = -1; // we don't have to write the first length in the skip list
+            for (int i = 0; i < lastSkipFreqPointer.Length; i++) lastSkipFreqPointer[i] = freqOutput.FilePointer;
+            if (proxOutput != null)
+                for (int i = 0; i < lastSkipProxPointer.Length; i++) lastSkipProxPointer[i] = proxOutput.FilePointer;
+        }
+        
+        protected internal override void  WriteSkipData(int level, IndexOutput skipBuffer)
+        {
+            // To efficiently store payloads in the posting lists we do not store the length of
+            // every payload. Instead we omit the length for a payload if the previous payload had
+            // the same length.
+            // However, in order to support skipping the payload length at every skip point must be known.
+            // So we use the same length encoding that we use for the posting lists for the skip data as well:
+            // Case 1: current field does not store payloads
+            //           SkipDatum                 --> DocSkip, FreqSkip, ProxSkip
+            //           DocSkip,FreqSkip,ProxSkip --> VInt
+            //           DocSkip records the document number before every SkipInterval th  document in TermFreqs. 
+            //           Document numbers are represented as differences from the previous value in the sequence.
+            // Case 2: current field stores payloads
+            //           SkipDatum                 --> DocSkip, PayloadLength?, FreqSkip,ProxSkip
+            //           DocSkip,FreqSkip,ProxSkip --> VInt
+            //           PayloadLength             --> VInt    
+            //         In this case DocSkip/2 is the difference between
+            //         the current and the previous value. If DocSkip
+            //         is odd, then a PayloadLength encoded as VInt follows,
+            //         if DocSkip is even, then it is assumed that the
+            //         current payload length equals the length at the previous
+            //         skip point
+            if (curStorePayloads)
+            {
+                int delta = curDoc - lastSkipDoc[level];
+                if (curPayloadLength == lastSkipPayloadLength[level])
+                {
+                    // the current payload length equals the length at the previous skip point,
+                    // so we don't store the length again
+                    skipBuffer.WriteVInt(delta * 2);
+                }
+                else
+                {
+                    // the payload length is different from the previous one. We shift the DocSkip, 
+                    // set the lowest bit and store the current payload length as VInt.
+                    skipBuffer.WriteVInt(delta * 2 + 1);
+                    skipBuffer.WriteVInt(curPayloadLength);
+                    lastSkipPayloadLength[level] = curPayloadLength;
+                }
+            }
+            else
+            {
+                // current field does not store payloads
+                skipBuffer.WriteVInt(curDoc - lastSkipDoc[level]);
+            }
+            skipBuffer.WriteVInt((int) (curFreqPointer - lastSkipFreqPointer[level]));
+            skipBuffer.WriteVInt((int) (curProxPointer - lastSkipProxPointer[level]));
+            
+            lastSkipDoc[level] = curDoc;
+            //System.out.println("write doc at level " + level + ": " + curDoc);
+            
+            lastSkipFreqPointer[level] = curFreqPointer;
+            lastSkipProxPointer[level] = curProxPointer;
+        }
+    }
 }
\ No newline at end of file


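WriteSkipData above is the encode half of the scheme documented in its comment
block: without payloads the raw delta is written; with payloads an unchanged
length is written as delta * 2 (even) and a changed length as delta * 2 + 1
(odd) followed by the new length. A companion sketch to the decoder shown
earlier, again with a delegate standing in for skipBuffer.WriteVInt (names
invented for illustration):

    using System;

    static class SkipDeltaEncode
    {
        // Mirrors the doc-delta handling in DefaultSkipListWriter.WriteSkipData.
        internal static void EncodeDocDelta(Action<int> writeVInt, int delta,
                                            bool storesPayloads, int payloadLength,
                                            ref int lastPayloadLength)
        {
            if (!storesPayloads)
            {
                writeVInt(delta); // no flag bit needed
            }
            else if (payloadLength == lastPayloadLength)
            {
                writeVInt(delta * 2); // even: reuse the previous payload length
            }
            else
            {
                writeVInt(delta * 2 + 1); // odd: a new length follows
                writeVInt(payloadLength);
                lastPayloadLength = payloadLength;
            }
        }
    }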
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/NumericTokenStream.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/NumericTokenStream.cs b/src/core/Analysis/NumericTokenStream.cs
index 90b6e72..cbcb2dc 100644
--- a/src/core/Analysis/NumericTokenStream.cs
+++ b/src/core/Analysis/NumericTokenStream.cs
@@ -24,247 +24,247 @@ using NumericField = Lucene.Net.Documents.NumericField;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> <b>Expert:</b> This class provides a <see cref="TokenStream" />
-	/// for indexing numeric values that can be used by <see cref="NumericRangeQuery{T}" />
+    
+    /// <summary> <b>Expert:</b> This class provides a <see cref="TokenStream" />
+    /// for indexing numeric values that can be used by <see cref="NumericRangeQuery{T}" />
     /// or <see cref="NumericRangeFilter{T}" />.
-	/// 
-	/// <p/>Note that for simple usage, <see cref="NumericField" /> is
-	/// recommended.  <see cref="NumericField" /> disables norms and
-	/// term freqs, as they are not usually needed during
-	/// searching.  If you need to change these settings, you
-	/// should use this class.
-	/// 
-	/// <p/>See <see cref="NumericField" /> for capabilities of fields
-	/// indexed numerically.<p/>
-	/// 
-	/// <p/>Here's an example usage, for an <c>int</c> field:
-	/// 
-	/// <code>
-	///  Field field = new Field(name, new NumericTokenStream(precisionStep).setIntValue(value));
-	///  field.setOmitNorms(true);
-	///  field.setOmitTermFreqAndPositions(true);
-	///  document.add(field);
-	/// </code>
-	/// 
-	/// <p/>For optimal performance, re-use the TokenStream and Field instance
-	/// for more than one document:
-	/// 
-	/// <code>
-	///  NumericTokenStream stream = new NumericTokenStream(precisionStep);
-	///  Field field = new Field(name, stream);
-	///  field.setOmitNorms(true);
-	///  field.setOmitTermFreqAndPositions(true);
-	///  Document document = new Document();
-	///  document.add(field);
-	/// 
-	///  for(all documents) {
-	///    stream.setIntValue(value)
-	///    writer.addDocument(document);
-	///  }
-	/// </code>
-	/// 
-	/// <p/>This stream is not intended to be used in analyzers;
-	/// it's more for iterating the different precisions during
-	/// indexing a specific numeric value.<p/>
-	/// 
-	/// <p/><b>NOTE</b>: as token streams are only consumed once
-	/// the document is added to the index, if you index more
-	/// than one numeric field, use a separate <c>NumericTokenStream</c>
-	/// instance for each.<p/>
-	/// 
+    /// 
+    /// <p/>Note that for simple usage, <see cref="NumericField" /> is
+    /// recommended.  <see cref="NumericField" /> disables norms and
+    /// term freqs, as they are not usually needed during
+    /// searching.  If you need to change these settings, you
+    /// should use this class.
+    /// 
+    /// <p/>See <see cref="NumericField" /> for capabilities of fields
+    /// indexed numerically.<p/>
+    /// 
+    /// <p/>Here's an example usage, for an <c>int</c> field:
+    /// 
+    /// <code>
+    ///  Field field = new Field(name, new NumericTokenStream(precisionStep).setIntValue(value));
+    ///  field.setOmitNorms(true);
+    ///  field.setOmitTermFreqAndPositions(true);
+    ///  document.add(field);
+    /// </code>
+    /// 
+    /// <p/>For optimal performance, re-use the TokenStream and Field instance
+    /// for more than one document:
+    /// 
+    /// <code>
+    ///  NumericTokenStream stream = new NumericTokenStream(precisionStep);
+    ///  Field field = new Field(name, stream);
+    ///  field.setOmitNorms(true);
+    ///  field.setOmitTermFreqAndPositions(true);
+    ///  Document document = new Document();
+    ///  document.add(field);
+    /// 
+    ///  for(all documents) {
+    ///    stream.setIntValue(value)
+    ///    writer.addDocument(document);
+    ///  }
+    /// </code>
+    /// 
+    /// <p/>This stream is not intended to be used in analyzers;
+    /// it's more for iterating the different precisions while
+    /// indexing a specific numeric value.<p/>
+    /// 
+    /// <p/><b>NOTE</b>: as token streams are only consumed once
+    /// the document is added to the index, if you index more
+    /// than one numeric field, use a separate <c>NumericTokenStream</c>
+    /// instance for each.<p/>
+    /// 
     /// <p/>See <see cref="NumericRangeQuery{T}" /> for more details on the
-	/// <a href="../search/NumericRangeQuery.html#precisionStepDesc"><c>precisionStep</c></a>
-	/// parameter as well as how numeric fields work under the hood.<p/>
-	/// 
-	/// <p/><font color="red"><b>NOTE:</b> This API is experimental and
-	/// might change in incompatible ways in the next release.</font>
-	///   Since 2.9
-	/// </summary>
-	public sealed class NumericTokenStream : TokenStream
-	{
-		private void  InitBlock()
-		{
+    /// <a href="../search/NumericRangeQuery.html#precisionStepDesc"><c>precisionStep</c></a>
+    /// parameter as well as how numeric fields work under the hood.<p/>
+    /// 
+    /// <p/><font color="red"><b>NOTE:</b> This API is experimental and
+    /// might change in incompatible ways in the next release.</font>
+    ///   Since 2.9
+    /// </summary>
+    public sealed class NumericTokenStream : TokenStream
+    {
+        private void  InitBlock()
+        {
             termAtt = AddAttribute<ITermAttribute>();
             typeAtt = AddAttribute<ITypeAttribute>();
             posIncrAtt = AddAttribute<IPositionIncrementAttribute>();
-		}
-		
-		/// <summary>The full precision token gets this token type assigned. </summary>
-		public const System.String TOKEN_TYPE_FULL_PREC = "fullPrecNumeric";
-		
-		/// <summary>The lower precision tokens gets this token type assigned. </summary>
-		public const System.String TOKEN_TYPE_LOWER_PREC = "lowerPrecNumeric";
-		
-		/// <summary> Creates a token stream for numeric values using the default <c>precisionStep</c>
-		/// <see cref="NumericUtils.PRECISION_STEP_DEFAULT" /> (4). The stream is not yet initialized,
-		/// before using set a value using the various set<em>???</em>Value() methods.
-		/// </summary>
-		public NumericTokenStream():this(NumericUtils.PRECISION_STEP_DEFAULT)
-		{
-		}
-		
-		/// <summary> Creates a token stream for numeric values with the specified
-		/// <c>precisionStep</c>. The stream is not yet initialized,
-		/// before using set a value using the various set<em>???</em>Value() methods.
-		/// </summary>
-		public NumericTokenStream(int precisionStep):base()
-		{
-			InitBlock();
-			this.precisionStep = precisionStep;
-			if (precisionStep < 1)
-				throw new System.ArgumentException("precisionStep must be >=1");
-		}
-		
-		/// <summary> Expert: Creates a token stream for numeric values with the specified
-		/// <c>precisionStep</c> using the given <see cref="AttributeSource" />.
-		/// The stream is not yet initialized,
-		/// before using set a value using the various set<em>???</em>Value() methods.
-		/// </summary>
-		public NumericTokenStream(AttributeSource source, int precisionStep):base(source)
-		{
-			InitBlock();
-			this.precisionStep = precisionStep;
-			if (precisionStep < 1)
-				throw new System.ArgumentException("precisionStep must be >=1");
-		}
-		
-		/// <summary> Expert: Creates a token stream for numeric values with the specified
-		/// <c>precisionStep</c> using the given
-		/// <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" />.
-		/// The stream is not yet initialized,
-		/// before using set a value using the various set<em>???</em>Value() methods.
-		/// </summary>
-		public NumericTokenStream(AttributeFactory factory, int precisionStep):base(factory)
-		{
-			InitBlock();
-			this.precisionStep = precisionStep;
-			if (precisionStep < 1)
-				throw new System.ArgumentException("precisionStep must be >=1");
-		}
-		
-		/// <summary> Initializes the token stream with the supplied <c>long</c> value.</summary>
-		/// <param name="value_Renamed">the value, for which this TokenStream should enumerate tokens.
-		/// </param>
-		/// <returns> this instance, because of this you can use it the following way:
-		/// <c>new Field(name, new NumericTokenStream(precisionStep).SetLongValue(value))</c>
-		/// </returns>
-		public NumericTokenStream SetLongValue(long value_Renamed)
-		{
-			this.value_Renamed = value_Renamed;
-			valSize = 64;
-			shift = 0;
-			return this;
-		}
-		
-		/// <summary> Initializes the token stream with the supplied <c>int</c> value.</summary>
-		/// <param name="value_Renamed">the value, for which this TokenStream should enumerate tokens.
-		/// </param>
-		/// <returns> this instance, because of this you can use it the following way:
-		/// <c>new Field(name, new NumericTokenStream(precisionStep).SetIntValue(value))</c>
-		/// </returns>
-		public NumericTokenStream SetIntValue(int value_Renamed)
-		{
-			this.value_Renamed = (long) value_Renamed;
-			valSize = 32;
-			shift = 0;
-			return this;
-		}
-		
-		/// <summary> Initializes the token stream with the supplied <c>double</c> value.</summary>
-		/// <param name="value_Renamed">the value, for which this TokenStream should enumerate tokens.
-		/// </param>
-		/// <returns> this instance, because of this you can use it the following way:
-		/// <c>new Field(name, new NumericTokenStream(precisionStep).SetDoubleValue(value))</c>
-		/// </returns>
-		public NumericTokenStream SetDoubleValue(double value_Renamed)
-		{
-			this.value_Renamed = NumericUtils.DoubleToSortableLong(value_Renamed);
-			valSize = 64;
-			shift = 0;
-			return this;
-		}
-		
-		/// <summary> Initializes the token stream with the supplied <c>float</c> value.</summary>
-		/// <param name="value_Renamed">the value, for which this TokenStream should enumerate tokens.
-		/// </param>
-		/// <returns> this instance, because of this you can use it the following way:
-		/// <c>new Field(name, new NumericTokenStream(precisionStep).SetFloatValue(value))</c>
-		/// </returns>
-		public NumericTokenStream SetFloatValue(float value_Renamed)
-		{
-			this.value_Renamed = (long) NumericUtils.FloatToSortableInt(value_Renamed);
-			valSize = 32;
-			shift = 0;
-			return this;
-		}
-		
-		// @Override
-		public override void  Reset()
-		{
-			if (valSize == 0)
-				throw new System.SystemException("call set???Value() before usage");
-			shift = 0;
-		}
+        }
+        
+        /// <summary>The full precision token gets this token type assigned. </summary>
+        public const System.String TOKEN_TYPE_FULL_PREC = "fullPrecNumeric";
+        
+        /// <summary>The lower precision tokens get this token type assigned. </summary>
+        public const System.String TOKEN_TYPE_LOWER_PREC = "lowerPrecNumeric";
+        
+        /// <summary> Creates a token stream for numeric values using the default <c>precisionStep</c>
+        /// <see cref="NumericUtils.PRECISION_STEP_DEFAULT" /> (4). The stream is not yet initialized,
+        /// before using set a value using the various set<em>???</em>Value() methods.
+        /// </summary>
+        public NumericTokenStream():this(NumericUtils.PRECISION_STEP_DEFAULT)
+        {
+        }
+        
+        /// <summary> Creates a token stream for numeric values with the specified
+        /// <c>precisionStep</c>. The stream is not yet initialized;
+        /// before use, set a value via one of the Set<em>???</em>Value() methods.
+        /// </summary>
+        public NumericTokenStream(int precisionStep):base()
+        {
+            InitBlock();
+            this.precisionStep = precisionStep;
+            if (precisionStep < 1)
+                throw new System.ArgumentException("precisionStep must be >=1");
+        }
+        
+        /// <summary> Expert: Creates a token stream for numeric values with the specified
+        /// <c>precisionStep</c> using the given <see cref="AttributeSource" />.
+        /// The stream is not yet initialized;
+        /// before use, set a value via one of the Set<em>???</em>Value() methods.
+        /// </summary>
+        public NumericTokenStream(AttributeSource source, int precisionStep):base(source)
+        {
+            InitBlock();
+            this.precisionStep = precisionStep;
+            if (precisionStep < 1)
+                throw new System.ArgumentException("precisionStep must be >=1");
+        }
+        
+        /// <summary> Expert: Creates a token stream for numeric values with the specified
+        /// <c>precisionStep</c> using the given
+        /// <see cref="Lucene.Net.Util.AttributeSource.AttributeFactory" />.
+        /// The stream is not yet initialized;
+        /// before use, set a value via one of the Set<em>???</em>Value() methods.
+        /// </summary>
+        public NumericTokenStream(AttributeFactory factory, int precisionStep):base(factory)
+        {
+            InitBlock();
+            this.precisionStep = precisionStep;
+            if (precisionStep < 1)
+                throw new System.ArgumentException("precisionStep must be >=1");
+        }
+        
+        /// <summary> Initializes the token stream with the supplied <c>long</c> value.</summary>
+        /// <param name="value_Renamed">the value, for which this TokenStream should enumerate tokens.
+        /// </param>
+        /// <returns> this instance, because of this you can use it the following way:
+        /// <c>new Field(name, new NumericTokenStream(precisionStep).SetLongValue(value))</c>
+        /// </returns>
+        public NumericTokenStream SetLongValue(long value_Renamed)
+        {
+            this.value_Renamed = value_Renamed;
+            valSize = 64;
+            shift = 0;
+            return this;
+        }
+        
+        /// <summary> Initializes the token stream with the supplied <c>int</c> value.</summary>
+        /// <param name="value_Renamed">the value, for which this TokenStream should enumerate tokens.
+        /// </param>
+        /// <returns> this instance, because of this you can use it the following way:
+        /// <c>new Field(name, new NumericTokenStream(precisionStep).SetIntValue(value))</c>
+        /// </returns>
+        public NumericTokenStream SetIntValue(int value_Renamed)
+        {
+            this.value_Renamed = (long) value_Renamed;
+            valSize = 32;
+            shift = 0;
+            return this;
+        }
+        
+        /// <summary> Initializes the token stream with the supplied <c>double</c> value.</summary>
+        /// <param name="value_Renamed">the value, for which this TokenStream should enumerate tokens.
+        /// </param>
+        /// <returns> this instance, because of this you can use it the following way:
+        /// <c>new Field(name, new NumericTokenStream(precisionStep).SetDoubleValue(value))</c>
+        /// </returns>
+        public NumericTokenStream SetDoubleValue(double value_Renamed)
+        {
+            this.value_Renamed = NumericUtils.DoubleToSortableLong(value_Renamed);
+            valSize = 64;
+            shift = 0;
+            return this;
+        }
+        
+        /// <summary> Initializes the token stream with the supplied <c>float</c> value.</summary>
+        /// <param name="value_Renamed">the value, for which this TokenStream should enumerate tokens.
+        /// </param>
+        /// <returns> this instance, because of this you can use it the following way:
+        /// <c>new Field(name, new NumericTokenStream(precisionStep).SetFloatValue(value))</c>
+        /// </returns>
+        public NumericTokenStream SetFloatValue(float value_Renamed)
+        {
+            this.value_Renamed = (long) NumericUtils.FloatToSortableInt(value_Renamed);
+            valSize = 32;
+            shift = 0;
+            return this;
+        }
+        
+        // @Override
+        public override void  Reset()
+        {
+            if (valSize == 0)
+                throw new System.SystemException("call set???Value() before usage");
+            shift = 0;
+        }
 
         protected override void Dispose(bool disposing)
         {
             // Do nothing.
         }
-		
-		// @Override
-		public override bool IncrementToken()
-		{
-			if (valSize == 0)
-				throw new System.SystemException("call set???Value() before usage");
-			if (shift >= valSize)
-				return false;
-			
-			ClearAttributes();
-			char[] buffer;
-			switch (valSize)
-			{
-				
-				case 64: 
-					buffer = termAtt.ResizeTermBuffer(NumericUtils.BUF_SIZE_LONG);
-					termAtt.SetTermLength(NumericUtils.LongToPrefixCoded(value_Renamed, shift, buffer));
-					break;
-				
-				
-				case 32: 
-					buffer = termAtt.ResizeTermBuffer(NumericUtils.BUF_SIZE_INT);
-					termAtt.SetTermLength(NumericUtils.IntToPrefixCoded((int) value_Renamed, shift, buffer));
-					break;
-				
-				
-				default: 
-					// should not happen
-					throw new System.ArgumentException("valSize must be 32 or 64");
-				
-			}
-			
-			typeAtt.Type = (shift == 0)?TOKEN_TYPE_FULL_PREC:TOKEN_TYPE_LOWER_PREC;
-			posIncrAtt.PositionIncrement = (shift == 0)?1:0;
-			shift += precisionStep;
-			return true;
-		}
-		
-		// @Override
-		public override System.String ToString()
-		{
-			System.Text.StringBuilder sb = new System.Text.StringBuilder("(numeric,valSize=").Append(valSize);
-			sb.Append(",precisionStep=").Append(precisionStep).Append(')');
-			return sb.ToString();
-		}
-		
-		// members
-		private ITermAttribute termAtt;
-		private ITypeAttribute typeAtt;
-		private IPositionIncrementAttribute posIncrAtt;
-		
-		private int shift = 0, valSize = 0; // valSize==0 means not initialized
-		private readonly int precisionStep;
-		
-		private long value_Renamed = 0L;
-	}
+        
+        // @Override
+        public override bool IncrementToken()
+        {
+            if (valSize == 0)
+                throw new System.SystemException("call set???Value() before usage");
+            if (shift >= valSize)
+                return false;
+            
+            ClearAttributes();
+            char[] buffer;
+            switch (valSize)
+            {
+                
+                case 64: 
+                    buffer = termAtt.ResizeTermBuffer(NumericUtils.BUF_SIZE_LONG);
+                    termAtt.SetTermLength(NumericUtils.LongToPrefixCoded(value_Renamed, shift, buffer));
+                    break;
+                
+                
+                case 32: 
+                    buffer = termAtt.ResizeTermBuffer(NumericUtils.BUF_SIZE_INT);
+                    termAtt.SetTermLength(NumericUtils.IntToPrefixCoded((int) value_Renamed, shift, buffer));
+                    break;
+                
+                
+                default: 
+                    // should not happen
+                    throw new System.ArgumentException("valSize must be 32 or 64");
+                
+            }
+            
+            typeAtt.Type = (shift == 0)?TOKEN_TYPE_FULL_PREC:TOKEN_TYPE_LOWER_PREC;
+            posIncrAtt.PositionIncrement = (shift == 0)?1:0;
+            shift += precisionStep;
+            return true;
+        }
+        
+        // @Override
+        public override System.String ToString()
+        {
+            System.Text.StringBuilder sb = new System.Text.StringBuilder("(numeric,valSize=").Append(valSize);
+            sb.Append(",precisionStep=").Append(precisionStep).Append(')');
+            return sb.ToString();
+        }
+        
+        // members
+        private ITermAttribute termAtt;
+        private ITypeAttribute typeAtt;
+        private IPositionIncrementAttribute posIncrAtt;
+        
+        private int shift = 0, valSize = 0; // valSize==0 means not initialized
+        private readonly int precisionStep;
+        
+        private long value_Renamed = 0L;
+    }
 }
\ No newline at end of file
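
The doc comments above describe a fluent Set???Value() pattern. As a minimal
usage sketch (the field name "price", the value, and the choice of precision
step 4 are illustrative assumptions, not part of this commit):

    using Lucene.Net.Analysis;
    using Lucene.Net.Documents;

    // Precision step 4 equals NumericUtils.PRECISION_STEP_DEFAULT cited above.
    var stream = new NumericTokenStream(4).SetLongValue(1234L);
    var doc = new Document();
    // Field(name, TokenStream) is the constructor shown in the <returns> docs.
    doc.Add(new Field("price", stream));

Each IncrementToken() call then emits one prefix-coded term per precision
step: the first with type "fullPrecNumeric", the rest with "lowerPrecNumeric"
at position increment 0, as the switch in IncrementToken() above shows.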

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/PerFieldAnalyzerWrapper.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/PerFieldAnalyzerWrapper.cs b/src/core/Analysis/PerFieldAnalyzerWrapper.cs
index b1c43aa..45e2344 100644
--- a/src/core/Analysis/PerFieldAnalyzerWrapper.cs
+++ b/src/core/Analysis/PerFieldAnalyzerWrapper.cs
@@ -20,104 +20,104 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> This analyzer is used to facilitate scenarios where different
-	/// fields require different analysis techniques.  Use <see cref="AddAnalyzer" />
-	/// to add a non-default analyzer on a field name basis.
-	/// 
-	/// <p/>Example usage:
-	/// 
-	/// <code>
-	/// PerFieldAnalyzerWrapper aWrapper =
-	/// new PerFieldAnalyzerWrapper(new StandardAnalyzer());
-	/// aWrapper.addAnalyzer("firstname", new KeywordAnalyzer());
-	/// aWrapper.addAnalyzer("lastname", new KeywordAnalyzer());
-	/// </code>
-	/// 
-	/// <p/>In this example, StandardAnalyzer will be used for all fields except "firstname"
-	/// and "lastname", for which KeywordAnalyzer will be used.
-	/// 
-	/// <p/>A PerFieldAnalyzerWrapper can be used like any other analyzer, for both indexing
-	/// and query parsing.
-	/// </summary>
-	public class PerFieldAnalyzerWrapper:Analyzer
-	{
-		private readonly Analyzer defaultAnalyzer;
-		private readonly IDictionary<string, Analyzer> analyzerMap = new HashMap<string, Analyzer>();
-		
-		
-		/// <summary> Constructs with default analyzer.
-		/// 
-		/// </summary>
-		/// <param name="defaultAnalyzer">Any fields not specifically
-		/// defined to use a different analyzer will use the one provided here.
-		/// </param>
-		public PerFieldAnalyzerWrapper(Analyzer defaultAnalyzer)
+    
+    /// <summary> This analyzer is used to facilitate scenarios where different
+    /// fields require different analysis techniques.  Use <see cref="AddAnalyzer" />
+    /// to add a non-default analyzer on a field name basis.
+    /// 
+    /// <p/>Example usage:
+    /// 
+    /// <code>
+    /// PerFieldAnalyzerWrapper aWrapper =
+    ///     new PerFieldAnalyzerWrapper(new StandardAnalyzer());
+    /// aWrapper.AddAnalyzer("firstname", new KeywordAnalyzer());
+    /// aWrapper.AddAnalyzer("lastname", new KeywordAnalyzer());
+    /// </code>
+    /// 
+    /// <p/>In this example, StandardAnalyzer will be used for all fields except "firstname"
+    /// and "lastname", for which KeywordAnalyzer will be used.
+    /// 
+    /// <p/>A PerFieldAnalyzerWrapper can be used like any other analyzer, for both indexing
+    /// and query parsing.
+    /// </summary>
+    public class PerFieldAnalyzerWrapper:Analyzer
+    {
+        private readonly Analyzer defaultAnalyzer;
+        private readonly IDictionary<string, Analyzer> analyzerMap = new HashMap<string, Analyzer>();
+        
+        
+        /// <summary> Constructs with default analyzer.
+        /// 
+        /// </summary>
+        /// <param name="defaultAnalyzer">Any fields not specifically
+        /// defined to use a different analyzer will use the one provided here.
+        /// </param>
+        public PerFieldAnalyzerWrapper(Analyzer defaultAnalyzer)
             : this(defaultAnalyzer, null)
-		{
-		}
-		
-		/// <summary> Constructs with default analyzer and a map of analyzers to use for 
-		/// specific fields.
-		/// 
-		/// </summary>
-		/// <param name="defaultAnalyzer">Any fields not specifically
-		/// defined to use a different analyzer will use the one provided here.
-		/// </param>
-		/// <param name="fieldAnalyzers">a Map (String field name to the Analyzer) to be 
-		/// used for those fields 
-		/// </param>
+        {
+        }
+        
+        /// <summary> Constructs with default analyzer and a map of analyzers to use for 
+        /// specific fields.
+        /// 
+        /// </summary>
+        /// <param name="defaultAnalyzer">Any fields not specifically
+        /// defined to use a different analyzer will use the one provided here.
+        /// </param>
+        /// <param name="fieldAnalyzers">a Map (String field name to the Analyzer) to be 
+        /// used for those fields 
+        /// </param>
         public PerFieldAnalyzerWrapper(Analyzer defaultAnalyzer, IEnumerable<KeyValuePair<string, Analyzer>> fieldAnalyzers)
-		{
-			this.defaultAnalyzer = defaultAnalyzer;
-			if (fieldAnalyzers != null)
-			{
-				foreach(var entry in fieldAnalyzers)
-					analyzerMap[entry.Key] = entry.Value;
-			}
+        {
+            this.defaultAnalyzer = defaultAnalyzer;
+            if (fieldAnalyzers != null)
+            {
+                foreach(var entry in fieldAnalyzers)
+                    analyzerMap[entry.Key] = entry.Value;
+            }
             SetOverridesTokenStreamMethod<PerFieldAnalyzerWrapper>();
-		}
-		
-		
-		/// <summary> Defines an analyzer to use for the specified field.
-		/// 
-		/// </summary>
-		/// <param name="fieldName">field name requiring a non-default analyzer
-		/// </param>
-		/// <param name="analyzer">non-default analyzer to use for field
-		/// </param>
-		public virtual void  AddAnalyzer(System.String fieldName, Analyzer analyzer)
-		{
-			analyzerMap[fieldName] = analyzer;
-		}
-		
-		public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
-		{
-			var analyzer = analyzerMap[fieldName] ?? defaultAnalyzer;
+        }
+        
+        
+        /// <summary> Defines an analyzer to use for the specified field.
+        /// 
+        /// </summary>
+        /// <param name="fieldName">field name requiring a non-default analyzer
+        /// </param>
+        /// <param name="analyzer">non-default analyzer to use for field
+        /// </param>
+        public virtual void  AddAnalyzer(System.String fieldName, Analyzer analyzer)
+        {
+            analyzerMap[fieldName] = analyzer;
+        }
+        
+        public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
+        {
+            var analyzer = analyzerMap[fieldName] ?? defaultAnalyzer;
 
-			return analyzer.TokenStream(fieldName, reader);
-		}
-		
-		public override TokenStream ReusableTokenStream(string fieldName, System.IO.TextReader reader)
-		{
-			if (overridesTokenStreamMethod)
-			{
-				// LUCENE-1678: force fallback to tokenStream() if we
-				// have been subclassed and that subclass overrides
-				// tokenStream but not reusableTokenStream
-				return TokenStream(fieldName, reader);
-			}
-			var analyzer = analyzerMap[fieldName] ?? defaultAnalyzer;
+            return analyzer.TokenStream(fieldName, reader);
+        }
+        
+        public override TokenStream ReusableTokenStream(string fieldName, System.IO.TextReader reader)
+        {
+            if (overridesTokenStreamMethod)
+            {
+                // LUCENE-1678: force fallback to tokenStream() if we
+                // have been subclassed and that subclass overrides
+                // tokenStream but not reusableTokenStream
+                return TokenStream(fieldName, reader);
+            }
+            var analyzer = analyzerMap[fieldName] ?? defaultAnalyzer;
 
-			return analyzer.ReusableTokenStream(fieldName, reader);
-		}
-		
-		/// <summary>Return the positionIncrementGap from the analyzer assigned to fieldName </summary>
-		public override int GetPositionIncrementGap(string fieldName)
-		{
-			var analyzer = analyzerMap[fieldName] ?? defaultAnalyzer;
-		    return analyzer.GetPositionIncrementGap(fieldName);
-		}
+            return analyzer.ReusableTokenStream(fieldName, reader);
+        }
+        
+        /// <summary>Return the positionIncrementGap from the analyzer assigned to fieldName </summary>
+        public override int GetPositionIncrementGap(string fieldName)
+        {
+            var analyzer = analyzerMap[fieldName] ?? defaultAnalyzer;
+            return analyzer.GetPositionIncrementGap(fieldName);
+        }
 
         /// <summary> Return the offsetGap from the analyzer assigned to field </summary>
         public override int GetOffsetGap(Documents.IFieldable field)
@@ -125,11 +125,11 @@ namespace Lucene.Net.Analysis
             Analyzer analyzer = analyzerMap[field.Name] ?? defaultAnalyzer;
             return analyzer.GetOffsetGap(field);
         }
-		
-		public override System.String ToString()
-		{
-			// {{Aroush-2.9}} will 'analyzerMap.ToString()' work in the same way as Java's java.util.HashMap.toString()? 
-			return "PerFieldAnalyzerWrapper(" + analyzerMap + ", default=" + defaultAnalyzer + ")";
-		}
-	}
+        
+        public override System.String ToString()
+        {
+            // {{Aroush-2.9}} will 'analyzerMap.ToString()' work in the same way as Java's java.util.HashMap.toString()? 
+            return "PerFieldAnalyzerWrapper(" + analyzerMap + ", default=" + defaultAnalyzer + ")";
+        }
+    }
 }
\ No newline at end of file
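
Translating the class doc's example into the C# API (the version constant and
the field names are assumptions; only AddAnalyzer and the constructors appear
in the code above):

    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Standard;

    // KeywordAnalyzer for exact-match name fields, StandardAnalyzer elsewhere.
    var wrapper = new PerFieldAnalyzerWrapper(
        new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_30));
    wrapper.AddAnalyzer("firstname", new KeywordAnalyzer());
    wrapper.AddAnalyzer("lastname", new KeywordAnalyzer());
    // The wrapper is itself an Analyzer, so it can be handed to an
    // IndexWriter for indexing or to a QueryParser for query parsing.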

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/PorterStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/PorterStemFilter.cs b/src/core/Analysis/PorterStemFilter.cs
index b7f1dbf..b4f14dc 100644
--- a/src/core/Analysis/PorterStemFilter.cs
+++ b/src/core/Analysis/PorterStemFilter.cs
@@ -19,44 +19,44 @@ using Lucene.Net.Analysis.Tokenattributes;
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary>Transforms the token stream as per the Porter stemming algorithm.
-	/// Note: the input to the stemming filter must already be in lower case,
-	/// so you will need to use LowerCaseFilter or LowerCaseTokenizer farther
-	/// down the Tokenizer chain in order for this to work properly!
-	/// <p/>
-	/// To use this filter with other analyzers, you'll want to write an
-	/// Analyzer class that sets up the TokenStream chain as you want it.
-	/// To use this with LowerCaseTokenizer, for example, you'd write an
-	/// analyzer like this:
-	/// <p/>
-	/// <code>
-	/// class MyAnalyzer extends Analyzer {
-	///     public final TokenStream tokenStream(String fieldName, Reader reader) {
-	///          return new PorterStemFilter(new LowerCaseTokenizer(reader));
-	///     }
-	/// }
-	/// </code>
-	/// </summary>
-	public sealed class PorterStemFilter:TokenFilter
-	{
-		private readonly PorterStemmer stemmer;
-		private readonly ITermAttribute termAtt;
-		
-		public PorterStemFilter(TokenStream in_Renamed):base(in_Renamed)
-		{
-			stemmer = new PorterStemmer();
+    
+    /// <summary>Transforms the token stream as per the Porter stemming algorithm.
+    /// Note: the input to the stemming filter must already be in lower case,
+    /// so you will need to use LowerCaseFilter or LowerCaseTokenizer farther
+    /// down the Tokenizer chain in order for this to work properly!
+    /// <p/>
+    /// To use this filter with other analyzers, you'll want to write an
+    /// Analyzer class that sets up the TokenStream chain as you want it.
+    /// To use this with LowerCaseTokenizer, for example, you'd write an
+    /// analyzer like this:
+    /// <p/>
+    /// <code>
+    /// class MyAnalyzer : Analyzer {
+    ///     public override TokenStream TokenStream(string fieldName, System.IO.TextReader reader) {
+    ///          return new PorterStemFilter(new LowerCaseTokenizer(reader));
+    ///     }
+    /// }
+    /// </code>
+    /// </summary>
+    public sealed class PorterStemFilter:TokenFilter
+    {
+        private readonly PorterStemmer stemmer;
+        private readonly ITermAttribute termAtt;
+        
+        public PorterStemFilter(TokenStream in_Renamed):base(in_Renamed)
+        {
+            stemmer = new PorterStemmer();
             termAtt = AddAttribute<ITermAttribute>();
-		}
-		
-		public override bool IncrementToken()
-		{
-			if (!input.IncrementToken())
-				return false;
-			
-			if (stemmer.Stem(termAtt.TermBuffer(), 0, termAtt.TermLength()))
-				termAtt.SetTermBuffer(stemmer.ResultBuffer, 0, stemmer.ResultLength);
-			return true;
-		}
-	}
+        }
+        
+        public override bool IncrementToken()
+        {
+            if (!input.IncrementToken())
+                return false;
+            
+            if (stemmer.Stem(termAtt.TermBuffer(), 0, termAtt.TermLength()))
+                termAtt.SetTermBuffer(stemmer.ResultBuffer, 0, stemmer.ResultLength);
+            return true;
+        }
+    }
 }
\ No newline at end of file
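
The class doc's snippet maps directly onto the C# override signature used
elsewhere in this commit (compare SimpleAnalyzer below); a sketch of the
suggested analyzer, with an assumed name:

    using Lucene.Net.Analysis;

    // Lower-cases input first, as PorterStemFilter requires, then stems.
    public sealed class PorterAnalyzer : Analyzer
    {
        public override TokenStream TokenStream(string fieldName, System.IO.TextReader reader)
        {
            return new PorterStemFilter(new LowerCaseTokenizer(reader));
        }
    }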

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/PorterStemmer.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/PorterStemmer.cs b/src/core/Analysis/PorterStemmer.cs
index f47c5a7..bc4cf75 100644
--- a/src/core/Analysis/PorterStemmer.cs
+++ b/src/core/Analysis/PorterStemmer.cs
@@ -42,705 +42,705 @@ optimize for fewer object creations.  ]
 using System;
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary> 
-	/// Stemmer, implementing the Porter Stemming Algorithm
-	/// 
-	/// The Stemmer class transforms a word into its root form.  The input
-	/// word can be provided a character at time (by calling add()), or at once
-	/// by calling one of the various stem(something) methods.
-	/// </summary>
-	
-	class PorterStemmer
-	{
-		private char[] b;
-		private int i, j, k, k0;
-		private bool dirty = false;
-		private const int INC = 50; /* unit of size whereby b is increased */
-		private const int EXTRA = 1;
-		
-		public PorterStemmer()
-		{
-			b = new char[INC];
-			i = 0;
-		}
-		
-		/// <summary> reset() resets the stemmer so it can stem another word.  If you invoke
-		/// the stemmer by calling add(char) and then stem(), you must call reset()
-		/// before starting another word.
-		/// </summary>
-		public virtual void  Reset()
-		{
-			i = 0; dirty = false;
-		}
-		
-		/// <summary> Add a character to the word being stemmed.  When you are finished
-		/// adding characters, you can call stem(void) to process the word.
-		/// </summary>
-		public virtual void  Add(char ch)
-		{
-			if (b.Length <= i + EXTRA)
-			{
-				var new_b = new char[b.Length + INC];
-				Array.Copy(b, 0, new_b, 0, b.Length);
-				b = new_b;
-			}
-			b[i++] = ch;
-		}
-		
-		/// <summary> After a word has been stemmed, it can be retrieved by toString(),
-		/// or a reference to the internal buffer can be retrieved by getResultBuffer
-		/// and getResultLength (which is generally more efficient.)
-		/// </summary>
-		public override System.String ToString()
-		{
-			return new System.String(b, 0, i);
-		}
+    
+    /// <summary> 
+    /// Stemmer, implementing the Porter Stemming Algorithm
+    /// 
+    /// The Stemmer class transforms a word into its root form.  The input
+    /// word can be provided a character at a time (by calling Add()), or all at once
+    /// by calling one of the various Stem(...) methods.
+    /// </summary>
+    
+    class PorterStemmer
+    {
+        private char[] b;
+        private int i, j, k, k0;
+        private bool dirty = false;
+        private const int INC = 50; /* unit of size whereby b is increased */
+        private const int EXTRA = 1;
+        
+        public PorterStemmer()
+        {
+            b = new char[INC];
+            i = 0;
+        }
+        
+        /// <summary> Reset() resets the stemmer so it can stem another word.  If you invoke
+        /// the stemmer by calling Add(char) and then Stem(), you must call Reset()
+        /// before starting another word.
+        /// </summary>
+        public virtual void  Reset()
+        {
+            i = 0; dirty = false;
+        }
+        
+        /// <summary> Add a character to the word being stemmed.  When you are finished
+        /// adding characters, you can call Stem() to process the word.
+        /// </summary>
+        public virtual void  Add(char ch)
+        {
+            if (b.Length <= i + EXTRA)
+            {
+                var new_b = new char[b.Length + INC];
+                Array.Copy(b, 0, new_b, 0, b.Length);
+                b = new_b;
+            }
+            b[i++] = ch;
+        }
+        
+        /// <summary> After a word has been stemmed, it can be retrieved by ToString(),
+        /// or a reference to the internal buffer can be retrieved via the ResultBuffer
+        /// and ResultLength properties (which is generally more efficient).
+        /// </summary>
+        public override System.String ToString()
+        {
+            return new System.String(b, 0, i);
+        }
 
-	    /// <summary> Returns the length of the word resulting from the stemming process.</summary>
-	    public virtual int ResultLength
-	    {
-	        get { return i; }
-	    }
+        /// <summary> Returns the length of the word resulting from the stemming process.</summary>
+        public virtual int ResultLength
+        {
+            get { return i; }
+        }
 
-	    /// <summary> Returns a reference to a character buffer containing the results of
-	    /// the stemming process.  You also need to consult getResultLength()
-	    /// to determine the length of the result.
-	    /// </summary>
-	    public virtual char[] ResultBuffer
-	    {
-	        get { return b; }
-	    }
+        /// <summary> Returns a reference to a character buffer containing the results of
+        /// the stemming process.  You also need to consult ResultLength
+        /// to determine the length of the result.
+        /// </summary>
+        public virtual char[] ResultBuffer
+        {
+            get { return b; }
+        }
 
-	    /* cons(i) is true <=> b[i] is a consonant. */
-		
-		private bool Cons(int i)
-		{
-			switch (b[i])
-			{
-				
-				case 'a': 
-				case 'e': 
-				case 'i': 
-				case 'o': 
-				case 'u': 
-					return false;
-				
-				case 'y': 
-					return (i == k0)?true:!Cons(i - 1);
-				
-				default: 
-					return true;
-				
-			}
-		}
-		
-		/* m() measures the number of consonant sequences between k0 and j. if c is
-		a consonant sequence and v a vowel sequence, and <..> indicates arbitrary
-		presence,
-		
-		<c><v>       gives 0
-		<c>vc<v>     gives 1
-		<c>vcvc<v>   gives 2
-		<c>vcvcvc<v> gives 3
-		....
-		*/
-		
-		private int M()
-		{
-			int n = 0;
-			int i = k0;
-			while (true)
-			{
-				if (i > j)
-					return n;
-				if (!Cons(i))
-					break;
-				i++;
-			}
-			i++;
-			while (true)
-			{
-				while (true)
-				{
-					if (i > j)
-						return n;
-					if (Cons(i))
-						break;
-					i++;
-				}
-				i++;
-				n++;
-				while (true)
-				{
-					if (i > j)
-						return n;
-					if (!Cons(i))
-						break;
-					i++;
-				}
-				i++;
-			}
-		}
-		
-		/* vowelinstem() is true <=> k0,...j contains a vowel */
-		
-		private bool Vowelinstem()
-		{
-			int i;
-			for (i = k0; i <= j; i++)
-				if (!Cons(i))
-					return true;
-			return false;
-		}
-		
-		/* doublec(j) is true <=> j,(j-1) contain a double consonant. */
-		
-		private bool Doublec(int j)
-		{
-			if (j < k0 + 1)
-				return false;
-			if (b[j] != b[j - 1])
-				return false;
-			return Cons(j);
-		}
-		
-		/* cvc(i) is true <=> i-2,i-1,i has the form consonant - vowel - consonant
-		and also if the second c is not w,x or y. this is used when trying to
-		restore an e at the end of a short word. e.g.
-		
-		cav(e), lov(e), hop(e), crim(e), but
-		snow, box, tray.
-		
-		*/
-		
-		private bool Cvc(int i)
-		{
-			if (i < k0 + 2 || !Cons(i) || Cons(i - 1) || !Cons(i - 2))
-				return false;
-			else
-			{
-				int ch = b[i];
-				if (ch == 'w' || ch == 'x' || ch == 'y')
-					return false;
-			}
-			return true;
-		}
-		
-		private bool Ends(System.String s)
-		{
-			int l = s.Length;
-			int o = k - l + 1;
-			if (o < k0)
-				return false;
-			for (int i = 0; i < l; i++)
-				if (b[o + i] != s[i])
-					return false;
-			j = k - l;
-			return true;
-		}
-		
-		/* setto(s) sets (j+1),...k to the characters in the string s, readjusting
-		k. */
-		
-		internal virtual void  Setto(System.String s)
-		{
-			int l = s.Length;
-			int o = j + 1;
-			for (int i = 0; i < l; i++)
-				b[o + i] = s[i];
-			k = j + l;
-			dirty = true;
-		}
-		
-		/* r(s) is used further down. */
-		
-		internal virtual void  R(System.String s)
-		{
-			if (M() > 0)
-				Setto(s);
-		}
-		
-		/* step1() gets rid of plurals and -ed or -ing. e.g.
-		
-		caresses  ->  caress
-		ponies    ->  poni
-		ties      ->  ti
-		caress    ->  caress
-		cats      ->  cat
-		
-		feed      ->  feed
-		agreed    ->  agree
-		disabled  ->  disable
-		
-		matting   ->  mat
-		mating    ->  mate
-		meeting   ->  meet
-		milling   ->  mill
-		messing   ->  mess
-		
-		meetings  ->  meet
-		
-		*/
-		
-		private void  Step1()
-		{
-			if (b[k] == 's')
-			{
-				if (Ends("sses"))
-					k -= 2;
-				else if (Ends("ies"))
-					Setto("i");
-				else if (b[k - 1] != 's')
-					k--;
-			}
-			if (Ends("eed"))
-			{
-				if (M() > 0)
-					k--;
-			}
-			else if ((Ends("ed") || Ends("ing")) && Vowelinstem())
-			{
-				k = j;
-				if (Ends("at"))
-					Setto("ate");
-				else if (Ends("bl"))
-					Setto("ble");
-				else if (Ends("iz"))
-					Setto("ize");
-				else if (Doublec(k))
-				{
-					int ch = b[k--];
-					if (ch == 'l' || ch == 's' || ch == 'z')
-						k++;
-				}
-				else if (M() == 1 && Cvc(k))
-					Setto("e");
-			}
-		}
-		
-		/* step2() turns terminal y to i when there is another vowel in the stem. */
-		
-		private void  Step2()
-		{
-			if (Ends("y") && Vowelinstem())
-			{
-				b[k] = 'i';
-				dirty = true;
-			}
-		}
-		
-		/* step3() maps double suffices to single ones. so -ization ( = -ize plus
-		-ation) maps to -ize etc. note that the string before the suffix must give
-		m() > 0. */
-		
-		private void  Step3()
-		{
-			if (k == k0)
-				return ; /* For Bug 1 */
-			switch (b[k - 1])
-			{
-				
-				case 'a': 
-					if (Ends("ational"))
-					{
-						R("ate"); break;
-					}
-					if (Ends("tional"))
-					{
-						R("tion"); break;
-					}
-					break;
-				
-				case 'c': 
-					if (Ends("enci"))
-					{
-						R("ence"); break;
-					}
-					if (Ends("anci"))
-					{
-						R("ance"); break;
-					}
-					break;
-				
-				case 'e': 
-					if (Ends("izer"))
-					{
-						R("ize"); break;
-					}
-					break;
-				
-				case 'l': 
-					if (Ends("bli"))
-					{
-						R("ble"); break;
-					}
-					if (Ends("alli"))
-					{
-						R("al"); break;
-					}
-					if (Ends("entli"))
-					{
-						R("ent"); break;
-					}
-					if (Ends("eli"))
-					{
-						R("e"); break;
-					}
-					if (Ends("ousli"))
-					{
-						R("ous"); break;
-					}
-					break;
-				
-				case 'o': 
-					if (Ends("ization"))
-					{
-						R("ize"); break;
-					}
-					if (Ends("ation"))
-					{
-						R("ate"); break;
-					}
-					if (Ends("ator"))
-					{
-						R("ate"); break;
-					}
-					break;
-				
-				case 's': 
-					if (Ends("alism"))
-					{
-						R("al"); break;
-					}
-					if (Ends("iveness"))
-					{
-						R("ive"); break;
-					}
-					if (Ends("fulness"))
-					{
-						R("ful"); break;
-					}
-					if (Ends("ousness"))
-					{
-						R("ous"); break;
-					}
-					break;
-				
-				case 't': 
-					if (Ends("aliti"))
-					{
-						R("al"); break;
-					}
-					if (Ends("iviti"))
-					{
-						R("ive"); break;
-					}
-					if (Ends("biliti"))
-					{
-						R("ble"); break;
-					}
-					break;
-				
-				case 'g': 
-					if (Ends("logi"))
-					{
-						R("log"); break;
-					}
-					break;
-				}
-		}
-		
-		/* step4() deals with -ic-, -full, -ness etc. similar strategy to step3. */
-		
-		private void  Step4()
-		{
-			switch (b[k])
-			{
-				
-				case 'e': 
-					if (Ends("icate"))
-					{
-						R("ic"); break;
-					}
-					if (Ends("ative"))
-					{
-						R(""); break;
-					}
-					if (Ends("alize"))
-					{
-						R("al"); break;
-					}
-					break;
-				
-				case 'i': 
-					if (Ends("iciti"))
-					{
-						R("ic"); break;
-					}
-					break;
-				
-				case 'l': 
-					if (Ends("ical"))
-					{
-						R("ic"); break;
-					}
-					if (Ends("ful"))
-					{
-						R(""); break;
-					}
-					break;
-				
-				case 's': 
-					if (Ends("ness"))
-					{
-						R(""); break;
-					}
-					break;
-				}
-		}
-		
-		/* step5() takes off -ant, -ence etc., in context <c>vcvc<v>. */
-		
-		private void  Step5()
-		{
-			if (k == k0)
-				return ; /* for Bug 1 */
-			switch (b[k - 1])
-			{
-				
-				case 'a': 
-					if (Ends("al"))
-						break;
-					return ;
-				
-				case 'c': 
-					if (Ends("ance"))
-						break;
-					if (Ends("ence"))
-						break;
-					return ;
-				
-				case 'e': 
-					if (Ends("er"))
-						break; return ;
-				
-				case 'i': 
-					if (Ends("ic"))
-						break; return ;
-				
-				case 'l': 
-					if (Ends("able"))
-						break;
-					if (Ends("ible"))
-						break; return ;
-				
-				case 'n': 
-					if (Ends("ant"))
-						break;
-					if (Ends("ement"))
-						break;
-					if (Ends("ment"))
-						break;
-					/* element etc. not stripped before the m */
-					if (Ends("ent"))
-						break;
-					return ;
-				
-				case 'o': 
-					if (Ends("ion") && j >= 0 && (b[j] == 's' || b[j] == 't'))
-						break;
-					/* j >= 0 fixes Bug 2 */
-					if (Ends("ou"))
-						break;
-					return ;
-					/* takes care of -ous */
-				
-				case 's': 
-					if (Ends("ism"))
-						break;
-					return ;
-				
-				case 't': 
-					if (Ends("ate"))
-						break;
-					if (Ends("iti"))
-						break;
-					return ;
-				
-				case 'u': 
-					if (Ends("ous"))
-						break;
-					return ;
-				
-				case 'v': 
-					if (Ends("ive"))
-						break;
-					return ;
-				
-				case 'z': 
-					if (Ends("ize"))
-						break;
-					return ;
-				
-				default: 
-					return ;
-				
-			}
-			if (M() > 1)
-				k = j;
-		}
-		
-		/* step6() removes a final -e if m() > 1. */
-		
-		private void  Step6()
-		{
-			j = k;
-			if (b[k] == 'e')
-			{
-				int a = M();
-				if (a > 1 || a == 1 && !Cvc(k - 1))
-					k--;
-			}
-			if (b[k] == 'l' && Doublec(k) && M() > 1)
-				k--;
-		}
-		
-		
-		/// <summary> Stem a word provided as a String.  Returns the result as a String.</summary>
-		public virtual System.String Stem(System.String s)
-		{
-			if (Stem(s.ToCharArray(), s.Length))
-			{
-				return ToString();
-			}
-			else
-				return s;
-		}
-		
-		/// <summary>Stem a word contained in a char[].  Returns true if the stemming process
-		/// resulted in a word different from the input.  You can retrieve the
-		/// result with getResultLength()/getResultBuffer() or toString().
-		/// </summary>
-		public virtual bool Stem(char[] word)
-		{
-			return Stem(word, word.Length);
-		}
-		
-		/// <summary>Stem a word contained in a portion of a char[] array.  Returns
-		/// true if the stemming process resulted in a word different from
-		/// the input.  You can retrieve the result with
-		/// getResultLength()/getResultBuffer() or toString().
-		/// </summary>
-		public virtual bool Stem(char[] wordBuffer, int offset, int wordLen)
-		{
-			Reset();
-			if (b.Length < wordLen)
-			{
-				var new_b = new char[wordLen + EXTRA];
-				b = new_b;
-			}
-			Array.Copy(wordBuffer, offset, b, 0, wordLen);
-			i = wordLen;
-			return Stem(0);
-		}
-		
-		/// <summary>Stem a word contained in a leading portion of a char[] array.
-		/// Returns true if the stemming process resulted in a word different
-		/// from the input.  You can retrieve the result with
-		/// getResultLength()/getResultBuffer() or toString().
-		/// </summary>
-		public virtual bool Stem(char[] word, int wordLen)
-		{
-			return Stem(word, 0, wordLen);
-		}
-		
-		/// <summary>Stem the word placed into the Stemmer buffer through calls to add().
-		/// Returns true if the stemming process resulted in a word different
-		/// from the input.  You can retrieve the result with
-		/// getResultLength()/getResultBuffer() or toString().
-		/// </summary>
-		public virtual bool Stem()
-		{
-			return Stem(0);
-		}
-		
-		public virtual bool Stem(int i0)
-		{
-			k = i - 1;
-			k0 = i0;
-			if (k > k0 + 1)
-			{
-				Step1(); Step2(); Step3(); Step4(); Step5(); Step6();
-			}
-			// Also, a word is considered dirty if we lopped off letters
-			// Thanks to Ifigenia Vairelles for pointing this out.
-			if (i != k + 1)
-				dirty = true;
-			i = k + 1;
-			return dirty;
-		}
-		
-		/// <summary>Test program for demonstrating the Stemmer.  It reads a file and
-		/// stems each word, writing the result to standard out.
-		/// Usage: Stemmer file-name
-		/// </summary>
-		[STAThread]
-		public static void  Main(System.String[] args)
-		{
-			var s = new PorterStemmer();
-			
-			for (int i = 0; i < args.Length; i++)
-			{
-				try
-				{
-					System.IO.Stream in_Renamed = new System.IO.FileStream(args[i], System.IO.FileMode.Open, System.IO.FileAccess.Read);
-					var buffer = new byte[1024];
+        /* cons(i) is true <=> b[i] is a consonant. */
+        
+        private bool Cons(int i)
+        {
+            switch (b[i])
+            {
+                
+                case 'a': 
+                case 'e': 
+                case 'i': 
+                case 'o': 
+                case 'u': 
+                    return false;
+                
+                case 'y': 
+                    return (i == k0)?true:!Cons(i - 1);
+                
+                default: 
+                    return true;
+                
+            }
+        }
+        
+        /* m() measures the number of consonant sequences between k0 and j. if c is
+        a consonant sequence and v a vowel sequence, and <..> indicates arbitrary
+        presence,
+        
+        <c><v>       gives 0
+        <c>vc<v>     gives 1
+        <c>vcvc<v>   gives 2
+        <c>vcvcvc<v> gives 3
+        ....
+        */
+        
+        private int M()
+        {
+            int n = 0;
+            int i = k0;
+            while (true)
+            {
+                if (i > j)
+                    return n;
+                if (!Cons(i))
+                    break;
+                i++;
+            }
+            i++;
+            while (true)
+            {
+                while (true)
+                {
+                    if (i > j)
+                        return n;
+                    if (Cons(i))
+                        break;
+                    i++;
+                }
+                i++;
+                n++;
+                while (true)
+                {
+                    if (i > j)
+                        return n;
+                    if (!Cons(i))
+                        break;
+                    i++;
+                }
+                i++;
+            }
+        }
+        
+        /* vowelinstem() is true <=> k0,...j contains a vowel */
+        
+        private bool Vowelinstem()
+        {
+            int i;
+            for (i = k0; i <= j; i++)
+                if (!Cons(i))
+                    return true;
+            return false;
+        }
+        
+        /* doublec(j) is true <=> j,(j-1) contain a double consonant. */
+        
+        private bool Doublec(int j)
+        {
+            if (j < k0 + 1)
+                return false;
+            if (b[j] != b[j - 1])
+                return false;
+            return Cons(j);
+        }
+        
+        /* cvc(i) is true <=> i-2,i-1,i has the form consonant - vowel - consonant
+        and also if the second c is not w,x or y. this is used when trying to
+        restore an e at the end of a short word. e.g.
+        
+        cav(e), lov(e), hop(e), crim(e), but
+        snow, box, tray.
+        
+        */
+        
+        private bool Cvc(int i)
+        {
+            if (i < k0 + 2 || !Cons(i) || Cons(i - 1) || !Cons(i - 2))
+                return false;
+            else
+            {
+                int ch = b[i];
+                if (ch == 'w' || ch == 'x' || ch == 'y')
+                    return false;
+            }
+            return true;
+        }
+        
+        private bool Ends(System.String s)
+        {
+            int l = s.Length;
+            int o = k - l + 1;
+            if (o < k0)
+                return false;
+            for (int i = 0; i < l; i++)
+                if (b[o + i] != s[i])
+                    return false;
+            j = k - l;
+            return true;
+        }
+        
+        /* setto(s) sets (j+1),...k to the characters in the string s, readjusting
+        k. */
+        
+        internal virtual void  Setto(System.String s)
+        {
+            int l = s.Length;
+            int o = j + 1;
+            for (int i = 0; i < l; i++)
+                b[o + i] = s[i];
+            k = j + l;
+            dirty = true;
+        }
+        
+        /* r(s) is used further down. */
+        
+        internal virtual void  R(System.String s)
+        {
+            if (M() > 0)
+                Setto(s);
+        }
+        
+        /* step1() gets rid of plurals and -ed or -ing. e.g.
+        
+        caresses  ->  caress
+        ponies    ->  poni
+        ties      ->  ti
+        caress    ->  caress
+        cats      ->  cat
+        
+        feed      ->  feed
+        agreed    ->  agree
+        disabled  ->  disable
+        
+        matting   ->  mat
+        mating    ->  mate
+        meeting   ->  meet
+        milling   ->  mill
+        messing   ->  mess
+        
+        meetings  ->  meet
+        
+        */
+        
+        private void  Step1()
+        {
+            if (b[k] == 's')
+            {
+                if (Ends("sses"))
+                    k -= 2;
+                else if (Ends("ies"))
+                    Setto("i");
+                else if (b[k - 1] != 's')
+                    k--;
+            }
+            if (Ends("eed"))
+            {
+                if (M() > 0)
+                    k--;
+            }
+            else if ((Ends("ed") || Ends("ing")) && Vowelinstem())
+            {
+                k = j;
+                if (Ends("at"))
+                    Setto("ate");
+                else if (Ends("bl"))
+                    Setto("ble");
+                else if (Ends("iz"))
+                    Setto("ize");
+                else if (Doublec(k))
+                {
+                    int ch = b[k--];
+                    if (ch == 'l' || ch == 's' || ch == 'z')
+                        k++;
+                }
+                else if (M() == 1 && Cvc(k))
+                    Setto("e");
+            }
+        }
+        
+        /* step2() turns terminal y to i when there is another vowel in the stem. */
+        
+        private void  Step2()
+        {
+            if (Ends("y") && Vowelinstem())
+            {
+                b[k] = 'i';
+                dirty = true;
+            }
+        }
+        
+        /* step3() maps double suffixes to single ones. so -ization ( = -ize plus
+        -ation) maps to -ize etc. note that the string before the suffix must give
+        m() > 0. */
+        
+        private void  Step3()
+        {
+            if (k == k0)
+                return ; /* For Bug 1 */
+            switch (b[k - 1])
+            {
+                
+                case 'a': 
+                    if (Ends("ational"))
+                    {
+                        R("ate"); break;
+                    }
+                    if (Ends("tional"))
+                    {
+                        R("tion"); break;
+                    }
+                    break;
+                
+                case 'c': 
+                    if (Ends("enci"))
+                    {
+                        R("ence"); break;
+                    }
+                    if (Ends("anci"))
+                    {
+                        R("ance"); break;
+                    }
+                    break;
+                
+                case 'e': 
+                    if (Ends("izer"))
+                    {
+                        R("ize"); break;
+                    }
+                    break;
+                
+                case 'l': 
+                    if (Ends("bli"))
+                    {
+                        R("ble"); break;
+                    }
+                    if (Ends("alli"))
+                    {
+                        R("al"); break;
+                    }
+                    if (Ends("entli"))
+                    {
+                        R("ent"); break;
+                    }
+                    if (Ends("eli"))
+                    {
+                        R("e"); break;
+                    }
+                    if (Ends("ousli"))
+                    {
+                        R("ous"); break;
+                    }
+                    break;
+                
+                case 'o': 
+                    if (Ends("ization"))
+                    {
+                        R("ize"); break;
+                    }
+                    if (Ends("ation"))
+                    {
+                        R("ate"); break;
+                    }
+                    if (Ends("ator"))
+                    {
+                        R("ate"); break;
+                    }
+                    break;
+                
+                case 's': 
+                    if (Ends("alism"))
+                    {
+                        R("al"); break;
+                    }
+                    if (Ends("iveness"))
+                    {
+                        R("ive"); break;
+                    }
+                    if (Ends("fulness"))
+                    {
+                        R("ful"); break;
+                    }
+                    if (Ends("ousness"))
+                    {
+                        R("ous"); break;
+                    }
+                    break;
+                
+                case 't': 
+                    if (Ends("aliti"))
+                    {
+                        R("al"); break;
+                    }
+                    if (Ends("iviti"))
+                    {
+                        R("ive"); break;
+                    }
+                    if (Ends("biliti"))
+                    {
+                        R("ble"); break;
+                    }
+                    break;
+                
+                case 'g': 
+                    if (Ends("logi"))
+                    {
+                        R("log"); break;
+                    }
+                    break;
+                }
+        }
+        
+        /* step4() deals with -ic-, -full, -ness etc. similar strategy to step3. */
+        
+        private void  Step4()
+        {
+            switch (b[k])
+            {
+                
+                case 'e': 
+                    if (Ends("icate"))
+                    {
+                        R("ic"); break;
+                    }
+                    if (Ends("ative"))
+                    {
+                        R(""); break;
+                    }
+                    if (Ends("alize"))
+                    {
+                        R("al"); break;
+                    }
+                    break;
+                
+                case 'i': 
+                    if (Ends("iciti"))
+                    {
+                        R("ic"); break;
+                    }
+                    break;
+                
+                case 'l': 
+                    if (Ends("ical"))
+                    {
+                        R("ic"); break;
+                    }
+                    if (Ends("ful"))
+                    {
+                        R(""); break;
+                    }
+                    break;
+                
+                case 's': 
+                    if (Ends("ness"))
+                    {
+                        R(""); break;
+                    }
+                    break;
+                }
+        }
+        
+        /* step5() takes off -ant, -ence etc., in context <c>vcvc<v>. */
+        
+        private void  Step5()
+        {
+            if (k == k0)
+                return ; /* for Bug 1 */
+            switch (b[k - 1])
+            {
+                
+                case 'a': 
+                    if (Ends("al"))
+                        break;
+                    return ;
+                
+                case 'c': 
+                    if (Ends("ance"))
+                        break;
+                    if (Ends("ence"))
+                        break;
+                    return ;
+                
+                case 'e': 
+                    if (Ends("er"))
+                        break;
+                    return ;
+                
+                case 'i': 
+                    if (Ends("ic"))
+                        break;
+                    return ;
+                
+                case 'l': 
+                    if (Ends("able"))
+                        break;
+                    if (Ends("ible"))
+                        break;
+                    return ;
+                
+                case 'n': 
+                    if (Ends("ant"))
+                        break;
+                    if (Ends("ement"))
+                        break;
+                    if (Ends("ment"))
+                        break;
+                    /* element etc. not stripped before the m */
+                    if (Ends("ent"))
+                        break;
+                    return ;
+                
+                case 'o': 
+                    if (Ends("ion") && j >= 0 && (b[j] == 's' || b[j] == 't'))
+                        break;
+                    /* j >= 0 fixes Bug 2 */
+                    if (Ends("ou"))
+                        break;
+                    return ;
+                    /* takes care of -ous */
+                
+                case 's': 
+                    if (Ends("ism"))
+                        break;
+                    return ;
+                
+                case 't': 
+                    if (Ends("ate"))
+                        break;
+                    if (Ends("iti"))
+                        break;
+                    return ;
+                
+                case 'u': 
+                    if (Ends("ous"))
+                        break;
+                    return ;
+                
+                case 'v': 
+                    if (Ends("ive"))
+                        break;
+                    return ;
+                
+                case 'z': 
+                    if (Ends("ize"))
+                        break;
+                    return ;
+                
+                default: 
+                    return ;
+                
+            }
+            if (M() > 1)
+                k = j;
+        }
+        
+        /* step6() removes a final -e if m() > 1. */
+        
+        private void  Step6()
+        {
+            j = k;
+            if (b[k] == 'e')
+            {
+                int a = M();
+                if (a > 1 || a == 1 && !Cvc(k - 1))
+                    k--;
+            }
+            if (b[k] == 'l' && Doublec(k) && M() > 1)
+                k--;
+        }
+        
+        
+        /// <summary> Stem a word provided as a String.  Returns the result as a String.</summary>
+        public virtual System.String Stem(System.String s)
+        {
+            if (Stem(s.ToCharArray(), s.Length))
+            {
+                return ToString();
+            }
+            else
+                return s;
+        }
+        
+        /// <summary>Stem a word contained in a char[].  Returns true if the stemming process
+        /// resulted in a word different from the input.  You can retrieve the
+        /// result with ResultLength/ResultBuffer or ToString().
+        /// </summary>
+        public virtual bool Stem(char[] word)
+        {
+            return Stem(word, word.Length);
+        }
+        
+        /// <summary>Stem a word contained in a portion of a char[] array.  Returns
+        /// true if the stemming process resulted in a word different from
+        /// the input.  You can retrieve the result with
+        /// ResultLength/ResultBuffer or ToString().
+        /// </summary>
+        public virtual bool Stem(char[] wordBuffer, int offset, int wordLen)
+        {
+            Reset();
+            if (b.Length < wordLen)
+            {
+                var new_b = new char[wordLen + EXTRA];
+                b = new_b;
+            }
+            Array.Copy(wordBuffer, offset, b, 0, wordLen);
+            i = wordLen;
+            return Stem(0);
+        }
+        
+        /// <summary>Stem a word contained in a leading portion of a char[] array.
+        /// Returns true if the stemming process resulted in a word different
+        /// from the input.  You can retrieve the result with
+        /// ResultLength/ResultBuffer or ToString().
+        /// </summary>
+        public virtual bool Stem(char[] word, int wordLen)
+        {
+            return Stem(word, 0, wordLen);
+        }
+        
+        /// <summary>Stem the word placed into the Stemmer buffer through calls to Add().
+        /// Returns true if the stemming process resulted in a word different
+        /// from the input.  You can retrieve the result with
+        /// ResultLength/ResultBuffer or ToString().
+        /// </summary>
+        public virtual bool Stem()
+        {
+            return Stem(0);
+        }
+        
+        public virtual bool Stem(int i0)
+        {
+            k = i - 1;
+            k0 = i0;
+            if (k > k0 + 1)
+            {
+                Step1(); Step2(); Step3(); Step4(); Step5(); Step6();
+            }
+            // Also, a word is considered dirty if we lopped off letters
+            // Thanks to Ifigenia Vairelles for pointing this out.
+            if (i != k + 1)
+                dirty = true;
+            i = k + 1;
+            return dirty;
+        }
+        
+        /// <summary>Test program for demonstrating the Stemmer.  It reads a file and
+        /// stems each word, writing the result to standard out.
+        /// Usage: Stemmer file-name
+        /// </summary>
+        [STAThread]
+        public static void  Main(System.String[] args)
+        {
+            var s = new PorterStemmer();
+            
+            for (int i = 0; i < args.Length; i++)
+            {
+                try
+                {
+                    System.IO.Stream in_Renamed = new System.IO.FileStream(args[i], System.IO.FileMode.Open, System.IO.FileAccess.Read);
+                    var buffer = new byte[1024];
 
-					int bufferLen = in_Renamed.Read(buffer, 0, buffer.Length);
-					int offset = 0;
-					s.Reset();
-					
-					while (true)
-					{
-						int ch;
-						if (offset < bufferLen)
-							ch = buffer[offset++];
-						else
-						{
-							bufferLen = in_Renamed.Read(buffer, 0, buffer.Length);
-							offset = 0;
-							if (bufferLen < 0)
-								ch = - 1;
-							else
-								ch = buffer[offset++];
-						}
-						
-						if (Char.IsLetter((char) ch))
-						{
-							s.Add(Char.ToLower((char) ch));
-						}
-						else
-						{
-							s.Stem();
-							Console.Out.Write(s.ToString());
-							s.Reset();
-							if (ch < 0)
-								break;
-							else
-							{
-								System.Console.Out.Write((char) ch);
-							}
-						}
-					}
-					
-					in_Renamed.Close();
-				}
-				catch (System.IO.IOException)
-				{
-					Console.Out.WriteLine("error reading " + args[i]);
-				}
-			}
-		}
-	}
+                    int bufferLen = in_Renamed.Read(buffer, 0, buffer.Length);
+                    int offset = 0;
+                    s.Reset();
+                    
+                    while (true)
+                    {
+                        int ch;
+                        if (offset < bufferLen)
+                            ch = buffer[offset++];
+                        else
+                        {
+                            bufferLen = in_Renamed.Read(buffer, 0, buffer.Length);
+                            offset = 0;
+                            if (bufferLen < 0)
+                                ch = - 1;
+                            else
+                                ch = buffer[offset++];
+                        }
+                        
+                        if (Char.IsLetter((char) ch))
+                        {
+                            s.Add(Char.ToLower((char) ch));
+                        }
+                        else
+                        {
+                            s.Stem();
+                            Console.Out.Write(s.ToString());
+                            s.Reset();
+                            if (ch < 0)
+                                break;
+                            else
+                            {
+                                System.Console.Out.Write((char) ch);
+                            }
+                        }
+                    }
+                    
+                    in_Renamed.Close();
+                }
+                catch (System.IO.IOException)
+                {
+                    Console.Out.WriteLine("error reading " + args[i]);
+                }
+            }
+        }
+    }
 }
\ No newline at end of file
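
A port detail worth noting in the PorterStemmer demo above: java.io.InputStream.read() signals end of stream by returning -1, but System.IO.Stream.Read returns 0, so the "bufferLen < 0" test in Main never fires and the loop can re-read stale buffer contents at end of file. Below is a minimal sketch of an EOF-safe variant, assuming the same PorterStemmer surface (Add/Stem/Reset/ToString) shown above; the file path and variable names are illustrative, not part of the patch:

    using (var input = new System.IO.FileStream("words.txt",
               System.IO.FileMode.Open, System.IO.FileAccess.Read))
    {
        var s = new PorterStemmer();
        var buffer = new byte[1024];
        int bufferLen = input.Read(buffer, 0, buffer.Length);
        int offset = 0;
        while (true)
        {
            int ch;
            if (offset < bufferLen)
                ch = buffer[offset++];
            else
            {
                bufferLen = input.Read(buffer, 0, buffer.Length);
                offset = 0;
                // Stream.Read returns 0 at end of stream in .NET, not -1 as in Java.
                ch = bufferLen == 0 ? -1 : buffer[offset++];
            }
            if (ch >= 0 && Char.IsLetter((char) ch))
                s.Add(Char.ToLower((char) ch));
            else
            {
                s.Stem();
                Console.Out.Write(s.ToString());
                s.Reset();
                if (ch < 0)
                    break;
                Console.Out.Write((char) ch);
            }
        }
    }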

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/SimpleAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/SimpleAnalyzer.cs b/src/core/Analysis/SimpleAnalyzer.cs
index b84f470..50bc9c1 100644
--- a/src/core/Analysis/SimpleAnalyzer.cs
+++ b/src/core/Analysis/SimpleAnalyzer.cs
@@ -17,29 +17,29 @@
 
 namespace Lucene.Net.Analysis
 {
-	
-	/// <summary>An <see cref="Analyzer" /> that filters <see cref="LetterTokenizer" /> 
-	/// with <see cref="LowerCaseFilter" /> 
-	/// </summary>
-	
-	public sealed class SimpleAnalyzer : Analyzer
-	{
-		public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
-		{
-			return new LowerCaseTokenizer(reader);
-		}
-		
-		public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
-		{
-			var tokenizer = (Tokenizer) PreviousTokenStream;
-			if (tokenizer == null)
-			{
-				tokenizer = new LowerCaseTokenizer(reader);
-				PreviousTokenStream = tokenizer;
-			}
-			else
-				tokenizer.Reset(reader);
-			return tokenizer;
-		}
-	}
+    
+    /// <summary>An <see cref="Analyzer" /> that filters <see cref="LetterTokenizer" /> 
+    /// with <see cref="LowerCaseFilter" /> 
+    /// </summary>
+    
+    public sealed class SimpleAnalyzer : Analyzer
+    {
+        public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
+        {
+            return new LowerCaseTokenizer(reader);
+        }
+        
+        public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
+        {
+            var tokenizer = (Tokenizer) PreviousTokenStream;
+            if (tokenizer == null)
+            {
+                tokenizer = new LowerCaseTokenizer(reader);
+                PreviousTokenStream = tokenizer;
+            }
+            else
+                tokenizer.Reset(reader);
+            return tokenizer;
+        }
+    }
 }
\ No newline at end of file
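
For reference, a minimal usage sketch of SimpleAnalyzer against the attribute-based TokenStream API used elsewhere in this patch (ITermAttribute and AddAttribute<> appear in StandardFilter.cs further down; the field name and input text here are illustrative):

    var analyzer = new SimpleAnalyzer();
    TokenStream ts = analyzer.TokenStream("body",
        new System.IO.StringReader("The QUICK Brown Fox"));
    var termAtt = ts.AddAttribute<ITermAttribute>();
    while (ts.IncrementToken())
        Console.WriteLine(termAtt.Term);   // the, quick, brown, fox

LowerCaseTokenizer splits on non-letters and lower-cases in a single pass, which is why SimpleAnalyzer needs no separate LowerCaseFilter.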

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Standard/StandardAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Standard/StandardAnalyzer.cs b/src/core/Analysis/Standard/StandardAnalyzer.cs
index 347d026..bf704be 100644
--- a/src/core/Analysis/Standard/StandardAnalyzer.cs
+++ b/src/core/Analysis/Standard/StandardAnalyzer.cs
@@ -24,151 +24,151 @@ using Version = Lucene.Net.Util.Version;
 
 namespace Lucene.Net.Analysis.Standard
 {
-	
-	/// <summary> Filters <see cref="StandardTokenizer" /> with <see cref="StandardFilter" />,
-	/// <see cref="LowerCaseFilter" /> and <see cref="StopFilter" />, using a list of English stop
-	/// words.
-	/// 
-	/// <a name="version"/>
-	/// <p/>
-	/// You must specify the required <see cref="Version" /> compatibility when creating
-	/// StandardAnalyzer:
-	/// <list type="bullet">
-	/// <item>As of 2.9, StopFilter preserves position increments</item>
-	/// <item>As of 2.4, Tokens incorrectly identified as acronyms are corrected (see
-	/// <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1608</a>)</item>
-	/// </list>
-	/// </summary>
-	public class StandardAnalyzer : Analyzer
-	{
-		private ISet<string> stopSet;
-		
-		/// <summary> Specifies whether deprecated acronyms should be replaced with HOST type.
+    
+    /// <summary> Filters <see cref="StandardTokenizer" /> with <see cref="StandardFilter" />,
+    /// <see cref="LowerCaseFilter" /> and <see cref="StopFilter" />, using a list of English stop
+    /// words.
+    /// 
+    /// <a name="version"/>
+    /// <p/>
+    /// You must specify the required <see cref="Version" /> compatibility when creating
+    /// StandardAnalyzer:
+    /// <list type="bullet">
+    /// <item>As of 2.9, StopFilter preserves position increments</item>
+    /// <item>As of 2.4, Tokens incorrectly identified as acronyms are corrected (see
+    /// <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>)</item>
+    /// </list>
+    /// </summary>
+    public class StandardAnalyzer : Analyzer
+    {
+        private ISet<string> stopSet;
+        
+        /// <summary> Specifies whether deprecated acronyms should be replaced with HOST type.
         /// See <a href="https://issues.apache.org/jira/browse/LUCENE-1068">https://issues.apache.org/jira/browse/LUCENE-1068</a>
-		/// </summary>
-		private bool replaceInvalidAcronym, enableStopPositionIncrements;
+        /// </summary>
+        private bool replaceInvalidAcronym, enableStopPositionIncrements;
 
-		/// <summary>An unmodifiable set containing some common English words that are usually not
-		/// useful for searching. 
-		/// </summary>
-		public static readonly ISet<string> STOP_WORDS_SET;
-		private Version matchVersion;
-		
-		/// <summary>Builds an analyzer with the default stop words (<see cref="STOP_WORDS_SET" />).
-		/// </summary>
-		/// <param name="matchVersion">Lucene version to match see <see cref="Version">above</see></param>
-		public StandardAnalyzer(Version matchVersion)
+        /// <summary>An unmodifiable set containing some common English words that are usually not
+        /// useful for searching. 
+        /// </summary>
+        public static readonly ISet<string> STOP_WORDS_SET;
+        private Version matchVersion;
+        
+        /// <summary>Builds an analyzer with the default stop words (<see cref="STOP_WORDS_SET" />).
+        /// </summary>
+        /// <param name="matchVersion">Lucene version to match; see <see cref="Version">above</see></param>
+        public StandardAnalyzer(Version matchVersion)
             : this(matchVersion, STOP_WORDS_SET)
-		{ }
-		
-		/// <summary>Builds an analyzer with the given stop words.</summary>
+        { }
+        
+        /// <summary>Builds an analyzer with the given stop words.</summary>
         /// <param name="matchVersion">Lucene version to match See <see cref="Version">above</see> />
-		///
-		/// </param>
-		/// <param name="stopWords">stop words 
-		/// </param>
-		public StandardAnalyzer(Version matchVersion, ISet<string> stopWords)
-		{
-			stopSet = stopWords;
+        ///
+        /// </param>
+        /// <param name="stopWords">stop words 
+        /// </param>
+        public StandardAnalyzer(Version matchVersion, ISet<string> stopWords)
+        {
+            stopSet = stopWords;
             SetOverridesTokenStreamMethod<StandardAnalyzer>();
             enableStopPositionIncrements = StopFilter.GetEnablePositionIncrementsVersionDefault(matchVersion);
             replaceInvalidAcronym = matchVersion.OnOrAfter(Version.LUCENE_24);
             this.matchVersion = matchVersion;
-		}
+        }
         
-		/// <summary>Builds an analyzer with the stop words from the given file.</summary>
-		/// <seealso cref="WordlistLoader.GetWordSet(System.IO.FileInfo)">
-		/// </seealso>
+        /// <summary>Builds an analyzer with the stop words from the given file.</summary>
+        /// <seealso cref="WordlistLoader.GetWordSet(System.IO.FileInfo)">
+        /// </seealso>
         /// <param name="matchVersion">Lucene version to match See <see cref="Version">above</see> />
-		///
-		/// </param>
-		/// <param name="stopwords">File to read stop words from 
-		/// </param>
-		public StandardAnalyzer(Version matchVersion, System.IO.FileInfo stopwords)
+        ///
+        /// </param>
+        /// <param name="stopwords">File to read stop words from 
+        /// </param>
+        public StandardAnalyzer(Version matchVersion, System.IO.FileInfo stopwords)
             : this (matchVersion, WordlistLoader.GetWordSet(stopwords))
-		{
-		}
-		
-		/// <summary>Builds an analyzer with the stop words from the given reader.</summary>
+        {
+        }
+        
+        /// <summary>Builds an analyzer with the stop words from the given reader.</summary>
         /// <seealso cref="WordlistLoader.GetWordSet(System.IO.TextReader)">
-		/// </seealso>
+        /// </seealso>
         /// <param name="matchVersion">Lucene version to match See <see cref="Version">above</see> />
-		///
-		/// </param>
-		/// <param name="stopwords">Reader to read stop words from 
-		/// </param>
-		public StandardAnalyzer(Version matchVersion, System.IO.TextReader stopwords)
+        ///
+        /// </param>
+        /// <param name="stopwords">Reader to read stop words from 
+        /// </param>
+        public StandardAnalyzer(Version matchVersion, System.IO.TextReader stopwords)
             : this(matchVersion, WordlistLoader.GetWordSet(stopwords))
-		{ }
-		
-		/// <summary>Constructs a <see cref="StandardTokenizer" /> filtered by a <see cref="StandardFilter" />
-		///, a <see cref="LowerCaseFilter" /> and a <see cref="StopFilter" />. 
-		/// </summary>
-		public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
-		{
-			StandardTokenizer tokenStream = new StandardTokenizer(matchVersion, reader);
-			tokenStream.MaxTokenLength = maxTokenLength;
-			TokenStream result = new StandardFilter(tokenStream);
-			result = new LowerCaseFilter(result);
-			result = new StopFilter(enableStopPositionIncrements, result, stopSet);
-			return result;
-		}
-		
-		private sealed class SavedStreams
-		{
-			internal StandardTokenizer tokenStream;
-			internal TokenStream filteredTokenStream;
-		}
-		
-		/// <summary>Default maximum allowed token length </summary>
-		public const int DEFAULT_MAX_TOKEN_LENGTH = 255;
-		
-		private int maxTokenLength = DEFAULT_MAX_TOKEN_LENGTH;
+        { }
+        
+        /// <summary>Constructs a <see cref="StandardTokenizer" /> filtered by a <see cref="StandardFilter" />
+        ///, a <see cref="LowerCaseFilter" /> and a <see cref="StopFilter" />. 
+        /// </summary>
+        public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
+        {
+            StandardTokenizer tokenStream = new StandardTokenizer(matchVersion, reader);
+            tokenStream.MaxTokenLength = maxTokenLength;
+            TokenStream result = new StandardFilter(tokenStream);
+            result = new LowerCaseFilter(result);
+            result = new StopFilter(enableStopPositionIncrements, result, stopSet);
+            return result;
+        }
+        
+        private sealed class SavedStreams
+        {
+            internal StandardTokenizer tokenStream;
+            internal TokenStream filteredTokenStream;
+        }
+        
+        /// <summary>Default maximum allowed token length </summary>
+        public const int DEFAULT_MAX_TOKEN_LENGTH = 255;
+        
+        private int maxTokenLength = DEFAULT_MAX_TOKEN_LENGTH;
 
-	    /// <summary> Set maximum allowed token length.  If a token is seen
-	    /// that exceeds this length then it is discarded.  This
-	    /// setting only takes effect the next time tokenStream or
-	    /// reusableTokenStream is called.
-	    /// </summary>
-	    public virtual int MaxTokenLength
-	    {
-	        get { return maxTokenLength; }
-	        set { maxTokenLength = value; }
-	    }
+        /// <summary> Set maximum allowed token length.  If a token is seen
+        /// that exceeds this length then it is discarded.  This
+        /// setting only takes effect the next time tokenStream or
+        /// reusableTokenStream is called.
+        /// </summary>
+        public virtual int MaxTokenLength
+        {
+            get { return maxTokenLength; }
+            set { maxTokenLength = value; }
+        }
 
-	    public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
-		{
-			if (overridesTokenStreamMethod)
-			{
-				// LUCENE-1678: force fallback to tokenStream() if we
-				// have been subclassed and that subclass overrides
-				// tokenStream but not reusableTokenStream
-				return TokenStream(fieldName, reader);
-			}
-			SavedStreams streams = (SavedStreams) PreviousTokenStream;
-			if (streams == null)
-			{
-				streams = new SavedStreams();
-				PreviousTokenStream = streams;
-				streams.tokenStream = new StandardTokenizer(matchVersion, reader);
-				streams.filteredTokenStream = new StandardFilter(streams.tokenStream);
-				streams.filteredTokenStream = new LowerCaseFilter(streams.filteredTokenStream);
-			    streams.filteredTokenStream = new StopFilter(enableStopPositionIncrements, 
+        public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
+        {
+            if (overridesTokenStreamMethod)
+            {
+                // LUCENE-1678: force fallback to tokenStream() if we
+                // have been subclassed and that subclass overrides
+                // tokenStream but not reusableTokenStream
+                return TokenStream(fieldName, reader);
+            }
+            SavedStreams streams = (SavedStreams) PreviousTokenStream;
+            if (streams == null)
+            {
+                streams = new SavedStreams();
+                PreviousTokenStream = streams;
+                streams.tokenStream = new StandardTokenizer(matchVersion, reader);
+                streams.filteredTokenStream = new StandardFilter(streams.tokenStream);
+                streams.filteredTokenStream = new LowerCaseFilter(streams.filteredTokenStream);
+                streams.filteredTokenStream = new StopFilter(enableStopPositionIncrements, 
                                                              streams.filteredTokenStream, stopSet);
-			}
-			else
-			{
-				streams.tokenStream.Reset(reader);
-			}
-			streams.tokenStream.MaxTokenLength = maxTokenLength;
-			
-			streams.tokenStream.SetReplaceInvalidAcronym(replaceInvalidAcronym);
-			
-			return streams.filteredTokenStream;
-		}
-		static StandardAnalyzer()
-		{
-			STOP_WORDS_SET = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
-		}
-	}
+            }
+            else
+            {
+                streams.tokenStream.Reset(reader);
+            }
+            streams.tokenStream.MaxTokenLength = maxTokenLength;
+            
+            streams.tokenStream.SetReplaceInvalidAcronym(replaceInvalidAcronym);
+            
+            return streams.filteredTokenStream;
+        }
+        static StandardAnalyzer()
+        {
+            STOP_WORDS_SET = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
+        }
+    }
 }
\ No newline at end of file
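
As the <summary> above stresses, the Version argument is behavioral, not decorative: it decides whether StopFilter preserves position increments (2.9 and later) and whether the LUCENE-1068 acronym correction applies (2.4 and later). A construction sketch, assuming System.Collections.Generic.HashSet<string> for the ISet<string> parameter; the stop words are illustrative:

    var analyzer = new StandardAnalyzer(Version.LUCENE_29);   // default STOP_WORDS_SET
    var custom = new StandardAnalyzer(Version.LUCENE_29,
        new HashSet<string> { "foo", "bar" });                // caller-supplied stop words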

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Analysis/Standard/StandardFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Standard/StandardFilter.cs b/src/core/Analysis/Standard/StandardFilter.cs
index fd13261..f4a1c56 100644
--- a/src/core/Analysis/Standard/StandardFilter.cs
+++ b/src/core/Analysis/Standard/StandardFilter.cs
@@ -23,66 +23,66 @@ using TokenStream = Lucene.Net.Analysis.TokenStream;
 
 namespace Lucene.Net.Analysis.Standard
 {
-	
-	/// <summary>Normalizes tokens extracted with <see cref="StandardTokenizer" />. </summary>
-	
-	public sealed class StandardFilter:TokenFilter
-	{
-		
-		
-		/// <summary>Construct filtering <i>in</i>. </summary>
-		public StandardFilter(TokenStream in_Renamed):base(in_Renamed)
-		{
+    
+    /// <summary>Normalizes tokens extracted with <see cref="StandardTokenizer" />. </summary>
+    
+    public sealed class StandardFilter:TokenFilter
+    {
+        
+        
+        /// <summary>Construct filtering <i>in</i>. </summary>
+        public StandardFilter(TokenStream in_Renamed):base(in_Renamed)
+        {
             termAtt = AddAttribute<ITermAttribute>();
-			typeAtt = AddAttribute<ITypeAttribute>();
-		}
-		
-		private static readonly System.String APOSTROPHE_TYPE;
-		private static readonly System.String ACRONYM_TYPE;
-		
-		// this filters uses attribute type
-		private ITypeAttribute typeAtt;
-		private ITermAttribute termAtt;
-		
-		/// <summary>Returns the next token in the stream, or null at EOS.
-		/// <p/>Removes <tt>'s</tt> from the end of words.
-		/// <p/>Removes dots from acronyms.
-		/// </summary>
-		public override bool IncrementToken()
-		{
-			if (!input.IncrementToken())
-			{
-				return false;
-			}
-			
-			char[] buffer = termAtt.TermBuffer();
-			int bufferLength = termAtt.TermLength();
-			System.String type = typeAtt.Type;
-			
-			if ((System.Object) type == (System.Object) APOSTROPHE_TYPE && bufferLength >= 2 && buffer[bufferLength - 2] == '\'' && (buffer[bufferLength - 1] == 's' || buffer[bufferLength - 1] == 'S'))
-			{
-				// Strip last 2 characters off
-				termAtt.SetTermLength(bufferLength - 2);
-			}
-			else if ((System.Object) type == (System.Object) ACRONYM_TYPE)
-			{
-				// remove dots
-				int upto = 0;
-				for (int i = 0; i < bufferLength; i++)
-				{
-					char c = buffer[i];
-					if (c != '.')
-						buffer[upto++] = c;
-				}
-				termAtt.SetTermLength(upto);
-			}
-			
-			return true;
-		}
-		static StandardFilter()
-		{
-			APOSTROPHE_TYPE = StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.APOSTROPHE];
-			ACRONYM_TYPE = StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.ACRONYM];
-		}
-	}
+            typeAtt = AddAttribute<ITypeAttribute>();
+        }
+        
+        private static readonly System.String APOSTROPHE_TYPE;
+        private static readonly System.String ACRONYM_TYPE;
+        
+        // this filter uses the type attribute
+        private ITypeAttribute typeAtt;
+        private ITermAttribute termAtt;
+        
+        /// <summary>Advances to the next token in the stream; returns false at EOS.
+        /// <p/>Removes <tt>'s</tt> from the end of words.
+        /// <p/>Removes dots from acronyms.
+        /// </summary>
+        public override bool IncrementToken()
+        {
+            if (!input.IncrementToken())
+            {
+                return false;
+            }
+            
+            char[] buffer = termAtt.TermBuffer();
+            int bufferLength = termAtt.TermLength();
+            System.String type = typeAtt.Type;
+            
+            if ((System.Object) type == (System.Object) APOSTROPHE_TYPE && bufferLength >= 2 && buffer[bufferLength - 2] == '\'' && (buffer[bufferLength - 1] == 's' || buffer[bufferLength - 1] == 'S'))
+            {
+                // Strip last 2 characters off
+                termAtt.SetTermLength(bufferLength - 2);
+            }
+            else if ((System.Object) type == (System.Object) ACRONYM_TYPE)
+            {
+                // remove dots
+                int upto = 0;
+                for (int i = 0; i < bufferLength; i++)
+                {
+                    char c = buffer[i];
+                    if (c != '.')
+                        buffer[upto++] = c;
+                }
+                termAtt.SetTermLength(upto);
+            }
+            
+            return true;
+        }
+        static StandardFilter()
+        {
+            APOSTROPHE_TYPE = StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.APOSTROPHE];
+            ACRONYM_TYPE = StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.ACRONYM];
+        }
+    }
 }
\ No newline at end of file
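
The two branches in IncrementToken above are the whole of StandardFilter's job: tokens typed APOSTROPHE lose a trailing 's ("O'Reilly's" becomes "O'Reilly"), and tokens typed ACRONYM lose their dots ("I.B.M." becomes "IBM"). A sketch of the filter in isolation, with illustrative reader contents:

    TokenStream ts = new StandardFilter(new StandardTokenizer(
        Version.LUCENE_29, new System.IO.StringReader("I.B.M.")));
    var termAtt = ts.AddAttribute<ITermAttribute>();
    while (ts.IncrementToken())
        Console.WriteLine(termAtt.Term);   // expected: IBM

The reference comparison "(System.Object) type == (System.Object) APOSTROPHE_TYPE" is safe only because both strings come from the same StandardTokenizerImpl.TOKEN_TYPES table, so reference equality holds.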


[40/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
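
The ItalianStemmer below, like the other SF.Snowball.Ext classes, is machine-generated from a Snowball script, which explains the Among tables in InitBlock and the label/goto control flow throughout. It is normally consumed through SnowballFilter; a direct-use sketch, assuming the SetCurrent/Stem/GetCurrent surface the port carries over from the Java SnowballProgram (treat the names and the sample word as illustrative):

    var stemmer = new SF.Snowball.Ext.ItalianStemmer();
    stemmer.SetCurrent("abbandonata");          // illustrative input
    stemmer.Stem();
    Console.WriteLine(stemmer.GetCurrent());    // expected stem: "abbandon"
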
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/contrib/Snowball/SF/Snowball/Ext/ItalianStemmer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Snowball/SF/Snowball/Ext/ItalianStemmer.cs b/src/contrib/Snowball/SF/Snowball/Ext/ItalianStemmer.cs
index a77103b..f661835 100644
--- a/src/contrib/Snowball/SF/Snowball/Ext/ItalianStemmer.cs
+++ b/src/contrib/Snowball/SF/Snowball/Ext/ItalianStemmer.cs
@@ -24,1235 +24,1235 @@ namespace SF.Snowball.Ext
 #pragma warning disable 162,164
     
     /// <summary> Generated class implementing code defined by a snowball script.</summary>
-	public class ItalianStemmer : SnowballProgram
-	{
-		public ItalianStemmer()
-		{
-			InitBlock();
-		}
-		private void  InitBlock()
-		{
-			a_0 = new Among[]{new Among("", - 1, 7, "", this), new Among("qu", 0, 6, "", this), new Among("\u00E1", 0, 1, "", this), new Among("\u00E9", 0, 2, "", this), new Among("\u00ED", 0, 3, "", this), new Among("\u00F3", 0, 4, "", this), new Among("\u00FA", 0, 5, "", this)};
-			a_1 = new Among[]{new Among("", - 1, 3, "", this), new Among("I", 0, 1, "", this), new Among("U", 0, 2, "", this)};
-			a_2 = new Among[]{new Among("la", - 1, - 1, "", this), new Among("cela", 0, - 1, "", this), new Among("gliela", 0, - 1, "", this), new Among("mela", 0, - 1, "", this), new Among("tela", 0, - 1, "", this), new Among("vela", 0, - 1, "", this), new Among("le", - 1, - 1, "", this), new Among("cele", 6, - 1, "", this), new Among("gliele", 6, - 1, "", this), new Among("mele", 6, - 1, "", this), new Among("tele", 6, - 1, "", this), new Among("vele", 6, - 1, "", this), new Among("ne", - 1, - 1, "", this), new Among("cene", 12, - 1, "", this), new Among("gliene", 12, - 1, "", this), new Among("mene", 12, - 1, "", this), new Among("sene", 12, - 1, "", this), new Among("tene", 12, - 1, "", this), new Among("vene", 12, - 1, "", this), new Among("ci", - 1, - 1, "", this), new Among("li", - 1, - 1, "", this), new Among("celi", 20, - 1, "", this), new Among("glieli", 20, - 1, "", this), new Among("meli", 20, - 1, "", this), new Among("teli", 20, - 1, "", this), new Among("veli", 20, - 1, "", th
 is), new Among("gli", 20, - 1, "", this), new Among("mi", - 1, - 1, "", this), new Among("si", - 1, - 1, "", this), new Among("ti", - 1, - 1, "", this), new Among("vi", - 1, - 1, "", this), new Among("lo", - 1, - 1, "", this), new Among("celo", 31, - 1, "", this), new Among("glielo", 31, - 1, "", this), new Among("melo", 31, - 1, "", this), new Among("telo", 31, - 1, "", this), new Among("velo", 31, - 1, "", this)};
-			a_3 = new Among[]{new Among("ando", - 1, 1, "", this), new Among("endo", - 1, 1, "", this), new Among("ar", - 1, 2, "", this), new Among("er", - 1, 2, "", this), new Among("ir", - 1, 2, "", this)};
-			a_4 = new Among[]{new Among("ic", - 1, - 1, "", this), new Among("abil", - 1, - 1, "", this), new Among("os", - 1, - 1, "", this), new Among("iv", - 1, 1, "", this)};
-			a_5 = new Among[]{new Among("ic", - 1, 1, "", this), new Among("abil", - 1, 1, "", this), new Among("iv", - 1, 1, "", this)};
-			a_6 = new Among[]{new Among("ica", - 1, 1, "", this), new Among("logia", - 1, 3, "", this), new Among("osa", - 1, 1, "", this), new Among("ista", - 1, 1, "", this), new Among("iva", - 1, 9, "", this), new Among("anza", - 1, 1, "", this), new Among("enza", - 1, 5, "", this), new Among("ice", - 1, 1, "", this), new Among("atrice", 7, 1, "", this), new Among("iche", - 1, 1, "", this), new Among("logie", - 1, 3, "", this), new Among("abile", - 1, 1, "", this), new Among("ibile", - 1, 1, "", this), new Among("usione", - 1, 4, "", this), new Among("azione", - 1, 2, "", this), new Among("uzione", - 1, 4, "", this), new Among("atore", - 1, 2, "", this), new Among("ose", - 1, 1, "", this), new Among("mente", - 1, 1, "", this), new Among("amente", 18, 7, "", this), new Among("iste", - 1, 1, "", this), new Among("ive", - 1, 9, "", this), new Among("anze", - 1, 1, "", this), new Among("enze", - 1, 5, "", this), new Among("ici", - 1, 1, "", this), new Among("atrici", 24, 1, "", this), new Amo
 ng("ichi", - 1, 1, "", this), new Among("abili", - 1, 1, "", this), new Among("ibili", - 1, 1, "", this), new Among("ismi", - 1, 1, "", this), new Among("usioni", - 1, 4, "", this), new Among("azioni", - 1, 2, "", this), new Among("uzioni", - 1, 4, "", this), new Among("atori", - 1, 2, "", this), new Among("osi", - 1, 1, "", this), new Among("amenti", - 1, 6, "", this), new Among("imenti", - 1, 6, "", this), new Among("isti", - 1, 1, "", this), new Among("ivi", - 1, 9, "", this), new Among("ico", - 1, 1, "", this), new Among("ismo", - 1, 1, "", this), new Among("oso", - 1, 1, "", this), new Among("amento", - 1, 6, "", this), new Among("imento", - 1, 6, "", this), new Among("ivo", - 1, 9, "", this), new Among("it\u00E0", - 1, 8, "", this), new Among("ist\u00E0", - 1, 1, "", this), new Among("ist\u00E8", - 1, 1, "", this), new Among("ist\u00EC", - 1, 1, "", this)};
-			a_7 = new Among[]{new Among("isca", - 1, 1, "", this), new Among("enda", - 1, 1, "", this), new Among("ata", - 1, 1, "", this), new Among("ita", - 1, 1, "", this), new Among("uta", - 1, 1, "", this), new Among("ava", - 1, 1, "", this), new Among("eva", - 1, 1, "", this), new Among("iva", - 1, 1, "", this), new Among("erebbe", - 1, 1, "", this), new Among("irebbe", - 1, 1, "", this), new Among("isce", - 1, 1, "", this), new Among("ende", - 1, 1, "", this), new Among("are", - 1, 1, "", this), new Among("ere", - 1, 1, "", this), new Among("ire", - 1, 1, "", this), new Among("asse", - 1, 1, "", this), new Among("ate", - 1, 1, "", this), new Among("avate", 16, 1, "", this), new Among("evate", 16, 1, "", this), new Among("ivate", 16, 1, "", this), new Among("ete", - 1, 1, "", this), new Among("erete", 20, 1, "", this), new Among("irete", 20, 1, "", this), new Among("ite", - 1, 1, "", this), new Among("ereste", - 1, 1, "", this), new Among("ireste", - 1, 1, "", this), new Among("ute", -
  1, 1, "", this), new Among("erai", - 1, 1, "", this), new Among("irai", - 1, 1, "", this), new Among("isci", - 1, 1, "", this), new Among("endi", - 1, 1, "", this), new Among("erei", - 1, 1, "", this), new Among("irei", - 1, 1, "", this), new Among("assi", - 1, 1, "", this), new Among("ati", - 1, 1, "", this), new Among("iti", - 1, 1, "", this), new Among("eresti", - 1, 1, "", this), new Among("iresti", - 1, 1, "", this), new Among("uti", - 1, 1, "", this), new Among("avi", - 1, 1, "", this), new Among("evi", - 1, 1, "", this), new Among("ivi", - 1, 1, "", this), new Among("isco", - 1, 1, "", this), new Among("ando", - 1, 1, "", this), new Among("endo", - 1, 1, "", this), new Among("Yamo", - 1, 1, "", this), new Among("iamo", - 1, 1, "", this), new Among("avamo", - 1, 1, "", this), new Among("evamo", - 1, 1, "", this), new Among("ivamo", - 1, 1, "", this), new Among("eremo", - 1, 1, "", this), new Among("iremo", - 1, 1, "", this), new Among("assimo", - 1, 1, "", this), new Among("a
 mmo", - 1, 1, "", this), new Among(
-				"emmo", - 1, 1, "", this), new Among("eremmo", 54, 1, "", this), new Among("iremmo", 54, 1, "", this), new Among("immo", - 1, 1, "", this), new Among("ano", - 1, 1, "", this), new Among("iscano", 58, 1, "", this), new Among("avano", 58, 1, "", this), new Among("evano", 58, 1, "", this), new Among("ivano", 58, 1, "", this), new Among("eranno", - 1, 1, "", this), new Among("iranno", - 1, 1, "", this), new Among("ono", - 1, 1, "", this), new Among("iscono", 65, 1, "", this), new Among("arono", 65, 1, "", this), new Among("erono", 65, 1, "", this), new Among("irono", 65, 1, "", this), new Among("erebbero", - 1, 1, "", this), new Among("irebbero", - 1, 1, "", this), new Among("assero", - 1, 1, "", this), new Among("essero", - 1, 1, "", this), new Among("issero", - 1, 1, "", this), new Among("ato", - 1, 1, "", this), new Among("ito", - 1, 1, "", this), new Among("uto", - 1, 1, "", this), new Among("avo", - 1, 1, "", this), new Among("evo", - 1, 1, "", this), new Among("ivo", - 1, 1, "
 ", this), new Among("ar", - 1, 1, "", this), new Among("ir", - 1, 1, "", this), new Among("er\u00E0", - 1, 1, "", this), new Among("ir\u00E0", - 1, 1, "", this), new Among("er\u00F2", - 1, 1, "", this), new Among("ir\u00F2", - 1, 1, "", this)};
-		}
-		
-		private Among[] a_0;
-		private Among[] a_1;
-		private Among[] a_2;
-		private Among[] a_3;
-		private Among[] a_4;
-		private Among[] a_5;
-		private Among[] a_6;
-		private Among[] a_7;
-		private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (128), (char) (128), (char) (8), (char) (2), (char) (1)};
-		private static readonly char[] g_AEIO = new char[]{(char) (17), (char) (65), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (128), (char) (128), (char) (8), (char) (2)};
-		private static readonly char[] g_CG = new char[]{(char) (17)};
-		
-		private int I_p2;
-		private int I_p1;
-		private int I_pV;
-		
-		protected internal virtual void  copy_from(ItalianStemmer other)
-		{
-			I_p2 = other.I_p2;
-			I_p1 = other.I_p1;
-			I_pV = other.I_pV;
-			base.copy_from(other);
-		}
-		
-		private bool r_prelude()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			// (, line 34
-			// test, line 35
-			v_1 = cursor;
-			// repeat, line 35
-			while (true)
-			{
-				v_2 = cursor;
-				do 
-				{
-					// (, line 35
-					// [, line 36
-					bra = cursor;
-					// substring, line 36
-					among_var = find_among(a_0, 7);
-					if (among_var == 0)
-					{
-						goto lab1_brk;
-					}
-					// ], line 36
-					ket = cursor;
-					switch (among_var)
-					{
-						
-						case 0: 
-							goto lab1_brk;
-						
-						case 1: 
-							// (, line 37
-							// <-, line 37
-							slice_from("\u00E0");
-							break;
-						
-						case 2: 
-							// (, line 38
-							// <-, line 38
-							slice_from("\u00E8");
-							break;
-						
-						case 3: 
-							// (, line 39
-							// <-, line 39
-							slice_from("\u00EC");
-							break;
-						
-						case 4: 
-							// (, line 40
-							// <-, line 40
-							slice_from("\u00F2");
-							break;
-						
-						case 5: 
-							// (, line 41
-							// <-, line 41
-							slice_from("\u00F9");
-							break;
-						
-						case 6: 
-							// (, line 42
-							// <-, line 42
-							slice_from("qU");
-							break;
-						
-						case 7: 
-							// (, line 43
-							// next, line 43
-							if (cursor >= limit)
-							{
-								goto lab1_brk;
-							}
-							cursor++;
-							break;
-						}
-					goto replab0;
-				}
-				while (false);
+    public class ItalianStemmer : SnowballProgram
+    {
+        public ItalianStemmer()
+        {
+            InitBlock();
+        }
+        private void  InitBlock()
+        {
+            a_0 = new Among[]{new Among("", - 1, 7, "", this), new Among("qu", 0, 6, "", this), new Among("\u00E1", 0, 1, "", this), new Among("\u00E9", 0, 2, "", this), new Among("\u00ED", 0, 3, "", this), new Among("\u00F3", 0, 4, "", this), new Among("\u00FA", 0, 5, "", this)};
+            a_1 = new Among[]{new Among("", - 1, 3, "", this), new Among("I", 0, 1, "", this), new Among("U", 0, 2, "", this)};
+            a_2 = new Among[]{new Among("la", - 1, - 1, "", this), new Among("cela", 0, - 1, "", this), new Among("gliela", 0, - 1, "", this), new Among("mela", 0, - 1, "", this), new Among("tela", 0, - 1, "", this), new Among("vela", 0, - 1, "", this), new Among("le", - 1, - 1, "", this), new Among("cele", 6, - 1, "", this), new Among("gliele", 6, - 1, "", this), new Among("mele", 6, - 1, "", this), new Among("tele", 6, - 1, "", this), new Among("vele", 6, - 1, "", this), new Among("ne", - 1, - 1, "", this), new Among("cene", 12, - 1, "", this), new Among("gliene", 12, - 1, "", this), new Among("mene", 12, - 1, "", this), new Among("sene", 12, - 1, "", this), new Among("tene", 12, - 1, "", this), new Among("vene", 12, - 1, "", this), new Among("ci", - 1, - 1, "", this), new Among("li", - 1, - 1, "", this), new Among("celi", 20, - 1, "", this), new Among("glieli", 20, - 1, "", this), new Among("meli", 20, - 1, "", this), new Among("teli", 20, - 1, "", this), new Among("veli", 20, - 
 1, "", this), new Among("gli", 20, - 1, "", this), new Among("mi", - 1, - 1, "", this), new Among("si", - 1, - 1, "", this), new Among("ti", - 1, - 1, "", this), new Among("vi", - 1, - 1, "", this), new Among("lo", - 1, - 1, "", this), new Among("celo", 31, - 1, "", this), new Among("glielo", 31, - 1, "", this), new Among("melo", 31, - 1, "", this), new Among("telo", 31, - 1, "", this), new Among("velo", 31, - 1, "", this)};
+            a_3 = new Among[]{new Among("ando", - 1, 1, "", this), new Among("endo", - 1, 1, "", this), new Among("ar", - 1, 2, "", this), new Among("er", - 1, 2, "", this), new Among("ir", - 1, 2, "", this)};
+            a_4 = new Among[]{new Among("ic", - 1, - 1, "", this), new Among("abil", - 1, - 1, "", this), new Among("os", - 1, - 1, "", this), new Among("iv", - 1, 1, "", this)};
+            a_5 = new Among[]{new Among("ic", - 1, 1, "", this), new Among("abil", - 1, 1, "", this), new Among("iv", - 1, 1, "", this)};
+            a_6 = new Among[]{new Among("ica", - 1, 1, "", this), new Among("logia", - 1, 3, "", this), new Among("osa", - 1, 1, "", this), new Among("ista", - 1, 1, "", this), new Among("iva", - 1, 9, "", this), new Among("anza", - 1, 1, "", this), new Among("enza", - 1, 5, "", this), new Among("ice", - 1, 1, "", this), new Among("atrice", 7, 1, "", this), new Among("iche", - 1, 1, "", this), new Among("logie", - 1, 3, "", this), new Among("abile", - 1, 1, "", this), new Among("ibile", - 1, 1, "", this), new Among("usione", - 1, 4, "", this), new Among("azione", - 1, 2, "", this), new Among("uzione", - 1, 4, "", this), new Among("atore", - 1, 2, "", this), new Among("ose", - 1, 1, "", this), new Among("mente", - 1, 1, "", this), new Among("amente", 18, 7, "", this), new Among("iste", - 1, 1, "", this), new Among("ive", - 1, 9, "", this), new Among("anze", - 1, 1, "", this), new Among("enze", - 1, 5, "", this), new Among("ici", - 1, 1, "", this), new Among("atrici", 24, 1, "", this)
 , new Among("ichi", - 1, 1, "", this), new Among("abili", - 1, 1, "", this), new Among("ibili", - 1, 1, "", this), new Among("ismi", - 1, 1, "", this), new Among("usioni", - 1, 4, "", this), new Among("azioni", - 1, 2, "", this), new Among("uzioni", - 1, 4, "", this), new Among("atori", - 1, 2, "", this), new Among("osi", - 1, 1, "", this), new Among("amenti", - 1, 6, "", this), new Among("imenti", - 1, 6, "", this), new Among("isti", - 1, 1, "", this), new Among("ivi", - 1, 9, "", this), new Among("ico", - 1, 1, "", this), new Among("ismo", - 1, 1, "", this), new Among("oso", - 1, 1, "", this), new Among("amento", - 1, 6, "", this), new Among("imento", - 1, 6, "", this), new Among("ivo", - 1, 9, "", this), new Among("it\u00E0", - 1, 8, "", this), new Among("ist\u00E0", - 1, 1, "", this), new Among("ist\u00E8", - 1, 1, "", this), new Among("ist\u00EC", - 1, 1, "", this)};
+            a_7 = new Among[]{new Among("isca", - 1, 1, "", this), new Among("enda", - 1, 1, "", this), new Among("ata", - 1, 1, "", this), new Among("ita", - 1, 1, "", this), new Among("uta", - 1, 1, "", this), new Among("ava", - 1, 1, "", this), new Among("eva", - 1, 1, "", this), new Among("iva", - 1, 1, "", this), new Among("erebbe", - 1, 1, "", this), new Among("irebbe", - 1, 1, "", this), new Among("isce", - 1, 1, "", this), new Among("ende", - 1, 1, "", this), new Among("are", - 1, 1, "", this), new Among("ere", - 1, 1, "", this), new Among("ire", - 1, 1, "", this), new Among("asse", - 1, 1, "", this), new Among("ate", - 1, 1, "", this), new Among("avate", 16, 1, "", this), new Among("evate", 16, 1, "", this), new Among("ivate", 16, 1, "", this), new Among("ete", - 1, 1, "", this), new Among("erete", 20, 1, "", this), new Among("irete", 20, 1, "", this), new Among("ite", - 1, 1, "", this), new Among("ereste", - 1, 1, "", this), new Among("ireste", - 1, 1, "", this), new Among
 ("ute", - 1, 1, "", this), new Among("erai", - 1, 1, "", this), new Among("irai", - 1, 1, "", this), new Among("isci", - 1, 1, "", this), new Among("endi", - 1, 1, "", this), new Among("erei", - 1, 1, "", this), new Among("irei", - 1, 1, "", this), new Among("assi", - 1, 1, "", this), new Among("ati", - 1, 1, "", this), new Among("iti", - 1, 1, "", this), new Among("eresti", - 1, 1, "", this), new Among("iresti", - 1, 1, "", this), new Among("uti", - 1, 1, "", this), new Among("avi", - 1, 1, "", this), new Among("evi", - 1, 1, "", this), new Among("ivi", - 1, 1, "", this), new Among("isco", - 1, 1, "", this), new Among("ando", - 1, 1, "", this), new Among("endo", - 1, 1, "", this), new Among("Yamo", - 1, 1, "", this), new Among("iamo", - 1, 1, "", this), new Among("avamo", - 1, 1, "", this), new Among("evamo", - 1, 1, "", this), new Among("ivamo", - 1, 1, "", this), new Among("eremo", - 1, 1, "", this), new Among("iremo", - 1, 1, "", this), new Among("assimo", - 1, 1, "", this), new
  Among("ammo", - 1, 1, "", this), new Among(
+                "emmo", - 1, 1, "", this), new Among("eremmo", 54, 1, "", this), new Among("iremmo", 54, 1, "", this), new Among("immo", - 1, 1, "", this), new Among("ano", - 1, 1, "", this), new Among("iscano", 58, 1, "", this), new Among("avano", 58, 1, "", this), new Among("evano", 58, 1, "", this), new Among("ivano", 58, 1, "", this), new Among("eranno", - 1, 1, "", this), new Among("iranno", - 1, 1, "", this), new Among("ono", - 1, 1, "", this), new Among("iscono", 65, 1, "", this), new Among("arono", 65, 1, "", this), new Among("erono", 65, 1, "", this), new Among("irono", 65, 1, "", this), new Among("erebbero", - 1, 1, "", this), new Among("irebbero", - 1, 1, "", this), new Among("assero", - 1, 1, "", this), new Among("essero", - 1, 1, "", this), new Among("issero", - 1, 1, "", this), new Among("ato", - 1, 1, "", this), new Among("ito", - 1, 1, "", this), new Among("uto", - 1, 1, "", this), new Among("avo", - 1, 1, "", this), new Among("evo", - 1, 1, "", this), new Among("ivo
 ", - 1, 1, "", this), new Among("ar", - 1, 1, "", this), new Among("ir", - 1, 1, "", this), new Among("er\u00E0", - 1, 1, "", this), new Among("ir\u00E0", - 1, 1, "", this), new Among("er\u00F2", - 1, 1, "", this), new Among("ir\u00F2", - 1, 1, "", this)};
+        }
+        
+        private Among[] a_0;
+        private Among[] a_1;
+        private Among[] a_2;
+        private Among[] a_3;
+        private Among[] a_4;
+        private Among[] a_5;
+        private Among[] a_6;
+        private Among[] a_7;
+        private static readonly char[] g_v = new char[]{(char) (17), (char) (65), (char) (16), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (128), (char) (128), (char) (8), (char) (2), (char) (1)};
+        private static readonly char[] g_AEIO = new char[]{(char) (17), (char) (65), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (0), (char) (128), (char) (128), (char) (8), (char) (2)};
+        private static readonly char[] g_CG = new char[]{(char) (17)};
+        
+        private int I_p2;
+        private int I_p1;
+        private int I_pV;
+        
+        protected internal virtual void  copy_from(ItalianStemmer other)
+        {
+            I_p2 = other.I_p2;
+            I_p1 = other.I_p1;
+            I_pV = other.I_pV;
+            base.copy_from(other);
+        }
+        
+        private bool r_prelude()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            // (, line 34
+            // test, line 35
+            v_1 = cursor;
+            // repeat, line 35
+            while (true)
+            {
+                v_2 = cursor;
+                do 
+                {
+                    // (, line 35
+                    // [, line 36
+                    bra = cursor;
+                    // substring, line 36
+                    among_var = find_among(a_0, 7);
+                    if (among_var == 0)
+                    {
+                        goto lab1_brk;
+                    }
+                    // ], line 36
+                    ket = cursor;
+                    switch (among_var)
+                    {
+                        
+                        case 0: 
+                            goto lab1_brk;
+                        
+                        case 1: 
+                            // (, line 37
+                            // <-, line 37
+                            slice_from("\u00E0");
+                            break;
+                        
+                        case 2: 
+                            // (, line 38
+                            // <-, line 38
+                            slice_from("\u00E8");
+                            break;
+                        
+                        case 3: 
+                            // (, line 39
+                            // <-, line 39
+                            slice_from("\u00EC");
+                            break;
+                        
+                        case 4: 
+                            // (, line 40
+                            // <-, line 40
+                            slice_from("\u00F2");
+                            break;
+                        
+                        case 5: 
+                            // (, line 41
+                            // <-, line 41
+                            slice_from("\u00F9");
+                            break;
+                        
+                        case 6: 
+                            // (, line 42
+                            // <-, line 42
+                            slice_from("qU");
+                            break;
+                        
+                        case 7: 
+                            // (, line 43
+                            // next, line 43
+                            if (cursor >= limit)
+                            {
+                                goto lab1_brk;
+                            }
+                            cursor++;
+                            break;
+                        }
+                    goto replab0;
+                }
+                while (false);
 
 lab1_brk: ;
-				
-				cursor = v_2;
-				goto replab0_brk;
+                
+                cursor = v_2;
+                goto replab0_brk;
 
 replab0: ;
-			}
+            }
 
 replab0_brk: ;
-			
-			cursor = v_1;
-			// repeat, line 46
-			while (true)
-			{
-				v_3 = cursor;
-				do 
-				{
-					// goto, line 46
-					while (true)
-					{
-						v_4 = cursor;
-						do 
-						{
-							// (, line 46
-							if (!(in_grouping(g_v, 97, 249)))
-							{
-								goto lab5_brk;
-							}
-							// [, line 47
-							bra = cursor;
-							// or, line 47
-							do 
-							{
-								v_5 = cursor;
-								do 
-								{
-									// (, line 47
-									// literal, line 47
-									if (!(eq_s(1, "u")))
-									{
-										goto lab7_brk;
-									}
-									// ], line 47
-									ket = cursor;
-									if (!(in_grouping(g_v, 97, 249)))
-									{
-										goto lab7_brk;
-									}
-									// <-, line 47
-									slice_from("U");
-									goto lab6_brk;
-								}
-								while (false);
+            
+            cursor = v_1;
+            // repeat, line 46
+            while (true)
+            {
+                v_3 = cursor;
+                do 
+                {
+                    // goto, line 46
+                    while (true)
+                    {
+                        v_4 = cursor;
+                        do 
+                        {
+                            // (, line 46
+                            if (!(in_grouping(g_v, 97, 249)))
+                            {
+                                goto lab5_brk;
+                            }
+                            // [, line 47
+                            bra = cursor;
+                            // or, line 47
+                            do 
+                            {
+                                v_5 = cursor;
+                                do 
+                                {
+                                    // (, line 47
+                                    // literal, line 47
+                                    if (!(eq_s(1, "u")))
+                                    {
+                                        goto lab7_brk;
+                                    }
+                                    // ], line 47
+                                    ket = cursor;
+                                    if (!(in_grouping(g_v, 97, 249)))
+                                    {
+                                        goto lab7_brk;
+                                    }
+                                    // <-, line 47
+                                    slice_from("U");
+                                    goto lab6_brk;
+                                }
+                                while (false);
 
 lab7_brk: ;
-								
-								cursor = v_5;
-								// (, line 48
-								// literal, line 48
-								if (!(eq_s(1, "i")))
-								{
-									goto lab5_brk;
-								}
-								// ], line 48
-								ket = cursor;
-								if (!(in_grouping(g_v, 97, 249)))
-								{
-									goto lab5_brk;
-								}
-								// <-, line 48
-								slice_from("I");
-							}
-							while (false);
+                                
+                                cursor = v_5;
+                                // (, line 48
+                                // literal, line 48
+                                if (!(eq_s(1, "i")))
+                                {
+                                    goto lab5_brk;
+                                }
+                                // ], line 48
+                                ket = cursor;
+                                if (!(in_grouping(g_v, 97, 249)))
+                                {
+                                    goto lab5_brk;
+                                }
+                                // <-, line 48
+                                slice_from("I");
+                            }
+                            while (false);
 
 lab6_brk: ;
-							
-							cursor = v_4;
-							goto golab4_brk;
-						}
-						while (false);
+                            
+                            cursor = v_4;
+                            goto golab4_brk;
+                        }
+                        while (false);
 
 lab5_brk: ;
-						
-						cursor = v_4;
-						if (cursor >= limit)
-						{
-							goto lab3_brk;
-						}
-						cursor++;
-					}
+                        
+                        cursor = v_4;
+                        if (cursor >= limit)
+                        {
+                            goto lab3_brk;
+                        }
+                        cursor++;
+                    }
 
 golab4_brk: ;
-					
-					goto replab2;
-				}
-				while (false);
+                    
+                    goto replab2;
+                }
+                while (false);
 
 lab3_brk: ;
-				
-				cursor = v_3;
-				goto replab2_brk;
+                
+                cursor = v_3;
+                goto replab2_brk;
 
 replab2: ;
-			}
+            }
 
 replab2_brk: ;
-			
-			return true;
-		}
-		
-		private bool r_mark_regions()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_6;
-			int v_8;
-			// (, line 52
-			I_pV = limit;
-			I_p1 = limit;
-			I_p2 = limit;
-			// do, line 58
-			v_1 = cursor;
-			do 
-			{
-				// (, line 58
-				// or, line 60
+            
+            return true;
+        }
+        
+        private bool r_mark_regions()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_6;
+            int v_8;
+            // (, line 52
+            I_pV = limit;
+            I_p1 = limit;
+            I_p2 = limit;
+            // do, line 58
+            v_1 = cursor;
+            do 
+            {
+                // (, line 58
+                // or, line 60
 lab2: 
-				do 
-				{
-					v_2 = cursor;
-					do 
-					{
-						// (, line 59
-						if (!(in_grouping(g_v, 97, 249)))
-						{
-							goto lab2_brk;
-						}
-						// or, line 59
+                do 
+                {
+                    v_2 = cursor;
+                    do 
+                    {
+                        // (, line 59
+                        if (!(in_grouping(g_v, 97, 249)))
+                        {
+                            goto lab2_brk;
+                        }
+                        // or, line 59
 lab4: 
-						do 
-						{
-							v_3 = cursor;
-							do 
-							{
-								// (, line 59
-								if (!(out_grouping(g_v, 97, 249)))
-								{
-									goto lab4_brk;
-								}
-								// gopast, line 59
-								while (true)
-								{
-									do 
-									{
-										if (!(in_grouping(g_v, 97, 249)))
-										{
-											goto lab8_brk;
-										}
-										goto golab5_brk;
-									}
-									while (false);
+                        do 
+                        {
+                            v_3 = cursor;
+                            do 
+                            {
+                                // (, line 59
+                                if (!(out_grouping(g_v, 97, 249)))
+                                {
+                                    goto lab4_brk;
+                                }
+                                // gopast, line 59
+                                while (true)
+                                {
+                                    do 
+                                    {
+                                        if (!(in_grouping(g_v, 97, 249)))
+                                        {
+                                            goto lab8_brk;
+                                        }
+                                        goto golab5_brk;
+                                    }
+                                    while (false);
 
 lab8_brk: ;
-									
-									if (cursor >= limit)
-									{
-										goto lab4_brk;
-									}
-									cursor++;
-								}
+                                    
+                                    if (cursor >= limit)
+                                    {
+                                        goto lab4_brk;
+                                    }
+                                    cursor++;
+                                }
 
 golab5_brk: ;
-								
-								goto lab4_brk;
-							}
-							while (false);
+                                
+                                goto lab4_brk;
+                            }
+                            while (false);
 
 lab4_brk: ;
-							
-							cursor = v_3;
-							// (, line 59
-							if (!(in_grouping(g_v, 97, 249)))
-							{
-								goto lab2_brk;
-							}
-							// gopast, line 59
-							while (true)
-							{
-								do 
-								{
-									if (!(out_grouping(g_v, 97, 249)))
-									{
-										goto lab8_brk;
-									}
-									goto golab7_brk;
-								}
-								while (false);
+                            
+                            cursor = v_3;
+                            // (, line 59
+                            if (!(in_grouping(g_v, 97, 249)))
+                            {
+                                goto lab2_brk;
+                            }
+                            // gopast, line 59
+                            while (true)
+                            {
+                                do 
+                                {
+                                    if (!(out_grouping(g_v, 97, 249)))
+                                    {
+                                        goto lab8_brk;
+                                    }
+                                    goto golab7_brk;
+                                }
+                                while (false);
 
 lab8_brk: ;
-								
-								if (cursor >= limit)
-								{
-									goto lab2_brk;
-								}
-								cursor++;
-							}
+                                
+                                if (cursor >= limit)
+                                {
+                                    goto lab2_brk;
+                                }
+                                cursor++;
+                            }
 
 golab7_brk: ;
-							
-						}
-						while (false);
-						goto lab2_brk;
-					}
-					while (false);
+                            
+                        }
+                        while (false);
+                        goto lab2_brk;
+                    }
+                    while (false);
 
 lab2_brk: ;
-					
-					cursor = v_2;
-					// (, line 61
-					if (!(out_grouping(g_v, 97, 249)))
-					{
-						goto lab0_brk;
-					}
-					// or, line 61
-					do 
-					{
-						v_6 = cursor;
-						do 
-						{
-							// (, line 61
-							if (!(out_grouping(g_v, 97, 249)))
-							{
-								goto lab10_brk;
-							}
-							// gopast, line 61
-							while (true)
-							{
-								do 
-								{
-									if (!(in_grouping(g_v, 97, 249)))
-									{
-										goto lab12_brk;
-									}
-									goto golab11_brk;
-								}
-								while (false);
+                    
+                    cursor = v_2;
+                    // (, line 61
+                    if (!(out_grouping(g_v, 97, 249)))
+                    {
+                        goto lab0_brk;
+                    }
+                    // or, line 61
+                    do 
+                    {
+                        v_6 = cursor;
+                        do 
+                        {
+                            // (, line 61
+                            if (!(out_grouping(g_v, 97, 249)))
+                            {
+                                goto lab10_brk;
+                            }
+                            // gopast, line 61
+                            while (true)
+                            {
+                                do 
+                                {
+                                    if (!(in_grouping(g_v, 97, 249)))
+                                    {
+                                        goto lab12_brk;
+                                    }
+                                    goto golab11_brk;
+                                }
+                                while (false);
 
 lab12_brk: ;
-								
-								if (cursor >= limit)
-								{
-									goto lab10_brk;
-								}
-								cursor++;
-							}
+                                
+                                if (cursor >= limit)
+                                {
+                                    goto lab10_brk;
+                                }
+                                cursor++;
+                            }
 
 golab11_brk: ;
-							
-							goto lab9_brk;
-						}
-						while (false);
+                            
+                            goto lab9_brk;
+                        }
+                        while (false);
 
 lab10_brk: ;
-						
-						cursor = v_6;
-						// (, line 61
-						if (!(in_grouping(g_v, 97, 249)))
-						{
-							goto lab0_brk;
-						}
-						// next, line 61
-						if (cursor >= limit)
-						{
-							goto lab0_brk;
-						}
-						cursor++;
-					}
-					while (false);
+                        
+                        cursor = v_6;
+                        // (, line 61
+                        if (!(in_grouping(g_v, 97, 249)))
+                        {
+                            goto lab0_brk;
+                        }
+                        // next, line 61
+                        if (cursor >= limit)
+                        {
+                            goto lab0_brk;
+                        }
+                        cursor++;
+                    }
+                    while (false);
 
 lab9_brk: ;
-					
-				}
-				while (false);
-				// setmark pV, line 62
-				I_pV = cursor;
-			}
-			while (false);
+                    
+                }
+                while (false);
+                // setmark pV, line 62
+                I_pV = cursor;
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = v_1;
-			// do, line 64
-			v_8 = cursor;
-			do 
-			{
-				// (, line 64
-				// gopast, line 65
-				while (true)
-				{
-					do 
-					{
-						if (!(in_grouping(g_v, 97, 249)))
-						{
-							goto lab15_brk;
-						}
-						goto golab14_brk;
-					}
-					while (false);
+            
+            cursor = v_1;
+            // do, line 64
+            v_8 = cursor;
+            do 
+            {
+                // (, line 64
+                // gopast, line 65
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(in_grouping(g_v, 97, 249)))
+                        {
+                            goto lab15_brk;
+                        }
+                        goto golab14_brk;
+                    }
+                    while (false);
 
 lab15_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab13_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab13_brk;
+                    }
+                    cursor++;
+                }
 
 golab14_brk: ;
-				
-				// gopast, line 65
-				while (true)
-				{
-					do 
-					{
-						if (!(out_grouping(g_v, 97, 249)))
-						{
-							goto lab17_brk;
-						}
-						goto golab16_brk;
-					}
-					while (false);
+                
+                // gopast, line 65
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(out_grouping(g_v, 97, 249)))
+                        {
+                            goto lab17_brk;
+                        }
+                        goto golab16_brk;
+                    }
+                    while (false);
 
 lab17_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab13_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab13_brk;
+                    }
+                    cursor++;
+                }
 
 golab16_brk: ;
-				
-				// setmark p1, line 65
-				I_p1 = cursor;
-				// gopast, line 66
-				while (true)
-				{
-					do 
-					{
-						if (!(in_grouping(g_v, 97, 249)))
-						{
-							goto lab19_brk;
-						}
-						goto golab18_brk;
-					}
-					while (false);
+                
+                // setmark p1, line 65
+                I_p1 = cursor;
+                // gopast, line 66
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(in_grouping(g_v, 97, 249)))
+                        {
+                            goto lab19_brk;
+                        }
+                        goto golab18_brk;
+                    }
+                    while (false);
 
 lab19_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab13_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab13_brk;
+                    }
+                    cursor++;
+                }
 
 golab18_brk: ;
-				
-				// gopast, line 66
-				while (true)
-				{
-					do 
-					{
-						if (!(out_grouping(g_v, 97, 249)))
-						{
-							goto lab21_brk;
-						}
-						goto golab20_brk;
-					}
-					while (false);
+                
+                // gopast, line 66
+                while (true)
+                {
+                    do 
+                    {
+                        if (!(out_grouping(g_v, 97, 249)))
+                        {
+                            goto lab21_brk;
+                        }
+                        goto golab20_brk;
+                    }
+                    while (false);
 
 lab21_brk: ;
-					
-					if (cursor >= limit)
-					{
-						goto lab13_brk;
-					}
-					cursor++;
-				}
+                    
+                    if (cursor >= limit)
+                    {
+                        goto lab13_brk;
+                    }
+                    cursor++;
+                }
 
 golab20_brk: ;
-				
-				// setmark p2, line 66
-				I_p2 = cursor;
-			}
-			while (false);
+                
+                // setmark p2, line 66
+                I_p2 = cursor;
+            }
+            while (false);
 
 lab13_brk: ;
-			
-			cursor = v_8;
-			return true;
-		}
-		
-		private bool r_postlude()
-		{
-			int among_var;
-			int v_1;
-			// repeat, line 70
-			while (true)
-			{
-				v_1 = cursor;
-				do 
-				{
-					// (, line 70
-					// [, line 72
-					bra = cursor;
-					// substring, line 72
-					among_var = find_among(a_1, 3);
-					if (among_var == 0)
-					{
-						goto lab11_brk;
-					}
-					// ], line 72
-					ket = cursor;
-					switch (among_var)
-					{
-						
-						case 0: 
-							goto lab11_brk;
-						
-						case 1: 
-							// (, line 73
-							// <-, line 73
-							slice_from("i");
-							break;
-						
-						case 2: 
-							// (, line 74
-							// <-, line 74
-							slice_from("u");
-							break;
-						
-						case 3: 
-							// (, line 75
-							// next, line 75
-							if (cursor >= limit)
-							{
-								goto lab11_brk;
-							}
-							cursor++;
-							break;
-						}
-					goto replab1;
-				}
-				while (false);
+            
+            cursor = v_8;
+            return true;
+        }
+        
+        private bool r_postlude()
+        {
+            int among_var;
+            int v_1;
+            // repeat, line 70
+            while (true)
+            {
+                v_1 = cursor;
+                do 
+                {
+                    // (, line 70
+                    // [, line 72
+                    bra = cursor;
+                    // substring, line 72
+                    among_var = find_among(a_1, 3);
+                    if (among_var == 0)
+                    {
+                        goto lab11_brk;
+                    }
+                    // ], line 72
+                    ket = cursor;
+                    switch (among_var)
+                    {
+                        
+                        case 0: 
+                            goto lab11_brk;
+                        
+                        case 1: 
+                            // (, line 73
+                            // <-, line 73
+                            slice_from("i");
+                            break;
+                        
+                        case 2: 
+                            // (, line 74
+                            // <-, line 74
+                            slice_from("u");
+                            break;
+                        
+                        case 3: 
+                            // (, line 75
+                            // next, line 75
+                            if (cursor >= limit)
+                            {
+                                goto lab11_brk;
+                            }
+                            cursor++;
+                            break;
+                        }
+                    goto replab1;
+                }
+                while (false);
 
 lab11_brk: ;
-				
-				cursor = v_1;
-				goto replab1_brk;
+                
+                cursor = v_1;
+                goto replab1_brk;
 
 replab1: ;
-			}
+            }
 
 replab1_brk: ;
-			
-			return true;
-		}
-		
-		private bool r_RV()
-		{
-			if (!(I_pV <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_R1()
-		{
-			if (!(I_p1 <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_R2()
-		{
-			if (!(I_p2 <= cursor))
-			{
-				return false;
-			}
-			return true;
-		}
-		
-		private bool r_attached_pronoun()
-		{
-			int among_var;
-			// (, line 86
-			// [, line 87
-			ket = cursor;
-			// substring, line 87
-			if (find_among_b(a_2, 37) == 0)
-			{
-				return false;
-			}
-			// ], line 87
-			bra = cursor;
-			// among, line 97
-			among_var = find_among_b(a_3, 5);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// (, line 97
-			// call RV, line 97
-			if (!r_RV())
-			{
-				return false;
-			}
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 98
-					// delete, line 98
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 99
-					// <-, line 99
-					slice_from("e");
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_standard_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			// (, line 103
-			// [, line 104
-			ket = cursor;
-			// substring, line 104
-			among_var = find_among_b(a_6, 49);
-			if (among_var == 0)
-			{
-				return false;
-			}
-			// ], line 104
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					return false;
-				
-				case 1: 
-					// (, line 110
-					// call R2, line 110
-					if (!r_R2())
-					{
-						return false;
-					}
-					// delete, line 110
-					slice_del();
-					break;
-				
-				case 2: 
-					// (, line 112
-					// call R2, line 112
-					if (!r_R2())
-					{
-						return false;
-					}
-					// delete, line 112
-					slice_del();
-					// try, line 113
-					v_1 = limit - cursor;
-					do 
-					{
-						// (, line 113
-						// [, line 113
-						ket = cursor;
-						// literal, line 113
-						if (!(eq_s_b(2, "ic")))
-						{
-							cursor = limit - v_1;
-							goto lab0_brk;
-						}
-						// ], line 113
-						bra = cursor;
-						// call R2, line 113
-						if (!r_R2())
-						{
-							cursor = limit - v_1;
-							goto lab0_brk;
-						}
-						// delete, line 113
-						slice_del();
-					}
-					while (false);
+            
+            return true;
+        }
+        
+        private bool r_RV()
+        {
+            if (!(I_pV <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_R1()
+        {
+            if (!(I_p1 <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_R2()
+        {
+            if (!(I_p2 <= cursor))
+            {
+                return false;
+            }
+            return true;
+        }
+        
+        private bool r_attached_pronoun()
+        {
+            int among_var;
+            // (, line 86
+            // [, line 87
+            ket = cursor;
+            // substring, line 87
+            if (find_among_b(a_2, 37) == 0)
+            {
+                return false;
+            }
+            // ], line 87
+            bra = cursor;
+            // among, line 97
+            among_var = find_among_b(a_3, 5);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // (, line 97
+            // call RV, line 97
+            if (!r_RV())
+            {
+                return false;
+            }
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 98
+                    // delete, line 98
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 99
+                    // <-, line 99
+                    slice_from("e");
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_standard_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            // (, line 103
+            // [, line 104
+            ket = cursor;
+            // substring, line 104
+            among_var = find_among_b(a_6, 49);
+            if (among_var == 0)
+            {
+                return false;
+            }
+            // ], line 104
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    return false;
+                
+                case 1: 
+                    // (, line 110
+                    // call R2, line 110
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // delete, line 110
+                    slice_del();
+                    break;
+                
+                case 2: 
+                    // (, line 112
+                    // call R2, line 112
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // delete, line 112
+                    slice_del();
+                    // try, line 113
+                    v_1 = limit - cursor;
+                    do 
+                    {
+                        // (, line 113
+                        // [, line 113
+                        ket = cursor;
+                        // literal, line 113
+                        if (!(eq_s_b(2, "ic")))
+                        {
+                            cursor = limit - v_1;
+                            goto lab0_brk;
+                        }
+                        // ], line 113
+                        bra = cursor;
+                        // call R2, line 113
+                        if (!r_R2())
+                        {
+                            cursor = limit - v_1;
+                            goto lab0_brk;
+                        }
+                        // delete, line 113
+                        slice_del();
+                    }
+                    while (false);
 
 lab0_brk: ;
-					
-					break;
-				
-				case 3: 
-					// (, line 116
-					// call R2, line 116
-					if (!r_R2())
-					{
-						return false;
-					}
-					// <-, line 116
-					slice_from("log");
-					break;
-				
-				case 4: 
-					// (, line 118
-					// call R2, line 118
-					if (!r_R2())
-					{
-						return false;
-					}
-					// <-, line 118
-					slice_from("u");
-					break;
-				
-				case 5: 
-					// (, line 120
-					// call R2, line 120
-					if (!r_R2())
-					{
-						return false;
-					}
-					// <-, line 120
-					slice_from("ente");
-					break;
-				
-				case 6: 
-					// (, line 122
-					// call RV, line 122
-					if (!r_RV())
-					{
-						return false;
-					}
-					// delete, line 122
-					slice_del();
-					break;
-				
-				case 7: 
-					// (, line 123
-					// call R1, line 124
-					if (!r_R1())
-					{
-						return false;
-					}
-					// delete, line 124
-					slice_del();
-					// try, line 125
-					v_2 = limit - cursor;
-					do 
-					{
-						// (, line 125
-						// [, line 126
-						ket = cursor;
-						// substring, line 126
-						among_var = find_among_b(a_4, 4);
-						if (among_var == 0)
-						{
-							cursor = limit - v_2;
-							goto lab1_brk;
-						}
-						// ], line 126
-						bra = cursor;
-						// call R2, line 126
-						if (!r_R2())
-						{
-							cursor = limit - v_2;
-							goto lab1_brk;
-						}
-						// delete, line 126
-						slice_del();
-						switch (among_var)
-						{
-							
-							case 0: 
-								cursor = limit - v_2;
-								goto lab1_brk;
-							
-							case 1: 
-								// (, line 127
-								// [, line 127
-								ket = cursor;
-								// literal, line 127
-								if (!(eq_s_b(2, "at")))
-								{
-									cursor = limit - v_2;
-									goto lab1_brk;
-								}
-								// ], line 127
-								bra = cursor;
-								// call R2, line 127
-								if (!r_R2())
-								{
-									cursor = limit - v_2;
-									goto lab1_brk;
-								}
-								// delete, line 127
-								slice_del();
-								break;
-							}
-					}
-					while (false);
+                    
+                    break;
+                
+                case 3: 
+                    // (, line 116
+                    // call R2, line 116
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // <-, line 116
+                    slice_from("log");
+                    break;
+                
+                case 4: 
+                    // (, line 118
+                    // call R2, line 118
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // <-, line 118
+                    slice_from("u");
+                    break;
+                
+                case 5: 
+                    // (, line 120
+                    // call R2, line 120
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // <-, line 120
+                    slice_from("ente");
+                    break;
+                
+                case 6: 
+                    // (, line 122
+                    // call RV, line 122
+                    if (!r_RV())
+                    {
+                        return false;
+                    }
+                    // delete, line 122
+                    slice_del();
+                    break;
+                
+                case 7: 
+                    // (, line 123
+                    // call R1, line 124
+                    if (!r_R1())
+                    {
+                        return false;
+                    }
+                    // delete, line 124
+                    slice_del();
+                    // try, line 125
+                    v_2 = limit - cursor;
+                    do 
+                    {
+                        // (, line 125
+                        // [, line 126
+                        ket = cursor;
+                        // substring, line 126
+                        among_var = find_among_b(a_4, 4);
+                        if (among_var == 0)
+                        {
+                            cursor = limit - v_2;
+                            goto lab1_brk;
+                        }
+                        // ], line 126
+                        bra = cursor;
+                        // call R2, line 126
+                        if (!r_R2())
+                        {
+                            cursor = limit - v_2;
+                            goto lab1_brk;
+                        }
+                        // delete, line 126
+                        slice_del();
+                        switch (among_var)
+                        {
+                            
+                            case 0: 
+                                cursor = limit - v_2;
+                                goto lab1_brk;
+                            
+                            case 1: 
+                                // (, line 127
+                                // [, line 127
+                                ket = cursor;
+                                // literal, line 127
+                                if (!(eq_s_b(2, "at")))
+                                {
+                                    cursor = limit - v_2;
+                                    goto lab1_brk;
+                                }
+                                // ], line 127
+                                bra = cursor;
+                                // call R2, line 127
+                                if (!r_R2())
+                                {
+                                    cursor = limit - v_2;
+                                    goto lab1_brk;
+                                }
+                                // delete, line 127
+                                slice_del();
+                                break;
+                            }
+                    }
+                    while (false);
 
 lab1_brk: ;
-					
-					break;
-				
-				case 8: 
-					// (, line 132
-					// call R2, line 133
-					if (!r_R2())
-					{
-						return false;
-					}
-					// delete, line 133
-					slice_del();
-					// try, line 134
-					v_3 = limit - cursor;
-					do 
-					{
-						// (, line 134
-						// [, line 135
-						ket = cursor;
-						// substring, line 135
-						among_var = find_among_b(a_5, 3);
-						if (among_var == 0)
-						{
-							cursor = limit - v_3;
-							goto lab2_brk;
-						}
-						// ], line 135
-						bra = cursor;
-						switch (among_var)
-						{
-							
-							case 0: 
-								cursor = limit - v_3;
-								goto lab2_brk;
-							
-							case 1: 
-								// (, line 136
-								// call R2, line 136
-								if (!r_R2())
-								{
-									cursor = limit - v_3;
-									goto lab2_brk;
-								}
-								// delete, line 136
-								slice_del();
-								break;
-							}
-					}
-					while (false);
+                    
+                    break;
+                
+                case 8: 
+                    // (, line 132
+                    // call R2, line 133
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // delete, line 133
+                    slice_del();
+                    // try, line 134
+                    v_3 = limit - cursor;
+                    do 
+                    {
+                        // (, line 134
+                        // [, line 135
+                        ket = cursor;
+                        // substring, line 135
+                        among_var = find_among_b(a_5, 3);
+                        if (among_var == 0)
+                        {
+                            cursor = limit - v_3;
+                            goto lab2_brk;
+                        }
+                        // ], line 135
+                        bra = cursor;
+                        switch (among_var)
+                        {
+                            
+                            case 0: 
+                                cursor = limit - v_3;
+                                goto lab2_brk;
+                            
+                            case 1: 
+                                // (, line 136
+                                // call R2, line 136
+                                if (!r_R2())
+                                {
+                                    cursor = limit - v_3;
+                                    goto lab2_brk;
+                                }
+                                // delete, line 136
+                                slice_del();
+                                break;
+                            }
+                    }
+                    while (false);
 
 lab2_brk: ;
-					
-					break;
-				
-				case 9: 
-					// (, line 140
-					// call R2, line 141
-					if (!r_R2())
-					{
-						return false;
-					}
-					// delete, line 141
-					slice_del();
-					// try, line 142
-					v_4 = limit - cursor;
-					do 
-					{
-						// (, line 142
-						// [, line 142
-						ket = cursor;
-						// literal, line 142
-						if (!(eq_s_b(2, "at")))
-						{
-							cursor = limit - v_4;
-							goto lab3_brk;
-						}
-						// ], line 142
-						bra = cursor;
-						// call R2, line 142
-						if (!r_R2())
-						{
-							cursor = limit - v_4;
-							goto lab3_brk;
-						}
-						// delete, line 142
-						slice_del();
-						// [, line 142
-						ket = cursor;
-						// literal, line 142
-						if (!(eq_s_b(2, "ic")))
-						{
-							cursor = limit - v_4;
-							goto lab3_brk;
-						}
-						// ], line 142
-						bra = cursor;
-						// call R2, line 142
-						if (!r_R2())
-						{
-							cursor = limit - v_4;
-							goto lab3_brk;
-						}
-						// delete, line 142
-						slice_del();
-					}
-					while (false);
+                    
+                    break;
+                
+                case 9: 
+                    // (, line 140
+                    // call R2, line 141
+                    if (!r_R2())
+                    {
+                        return false;
+                    }
+                    // delete, line 141
+                    slice_del();
+                    // try, line 142
+                    v_4 = limit - cursor;
+                    do 
+                    {
+                        // (, line 142
+                        // [, line 142
+                        ket = cursor;
+                        // literal, line 142
+                        if (!(eq_s_b(2, "at")))
+                        {
+                            cursor = limit - v_4;
+                            goto lab3_brk;
+                        }
+                        // ], line 142
+                        bra = cursor;
+                        // call R2, line 142
+                        if (!r_R2())
+                        {
+                            cursor = limit - v_4;
+                            goto lab3_brk;
+                        }
+                        // delete, line 142
+                        slice_del();
+                        // [, line 142
+                        ket = cursor;
+                        // literal, line 142
+                        if (!(eq_s_b(2, "ic")))
+                        {
+                            cursor = limit - v_4;
+                            goto lab3_brk;
+                        }
+                        // ], line 142
+                        bra = cursor;
+                        // call R2, line 142
+                        if (!r_R2())
+                        {
+                            cursor = limit - v_4;
+                            goto lab3_brk;
+                        }
+                        // delete, line 142
+                        slice_del();
+                    }
+                    while (false);
 
 lab3_brk: ;
-					
-					break;
-				}
-			return true;
-		}
-		
-		private bool r_verb_suffix()
-		{
-			int among_var;
-			int v_1;
-			int v_2;
-			// setlimit, line 147
-			v_1 = limit - cursor;
-			// tomark, line 147
-			if (cursor < I_pV)
-			{
-				return false;
-			}
-			cursor = I_pV;
-			v_2 = limit_backward;
-			limit_backward = cursor;
-			cursor = limit - v_1;
-			// (, line 147
-			// [, line 148
-			ket = cursor;
-			// substring, line 148
-			among_var = find_among_b(a_7, 87);
-			if (among_var == 0)
-			{
-				limit_backward = v_2;
-				return false;
-			}
-			// ], line 148
-			bra = cursor;
-			switch (among_var)
-			{
-				
-				case 0: 
-					limit_backward = v_2;
-					return false;
-				
-				case 1: 
-					// (, line 162
-					// delete, line 162
-					slice_del();
-					break;
-				}
-			limit_backward = v_2;
-			return true;
-		}
-		
-		private bool r_vowel_suffix()
-		{
-			int v_1;
-			int v_2;
-			// (, line 169
-			// try, line 170
-			v_1 = limit - cursor;
-			do 
-			{
-				// (, line 170
-				// [, line 171
-				ket = cursor;
-				if (!(in_grouping_b(g_AEIO, 97, 242)))
-				{
-					cursor = limit - v_1;
-					goto lab0_brk;
-				}
-				// ], line 171
-				bra = cursor;
-				// call RV, line 171
-				if (!r_RV())
-				{
-					cursor = limit - v_1;
-					goto lab0_brk;
-				}
-				// delete, line 171
-				slice_del();
-				// [, line 172
-				ket = cursor;
-				// literal, line 172
-				if (!(eq_s_b(1, "i")))
-				{
-					cursor = limit - v_1;
-					goto lab0_brk;
-				}
-				// ], line 172
-				bra = cursor;
-				// call RV, line 172
-				if (!r_RV())
-				{
-					cursor = limit - v_1;
-					goto lab0_brk;
-				}
-				// delete, line 172
-				slice_del();
-			}
-			while (false);
+                    
+                    break;
+                }
+            return true;
+        }
+        
+        private bool r_verb_suffix()
+        {
+            int among_var;
+            int v_1;
+            int v_2;
+            // setlimit, line 147
+            v_1 = limit - cursor;
+            // tomark, line 147
+            if (cursor < I_pV)
+            {
+                return false;
+            }
+            cursor = I_pV;
+            v_2 = limit_backward;
+            limit_backward = cursor;
+            cursor = limit - v_1;
+            // (, line 147
+            // [, line 148
+            ket = cursor;
+            // substring, line 148
+            among_var = find_among_b(a_7, 87);
+            if (among_var == 0)
+            {
+                limit_backward = v_2;
+                return false;
+            }
+            // ], line 148
+            bra = cursor;
+            switch (among_var)
+            {
+                
+                case 0: 
+                    limit_backward = v_2;
+                    return false;
+                
+                case 1: 
+                    // (, line 162
+                    // delete, line 162
+                    slice_del();
+                    break;
+                }
+            limit_backward = v_2;
+            return true;
+        }
+        
+        private bool r_vowel_suffix()
+        {
+            int v_1;
+            int v_2;
+            // (, line 169
+            // try, line 170
+            v_1 = limit - cursor;
+            do 
+            {
+                // (, line 170
+                // [, line 171
+                ket = cursor;
+                if (!(in_grouping_b(g_AEIO, 97, 242)))
+                {
+                    cursor = limit - v_1;
+                    goto lab0_brk;
+                }
+                // ], line 171
+                bra = cursor;
+                // call RV, line 171
+                if (!r_RV())
+                {
+                    cursor = limit - v_1;
+                    goto lab0_brk;
+                }
+                // delete, line 171
+                slice_del();
+                // [, line 172
+                ket = cursor;
+                // literal, line 172
+                if (!(eq_s_b(1, "i")))
+                {
+                    cursor = limit - v_1;
+                    goto lab0_brk;
+                }
+                // ], line 172
+                bra = cursor;
+                // call RV, line 172
+                if (!r_RV())
+                {
+                    cursor = limit - v_1;
+                    goto lab0_brk;
+                }
+                // delete, line 172
+                slice_del();
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			// try, line 174
-			v_2 = limit - cursor;
-			do 
-			{
-				// (, line 174
-				// [, line 175
-				ket = cursor;
-				// literal, line 175
-				if (!(eq_s_b(1, "h")))
-				{
-					cursor = limit - v_2;
-					goto lab1_brk;
-				}
-				// ], line 175
-				bra = cursor;
-				if (!(in_grouping_b(g_CG, 99, 103)))
-				{
-					cursor = limit - v_2;
-					goto lab1_brk;
-				}
-				// call RV, line 175
-				if (!r_RV())
-				{
-					cursor = limit - v_2;
-					goto lab1_brk;
-				}
-				// delete, line 175
-				slice_del();
-			}
-			while (false);
+            
+            // try, line 174
+            v_2 = limit - cursor;
+            do 
+            {
+                // (, line 174
+                // [, line 175
+                ket = cursor;
+                // literal, line 175
+                if (!(eq_s_b(1, "h")))
+                {
+                    cursor = limit - v_2;
+                    goto lab1_brk;
+                }
+                // ], line 175
+                bra = cursor;
+                if (!(in_grouping_b(g_CG, 99, 103)))
+                {
+                    cursor = limit - v_2;
+                    goto lab1_brk;
+                }
+                // call RV, line 175
+                if (!r_RV())
+                {
+                    cursor = limit - v_2;
+                    goto lab1_brk;
+                }
+                // delete, line 175
+                slice_del();
+            }
+            while (false);
 
 lab1_brk: ;
-			
-			return true;
-		}
-		
-		public override bool Stem()
-		{
-			int v_1;
-			int v_2;
-			int v_3;
-			int v_4;
-			int v_5;
-			int v_6;
-			int v_7;
-			// (, line 180
-			// do, line 181
-			v_1 = cursor;
-			do 
-			{
-				// call prelude, line 181
-				if (!r_prelude())
-				{
-					goto lab0_brk;
-				}
-			}
-			while (false);
+            
+            return true;
+        }
+        
+        public override bool Stem()
+        {
+            int v_1;
+            int v_2;
+            int v_3;
+            int v_4;
+            int v_5;
+            int v_6;
+            int v_7;
+            // (, line 180
+            // do, line 181
+            v_1 = cursor;
+            do 
+            {
+                // call prelude, line 181
+                if (!r_prelude())
+                {
+                    goto lab0_brk;
+                }
+            }
+            while (false);
 
 lab0_brk: ;
-			
-			cursor = v_1;
-			// do, line 182
-			v_2 = cursor;
-			do 
-			{
-				// call mark_regions, line 182
-				if (!r_mark_regions())
-				{
-					goto lab1_brk;
-				}
-			}
-			while (false);
+            
+            cursor = v_1;
+            // do, line 182
+            v_2 = cursor;
+            do 
+            {
+                // call mark_regions, line 182
+                if (!r_mark_regions())
+                {
+                    goto lab1_brk;
+                }
+            }
+            while (false);
 
 lab1_brk: ;
-			
-			cursor = v_2;
-			// backwards, line 183
-			limit_backward = cursor; cursor = limit;
-			// (, line 183
-			// do, line 184
-			v_3 = limit - cursor;
-			do 
-			{
-				// call attached_pronoun, line 184
-				if (!r_attached_pronoun())
-				{
-					goto lab2_brk;
-				}
-			}
-			while (false);
+            
+            cursor = v_2;
+            // backwards, line 183
+            limit_backward = cursor; cursor = limit;
+            // (, line 183
+            // do, line 184
+            v_3 = limit - cursor;
+            do 
+            {
+                // call attached_pronoun, line 184
+                if (!r_attached_pronoun())
+                {
+                    goto lab2_brk;
+                }
+            }
+            while (false);
 
 lab2_brk: ;
-			
-			cursor = limit - v_3;
-			// do, line 185
-			v_4 = limit - cursor;
-			do 
-			{
-				// (, line 185
-				// or, line 185
-				do 
-				{
-					v_5 = limit - cursor;
-					do 
-					{
-						// call standard_suffix, line 185
-						if (!r_standard_suffix())
-						{
-							goto lab5_brk;
-						}
-						goto lab4_brk;
-					}
-					while (false);
+            
+            cursor = limit - v_3;
+            // do, line 185
+            v_4 = limit - cursor;
+            do 
+            {
+                // (, line 185
+                // or, line 185
+                do 
+                {
+                    v_5 = limit - cursor;
+                    do 
+                    {
+                        // call standard_suffix, line 185
+                        if (!r_standard_suffix())
+                        {
+                            goto lab5_brk;
+                        }
+                        goto lab4_brk;
+                    }
+                    while (false);
 
 lab5_brk: ;
-					
-					cursor = limit - v_5;
-					// call verb_suffix, line 185
-					if (!r_verb_suffix())
-					{
-						goto lab3_brk;
-					}
-				}
-				while (false);
+                    
+                    cursor = limit - v_5;
+                    // call verb_suffix, line 185
+                    if (!r_verb_suffix())
+                    {
+                        goto lab3_brk;
+                    }
+                }
+                while (false);
 
 lab4_brk: ;
-				
-			}
-			while (false);
+                
+            }
+            while (false);
 
 lab3_brk: ;
 
-			cursor = limit - v_4;
-			// do, line 186
-			v_6 = limit - cursor;
-			do 
-			{
-				// call vowel_suffix, line 186
-				if (!r_vowel_suffix())
-				{
-					goto lab6_brk;
-				}
-			}
-			while (false);
+            cursor = limit - v_4;
+            // do, line 186
+            v_6 = limit - cursor;
+            do 
+            {
+                // call vowel_suffix, line 186
+                if (!r_vowel_suffix())
+                {
+                    goto lab6_brk;
+                }
+            }
+            while (false);
 
 lab6_brk: ;
-			
-			cursor = limit - v_6;
-			cursor = limit_backward; // do, line 188
-			v_7 = cursor;
-			do 
-			{
-				// call postlude, line 188
-				if (!r_postlude())
-				{
-					goto lab7_brk;
-				}
-			}
-			while (false);
+            
+            cursor = limit - v_6;
+            cursor = limit_backward; // do, line 188
+            v_7 = cursor;
+            do 
+            {
+                // call postlude, line 188
+                if (!r_postlude())
+                {
+                    goto lab7_brk;
+                }
+            }
+            while (false);
 
 lab7_brk: ;
-			
-			cursor = v_7;
-			return true;
-		}
-	}
+            
+            cursor = v_7;
+            return true;
+        }
+    }
 }
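
A note on the Snowball-generated stemmer above: r_mark_regions sets the marks I_pV, I_p1 and I_p2, and every suffix rule is gated through r_RV, r_R1 and r_R2, which only accept a candidate suffix when the cursor has not moved left of the corresponding mark. The definitions come from the Snowball documentation: R1 is the region after the first non-vowel that follows a vowel, and R2 is the same rule applied again inside R1. Below is a minimal sketch of that definition using the documentation's standard English example "beautiful" (R1 = "iful", R2 = "ul"); it is illustrative only and uses a plain vowel test rather than this port's Italian group g_v (codes 97..249, which includes the accented vowels).

    using System;

    static class SnowballRegionsSketch
    {
        // R1: region after the first non-vowel that follows a vowel.
        // R2: the same rule applied again, starting inside R1.
        // Returns word.Length (an empty region) when no such pair exists.
        static int RegionStart(string word, int start, Func<char, bool> isVowel)
        {
            for (int i = start; i + 1 < word.Length; i++)
                if (isVowel(word[i]) && !isVowel(word[i + 1]))
                    return i + 2;
            return word.Length;
        }

        static void Main()
        {
            Func<char, bool> vowel = c => "aeiou".IndexOf(c) >= 0;
            int r1 = RegionStart("beautiful", 0, vowel);   // 5, i.e. "iful"
            int r2 = RegionStart("beautiful", r1, vowel);  // 7, i.e. "ul"
            Console.WriteLine("R1=" + r1 + " R2=" + r2);
        }
    }

In the generated code above this collapses to simple bounds checks: r_R2, for instance, amounts to testing I_p2 <= cursor, so a suffix matched backwards from the end of the word is deleted only if it lies wholly inside R2.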


[12/51] [partial] Mass convert mixed tabs to spaces

Posted by cc...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FormatPostingsDocsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FormatPostingsDocsWriter.cs b/src/core/Index/FormatPostingsDocsWriter.cs
index 82a7398..a0d2d5f 100644
--- a/src/core/Index/FormatPostingsDocsWriter.cs
+++ b/src/core/Index/FormatPostingsDocsWriter.cs
@@ -24,105 +24,105 @@ namespace Lucene.Net.Index
     /// <summary>Consumes doc and freq, writing them using the current
     /// index file format 
     /// </summary>
-	sealed class FormatPostingsDocsWriter : FormatPostingsDocsConsumer, IDisposable
-	{
-		
-		internal IndexOutput out_Renamed;
-		internal FormatPostingsTermsWriter parent;
-		internal FormatPostingsPositionsWriter posWriter;
-		internal DefaultSkipListWriter skipListWriter;
-		internal int skipInterval;
-		internal int totalNumDocs;
-		
-		internal bool omitTermFreqAndPositions;
-		internal bool storePayloads;
-		internal long freqStart;
-		internal FieldInfo fieldInfo;
-		
-		internal FormatPostingsDocsWriter(SegmentWriteState state, FormatPostingsTermsWriter parent):base()
-		{
-			this.parent = parent;
-			System.String fileName = IndexFileNames.SegmentFileName(parent.parent.segment, IndexFileNames.FREQ_EXTENSION);
-			state.flushedFiles.Add(fileName);
-			out_Renamed = parent.parent.dir.CreateOutput(fileName);
-			totalNumDocs = parent.parent.totalNumDocs;
-			
-			// TODO: abstraction violation
-			skipInterval = parent.parent.termsOut.skipInterval;
-			skipListWriter = parent.parent.skipListWriter;
-			skipListWriter.SetFreqOutput(out_Renamed);
-			
-			posWriter = new FormatPostingsPositionsWriter(state, this);
-		}
-		
-		internal void  SetField(FieldInfo fieldInfo)
-		{
-			this.fieldInfo = fieldInfo;
-			omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
-			storePayloads = fieldInfo.storePayloads;
-			posWriter.SetField(fieldInfo);
-		}
-		
-		internal int lastDocID;
-		internal int df;
-		
-		/// <summary>Adds a new doc in this term.  If this returns null
-		/// then we just skip consuming positions/payloads. 
-		/// </summary>
-		internal override FormatPostingsPositionsConsumer AddDoc(int docID, int termDocFreq)
-		{
-			
-			int delta = docID - lastDocID;
-			
-			if (docID < 0 || (df > 0 && delta <= 0))
-				throw new CorruptIndexException("docs out of order (" + docID + " <= " + lastDocID + " )");
-			
-			if ((++df % skipInterval) == 0)
-			{
-				// TODO: abstraction violation
-				skipListWriter.SetSkipData(lastDocID, storePayloads, posWriter.lastPayloadLength);
-				skipListWriter.BufferSkip(df);
-			}
-			
-			System.Diagnostics.Debug.Assert(docID < totalNumDocs, "docID=" + docID + " totalNumDocs=" + totalNumDocs);
-			
-			lastDocID = docID;
-			if (omitTermFreqAndPositions)
-				out_Renamed.WriteVInt(delta);
-			else if (1 == termDocFreq)
-				out_Renamed.WriteVInt((delta << 1) | 1);
-			else
-			{
-				out_Renamed.WriteVInt(delta << 1);
-				out_Renamed.WriteVInt(termDocFreq);
-			}
-			
-			return posWriter;
-		}
-		
-		private TermInfo termInfo = new TermInfo(); // minimize consing
-		internal UnicodeUtil.UTF8Result utf8 = new UnicodeUtil.UTF8Result();
-		
-		/// <summary>Called when we are done adding docs to this term </summary>
-		internal override void  Finish()
-		{
-			long skipPointer = skipListWriter.WriteSkip(out_Renamed);
-			
-			// TODO: this is abstraction violation -- we should not
-			// peek up into parents terms encoding format
-			termInfo.Set(df, parent.freqStart, parent.proxStart, (int) (skipPointer - parent.freqStart));
-			
-			// TODO: we could do this incrementally
-			UnicodeUtil.UTF16toUTF8(parent.currentTerm, parent.currentTermStart, utf8);
-			
-			if (df > 0)
-			{
-				parent.termsOut.Add(fieldInfo.number, utf8.result, utf8.length, termInfo);
-			}
-			
-			lastDocID = 0;
-			df = 0;
-		}
+    sealed class FormatPostingsDocsWriter : FormatPostingsDocsConsumer, IDisposable
+    {
+        
+        internal IndexOutput out_Renamed;
+        internal FormatPostingsTermsWriter parent;
+        internal FormatPostingsPositionsWriter posWriter;
+        internal DefaultSkipListWriter skipListWriter;
+        internal int skipInterval;
+        internal int totalNumDocs;
+        
+        internal bool omitTermFreqAndPositions;
+        internal bool storePayloads;
+        internal long freqStart;
+        internal FieldInfo fieldInfo;
+        
+        internal FormatPostingsDocsWriter(SegmentWriteState state, FormatPostingsTermsWriter parent):base()
+        {
+            this.parent = parent;
+            System.String fileName = IndexFileNames.SegmentFileName(parent.parent.segment, IndexFileNames.FREQ_EXTENSION);
+            state.flushedFiles.Add(fileName);
+            out_Renamed = parent.parent.dir.CreateOutput(fileName);
+            totalNumDocs = parent.parent.totalNumDocs;
+            
+            // TODO: abstraction violation
+            skipInterval = parent.parent.termsOut.skipInterval;
+            skipListWriter = parent.parent.skipListWriter;
+            skipListWriter.SetFreqOutput(out_Renamed);
+            
+            posWriter = new FormatPostingsPositionsWriter(state, this);
+        }
+        
+        internal void  SetField(FieldInfo fieldInfo)
+        {
+            this.fieldInfo = fieldInfo;
+            omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
+            storePayloads = fieldInfo.storePayloads;
+            posWriter.SetField(fieldInfo);
+        }
+        
+        internal int lastDocID;
+        internal int df;
+        
+        /// <summary>Adds a new doc in this term.  If this returns null
+        /// then we just skip consuming positions/payloads. 
+        /// </summary>
+        internal override FormatPostingsPositionsConsumer AddDoc(int docID, int termDocFreq)
+        {
+            
+            int delta = docID - lastDocID;
+            
+            if (docID < 0 || (df > 0 && delta <= 0))
+                throw new CorruptIndexException("docs out of order (" + docID + " <= " + lastDocID + " )");
+            
+            if ((++df % skipInterval) == 0)
+            {
+                // TODO: abstraction violation
+                skipListWriter.SetSkipData(lastDocID, storePayloads, posWriter.lastPayloadLength);
+                skipListWriter.BufferSkip(df);
+            }
+            
+            System.Diagnostics.Debug.Assert(docID < totalNumDocs, "docID=" + docID + " totalNumDocs=" + totalNumDocs);
+            
+            lastDocID = docID;
+            if (omitTermFreqAndPositions)
+                out_Renamed.WriteVInt(delta);
+            else if (1 == termDocFreq)
+                out_Renamed.WriteVInt((delta << 1) | 1);
+            else
+            {
+                out_Renamed.WriteVInt(delta << 1);
+                out_Renamed.WriteVInt(termDocFreq);
+            }
+            
+            return posWriter;
+        }
+        
+        private TermInfo termInfo = new TermInfo(); // minimize consing
+        internal UnicodeUtil.UTF8Result utf8 = new UnicodeUtil.UTF8Result();
+        
+        /// <summary>Called when we are done adding docs to this term </summary>
+        internal override void  Finish()
+        {
+            long skipPointer = skipListWriter.WriteSkip(out_Renamed);
+            
+            // TODO: this is abstraction violation -- we should not
+            // peek up into parents terms encoding format
+            termInfo.Set(df, parent.freqStart, parent.proxStart, (int) (skipPointer - parent.freqStart));
+            
+            // TODO: we could do this incrementally
+            UnicodeUtil.UTF16toUTF8(parent.currentTerm, parent.currentTermStart, utf8);
+            
+            if (df > 0)
+            {
+                parent.termsOut.Add(fieldInfo.number, utf8.result, utf8.length, termInfo);
+            }
+            
+            lastDocID = 0;
+            df = 0;
+        }
 
         public void Dispose()
         {
@@ -130,5 +130,5 @@ namespace Lucene.Net.Index
             out_Renamed.Dispose();
             posWriter.Dispose();
         }
-	}
+    }
 }
\ No newline at end of file
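
A note on the encoding in AddDoc above: doc IDs go into the .frq stream delta-encoded against lastDocID. When term frequencies are omitted the raw delta is written as a single VInt; otherwise the delta is shifted left one bit and the low bit flags the common termDocFreq == 1 case, so a second VInt is spent only on frequencies greater than one. A standalone sketch of that packing (the enumerable stands in for the IndexOutput.WriteVInt calls; illustration only, not the writer itself):

    using System;
    using System.Collections.Generic;

    static class FreqPackingSketch
    {
        // Yields the VInt values AddDoc emits for one posting when term
        // frequencies are kept (omitTermFreqAndPositions == false).
        static IEnumerable<int> Encode(int docDelta, int termDocFreq)
        {
            if (termDocFreq == 1)
            {
                yield return (docDelta << 1) | 1;  // low bit set: freq is implicitly 1
            }
            else
            {
                yield return docDelta << 1;        // low bit clear: freq follows
                yield return termDocFreq;
            }
        }

        static void Main()
        {
            // docIDs 3 (freq 1) then 7 (freq 4) give deltas 3 and 4.
            foreach (int v in Encode(3, 1)) Console.Write(v + " ");  // 7
            foreach (int v in Encode(4, 4)) Console.Write(v + " ");  // 8 4
            Console.WriteLine();
        }
    }

Every skipInterval-th document also records an entry through skipListWriter, and Finish later folds the resulting skip pointer into the term's TermInfo.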

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FormatPostingsFieldsConsumer.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FormatPostingsFieldsConsumer.cs b/src/core/Index/FormatPostingsFieldsConsumer.cs
index a3f86ec..1808a33 100644
--- a/src/core/Index/FormatPostingsFieldsConsumer.cs
+++ b/src/core/Index/FormatPostingsFieldsConsumer.cs
@@ -19,21 +19,21 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>Abstract API that consumes terms, doc, freq, prox and
-	/// payloads postings.  Concrete implementations of this
-	/// actually do "something" with the postings (write it into
-	/// the index in a specific format).
-	/// 
-	/// NOTE: this API is experimental and will likely change
-	/// </summary>
-	abstract class FormatPostingsFieldsConsumer
-	{
-		
-		/// <summary>Add a new field </summary>
-		internal abstract FormatPostingsTermsConsumer AddField(FieldInfo field);
-		
-		/// <summary>Called when we are done adding everything. </summary>
-		internal abstract void  Finish();
-	}
+    
+    /// <summary>Abstract API that consumes terms, doc, freq, prox and
+    /// payloads postings.  Concrete implementations of this
+    /// actually do "something" with the postings (write it into
+    /// the index in a specific format).
+    /// 
+    /// NOTE: this API is experimental and will likely change
+    /// </summary>
+    abstract class FormatPostingsFieldsConsumer
+    {
+        
+        /// <summary>Add a new field </summary>
+        internal abstract FormatPostingsTermsConsumer AddField(FieldInfo field);
+        
+        /// <summary>Called when we are done adding everything. </summary>
+        internal abstract void  Finish();
+    }
 }
\ No newline at end of file
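
The four consumer abstractions in this commit form a pipeline: the fields consumer above yields a terms consumer per field, the docs consumer yields a positions consumer per document (see FormatPostingsDocsWriter.AddDoc earlier), and each level's Finish() flushes its state. The terms level is not part of this excerpt, so the sketch below is shape only, with an assumed AddTerm entry point; these interfaces are illustrative stand-ins, not the internal Lucene.Net types:

    // Illustrative shape of the consumer chain. AddTerm is an assumption:
    // FormatPostingsTermsConsumer does not appear in the hunks shown here.
    interface IPositionsConsumer
    {
        void AddPosition(int position, byte[] payload, int offset, int length);
        void Finish();
    }
    interface IDocsConsumer
    {
        IPositionsConsumer AddDoc(int docID, int termDocFreq);
        void Finish();
    }
    interface ITermsConsumer
    {
        IDocsConsumer AddTerm(string text);  // assumed entry point
        void Finish();
    }
    interface IFieldsConsumer
    {
        ITermsConsumer AddField(string field);
        void Finish();
    }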

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FormatPostingsFieldsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FormatPostingsFieldsWriter.cs b/src/core/Index/FormatPostingsFieldsWriter.cs
index 40ef619..577cacf 100644
--- a/src/core/Index/FormatPostingsFieldsWriter.cs
+++ b/src/core/Index/FormatPostingsFieldsWriter.cs
@@ -21,51 +21,51 @@ using Directory = Lucene.Net.Store.Directory;
 
 namespace Lucene.Net.Index
 {
-	
-	sealed class FormatPostingsFieldsWriter:FormatPostingsFieldsConsumer
-	{
-		
-		internal Directory dir;
-		internal System.String segment;
-		internal TermInfosWriter termsOut;
-		internal FieldInfos fieldInfos;
-		internal FormatPostingsTermsWriter termsWriter;
-		internal DefaultSkipListWriter skipListWriter;
-		internal int totalNumDocs;
-		
-		public FormatPostingsFieldsWriter(SegmentWriteState state, FieldInfos fieldInfos):base()
-		{
-			
-			dir = state.directory;
-			segment = state.segmentName;
-			totalNumDocs = state.numDocs;
-			this.fieldInfos = fieldInfos;
-			termsOut = new TermInfosWriter(dir, segment, fieldInfos, state.termIndexInterval);
-			
-			// TODO: this is a nasty abstraction violation (that we
-			// peek down to find freqOut/proxOut) -- we need a
-			// better abstraction here whereby these child consumers
-			// can provide skip data or not
-			skipListWriter = new DefaultSkipListWriter(termsOut.skipInterval, termsOut.maxSkipLevels, totalNumDocs, null, null);
-			
-			state.flushedFiles.Add(state.SegmentFileName(IndexFileNames.TERMS_EXTENSION));
-			state.flushedFiles.Add(state.SegmentFileName(IndexFileNames.TERMS_INDEX_EXTENSION));
-			
-			termsWriter = new FormatPostingsTermsWriter(state, this);
-		}
-		
-		/// <summary>Add a new field </summary>
-		internal override FormatPostingsTermsConsumer AddField(FieldInfo field)
-		{
-			termsWriter.SetField(field);
-			return termsWriter;
-		}
-		
-		/// <summary>Called when we are done adding everything. </summary>
-		internal override void  Finish()
-		{
-			termsOut.Dispose();
-			termsWriter.Dispose();
-		}
-	}
+    
+    sealed class FormatPostingsFieldsWriter:FormatPostingsFieldsConsumer
+    {
+        
+        internal Directory dir;
+        internal System.String segment;
+        internal TermInfosWriter termsOut;
+        internal FieldInfos fieldInfos;
+        internal FormatPostingsTermsWriter termsWriter;
+        internal DefaultSkipListWriter skipListWriter;
+        internal int totalNumDocs;
+        
+        public FormatPostingsFieldsWriter(SegmentWriteState state, FieldInfos fieldInfos):base()
+        {
+            
+            dir = state.directory;
+            segment = state.segmentName;
+            totalNumDocs = state.numDocs;
+            this.fieldInfos = fieldInfos;
+            termsOut = new TermInfosWriter(dir, segment, fieldInfos, state.termIndexInterval);
+            
+            // TODO: this is a nasty abstraction violation (that we
+            // peek down to find freqOut/proxOut) -- we need a
+            // better abstraction here whereby these child consumers
+            // can provide skip data or not
+            skipListWriter = new DefaultSkipListWriter(termsOut.skipInterval, termsOut.maxSkipLevels, totalNumDocs, null, null);
+            
+            state.flushedFiles.Add(state.SegmentFileName(IndexFileNames.TERMS_EXTENSION));
+            state.flushedFiles.Add(state.SegmentFileName(IndexFileNames.TERMS_INDEX_EXTENSION));
+            
+            termsWriter = new FormatPostingsTermsWriter(state, this);
+        }
+        
+        /// <summary>Add a new field </summary>
+        internal override FormatPostingsTermsConsumer AddField(FieldInfo field)
+        {
+            termsWriter.SetField(field);
+            return termsWriter;
+        }
+        
+        /// <summary>Called when we are done adding everything. </summary>
+        internal override void  Finish()
+        {
+            termsOut.Dispose();
+            termsWriter.Dispose();
+        }
+    }
 }
\ No newline at end of file
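
Everything this writer family emits, from doc and position deltas to skip data, is serialized through IndexOutput.WriteVInt. Lucene's VInt is the usual base-128 varint: seven data bits per byte, least-significant group first, with the high bit set on every byte except the last, so small values cost a single byte. A sketch for reference (illustrative only; the real implementation lives in Lucene.Net.Store):

    using System;
    using System.Collections.Generic;

    static class VIntSketch
    {
        // Seven data bits per byte, low group first; the high bit
        // marks that another byte follows.
        static IEnumerable<byte> WriteVInt(uint value)
        {
            while (value > 0x7F)
            {
                yield return (byte)((value & 0x7F) | 0x80);
                value >>= 7;
            }
            yield return (byte)value;
        }

        static void Main()
        {
            foreach (byte b in WriteVInt(5))   Console.Write("0x{0:X2} ", b);  // 0x05
            foreach (byte b in WriteVInt(300)) Console.Write("0x{0:X2} ", b);  // 0xAC 0x02
            Console.WriteLine();
        }
    }

This is why the delta-plus-flag packings in these files pay off: keeping the written values small keeps most postings at one byte each.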

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FormatPostingsPositionsConsumer.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FormatPostingsPositionsConsumer.cs b/src/core/Index/FormatPostingsPositionsConsumer.cs
index f5bc440..2821ef4 100644
--- a/src/core/Index/FormatPostingsPositionsConsumer.cs
+++ b/src/core/Index/FormatPostingsPositionsConsumer.cs
@@ -17,16 +17,16 @@
 
 namespace Lucene.Net.Index
 {
-	
-	abstract class FormatPostingsPositionsConsumer
-	{
-		
-		/// <summary>Add a new position &amp; payload.  If payloadLength > 0
-		/// you must read those bytes from the IndexInput. 
-		/// </summary>
-		internal abstract void  AddPosition(int position, byte[] payload, int payloadOffset, int payloadLength);
-		
-		/// <summary>Called when we are done adding positions &amp; payloads </summary>
-		internal abstract void  Finish();
-	}
+    
+    abstract class FormatPostingsPositionsConsumer
+    {
+        
+        /// <summary>Add a new position &amp; payload.  If payloadLength > 0
+        /// you must read those bytes from the IndexInput. 
+        /// </summary>
+        internal abstract void  AddPosition(int position, byte[] payload, int payloadOffset, int payloadLength);
+        
+        /// <summary>Called when we are done adding positions &amp; payloads </summary>
+        internal abstract void  Finish();
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FormatPostingsPositionsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FormatPostingsPositionsWriter.cs b/src/core/Index/FormatPostingsPositionsWriter.cs
index 8b70fcc..fc6e1e2 100644
--- a/src/core/Index/FormatPostingsPositionsWriter.cs
+++ b/src/core/Index/FormatPostingsPositionsWriter.cs
@@ -22,80 +22,80 @@ using IndexOutput = Lucene.Net.Store.IndexOutput;
 
 namespace Lucene.Net.Index
 {
-	
-	sealed class FormatPostingsPositionsWriter:FormatPostingsPositionsConsumer
-	{
-		internal FormatPostingsDocsWriter parent;
-		internal IndexOutput out_Renamed;
-		
-		internal bool omitTermFreqAndPositions;
-		internal bool storePayloads;
-		internal int lastPayloadLength = - 1;
-		
-		internal FormatPostingsPositionsWriter(SegmentWriteState state, FormatPostingsDocsWriter parent)
-		{
-			this.parent = parent;
-			omitTermFreqAndPositions = parent.omitTermFreqAndPositions;
-			if (parent.parent.parent.fieldInfos.HasProx())
-			{
-				// At least one field does not omit TF, so create the
-				// prox file
-				System.String fileName = IndexFileNames.SegmentFileName(parent.parent.parent.segment, IndexFileNames.PROX_EXTENSION);
-				state.flushedFiles.Add(fileName);
-				out_Renamed = parent.parent.parent.dir.CreateOutput(fileName);
-				parent.skipListWriter.SetProxOutput(out_Renamed);
-			}
-			// Every field omits TF so we will write no prox file
-			else
-				out_Renamed = null;
-		}
-		
-		internal int lastPosition;
-		
-		/// <summary>Add a new position &amp; payload </summary>
-		internal override void  AddPosition(int position, byte[] payload, int payloadOffset, int payloadLength)
-		{
-			System.Diagnostics.Debug.Assert(!omitTermFreqAndPositions, "omitTermFreqAndPositions is true");
-			System.Diagnostics.Debug.Assert(out_Renamed != null);
-			
-			int delta = position - lastPosition;
-			lastPosition = position;
-			
-			if (storePayloads)
-			{
-				if (payloadLength != lastPayloadLength)
-				{
-					lastPayloadLength = payloadLength;
-					out_Renamed.WriteVInt((delta << 1) | 1);
-					out_Renamed.WriteVInt(payloadLength);
-				}
-				else
-					out_Renamed.WriteVInt(delta << 1);
-				if (payloadLength > 0)
-					out_Renamed.WriteBytes(payload, payloadLength);
-			}
-			else
-				out_Renamed.WriteVInt(delta);
-		}
-		
-		internal void  SetField(FieldInfo fieldInfo)
-		{
-			omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
-			storePayloads = omitTermFreqAndPositions?false:fieldInfo.storePayloads;
-		}
-		
-		/// <summary>Called when we are done adding positions &amp; payloads </summary>
-		internal override void  Finish()
-		{
-			lastPosition = 0;
-			lastPayloadLength = - 1;
-		}
-		
+    
+    sealed class FormatPostingsPositionsWriter:FormatPostingsPositionsConsumer
+    {
+        internal FormatPostingsDocsWriter parent;
+        internal IndexOutput out_Renamed;
+        
+        internal bool omitTermFreqAndPositions;
+        internal bool storePayloads;
+        internal int lastPayloadLength = - 1;
+        
+        internal FormatPostingsPositionsWriter(SegmentWriteState state, FormatPostingsDocsWriter parent)
+        {
+            this.parent = parent;
+            omitTermFreqAndPositions = parent.omitTermFreqAndPositions;
+            if (parent.parent.parent.fieldInfos.HasProx())
+            {
+                // At least one field does not omit TF, so create the
+                // prox file
+                System.String fileName = IndexFileNames.SegmentFileName(parent.parent.parent.segment, IndexFileNames.PROX_EXTENSION);
+                state.flushedFiles.Add(fileName);
+                out_Renamed = parent.parent.parent.dir.CreateOutput(fileName);
+                parent.skipListWriter.SetProxOutput(out_Renamed);
+            }
+            // Every field omits TF so we will write no prox file
+            else
+                out_Renamed = null;
+        }
+        
+        internal int lastPosition;
+        
+        /// <summary>Add a new position &amp; payload </summary>
+        internal override void  AddPosition(int position, byte[] payload, int payloadOffset, int payloadLength)
+        {
+            System.Diagnostics.Debug.Assert(!omitTermFreqAndPositions, "omitTermFreqAndPositions is true");
+            System.Diagnostics.Debug.Assert(out_Renamed != null);
+            
+            int delta = position - lastPosition;
+            lastPosition = position;
+            
+            if (storePayloads)
+            {
+                if (payloadLength != lastPayloadLength)
+                {
+                    lastPayloadLength = payloadLength;
+                    out_Renamed.WriteVInt((delta << 1) | 1);
+                    out_Renamed.WriteVInt(payloadLength);
+                }
+                else
+                    out_Renamed.WriteVInt(delta << 1);
+                if (payloadLength > 0)
+                    out_Renamed.WriteBytes(payload, payloadLength);
+            }
+            else
+                out_Renamed.WriteVInt(delta);
+        }
+        
+        internal void  SetField(FieldInfo fieldInfo)
+        {
+            omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
+            storePayloads = omitTermFreqAndPositions ? false : fieldInfo.storePayloads;
+        }
+        
+        /// <summary>Called when we are done adding positions &amp; payloads </summary>
+        internal override void  Finish()
+        {
+            lastPosition = 0;
+            lastPayloadLength = - 1;
+        }
+        
         public void Dispose()
         {
             // Move to protected method if class becomes unsealed
             if (out_Renamed != null)
                 out_Renamed.Close();
         }
-	}
+    }
 }
\ No newline at end of file
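
The AddPosition implementation above delta-encodes each position against the previous one and steals the low bit of the delta to flag a change in payload length, so the common "same payload length" case costs a single VInt. Below is a minimal standalone sketch of that scheme; the names are hypothetical and a List<int> stands in for the VInt stream written to the .prx file, so this is an illustration, not code from this patch.

    using System;
    using System.Collections.Generic;

    static class ProxEncodeSketch
    {
        // Encode (position, payloadLength) pairs the way AddPosition does
        // when storePayloads is true.
        public static List<int> Encode((int pos, int payloadLen)[] positions)
        {
            var stream = new List<int>();
            int lastPosition = 0, lastPayloadLength = -1;
            foreach (var (pos, payloadLen) in positions)
            {
                int delta = pos - lastPosition;
                lastPosition = pos;
                if (payloadLen != lastPayloadLength)
                {
                    lastPayloadLength = payloadLen;
                    stream.Add((delta << 1) | 1);  // low bit set: new payload length follows
                    stream.Add(payloadLen);
                }
                else
                    stream.Add(delta << 1);        // low bit clear: reuse the last length
                // the payload bytes themselves would be written after the length
            }
            return stream;
        }

        public static void Main()
        {
            // positions 3, 7, 12 with payload lengths 2, 2, 0
            Console.WriteLine(string.Join(",", Encode(new[] { (3, 2), (7, 2), (12, 0) })));
            // prints 7,2,8,11,0 -> (3<<1)|1, 2, (4<<1), (5<<1)|1, 0
        }
    }

When storePayloads is false the writer emits the bare delta with no flag bit, which is why SetField recomputes storePayloads for every field.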

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FormatPostingsTermsConsumer.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FormatPostingsTermsConsumer.cs b/src/core/Index/FormatPostingsTermsConsumer.cs
index 637ecff..eb26223 100644
--- a/src/core/Index/FormatPostingsTermsConsumer.cs
+++ b/src/core/Index/FormatPostingsTermsConsumer.cs
@@ -21,32 +21,32 @@ using ArrayUtil = Lucene.Net.Util.ArrayUtil;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> NOTE: this API is experimental and will likely change</summary>
-	
-	abstract class FormatPostingsTermsConsumer
-	{
-		
-		/// <summary>Adds a new term in this field; term ends with U+FFFF
-		/// char 
-		/// </summary>
-		internal abstract FormatPostingsDocsConsumer AddTerm(char[] text, int start);
-		
-		internal char[] termBuffer;
-		internal virtual FormatPostingsDocsConsumer AddTerm(System.String text)
-		{
-			int len = text.Length;
-			if (termBuffer == null || termBuffer.Length < 1 + len)
-				termBuffer = new char[ArrayUtil.GetNextSize(1 + len)];
-	        for (int i = 0; i < len; i++)
-	        {
-		        termBuffer[i] = (char) text[i];
-	        }
-			termBuffer[len] = (char) (0xffff);
-			return AddTerm(termBuffer, 0);
-		}
-		
-		/// <summary>Called when we are done adding terms to this field </summary>
-		internal abstract void  Finish();
-	}
+    
+    /// <summary> NOTE: this API is experimental and will likely change</summary>
+    
+    abstract class FormatPostingsTermsConsumer
+    {
+        
+        /// <summary>Adds a new term in this field; term ends with U+FFFF
+        /// char 
+        /// </summary>
+        internal abstract FormatPostingsDocsConsumer AddTerm(char[] text, int start);
+        
+        internal char[] termBuffer;
+        internal virtual FormatPostingsDocsConsumer AddTerm(System.String text)
+        {
+            int len = text.Length;
+            if (termBuffer == null || termBuffer.Length < 1 + len)
+                termBuffer = new char[ArrayUtil.GetNextSize(1 + len)];
+            for (int i = 0; i < len; i++)
+            {
+                termBuffer[i] = (char) text[i];
+            }
+            termBuffer[len] = (char) (0xffff);
+            return AddTerm(termBuffer, 0);
+        }
+        
+        /// <summary>Called when we are done adding terms to this field </summary>
+        internal abstract void  Finish();
+    }
 }
\ No newline at end of file
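
The U+FFFF appended by AddTerm above is an end-of-term sentinel: downstream comparisons (see compareText in FreqProxTermsWriter.cs below) can walk two term buffers without carrying explicit lengths, since no valid term character equals 0xFFFF. A standalone sketch of that comparison follows; names are hypothetical and the code is an illustration, not part of this patch.

    using System;

    static class SentinelTermSketch
    {
        const char End = '\uFFFF';  // the sentinel appended by AddTerm

        // Walk both buffers until the characters differ or both hit the sentinel.
        static int CompareText(char[] a, int posA, char[] b, int posB)
        {
            while (true)
            {
                char c1 = a[posA++], c2 = b[posB++];
                if (c1 != c2)
                {
                    if (c2 == End) return 1;   // b ended first: a sorts after b
                    if (c1 == End) return -1;  // a ended first: a sorts before b
                    return c1 - c2;
                }
                if (c1 == End) return 0;       // both ended: equal terms
            }
        }

        public static void Main()
        {
            char[] t1 = "abc\uFFFF".ToCharArray();
            char[] t2 = "abd\uFFFF".ToCharArray();
            Console.WriteLine(CompareText(t1, 0, t2, 0));  // negative: "abc" < "abd"
        }
    }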

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FormatPostingsTermsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FormatPostingsTermsWriter.cs b/src/core/Index/FormatPostingsTermsWriter.cs
index 87d2026..7f3126c 100644
--- a/src/core/Index/FormatPostingsTermsWriter.cs
+++ b/src/core/Index/FormatPostingsTermsWriter.cs
@@ -19,59 +19,59 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	sealed class FormatPostingsTermsWriter : FormatPostingsTermsConsumer, IDisposable
-	{
-		internal FormatPostingsFieldsWriter parent;
-		internal FormatPostingsDocsWriter docsWriter;
-		internal TermInfosWriter termsOut;
-		internal FieldInfo fieldInfo;
-		
-		internal FormatPostingsTermsWriter(SegmentWriteState state, FormatPostingsFieldsWriter parent):base()
-		{
-			this.parent = parent;
-			termsOut = parent.termsOut;
-			docsWriter = new FormatPostingsDocsWriter(state, this);
-		}
-		
-		internal void  SetField(FieldInfo fieldInfo)
-		{
-			this.fieldInfo = fieldInfo;
-			docsWriter.SetField(fieldInfo);
-		}
-		
-		internal char[] currentTerm;
-		internal int currentTermStart;
-		
-		internal long freqStart;
-		internal long proxStart;
-		
-		/// <summary>Adds a new term in this field </summary>
-		internal override FormatPostingsDocsConsumer AddTerm(char[] text, int start)
-		{
-			currentTerm = text;
-			currentTermStart = start;
-			
-			// TODO: this is abstraction violation -- ideally this
-			// terms writer is not so "invasive", looking for file
-			// pointers in its child consumers.
-			freqStart = docsWriter.out_Renamed.FilePointer;
-			if (docsWriter.posWriter.out_Renamed != null)
-				proxStart = docsWriter.posWriter.out_Renamed.FilePointer;
-			
-			parent.skipListWriter.ResetSkip();
-			
-			return docsWriter;
-		}
-		
-		/// <summary>Called when we are done adding terms to this field </summary>
-		internal override void  Finish()
-		{
-		}
-		
+    
+    sealed class FormatPostingsTermsWriter : FormatPostingsTermsConsumer, IDisposable
+    {
+        internal FormatPostingsFieldsWriter parent;
+        internal FormatPostingsDocsWriter docsWriter;
+        internal TermInfosWriter termsOut;
+        internal FieldInfo fieldInfo;
+        
+        internal FormatPostingsTermsWriter(SegmentWriteState state, FormatPostingsFieldsWriter parent):base()
+        {
+            this.parent = parent;
+            termsOut = parent.termsOut;
+            docsWriter = new FormatPostingsDocsWriter(state, this);
+        }
+        
+        internal void  SetField(FieldInfo fieldInfo)
+        {
+            this.fieldInfo = fieldInfo;
+            docsWriter.SetField(fieldInfo);
+        }
+        
+        internal char[] currentTerm;
+        internal int currentTermStart;
+        
+        internal long freqStart;
+        internal long proxStart;
+        
+        /// <summary>Adds a new term in this field </summary>
+        internal override FormatPostingsDocsConsumer AddTerm(char[] text, int start)
+        {
+            currentTerm = text;
+            currentTermStart = start;
+            
+            // TODO: this is abstraction violation -- ideally this
+            // terms writer is not so "invasive", looking for file
+            // pointers in its child consumers.
+            freqStart = docsWriter.out_Renamed.FilePointer;
+            if (docsWriter.posWriter.out_Renamed != null)
+                proxStart = docsWriter.posWriter.out_Renamed.FilePointer;
+            
+            parent.skipListWriter.ResetSkip();
+            
+            return docsWriter;
+        }
+        
+        /// <summary>Called when we are done adding terms to this field </summary>
+        internal override void  Finish()
+        {
+        }
+        
         public void Dispose()
         {
             docsWriter.Dispose();
         }
-	}
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FreqProxFieldMergeState.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FreqProxFieldMergeState.cs b/src/core/Index/FreqProxFieldMergeState.cs
index 5306918..c3bd35f 100644
--- a/src/core/Index/FreqProxFieldMergeState.cs
+++ b/src/core/Index/FreqProxFieldMergeState.cs
@@ -20,98 +20,98 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Index
 {
-	
-	// TODO FI: some of this is "generic" to TermsHash* so we
-	// should factor it out so other consumers don't have to
-	// duplicate this code
-	
-	/// <summary>Used by DocumentsWriter to merge the postings from
-	/// multiple ThreadStates when creating a segment 
-	/// </summary>
-	sealed class FreqProxFieldMergeState
-	{
-		
-		internal FreqProxTermsWriterPerField field;
-		internal int numPostings;
-		internal CharBlockPool charPool;
-		internal RawPostingList[] postings;
-		
-		private FreqProxTermsWriter.PostingList p;
-		internal char[] text;
-		internal int textOffset;
-		
-		private int postingUpto = - 1;
-		
-		internal ByteSliceReader freq = new ByteSliceReader();
-		internal ByteSliceReader prox = new ByteSliceReader();
-		
-		internal int docID;
-		internal int termFreq;
-		
-		public FreqProxFieldMergeState(FreqProxTermsWriterPerField field)
-		{
-			this.field = field;
-			this.charPool = field.perThread.termsHashPerThread.charPool;
-			this.numPostings = field.termsHashPerField.numPostings;
-			this.postings = field.termsHashPerField.SortPostings();
-		}
-		
-		internal bool NextTerm()
-		{
-			postingUpto++;
-			if (postingUpto == numPostings)
-				return false;
-			
-			p = (FreqProxTermsWriter.PostingList) postings[postingUpto];
-			docID = 0;
-			
-			text = charPool.buffers[p.textStart >> DocumentsWriter.CHAR_BLOCK_SHIFT];
-			textOffset = p.textStart & DocumentsWriter.CHAR_BLOCK_MASK;
-			
-			field.termsHashPerField.InitReader(freq, p, 0);
-			if (!field.fieldInfo.omitTermFreqAndPositions)
-				field.termsHashPerField.InitReader(prox, p, 1);
-			
-			// Should always be true
-			bool result = NextDoc();
-			System.Diagnostics.Debug.Assert(result);
-			
-			return true;
-		}
-		
-		public bool NextDoc()
-		{
-			if (freq.Eof())
-			{
-				if (p.lastDocCode != - 1)
-				{
-					// Return last doc
-					docID = p.lastDocID;
-					if (!field.omitTermFreqAndPositions)
-						termFreq = p.docFreq;
-					p.lastDocCode = - 1;
-					return true;
-				}
-				// EOF
-				else
-					return false;
-			}
-			
-			int code = freq.ReadVInt();
-			if (field.omitTermFreqAndPositions)
-				docID += code;
-			else
-			{
-				docID += Number.URShift(code, 1);
-				if ((code & 1) != 0)
-					termFreq = 1;
-				else
-					termFreq = freq.ReadVInt();
-			}
-			
-			System.Diagnostics.Debug.Assert(docID != p.lastDocID);
-			
-			return true;
-		}
-	}
+    
+    // TODO FI: some of this is "generic" to TermsHash* so we
+    // should factor it out so other consumers don't have to
+    // duplicate this code
+    
+    /// <summary>Used by DocumentsWriter to merge the postings from
+    /// multiple ThreadStates when creating a segment 
+    /// </summary>
+    sealed class FreqProxFieldMergeState
+    {
+        
+        internal FreqProxTermsWriterPerField field;
+        internal int numPostings;
+        internal CharBlockPool charPool;
+        internal RawPostingList[] postings;
+        
+        private FreqProxTermsWriter.PostingList p;
+        internal char[] text;
+        internal int textOffset;
+        
+        private int postingUpto = - 1;
+        
+        internal ByteSliceReader freq = new ByteSliceReader();
+        internal ByteSliceReader prox = new ByteSliceReader();
+        
+        internal int docID;
+        internal int termFreq;
+        
+        public FreqProxFieldMergeState(FreqProxTermsWriterPerField field)
+        {
+            this.field = field;
+            this.charPool = field.perThread.termsHashPerThread.charPool;
+            this.numPostings = field.termsHashPerField.numPostings;
+            this.postings = field.termsHashPerField.SortPostings();
+        }
+        
+        internal bool NextTerm()
+        {
+            postingUpto++;
+            if (postingUpto == numPostings)
+                return false;
+            
+            p = (FreqProxTermsWriter.PostingList) postings[postingUpto];
+            docID = 0;
+            
+            text = charPool.buffers[p.textStart >> DocumentsWriter.CHAR_BLOCK_SHIFT];
+            textOffset = p.textStart & DocumentsWriter.CHAR_BLOCK_MASK;
+            
+            field.termsHashPerField.InitReader(freq, p, 0);
+            if (!field.fieldInfo.omitTermFreqAndPositions)
+                field.termsHashPerField.InitReader(prox, p, 1);
+            
+            // Should always be true
+            bool result = NextDoc();
+            System.Diagnostics.Debug.Assert(result);
+            
+            return true;
+        }
+        
+        public bool NextDoc()
+        {
+            if (freq.Eof())
+            {
+                if (p.lastDocCode != - 1)
+                {
+                    // Return last doc
+                    docID = p.lastDocID;
+                    if (!field.omitTermFreqAndPositions)
+                        termFreq = p.docFreq;
+                    p.lastDocCode = - 1;
+                    return true;
+                }
+                // EOF
+                else
+                    return false;
+            }
+            
+            int code = freq.ReadVInt();
+            if (field.omitTermFreqAndPositions)
+                docID += code;
+            else
+            {
+                docID += Number.URShift(code, 1);
+                if ((code & 1) != 0)
+                    termFreq = 1;
+                else
+                    termFreq = freq.ReadVInt();
+            }
+            
+            System.Diagnostics.Debug.Assert(docID != p.lastDocID);
+            
+            return true;
+        }
+    }
 }
\ No newline at end of file
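
NextDoc above is the read side of the per-term freq stream: each entry starts with a VInt whose upper bits are the docID delta and whose low bit, when set, means the term occurred exactly once in that doc, so no second VInt is needed. The final document never reaches the stream; it is buffered in lastDocCode/docFreq and returned on EOF. A hedged decode sketch follows, with a List<int> standing in for the ByteSliceReader; names are hypothetical and not from this patch.

    using System;
    using System.Collections.Generic;

    static class FreqStreamDecodeSketch
    {
        // Decode (docID, termFreq) pairs the way NextDoc does.
        public static IEnumerable<(int docID, int freq)> Decode(IReadOnlyList<int> stream)
        {
            int docID = 0, i = 0;
            while (i < stream.Count)
            {
                int code = stream[i++];
                docID += (int) ((uint) code >> 1);              // Number.URShift(code, 1)
                int freq = (code & 1) != 0 ? 1 : stream[i++];   // low bit set: freq == 1
                yield return (docID, freq);
            }
        }

        public static void Main()
        {
            // doc 2 with freq 1, then doc 5 with freq 3, encoded as (2<<1)|1, (3<<1), 3
            foreach (var (doc, freq) in Decode(new[] { 5, 6, 3 }))
                Console.WriteLine($"doc={doc} freq={freq}");
        }
    }

The matching write side is in FreqProxTermsWriterPerField.AddTerm further down in this patch.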

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FreqProxTermsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FreqProxTermsWriter.cs b/src/core/Index/FreqProxTermsWriter.cs
index f98d646..a289a47 100644
--- a/src/core/Index/FreqProxTermsWriter.cs
+++ b/src/core/Index/FreqProxTermsWriter.cs
@@ -21,283 +21,283 @@ using UnicodeUtil = Lucene.Net.Util.UnicodeUtil;
 
 namespace Lucene.Net.Index
 {
-	sealed class FreqProxTermsWriter : TermsHashConsumer
-	{
-		public override TermsHashConsumerPerThread AddThread(TermsHashPerThread perThread)
-		{
-			return new FreqProxTermsWriterPerThread(perThread);
-		}
-		
-		internal override void  CreatePostings(RawPostingList[] postings, int start, int count)
-		{
-			int end = start + count;
-			for (int i = start; i < end; i++)
-				postings[i] = new PostingList();
-		}
-		
-		private static int compareText(char[] text1, int pos1, char[] text2, int pos2)
-		{
-			while (true)
-			{
-				char c1 = text1[pos1++];
-				char c2 = text2[pos2++];
-				if (c1 != c2)
-				{
-					if (0xffff == c2)
-						return 1;
-					else if (0xffff == c1)
-						return - 1;
-					else
-						return c1 - c2;
-				}
-				else if (0xffff == c1)
-					return 0;
-			}
-		}
-		
-		internal override void  CloseDocStore(SegmentWriteState state)
-		{
-		}
-		public override void  Abort()
-		{
-		}
-		
-		
-		// TODO: would be nice to factor out more of this, eg the
-		// FreqProxFieldMergeState, and code to visit all Fields
-		// under the same FieldInfo together, up into TermsHash*.
-		// Other writers would presumably share alot of this...
+    sealed class FreqProxTermsWriter : TermsHashConsumer
+    {
+        public override TermsHashConsumerPerThread AddThread(TermsHashPerThread perThread)
+        {
+            return new FreqProxTermsWriterPerThread(perThread);
+        }
+        
+        internal override void  CreatePostings(RawPostingList[] postings, int start, int count)
+        {
+            int end = start + count;
+            for (int i = start; i < end; i++)
+                postings[i] = new PostingList();
+        }
+        
+        private static int compareText(char[] text1, int pos1, char[] text2, int pos2)
+        {
+            while (true)
+            {
+                char c1 = text1[pos1++];
+                char c2 = text2[pos2++];
+                if (c1 != c2)
+                {
+                    if (0xffff == c2)
+                        return 1;
+                    else if (0xffff == c1)
+                        return - 1;
+                    else
+                        return c1 - c2;
+                }
+                else if (0xffff == c1)
+                    return 0;
+            }
+        }
+        
+        internal override void  CloseDocStore(SegmentWriteState state)
+        {
+        }
+        public override void  Abort()
+        {
+        }
+        
+        
+        // TODO: would be nice to factor out more of this, eg the
+        // FreqProxFieldMergeState, and code to visit all Fields
+        // under the same FieldInfo together, up into TermsHash*.
+        // Other writers would presumably share a lot of this...
         public override void Flush(IDictionary<TermsHashConsumerPerThread, ICollection<TermsHashConsumerPerField>> threadsAndFields, SegmentWriteState state)
-		{
-			
-			// Gather all FieldData's that have postings, across all
-			// ThreadStates
-			var allFields = new List<FreqProxTermsWriterPerField>();
+        {
+            
+            // Gather all FieldData's that have postings, across all
+            // ThreadStates
+            var allFields = new List<FreqProxTermsWriterPerField>();
 
             foreach(var entry in threadsAndFields)
-			{
-				var fields = entry.Value;
-				
-				foreach(var i in fields)
-				{
-					FreqProxTermsWriterPerField perField = (FreqProxTermsWriterPerField)i;
-					if (perField.termsHashPerField.numPostings > 0)
-						allFields.Add(perField);
-				}
-			}
-			
-			// Sort by field name
+            {
+                var fields = entry.Value;
+                
+                foreach(var i in fields)
+                {
+                    FreqProxTermsWriterPerField perField = (FreqProxTermsWriterPerField)i;
+                    if (perField.termsHashPerField.numPostings > 0)
+                        allFields.Add(perField);
+                }
+            }
+            
+            // Sort by field name
             allFields.Sort();
-			int numAllFields = allFields.Count;
-			
-			// TODO: allow Lucene user to customize this consumer:
-			FormatPostingsFieldsConsumer consumer = new FormatPostingsFieldsWriter(state, fieldInfos);
-			/*
-			Current writer chain:
-			FormatPostingsFieldsConsumer
-			-> IMPL: FormatPostingsFieldsWriter
-			-> FormatPostingsTermsConsumer
-			-> IMPL: FormatPostingsTermsWriter
-			-> FormatPostingsDocConsumer
-			-> IMPL: FormatPostingsDocWriter
-			-> FormatPostingsPositionsConsumer
-			-> IMPL: FormatPostingsPositionsWriter
-			*/
-			
-			int start = 0;
-			while (start < numAllFields)
-			{
-				FieldInfo fieldInfo = allFields[start].fieldInfo;
-				System.String fieldName = fieldInfo.name;
-				
-				int end = start + 1;
-				while (end < numAllFields && allFields[end].fieldInfo.name.Equals(fieldName))
-					end++;
-				
-				FreqProxTermsWriterPerField[] fields = new FreqProxTermsWriterPerField[end - start];
-				for (int i = start; i < end; i++)
-				{
-					fields[i - start] = allFields[i];
-					
-					// Aggregate the storePayload as seen by the same
-					// field across multiple threads
-					fieldInfo.storePayloads |= fields[i - start].hasPayloads;
-				}
-				
-				// If this field has postings then add them to the
-				// segment
-				AppendPostings(fields, consumer);
-				
-				for (int i = 0; i < fields.Length; i++)
-				{
-					TermsHashPerField perField = fields[i].termsHashPerField;
-					int numPostings = perField.numPostings;
-					perField.Reset();
-					perField.ShrinkHash(numPostings);
-					fields[i].Reset();
-				}
-				
-				start = end;
-			}
+            int numAllFields = allFields.Count;
+            
+            // TODO: allow Lucene user to customize this consumer:
+            FormatPostingsFieldsConsumer consumer = new FormatPostingsFieldsWriter(state, fieldInfos);
+            /*
+            Current writer chain:
+            FormatPostingsFieldsConsumer
+            -> IMPL: FormatPostingsFieldsWriter
+            -> FormatPostingsTermsConsumer
+            -> IMPL: FormatPostingsTermsWriter
+            -> FormatPostingsDocsConsumer
+            -> IMPL: FormatPostingsDocsWriter
+            -> FormatPostingsPositionsConsumer
+            -> IMPL: FormatPostingsPositionsWriter
+            */
+            
+            int start = 0;
+            while (start < numAllFields)
+            {
+                FieldInfo fieldInfo = allFields[start].fieldInfo;
+                System.String fieldName = fieldInfo.name;
+                
+                int end = start + 1;
+                while (end < numAllFields && allFields[end].fieldInfo.name.Equals(fieldName))
+                    end++;
+                
+                FreqProxTermsWriterPerField[] fields = new FreqProxTermsWriterPerField[end - start];
+                for (int i = start; i < end; i++)
+                {
+                    fields[i - start] = allFields[i];
+                    
+                    // Aggregate the storePayload as seen by the same
+                    // field across multiple threads
+                    fieldInfo.storePayloads |= fields[i - start].hasPayloads;
+                }
+                
+                // If this field has postings then add them to the
+                // segment
+                AppendPostings(fields, consumer);
+                
+                for (int i = 0; i < fields.Length; i++)
+                {
+                    TermsHashPerField perField = fields[i].termsHashPerField;
+                    int numPostings = perField.numPostings;
+                    perField.Reset();
+                    perField.ShrinkHash(numPostings);
+                    fields[i].Reset();
+                }
+                
+                start = end;
+            }
 
             foreach(var entry in threadsAndFields)
-			{
-				FreqProxTermsWriterPerThread perThread = (FreqProxTermsWriterPerThread) entry.Key;
-				perThread.termsHashPerThread.Reset(true);
-			}
-			
-			consumer.Finish();
-		}
-		
-		private byte[] payloadBuffer;
-		
-		/* Walk through all unique text tokens (Posting
-		* instances) found in this field and serialize them
-		* into a single RAM segment. */
-		internal void  AppendPostings(FreqProxTermsWriterPerField[] fields, FormatPostingsFieldsConsumer consumer)
-		{
-			
-			int numFields = fields.Length;
-			
-			FreqProxFieldMergeState[] mergeStates = new FreqProxFieldMergeState[numFields];
-			
-			for (int i = 0; i < numFields; i++)
-			{
-				FreqProxFieldMergeState fms = mergeStates[i] = new FreqProxFieldMergeState(fields[i]);
-				
-				System.Diagnostics.Debug.Assert(fms.field.fieldInfo == fields [0].fieldInfo);
-				
-				// Should always be true
-				bool result = fms.NextTerm();
-				System.Diagnostics.Debug.Assert(result);
-			}
-			
-			FormatPostingsTermsConsumer termsConsumer = consumer.AddField(fields[0].fieldInfo);
-			
-			FreqProxFieldMergeState[] termStates = new FreqProxFieldMergeState[numFields];
-			
-			bool currentFieldOmitTermFreqAndPositions = fields[0].fieldInfo.omitTermFreqAndPositions;
-			
-			while (numFields > 0)
-			{
-				
-				// Get the next term to merge
-				termStates[0] = mergeStates[0];
-				int numToMerge = 1;
-				
-				for (int i = 1; i < numFields; i++)
-				{
-					char[] text = mergeStates[i].text;
-					int textOffset = mergeStates[i].textOffset;
-					int cmp = compareText(text, textOffset, termStates[0].text, termStates[0].textOffset);
-					
-					if (cmp < 0)
-					{
-						termStates[0] = mergeStates[i];
-						numToMerge = 1;
-					}
-					else if (cmp == 0)
-						termStates[numToMerge++] = mergeStates[i];
-				}
-				
-				FormatPostingsDocsConsumer docConsumer = termsConsumer.AddTerm(termStates[0].text, termStates[0].textOffset);
-				
-				// Now termStates has numToMerge FieldMergeStates
-				// which all share the same term.  Now we must
-				// interleave the docID streams.
-				while (numToMerge > 0)
-				{
-					
-					FreqProxFieldMergeState minState = termStates[0];
-					for (int i = 1; i < numToMerge; i++)
-						if (termStates[i].docID < minState.docID)
-							minState = termStates[i];
-					
-					int termDocFreq = minState.termFreq;
-					
-					FormatPostingsPositionsConsumer posConsumer = docConsumer.AddDoc(minState.docID, termDocFreq);
-					
-					ByteSliceReader prox = minState.prox;
-					
-					// Carefully copy over the prox + payload info,
-					// changing the format to match Lucene's segment
-					// format.
-					if (!currentFieldOmitTermFreqAndPositions)
-					{
-						// omitTermFreqAndPositions == false so we do write positions &
-						// payload          
-						int position = 0;
-						for (int j = 0; j < termDocFreq; j++)
-						{
-							int code = prox.ReadVInt();
-							position += (code >> 1);
-							
-							int payloadLength;
-							if ((code & 1) != 0)
-							{
-								// This position has a payload
-								payloadLength = prox.ReadVInt();
-								
-								if (payloadBuffer == null || payloadBuffer.Length < payloadLength)
-									payloadBuffer = new byte[payloadLength];
-								
-								prox.ReadBytes(payloadBuffer, 0, payloadLength);
-							}
-							else
-								payloadLength = 0;
-							
-							posConsumer.AddPosition(position, payloadBuffer, 0, payloadLength);
-						} //End for
-						
-						posConsumer.Finish();
-					}
-					
-					if (!minState.NextDoc())
-					{
-						
-						// Remove from termStates
-						int upto = 0;
-						for (int i = 0; i < numToMerge; i++)
-							if (termStates[i] != minState)
-								termStates[upto++] = termStates[i];
-						numToMerge--;
-						System.Diagnostics.Debug.Assert(upto == numToMerge);
-						
-						// Advance this state to the next term
-						
-						if (!minState.NextTerm())
-						{
-							// OK, no more terms, so remove from mergeStates
-							// as well
-							upto = 0;
-							for (int i = 0; i < numFields; i++)
-								if (mergeStates[i] != minState)
-									mergeStates[upto++] = mergeStates[i];
-							numFields--;
-							System.Diagnostics.Debug.Assert(upto == numFields);
-						}
-					}
-				}
-				
-				docConsumer.Finish();
-			}
-			
-			termsConsumer.Finish();
-		}
+            {
+                FreqProxTermsWriterPerThread perThread = (FreqProxTermsWriterPerThread) entry.Key;
+                perThread.termsHashPerThread.Reset(true);
+            }
+            
+            consumer.Finish();
+        }
+        
+        private byte[] payloadBuffer;
+        
+        /* Walk through all unique text tokens (Posting
+        * instances) found in this field and serialize them
+        * into a single RAM segment. */
+        internal void  AppendPostings(FreqProxTermsWriterPerField[] fields, FormatPostingsFieldsConsumer consumer)
+        {
+            
+            int numFields = fields.Length;
+            
+            FreqProxFieldMergeState[] mergeStates = new FreqProxFieldMergeState[numFields];
+            
+            for (int i = 0; i < numFields; i++)
+            {
+                FreqProxFieldMergeState fms = mergeStates[i] = new FreqProxFieldMergeState(fields[i]);
+                
+                System.Diagnostics.Debug.Assert(fms.field.fieldInfo == fields[0].fieldInfo);
+                
+                // Should always be true
+                bool result = fms.NextTerm();
+                System.Diagnostics.Debug.Assert(result);
+            }
+            
+            FormatPostingsTermsConsumer termsConsumer = consumer.AddField(fields[0].fieldInfo);
+            
+            FreqProxFieldMergeState[] termStates = new FreqProxFieldMergeState[numFields];
+            
+            bool currentFieldOmitTermFreqAndPositions = fields[0].fieldInfo.omitTermFreqAndPositions;
+            
+            while (numFields > 0)
+            {
+                
+                // Get the next term to merge
+                termStates[0] = mergeStates[0];
+                int numToMerge = 1;
+                
+                for (int i = 1; i < numFields; i++)
+                {
+                    char[] text = mergeStates[i].text;
+                    int textOffset = mergeStates[i].textOffset;
+                    int cmp = compareText(text, textOffset, termStates[0].text, termStates[0].textOffset);
+                    
+                    if (cmp < 0)
+                    {
+                        termStates[0] = mergeStates[i];
+                        numToMerge = 1;
+                    }
+                    else if (cmp == 0)
+                        termStates[numToMerge++] = mergeStates[i];
+                }
+                
+                FormatPostingsDocsConsumer docConsumer = termsConsumer.AddTerm(termStates[0].text, termStates[0].textOffset);
+                
+                // Now termStates has numToMerge FieldMergeStates
+                // which all share the same term.  Now we must
+                // interleave the docID streams.
+                while (numToMerge > 0)
+                {
+                    
+                    FreqProxFieldMergeState minState = termStates[0];
+                    for (int i = 1; i < numToMerge; i++)
+                        if (termStates[i].docID < minState.docID)
+                            minState = termStates[i];
+                    
+                    int termDocFreq = minState.termFreq;
+                    
+                    FormatPostingsPositionsConsumer posConsumer = docConsumer.AddDoc(minState.docID, termDocFreq);
+                    
+                    ByteSliceReader prox = minState.prox;
+                    
+                    // Carefully copy over the prox + payload info,
+                    // changing the format to match Lucene's segment
+                    // format.
+                    if (!currentFieldOmitTermFreqAndPositions)
+                    {
+                        // omitTermFreqAndPositions == false so we do write positions &
+                        // payload          
+                        int position = 0;
+                        for (int j = 0; j < termDocFreq; j++)
+                        {
+                            int code = prox.ReadVInt();
+                            position += (code >> 1);
+                            
+                            int payloadLength;
+                            if ((code & 1) != 0)
+                            {
+                                // This position has a payload
+                                payloadLength = prox.ReadVInt();
+                                
+                                if (payloadBuffer == null || payloadBuffer.Length < payloadLength)
+                                    payloadBuffer = new byte[payloadLength];
+                                
+                                prox.ReadBytes(payloadBuffer, 0, payloadLength);
+                            }
+                            else
+                                payloadLength = 0;
+                            
+                            posConsumer.AddPosition(position, payloadBuffer, 0, payloadLength);
+                        } //End for
+                        
+                        posConsumer.Finish();
+                    }
+                    
+                    if (!minState.NextDoc())
+                    {
+                        
+                        // Remove from termStates
+                        int upto = 0;
+                        for (int i = 0; i < numToMerge; i++)
+                            if (termStates[i] != minState)
+                                termStates[upto++] = termStates[i];
+                        numToMerge--;
+                        System.Diagnostics.Debug.Assert(upto == numToMerge);
+                        
+                        // Advance this state to the next term
+                        
+                        if (!minState.NextTerm())
+                        {
+                            // OK, no more terms, so remove from mergeStates
+                            // as well
+                            upto = 0;
+                            for (int i = 0; i < numFields; i++)
+                                if (mergeStates[i] != minState)
+                                    mergeStates[upto++] = mergeStates[i];
+                            numFields--;
+                            System.Diagnostics.Debug.Assert(upto == numFields);
+                        }
+                    }
+                }
+                
+                docConsumer.Finish();
+            }
+            
+            termsConsumer.Finish();
+        }
 
-		internal UnicodeUtil.UTF8Result termsUTF8 = new UnicodeUtil.UTF8Result();
-		
-		internal sealed class PostingList:RawPostingList
-		{
-			internal int docFreq; // # times this term occurs in the current doc
-			internal int lastDocID; // Last docID where this term occurred
-			internal int lastDocCode; // Code for prior doc
-			internal int lastPosition; // Last position where this term occurred
-		}
-		
-		internal override int BytesPerPosting()
-		{
-			return RawPostingList.BYTES_SIZE + 4 * DocumentsWriter.INT_NUM_BYTE;
-		}
-	}
+        internal UnicodeUtil.UTF8Result termsUTF8 = new UnicodeUtil.UTF8Result();
+        
+        internal sealed class PostingList:RawPostingList
+        {
+            internal int docFreq; // # times this term occurs in the current doc
+            internal int lastDocID; // Last docID where this term occurred
+            internal int lastDocCode; // Code for prior doc
+            internal int lastPosition; // Last position where this term occurred
+        }
+        
+        internal override int BytesPerPosting()
+        {
+            return RawPostingList.BYTES_SIZE + 4 * DocumentsWriter.INT_NUM_BYTE;
+        }
+    }
 }
\ No newline at end of file
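
AppendPostings above is a two-level merge: the outer loop finds the smallest sentinel-terminated term across all per-thread streams (every stream sharing that term lands in termStates), and the inner while (numToMerge > 0) loop interleaves their docID streams by repeatedly picking the state with the smallest current docID. A minimal sketch of that inner interleave over plain enumerators follows; it uses hypothetical names and no byte slices, so it is an illustration rather than code from this patch.

    using System;
    using System.Collections.Generic;
    using System.Linq;

    static class DocInterleaveSketch
    {
        // Interleave several already-sorted docID streams for one term.
        public static IEnumerable<int> Interleave(List<IEnumerator<int>> states)
        {
            states = states.Where(s => s.MoveNext()).ToList();  // prime; drop empty streams
            while (states.Count > 0)
            {
                var min = states[0];                            // find minState
                foreach (var s in states)
                    if (s.Current < min.Current) min = s;
                yield return min.Current;
                if (!min.MoveNext())
                    states.Remove(min);                         // exhausted: drop from termStates
            }
        }

        public static void Main()
        {
            var merged = Interleave(new List<IEnumerator<int>> {
                ((IEnumerable<int>) new[] { 1, 4, 9 }).GetEnumerator(),
                ((IEnumerable<int>) new[] { 2, 5, 7 }).GetEnumerator(),
            });
            Console.WriteLine(string.Join(",", merged));        // 1,2,4,5,7,9
        }
    }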

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FreqProxTermsWriterPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FreqProxTermsWriterPerField.cs b/src/core/Index/FreqProxTermsWriterPerField.cs
index c654b48..8facae6 100644
--- a/src/core/Index/FreqProxTermsWriterPerField.cs
+++ b/src/core/Index/FreqProxTermsWriterPerField.cs
@@ -21,176 +21,176 @@ using Lucene.Net.Documents;
 
 namespace Lucene.Net.Index
 {
-	
-	// TODO: break into separate freq and prox writers as
-	// codecs; make separate container (tii/tis/skip/*) that can
-	// be configured as any number of files 1..N
-	sealed class FreqProxTermsWriterPerField:TermsHashConsumerPerField, System.IComparable<FreqProxTermsWriterPerField>
-	{
-		
-		internal FreqProxTermsWriterPerThread perThread;
-		internal TermsHashPerField termsHashPerField;
-		internal FieldInfo fieldInfo;
-		internal DocumentsWriter.DocState docState;
-		internal FieldInvertState fieldState;
-		internal bool omitTermFreqAndPositions;
-		internal IPayloadAttribute payloadAttribute;
-		
-		public FreqProxTermsWriterPerField(TermsHashPerField termsHashPerField, FreqProxTermsWriterPerThread perThread, FieldInfo fieldInfo)
-		{
-			this.termsHashPerField = termsHashPerField;
-			this.perThread = perThread;
-			this.fieldInfo = fieldInfo;
-			docState = termsHashPerField.docState;
-			fieldState = termsHashPerField.fieldState;
-			omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
-		}
-		
-		internal override int GetStreamCount()
-		{
-			if (fieldInfo.omitTermFreqAndPositions)
-				return 1;
-			else
-				return 2;
-		}
-		
-		internal override void  Finish()
-		{
-		}
-		
-		internal bool hasPayloads;
-		
-		internal override void  SkippingLongTerm()
-		{
-		}
-		
-		public int CompareTo(FreqProxTermsWriterPerField other)
-		{
-			return String.CompareOrdinal(fieldInfo.name, other.fieldInfo.name);
-		}
-		
-		internal void  Reset()
-		{
-			// Record, up front, whether our in-RAM format will be
-			// with or without term freqs:
-			omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
-			payloadAttribute = null;
-		}
-		
-		internal override bool Start(IFieldable[] fields, int count)
-		{
-			for (int i = 0; i < count; i++)
-				if (fields[i].IsIndexed)
-					return true;
-			return false;
-		}
-		
-		internal override void  Start(IFieldable f)
-		{
+    
+    // TODO: break into separate freq and prox writers as
+    // codecs; make separate container (tii/tis/skip/*) that can
+    // be configured as any number of files 1..N
+    sealed class FreqProxTermsWriterPerField:TermsHashConsumerPerField, System.IComparable<FreqProxTermsWriterPerField>
+    {
+        
+        internal FreqProxTermsWriterPerThread perThread;
+        internal TermsHashPerField termsHashPerField;
+        internal FieldInfo fieldInfo;
+        internal DocumentsWriter.DocState docState;
+        internal FieldInvertState fieldState;
+        internal bool omitTermFreqAndPositions;
+        internal IPayloadAttribute payloadAttribute;
+        
+        public FreqProxTermsWriterPerField(TermsHashPerField termsHashPerField, FreqProxTermsWriterPerThread perThread, FieldInfo fieldInfo)
+        {
+            this.termsHashPerField = termsHashPerField;
+            this.perThread = perThread;
+            this.fieldInfo = fieldInfo;
+            docState = termsHashPerField.docState;
+            fieldState = termsHashPerField.fieldState;
+            omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
+        }
+        
+        internal override int GetStreamCount()
+        {
+            if (fieldInfo.omitTermFreqAndPositions)
+                return 1;
+            else
+                return 2;
+        }
+        
+        internal override void  Finish()
+        {
+        }
+        
+        internal bool hasPayloads;
+        
+        internal override void  SkippingLongTerm()
+        {
+        }
+        
+        public int CompareTo(FreqProxTermsWriterPerField other)
+        {
+            return String.CompareOrdinal(fieldInfo.name, other.fieldInfo.name);
+        }
+        
+        internal void  Reset()
+        {
+            // Record, up front, whether our in-RAM format will be
+            // with or without term freqs:
+            omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
+            payloadAttribute = null;
+        }
+        
+        internal override bool Start(IFieldable[] fields, int count)
+        {
+            for (int i = 0; i < count; i++)
+                if (fields[i].IsIndexed)
+                    return true;
+            return false;
+        }
+        
+        internal override void  Start(IFieldable f)
+        {
             if (fieldState.attributeSource.HasAttribute<IPayloadAttribute>())
-			{
+            {
                 payloadAttribute = fieldState.attributeSource.GetAttribute<IPayloadAttribute>();
-			}
-			else
-			{
-				payloadAttribute = null;
-			}
-		}
-		
-		internal void  WriteProx(FreqProxTermsWriter.PostingList p, int proxCode)
-		{
-			Payload payload;
-			if (payloadAttribute == null)
-			{
-				payload = null;
-			}
-			else
-			{
-				payload = payloadAttribute.Payload;
-			}
-			
-			if (payload != null && payload.internalLength > 0)
-			{
-				termsHashPerField.WriteVInt(1, (proxCode << 1) | 1);
-				termsHashPerField.WriteVInt(1, payload.internalLength);
-				termsHashPerField.WriteBytes(1, payload.data, payload.internalOffset, payload.internalLength);
-				hasPayloads = true;
-			}
-			else
-				termsHashPerField.WriteVInt(1, proxCode << 1);
-			p.lastPosition = fieldState.position;
-		}
-		
-		internal override void  NewTerm(RawPostingList p0)
-		{
-			// First time we're seeing this term since the last
-			// flush
-			System.Diagnostics.Debug.Assert(docState.TestPoint("FreqProxTermsWriterPerField.newTerm start"));
-			FreqProxTermsWriter.PostingList p = (FreqProxTermsWriter.PostingList) p0;
-			p.lastDocID = docState.docID;
-			if (omitTermFreqAndPositions)
-			{
-				p.lastDocCode = docState.docID;
-			}
-			else
-			{
-				p.lastDocCode = docState.docID << 1;
-				p.docFreq = 1;
-				WriteProx(p, fieldState.position);
-			}
-		}
-		
-		internal override void  AddTerm(RawPostingList p0)
-		{
-			
-			System.Diagnostics.Debug.Assert(docState.TestPoint("FreqProxTermsWriterPerField.addTerm start"));
-			
-			FreqProxTermsWriter.PostingList p = (FreqProxTermsWriter.PostingList) p0;
-			
-			System.Diagnostics.Debug.Assert(omitTermFreqAndPositions || p.docFreq > 0);
-			
-			if (omitTermFreqAndPositions)
-			{
-				if (docState.docID != p.lastDocID)
-				{
-					System.Diagnostics.Debug.Assert(docState.docID > p.lastDocID);
-					termsHashPerField.WriteVInt(0, p.lastDocCode);
-					p.lastDocCode = docState.docID - p.lastDocID;
-					p.lastDocID = docState.docID;
-				}
-			}
-			else
-			{
-				if (docState.docID != p.lastDocID)
-				{
-					System.Diagnostics.Debug.Assert(docState.docID > p.lastDocID);
-					// Term not yet seen in the current doc but previously
-					// seen in other doc(s) since the last flush
-					
-					// Now that we know doc freq for previous doc,
-					// write it & lastDocCode
-					if (1 == p.docFreq)
-						termsHashPerField.WriteVInt(0, p.lastDocCode | 1);
-					else
-					{
-						termsHashPerField.WriteVInt(0, p.lastDocCode);
-						termsHashPerField.WriteVInt(0, p.docFreq);
-					}
-					p.docFreq = 1;
-					p.lastDocCode = (docState.docID - p.lastDocID) << 1;
-					p.lastDocID = docState.docID;
-					WriteProx(p, fieldState.position);
-				}
-				else
-				{
-					p.docFreq++;
-					WriteProx(p, fieldState.position - p.lastPosition);
-				}
-			}
-		}
-		
-		public void  Abort()
-		{
-		}
-	}
+            }
+            else
+            {
+                payloadAttribute = null;
+            }
+        }
+        
+        internal void  WriteProx(FreqProxTermsWriter.PostingList p, int proxCode)
+        {
+            Payload payload;
+            if (payloadAttribute == null)
+            {
+                payload = null;
+            }
+            else
+            {
+                payload = payloadAttribute.Payload;
+            }
+            
+            if (payload != null && payload.internalLength > 0)
+            {
+                termsHashPerField.WriteVInt(1, (proxCode << 1) | 1);
+                termsHashPerField.WriteVInt(1, payload.internalLength);
+                termsHashPerField.WriteBytes(1, payload.data, payload.internalOffset, payload.internalLength);
+                hasPayloads = true;
+            }
+            else
+                termsHashPerField.WriteVInt(1, proxCode << 1);
+            p.lastPosition = fieldState.position;
+        }
+        
+        internal override void  NewTerm(RawPostingList p0)
+        {
+            // First time we're seeing this term since the last
+            // flush
+            System.Diagnostics.Debug.Assert(docState.TestPoint("FreqProxTermsWriterPerField.newTerm start"));
+            FreqProxTermsWriter.PostingList p = (FreqProxTermsWriter.PostingList) p0;
+            p.lastDocID = docState.docID;
+            if (omitTermFreqAndPositions)
+            {
+                p.lastDocCode = docState.docID;
+            }
+            else
+            {
+                p.lastDocCode = docState.docID << 1;
+                p.docFreq = 1;
+                WriteProx(p, fieldState.position);
+            }
+        }
+        
+        internal override void  AddTerm(RawPostingList p0)
+        {
+            
+            System.Diagnostics.Debug.Assert(docState.TestPoint("FreqProxTermsWriterPerField.addTerm start"));
+            
+            FreqProxTermsWriter.PostingList p = (FreqProxTermsWriter.PostingList) p0;
+            
+            System.Diagnostics.Debug.Assert(omitTermFreqAndPositions || p.docFreq > 0);
+            
+            if (omitTermFreqAndPositions)
+            {
+                if (docState.docID != p.lastDocID)
+                {
+                    System.Diagnostics.Debug.Assert(docState.docID > p.lastDocID);
+                    termsHashPerField.WriteVInt(0, p.lastDocCode);
+                    p.lastDocCode = docState.docID - p.lastDocID;
+                    p.lastDocID = docState.docID;
+                }
+            }
+            else
+            {
+                if (docState.docID != p.lastDocID)
+                {
+                    System.Diagnostics.Debug.Assert(docState.docID > p.lastDocID);
+                    // Term not yet seen in the current doc but previously
+                    // seen in other doc(s) since the last flush
+                    
+                    // Now that we know doc freq for previous doc,
+                    // write it & lastDocCode
+                    if (1 == p.docFreq)
+                        termsHashPerField.WriteVInt(0, p.lastDocCode | 1);
+                    else
+                    {
+                        termsHashPerField.WriteVInt(0, p.lastDocCode);
+                        termsHashPerField.WriteVInt(0, p.docFreq);
+                    }
+                    p.docFreq = 1;
+                    p.lastDocCode = (docState.docID - p.lastDocID) << 1;
+                    p.lastDocID = docState.docID;
+                    WriteProx(p, fieldState.position);
+                }
+                else
+                {
+                    p.docFreq++;
+                    WriteProx(p, fieldState.position - p.lastPosition);
+                }
+            }
+        }
+        
+        public void  Abort()
+        {
+        }
+    }
 }
\ No newline at end of file
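
AddTerm above is the write side of the freq stream decoded earlier in FreqProxFieldMergeState.NextDoc: a document's entry cannot be emitted until its final term freq is known, so the code for the previous doc is buffered in p.lastDocCode and flushed when the term next appears in a later doc, while the very last doc is picked up by the EOF path in NextDoc. The round-trip sketch below takes freq as an input rather than counting repeated AddTerm calls; names are hypothetical and the code is not part of this patch.

    using System;
    using System.Collections.Generic;

    static class FreqStreamEncodeSketch
    {
        // Encode (docID, termFreq) pairs the way NewTerm/AddTerm do.
        public static List<int> Encode((int docID, int freq)[] docs)
        {
            var stream = new List<int>();
            int lastDocID = 0, lastDocCode = -1, docFreq = 0;
            foreach (var (docID, freq) in docs)
            {
                if (lastDocCode != -1)
                    Flush(stream, lastDocCode, docFreq);  // previous doc's freq is now final
                lastDocCode = (docID - lastDocID) << 1;   // delta, low bit reserved
                lastDocID = docID;
                docFreq = freq;
            }
            if (lastDocCode != -1)
                Flush(stream, lastDocCode, docFreq);      // stands in for the EOF path in NextDoc
            return stream;
        }

        static void Flush(List<int> stream, int code, int freq)
        {
            if (freq == 1)
                stream.Add(code | 1);                     // low bit set: freq == 1, no second VInt
            else { stream.Add(code); stream.Add(freq); }
        }

        public static void Main()
        {
            // doc 2 with freq 1, doc 5 with freq 3 -> 5,6,3 (matches the decode sketch above)
            Console.WriteLine(string.Join(",", Encode(new[] { (2, 1), (5, 3) })));
        }
    }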

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/FreqProxTermsWriterPerThread.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FreqProxTermsWriterPerThread.cs b/src/core/Index/FreqProxTermsWriterPerThread.cs
index 01f1ae9..1e4ac41 100644
--- a/src/core/Index/FreqProxTermsWriterPerThread.cs
+++ b/src/core/Index/FreqProxTermsWriterPerThread.cs
@@ -19,34 +19,34 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	sealed class FreqProxTermsWriterPerThread:TermsHashConsumerPerThread
-	{
-		internal TermsHashPerThread termsHashPerThread;
-		internal DocumentsWriter.DocState docState;
-		
-		public FreqProxTermsWriterPerThread(TermsHashPerThread perThread)
-		{
-			docState = perThread.docState;
-			termsHashPerThread = perThread;
-		}
-		
-		public override TermsHashConsumerPerField AddField(TermsHashPerField termsHashPerField, FieldInfo fieldInfo)
-		{
-			return new FreqProxTermsWriterPerField(termsHashPerField, this, fieldInfo);
-		}
-		
-		public override void  StartDocument()
-		{
-		}
-		
-		public override DocumentsWriter.DocWriter FinishDocument()
-		{
-			return null;
-		}
-		
-		public override void  Abort()
-		{
-		}
-	}
+    
+    sealed class FreqProxTermsWriterPerThread:TermsHashConsumerPerThread
+    {
+        internal TermsHashPerThread termsHashPerThread;
+        internal DocumentsWriter.DocState docState;
+        
+        public FreqProxTermsWriterPerThread(TermsHashPerThread perThread)
+        {
+            docState = perThread.docState;
+            termsHashPerThread = perThread;
+        }
+        
+        public override TermsHashConsumerPerField AddField(TermsHashPerField termsHashPerField, FieldInfo fieldInfo)
+        {
+            return new FreqProxTermsWriterPerField(termsHashPerField, this, fieldInfo);
+        }
+        
+        public override void  StartDocument()
+        {
+        }
+        
+        public override DocumentsWriter.DocWriter FinishDocument()
+        {
+            return null;
+        }
+        
+        public override void  Abort()
+        {
+        }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/IndexCommit.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IndexCommit.cs b/src/core/Index/IndexCommit.cs
index 306d7f1..7bfd351 100644
--- a/src/core/Index/IndexCommit.cs
+++ b/src/core/Index/IndexCommit.cs
@@ -22,98 +22,98 @@ using Directory = Lucene.Net.Store.Directory;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> <p/>Expert: represents a single commit into an index as seen by the
-	/// <see cref="IndexDeletionPolicy" /> or <see cref="IndexReader" />.<p/>
-	/// 
-	/// <p/> Changes to the content of an index are made visible
-	/// only after the writer who made that change commits by
-	/// writing a new segments file
-	/// (<c>segments_N</c>). This point in time, when the
-	/// action of writing of a new segments file to the directory
-	/// is completed, is an index commit.<p/>
-	/// 
-	/// <p/>Each index commit point has a unique segments file
-	/// associated with it. The segments file associated with a
-	/// later index commit point would have a larger N.<p/>
-	/// 
-	/// <p/><b>WARNING</b>: This API is a new and experimental and
-	/// may suddenly change. <p/>
-	/// </summary>
-	
-	public abstract class IndexCommit
-	{
-	    /// <summary> Get the segments file (<c>segments_N</c>) associated 
-	    /// with this commit point.
-	    /// </summary>
-	    public abstract string SegmentsFileName { get; }
+    
+    /// <summary> <p/>Expert: represents a single commit into an index as seen by the
+    /// <see cref="IndexDeletionPolicy" /> or <see cref="IndexReader" />.<p/>
+    /// 
+    /// <p/> Changes to the content of an index are made visible
+    /// only after the writer who made that change commits by
+    /// writing a new segments file
+    /// (<c>segments_N</c>). This point in time, when the
+    /// action of writing a new segments file to the directory
+    /// is completed, is an index commit.<p/>
+    /// 
+    /// <p/>Each index commit point has a unique segments file
+    /// associated with it. The segments file associated with a
+    /// later index commit point would have a larger N.<p/>
+    /// 
+    /// <p/><b>WARNING</b>: This API is new and experimental and
+    /// may suddenly change. <p/>
+    /// </summary>
+    
+    public abstract class IndexCommit
+    {
+        /// <summary> Get the segments file (<c>segments_N</c>) associated 
+        /// with this commit point.
+        /// </summary>
+        public abstract string SegmentsFileName { get; }
 
-	    /// <summary> Returns all index files referenced by this commit point.</summary>
-	    public abstract ICollection<string> FileNames { get; }
+        /// <summary> Returns all index files referenced by this commit point.</summary>
+        public abstract ICollection<string> FileNames { get; }
 
-	    /// <summary> Returns the <see cref="Store.Directory" /> for the index.</summary>
-	    public abstract Directory Directory { get; }
+        /// <summary> Returns the <see cref="Store.Directory" /> for the index.</summary>
+        public abstract Directory Directory { get; }
 
-	    /// <summary> Delete this commit point.  This only applies when using
-		/// the commit point in the context of IndexWriter's
-		/// IndexDeletionPolicy.
-		/// <p/>
-		/// Upon calling this, the writer is notified that this commit 
-		/// point should be deleted. 
-		/// <p/>
-		/// Decision that a commit-point should be deleted is taken by the <see cref="IndexDeletionPolicy" /> in effect
+        /// <summary> Delete this commit point.  This only applies when using
+        /// the commit point in the context of IndexWriter's
+        /// IndexDeletionPolicy.
+        /// <p/>
+        /// Upon calling this, the writer is notified that this commit 
+        /// point should be deleted. 
+        /// <p/>
+        /// The decision to delete a commit point is made by the <see cref="IndexDeletionPolicy" /> in effect
         /// and therefore this should only be called by its <see cref="IndexDeletionPolicy.OnInit{T}(IList{T})" /> or 
         /// <see cref="IndexDeletionPolicy.OnCommit{T}(IList{T})" /> methods.
-		/// </summary>
+        /// </summary>
         public abstract void Delete();
 
-	    public abstract bool IsDeleted { get; }
+        public abstract bool IsDeleted { get; }
 
-	    /// <summary> Returns true if this commit is an optimized index.</summary>
-	    public abstract bool IsOptimized { get; }
+        /// <summary> Returns true if this commit is an optimized index.</summary>
+        public abstract bool IsOptimized { get; }
 
-	    /// <summary> Two IndexCommits are equal if both their Directory and versions are equal.</summary>
-		public  override bool Equals(System.Object other)
-		{
-			if (other is IndexCommit)
-			{
-				IndexCommit otherCommit = (IndexCommit) other;
-				return otherCommit.Directory.Equals(Directory) && otherCommit.Version == Version;
-			}
-			else
-				return false;
-		}
-		
-		public override int GetHashCode()
-		{
-			return (int)(Directory.GetHashCode() + Version);
-		}
+        /// <summary> Two IndexCommits are equal if their Directory and Version are equal.</summary>
+        public  override bool Equals(System.Object other)
+        {
+            if (other is IndexCommit)
+            {
+                IndexCommit otherCommit = (IndexCommit) other;
+                return otherCommit.Directory.Equals(Directory) && otherCommit.Version == Version;
+            }
+            else
+                return false;
+        }
+        
+        public override int GetHashCode()
+        {
+            return (int)(Directory.GetHashCode() + Version);
+        }
 
-	    /// <summary>Returns the version for this IndexCommit.  This is the
-	    /// same value that <see cref="IndexReader.Version" /> would
-	    /// return if it were opened on this commit. 
-	    /// </summary>
-	    public abstract long Version { get; }
+        /// <summary>Returns the version for this IndexCommit.  This is the
+        /// same value that <see cref="IndexReader.Version" /> would
+        /// return if it were opened on this commit. 
+        /// </summary>
+        public abstract long Version { get; }
 
-	    /// <summary>Returns the generation (the _N in segments_N) for this
-	    /// IndexCommit 
-	    /// </summary>
-	    public abstract long Generation { get; }
+        /// <summary>Returns the generation (the _N in segments_N) for this
+        /// IndexCommit 
+        /// </summary>
+        public abstract long Generation { get; }
 
-	    /// <summary>Convenience method that returns the last modified time
-	    /// of the segments_N file corresponding to this index
-	    /// commit, equivalent to
-	    /// getDirectory().fileModified(getSegmentsFileName()). 
-	    /// </summary>
-	    public virtual long Timestamp
-	    {
-	        get { return Directory.FileModified(SegmentsFileName); }
-	    }
+        /// <summary>Convenience property that returns the last modified time
+        /// of the segments_N file corresponding to this index
+        /// commit, equivalent to
+        /// Directory.FileModified(SegmentsFileName). 
+        /// </summary>
+        public virtual long Timestamp
+        {
+            get { return Directory.FileModified(SegmentsFileName); }
+        }
 
-	    /// <summary>Returns userData, previously passed to 
-	    /// <see cref="IndexWriter.Commit(System.Collections.Generic.IDictionary{string, string})" />
-	    /// for this commit.  IDictionary is String -> String. 
-	    /// </summary>
-	    public abstract IDictionary<string, string> UserData { get; }
-	}
+        /// <summary>Returns the userData previously passed to 
+        /// <see cref="IndexWriter.Commit(System.Collections.Generic.IDictionary{string, string})" />
+        /// for this commit.  The dictionary maps String -> String. 
+        /// </summary>
+        public abstract IDictionary<string, string> UserData { get; }
+    }
 }
\ No newline at end of file
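
For context on the IndexCommit members documented above, a minimal usage
sketch follows. It assumes the Lucene.Net 3.x static helper
IndexReader.ListCommits(Directory); the class name and index path are
illustrative placeholders, not part of this change.

    using System;
    using Lucene.Net.Index;
    using Lucene.Net.Store;

    class CommitInspector
    {
        static void Main()
        {
            // Open the index directory; "index" is a placeholder path.
            using (FSDirectory dir = FSDirectory.Open(new System.IO.DirectoryInfo("index")))
            {
                // Enumerate every commit point still present in the directory.
                foreach (IndexCommit commit in IndexReader.ListCommits(dir))
                {
                    // Generation is the _N in segments_N; Version is what
                    // IndexReader.Version would report for this commit.
                    Console.WriteLine("{0}: generation={1} version={2} modified={3}",
                        commit.SegmentsFileName, commit.Generation,
                        commit.Version, commit.Timestamp);
                }
            }
        }
    }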

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/IndexDeletionPolicy.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IndexDeletionPolicy.cs b/src/core/Index/IndexDeletionPolicy.cs
index bef9924..fb27ec0 100644
--- a/src/core/Index/IndexDeletionPolicy.cs
+++ b/src/core/Index/IndexDeletionPolicy.cs
@@ -20,58 +20,58 @@ using System.Collections.Generic;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> <p/>Expert: policy for deletion of stale <see cref="IndexCommit">index commits</see>. 
-	/// 
-	/// <p/>Implement this interface, and pass it to one
-	/// of the <see cref="IndexWriter" /> or <see cref="IndexReader" />
-	/// constructors, to customize when older
-	/// <see cref="IndexCommit">point-in-time commits</see>
-	/// are deleted from the index directory.  The default deletion policy
-	/// is <see cref="KeepOnlyLastCommitDeletionPolicy" />, which always
-	/// removes old commits as soon as a new commit is done (this
-	/// matches the behavior before 2.2).<p/>
-	/// 
-	/// <p/>One expected use case for this (and the reason why it
-	/// was first created) is to work around problems with an
-	/// index directory accessed via filesystems like NFS because
-	/// NFS does not provide the "delete on last close" semantics
-	/// that Lucene's "point in time" search normally relies on.
-	/// By implementing a custom deletion policy, such as "a
-	/// commit is only removed once it has been stale for more
-	/// than X minutes", you can give your readers time to
-	/// refresh to the new commit before <see cref="IndexWriter" />
-	/// removes the old commits.  Note that doing so will
-	/// increase the storage requirements of the index.  See <a
-	/// target="top"
-	/// href="http://issues.apache.org/jira/browse/LUCENE-710">LUCENE-710</a>
-	/// for details.<p/>
-	/// </summary>
-	
-	public interface IndexDeletionPolicy
-	{
-		
-		/// <summary> <p/>This is called once when a writer is first
-		/// instantiated to give the policy a chance to remove old
-		/// commit points.<p/>
-		/// 
-		/// <p/>The writer locates all index commits present in the 
-		/// index directory and calls this method.  The policy may 
-		/// choose to delete some of the commit points, doing so by
-		/// calling method <see cref="IndexCommit.Delete()" /> 
-		/// of <see cref="IndexCommit" />.<p/>
-		/// 
-		/// <p/><u>Note:</u> the last CommitPoint is the most recent one,
-		/// i.e. the "front index state". Be careful not to delete it,
-		/// unless you know for sure what you are doing, and unless 
-		/// you can afford to lose the index content while doing that. 
-		/// 
-		/// </summary>
-		/// <param name="commits">List of current 
-		/// <see cref="IndexCommit">point-in-time commits</see>,
-		/// sorted by age (the 0th one is the oldest commit).
-		/// </param>
-		void  OnInit<T>(IList<T> commits) where T : IndexCommit;
+    
+    /// <summary> <p/>Expert: policy for deletion of stale <see cref="IndexCommit">index commits</see>. 
+    /// 
+    /// <p/>Implement this interface, and pass it to one
+    /// of the <see cref="IndexWriter" /> or <see cref="IndexReader" />
+    /// constructors, to customize when older
+    /// <see cref="IndexCommit">point-in-time commits</see>
+    /// are deleted from the index directory.  The default deletion policy
+    /// is <see cref="KeepOnlyLastCommitDeletionPolicy" />, which always
+    /// removes old commits as soon as a new commit is done (this
+    /// matches the behavior before 2.2).<p/>
+    /// 
+    /// <p/>One expected use case for this (and the reason why it
+    /// was first created) is to work around problems with an
+    /// index directory accessed via filesystems like NFS because
+    /// NFS does not provide the "delete on last close" semantics
+    /// that Lucene's "point in time" search normally relies on.
+    /// By implementing a custom deletion policy, such as "a
+    /// commit is only removed once it has been stale for more
+    /// than X minutes", you can give your readers time to
+    /// refresh to the new commit before <see cref="IndexWriter" />
+    /// removes the old commits.  Note that doing so will
+    /// increase the storage requirements of the index.  See <a
+    /// target="top"
+    /// href="http://issues.apache.org/jira/browse/LUCENE-710">LUCENE-710</a>
+    /// for details.<p/>
+    /// </summary>
+    
+    public interface IndexDeletionPolicy
+    {
+        
+        /// <summary> <p/>This is called once when a writer is first
+        /// instantiated to give the policy a chance to remove old
+        /// commit points.<p/>
+        /// 
+        /// <p/>The writer locates all index commits present in the 
+        /// index directory and calls this method.  The policy may 
+        /// choose to delete some of the commit points by calling the
+        /// <see cref="IndexCommit.Delete()" /> method of
+        /// <see cref="IndexCommit" />.<p/>
+        /// 
+        /// <p/><u>Note:</u> the last CommitPoint is the most recent one,
+        /// i.e. the "front index state". Be careful not to delete it
+        /// unless you are certain of what you are doing and can afford
+        /// to lose the index content if you do. 
+        /// 
+        /// </summary>
+        /// <param name="commits">List of current 
+        /// <see cref="IndexCommit">point-in-time commits</see>,
+        /// sorted by age (the 0th one is the oldest commit).
+        /// </param>
+        void OnInit<T>(IList<T> commits) where T : IndexCommit;
 
         /// <summary>
         /// <p>This is called each time the writer completed a commit.
@@ -94,6 +94,6 @@ namespace Lucene.Net.Index
         /// <param name="commits">
         /// List of <see cref="IndexCommit" />, sorted by age (the 0th one is the oldest commit).
         /// </param>
-		void  OnCommit<T>(IList<T> commits) where T : IndexCommit;
-	}
+        void OnCommit<T>(IList<T> commits) where T : IndexCommit;
+    }
 }
\ No newline at end of file
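
To make the policy contract above concrete, here is a minimal sketch of the
"only remove a commit once it has been stale for more than X minutes" policy
described in the summary, written against the OnInit/OnCommit signatures in
this file. The class name is hypothetical, and the assumption that
Directory.FileModified (which IndexCommit.Timestamp wraps) reports
milliseconds should be verified against the Directory implementation in use.

    using System.Collections.Generic;
    using Lucene.Net.Index;

    // Keeps the newest commit unconditionally; an older commit is deleted
    // only after it has been superseded for more than expirationMinutes.
    public class ExpirationTimeDeletionPolicy : IndexDeletionPolicy
    {
        private readonly double expirationMinutes;

        public ExpirationTimeDeletionPolicy(double expirationMinutes)
        {
            this.expirationMinutes = expirationMinutes;
        }

        public void OnInit<T>(IList<T> commits) where T : IndexCommit
        {
            // Apply the same pruning at startup as after each commit.
            Prune(commits);
        }

        public void OnCommit<T>(IList<T> commits) where T : IndexCommit
        {
            Prune(commits);
        }

        private void Prune<T>(IList<T> commits) where T : IndexCommit
        {
            // The last commit is the most recent "front index state";
            // per the note above, it is never deleted here.
            long newestModified = commits[commits.Count - 1].Timestamp;

            for (int i = 0; i < commits.Count - 1; i++)
            {
                // Assumed milliseconds; convert the staleness window to minutes.
                double staleMinutes =
                    (newestModified - commits[i].Timestamp) / 1000.0 / 60.0;
                if (staleMinutes > expirationMinutes)
                    commits[i].Delete();
            }
        }
    }

Calling Delete() only marks the commit point; as the docs above note, the
writer decides when the underlying files are actually removed.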