Posted to commits@lucenenet.apache.org by cc...@apache.org on 2013/04/03 19:40:00 UTC

[17/51] [partial] Mass convert mixed tabs to spaces

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DirectoryReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DirectoryReader.cs b/src/core/Index/DirectoryReader.cs
index 574448d..5fdd3c0 100644
--- a/src/core/Index/DirectoryReader.cs
+++ b/src/core/Index/DirectoryReader.cs
@@ -572,9 +572,9 @@ namespace Lucene.Net.Index
 
         class AnonymousFindSegmentsFile : SegmentInfos.FindSegmentsFile
         {
-        	readonly DirectoryReader enclosingInstance;
-        	readonly bool openReadOnly;
-        	readonly Directory dir;
+            readonly DirectoryReader enclosingInstance;
+            readonly bool openReadOnly;
+            readonly Directory dir;
             public AnonymousFindSegmentsFile(Directory directory, bool openReadOnly, DirectoryReader dirReader) : base(directory)
             {
                 this.dir = directory;
@@ -664,7 +664,7 @@ namespace Lucene.Net.Index
             {
                 // check cache
                 int n = subReaders.Sum(t => t.NumDocs()); // cache miss--recompute
-            	numDocs = n;
+                numDocs = n;
             }
             return numDocs;
         }
@@ -713,9 +713,9 @@ namespace Lucene.Net.Index
         protected internal override void  DoUndeleteAll()
         {
             foreach (SegmentReader t in subReaders)
-            	t.UndeleteAll();
+                t.UndeleteAll();
 
-        	hasDeletions = false;
+            hasDeletions = false;
             numDocs = - 1; // invalidate cache
         }
         
@@ -755,7 +755,7 @@ namespace Lucene.Net.Index
         public override bool HasNorms(System.String field)
         {
             EnsureOpen();
-        	return subReaders.Any(t => t.HasNorms(field));
+            return subReaders.Any(t => t.HasNorms(field));
         }
         
         public override byte[] Norms(System.String field)
@@ -926,9 +926,9 @@ namespace Lucene.Net.Index
                 try
                 {
                     foreach (SegmentReader t in subReaders)
-                    	t.Commit();
+                        t.Commit();
 
-                	// Sync all files we just wrote
+                    // Sync all files we just wrote
                     foreach(string fileName in segmentInfos.Files(internalDirectory, false))
                     {
                         if(!synced.Contains(fileName))
@@ -980,23 +980,23 @@ namespace Lucene.Net.Index
         
         internal virtual void  StartCommit()
         {
-        	rollbackHasChanges = hasChanges;
-        	foreach (SegmentReader t in subReaders)
-        	{
-        		t.StartCommit();
-        	}
+            rollbackHasChanges = hasChanges;
+            foreach (SegmentReader t in subReaders)
+            {
+                t.StartCommit();
+            }
         }
 
-    	internal virtual void  RollbackCommit()
-    	{
-    		hasChanges = rollbackHasChanges;
-    		foreach (SegmentReader t in subReaders)
-    		{
-    			t.RollbackCommit();
-    		}
-    	}
+        internal virtual void  RollbackCommit()
+        {
+            hasChanges = rollbackHasChanges;
+            foreach (SegmentReader t in subReaders)
+            {
+                t.RollbackCommit();
+            }
+        }
 
-    	public override IDictionary<string, string> CommitUserData
+        public override IDictionary<string, string> CommitUserData
         {
             get
             {
@@ -1027,19 +1027,19 @@ namespace Lucene.Net.Index
                 normsCache = null;
                 foreach (SegmentReader t in subReaders)
                 {
-					// try to close each reader, even if an exception is thrown
-                	try
-                	{
-                		t.DecRef();
-                	}
-                	catch (System.IO.IOException e)
-                	{
-                		if (ioe == null)
-                			ioe = e;
-                	}
+                    // try to close each reader, even if an exception is thrown
+                    try
+                    {
+                        t.DecRef();
+                    }
+                    catch (System.IO.IOException e)
+                    {
+                        if (ioe == null)
+                            ioe = e;
+                    }
                 }
 
-            	// NOTE: only needed in case someone had asked for
+                // NOTE: only needed in case someone had asked for
                 // FieldCache for top-level reader (which is generally
                 // not a good idea):
                 Search.FieldCache_Fields.DEFAULT.Purge(this);
@@ -1111,31 +1111,31 @@ namespace Lucene.Net.Index
             
             foreach (string fileName in files)
             {
-            	if (fileName.StartsWith(IndexFileNames.SEGMENTS) && !fileName.Equals(IndexFileNames.SEGMENTS_GEN) && SegmentInfos.GenerationFromSegmentsFileName(fileName) < currentGen)
-            	{
+                if (fileName.StartsWith(IndexFileNames.SEGMENTS) && !fileName.Equals(IndexFileNames.SEGMENTS_GEN) && SegmentInfos.GenerationFromSegmentsFileName(fileName) < currentGen)
+                {
                     
-            		var sis = new SegmentInfos();
-            		try
-            		{
-            			// IOException allowed to throw there, in case
-            			// segments_N is corrupt
-            			sis.Read(dir, fileName);
-            		}
-            		catch (System.IO.FileNotFoundException)
-            		{
-            			// LUCENE-948: on NFS (and maybe others), if
-            			// you have writers switching back and forth
-            			// between machines, it's very likely that the
-            			// dir listing will be stale and will claim a
-            			// file segments_X exists when in fact it
-            			// doesn't.  So, we catch this and handle it
-            			// as if the file does not exist
-            			sis = null;
-            		}
+                    var sis = new SegmentInfos();
+                    try
+                    {
+                        // IOException allowed to throw there, in case
+                        // segments_N is corrupt
+                        sis.Read(dir, fileName);
+                    }
+                    catch (System.IO.FileNotFoundException)
+                    {
+                        // LUCENE-948: on NFS (and maybe others), if
+                        // you have writers switching back and forth
+                        // between machines, it's very likely that the
+                        // dir listing will be stale and will claim a
+                        // file segments_X exists when in fact it
+                        // doesn't.  So, we catch this and handle it
+                        // as if the file does not exist
+                        sis = null;
+                    }
                     
-            		if (sis != null)
-            			commits.Add(new ReaderCommit(sis, dir));
-            	}
+                    if (sis != null)
+                        commits.Add(new ReaderCommit(sis, dir));
+                }
             }
             
             return commits;
@@ -1144,12 +1144,12 @@ namespace Lucene.Net.Index
         private sealed class ReaderCommit:IndexCommit
         {
             private readonly String segmentsFileName;
-        	private readonly ICollection<string> files;
-        	private readonly Directory dir;
-        	private readonly long generation;
-        	private readonly long version;
-        	private readonly bool isOptimized;
-        	private readonly IDictionary<string, string> userData;
+            private readonly ICollection<string> files;
+            private readonly Directory dir;
+            private readonly long generation;
+            private readonly long version;
+            private readonly bool isOptimized;
+            private readonly IDictionary<string, string> userData;
             
             internal ReaderCommit(SegmentInfos infos, Directory dir)
             {
@@ -1230,10 +1230,10 @@ namespace Lucene.Net.Index
                 {
                     IndexReader reader = readers[i];
 
-                	TermEnum termEnum = t != null ? reader.Terms(t) : reader.Terms();
+                    TermEnum termEnum = t != null ? reader.Terms(t) : reader.Terms();
 
-                	var smi = new SegmentMergeInfo(starts[i], termEnum, reader) {ord = i};
-                	if (t == null?smi.Next():termEnum.Term != null)
+                    var smi = new SegmentMergeInfo(starts[i], termEnum, reader) {ord = i};
+                    if (t == null?smi.Next():termEnum.Term != null)
                         queue.Add(smi);
                     // initialize queue
                     else
@@ -1250,12 +1250,12 @@ namespace Lucene.Net.Index
             {
                 foreach (SegmentMergeInfo smi in matchingSegments)
                 {
-                	if (smi == null)
-                		break;
-                	if (smi.Next())
-                		queue.Add(smi);
-                	else
-                		smi.Dispose(); // done with segment
+                    if (smi == null)
+                        break;
+                    if (smi.Next())
+                        queue.Add(smi);
+                    else
+                        smi.Dispose(); // done with segment
                 }
                 
                 int numMatchingSegments = 0;
@@ -1353,13 +1353,13 @@ namespace Lucene.Net.Index
             public virtual void  Seek(TermEnum termEnum)
             {
                 Seek(termEnum.Term);
-            	var multiTermEnum = termEnum as MultiTermEnum;
-            	if (multiTermEnum != null)
-            	{
-            		tenum = multiTermEnum;
-            		if (topReader != tenum.topReader)
-            			tenum = null;
-            	}
+                var multiTermEnum = termEnum as MultiTermEnum;
+                if (multiTermEnum != null)
+                {
+                    tenum = multiTermEnum;
+                    if (topReader != tenum.topReader)
+                        tenum = null;
+                }
             }
             
             public virtual bool Next()
@@ -1469,7 +1469,7 @@ namespace Lucene.Net.Index
             private TermDocs TermDocs(int i)
             {
                 TermDocs result = readerTermDocs[i] ?? (readerTermDocs[i] = TermDocs(readers[i]));
-            	if (smi != null)
+                if (smi != null)
                 {
                     System.Diagnostics.Debug.Assert((smi.ord == i));
                     System.Diagnostics.Debug.Assert((smi.termEnum.Term.Equals(term)));
@@ -1503,8 +1503,8 @@ namespace Lucene.Net.Index
                 {
                     foreach (TermDocs t in readerTermDocs)
                     {
-                    	if (t != null)
-                    		t.Close();
+                        if (t != null)
+                            t.Close();
                     }
                 }
             }
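
The DecRef loop in the DirectoryReader diff above closes every sub-reader even
when one throws: it remembers only the first IOException and lets the loop run
to completion (the surrounding method then rethrows the saved exception once
cleanup is done). A minimal standalone sketch of that pattern; ISubReader and
CloseSubReaders are hypothetical names, not part of the Lucene.Net API.

using System.IO;

interface ISubReader
{
    void DecRef();
}

static class CloseAll
{
    public static void CloseSubReaders(ISubReader[] subReaders)
    {
        IOException ioe = null;
        foreach (ISubReader t in subReaders)
        {
            // try to close each reader, even if an exception is thrown
            try
            {
                t.DecRef();
            }
            catch (IOException e)
            {
                if (ioe == null)
                    ioe = e; // remember only the first failure; keep closing
            }
        }
        // surface the first failure only after every reader has been visited
        if (ioe != null)
            throw ioe;
    }
}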

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocConsumer.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocConsumer.cs b/src/core/Index/DocConsumer.cs
index 238e38c..e5ea817 100644
--- a/src/core/Index/DocConsumer.cs
+++ b/src/core/Index/DocConsumer.cs
@@ -19,13 +19,13 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	abstract class DocConsumer
-	{
-		public abstract DocConsumerPerThread AddThread(DocumentsWriterThreadState perThread);
-		public abstract void  Flush(System.Collections.Generic.ICollection<DocConsumerPerThread> threads, SegmentWriteState state);
-		public abstract void  CloseDocStore(SegmentWriteState state);
-		public abstract void  Abort();
-		public abstract bool FreeRAM();
-	}
+    
+    abstract class DocConsumer
+    {
+        public abstract DocConsumerPerThread AddThread(DocumentsWriterThreadState perThread);
+        public abstract void  Flush(System.Collections.Generic.ICollection<DocConsumerPerThread> threads, SegmentWriteState state);
+        public abstract void  CloseDocStore(SegmentWriteState state);
+        public abstract void  Abort();
+        public abstract bool FreeRAM();
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocConsumerPerThread.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocConsumerPerThread.cs b/src/core/Index/DocConsumerPerThread.cs
index 7c7ed02..d92457d 100644
--- a/src/core/Index/DocConsumerPerThread.cs
+++ b/src/core/Index/DocConsumerPerThread.cs
@@ -19,19 +19,19 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	abstract class DocConsumerPerThread
-	{
-		
-		/// <summary>Process the document. If there is
-		/// something for this document to be done in docID order,
-		/// you should encapsulate that as a
-		/// DocumentsWriter.DocWriter and return it.
-		/// DocumentsWriter then calls finish() on this object
-		/// when it's its turn. 
-		/// </summary>
-		public abstract DocumentsWriter.DocWriter ProcessDocument();
-		
-		public abstract void  Abort();
-	}
+    
+    abstract class DocConsumerPerThread
+    {
+        
+        /// <summary>Process the document. If there is
+        /// something for this document to be done in docID order,
+        /// you should encapsulate that as a
+        /// DocumentsWriter.DocWriter and return it.
+        /// DocumentsWriter then calls finish() on this object
+        /// when it's its turn. 
+        /// </summary>
+        public abstract DocumentsWriter.DocWriter ProcessDocument();
+        
+        public abstract void  Abort();
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocFieldConsumer.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldConsumer.cs b/src/core/Index/DocFieldConsumer.cs
index 7fc59da..ef4abaf 100644
--- a/src/core/Index/DocFieldConsumer.cs
+++ b/src/core/Index/DocFieldConsumer.cs
@@ -20,37 +20,37 @@ using System.Collections.Generic;
 
 namespace Lucene.Net.Index
 {
-	
-	abstract class DocFieldConsumer
-	{
-		
-		internal FieldInfos fieldInfos;
-		
-		/// <summary>Called when DocumentsWriter decides to create a new
-		/// segment 
-		/// </summary>
+    
+    abstract class DocFieldConsumer
+    {
+        
+        internal FieldInfos fieldInfos;
+        
+        /// <summary>Called when DocumentsWriter decides to create a new
+        /// segment 
+        /// </summary>
         public abstract void Flush(IDictionary<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state);
-		
-		/// <summary>Called when DocumentsWriter decides to close the doc
-		/// stores 
-		/// </summary>
-		public abstract void  CloseDocStore(SegmentWriteState state);
-		
-		/// <summary>Called when an aborting exception is hit </summary>
-		public abstract void  Abort();
-		
-		/// <summary>Add a new thread </summary>
-		public abstract DocFieldConsumerPerThread AddThread(DocFieldProcessorPerThread docFieldProcessorPerThread);
-		
-		/// <summary>Called when DocumentsWriter is using too much RAM.
-		/// The consumer should free RAM, if possible, returning
-		/// true if any RAM was in fact freed. 
-		/// </summary>
-		public abstract bool FreeRAM();
-		
-		internal virtual void  SetFieldInfos(FieldInfos fieldInfos)
-		{
-			this.fieldInfos = fieldInfos;
-		}
-	}
+        
+        /// <summary>Called when DocumentsWriter decides to close the doc
+        /// stores 
+        /// </summary>
+        public abstract void  CloseDocStore(SegmentWriteState state);
+        
+        /// <summary>Called when an aborting exception is hit </summary>
+        public abstract void  Abort();
+        
+        /// <summary>Add a new thread </summary>
+        public abstract DocFieldConsumerPerThread AddThread(DocFieldProcessorPerThread docFieldProcessorPerThread);
+        
+        /// <summary>Called when DocumentsWriter is using too much RAM.
+        /// The consumer should free RAM, if possible, returning
+        /// true if any RAM was in fact freed. 
+        /// </summary>
+        public abstract bool FreeRAM();
+        
+        internal virtual void  SetFieldInfos(FieldInfos fieldInfos)
+        {
+            this.fieldInfos = fieldInfos;
+        }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocFieldConsumerPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldConsumerPerField.cs b/src/core/Index/DocFieldConsumerPerField.cs
index 27636e2..54d4743 100644
--- a/src/core/Index/DocFieldConsumerPerField.cs
+++ b/src/core/Index/DocFieldConsumerPerField.cs
@@ -20,11 +20,11 @@ using Lucene.Net.Documents;
 
 namespace Lucene.Net.Index
 {
-	
-	abstract class DocFieldConsumerPerField
-	{
-		/// <summary>Processes all occurrences of a single field </summary>
-		public abstract void  ProcessFields(IFieldable[] fields, int count);
-		public abstract void  Abort();
-	}
+    
+    abstract class DocFieldConsumerPerField
+    {
+        /// <summary>Processes all occurrences of a single field </summary>
+        public abstract void  ProcessFields(IFieldable[] fields, int count);
+        public abstract void  Abort();
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocFieldConsumerPerThread.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldConsumerPerThread.cs b/src/core/Index/DocFieldConsumerPerThread.cs
index 8f533ac..1b8b58f 100644
--- a/src/core/Index/DocFieldConsumerPerThread.cs
+++ b/src/core/Index/DocFieldConsumerPerThread.cs
@@ -19,12 +19,12 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	abstract class DocFieldConsumerPerThread
-	{
-		public abstract void  StartDocument();
-		public abstract DocumentsWriter.DocWriter FinishDocument();
-		public abstract DocFieldConsumerPerField AddField(FieldInfo fi);
-		public abstract void  Abort();
-	}
+    
+    abstract class DocFieldConsumerPerThread
+    {
+        public abstract void  StartDocument();
+        public abstract DocumentsWriter.DocWriter FinishDocument();
+        public abstract DocFieldConsumerPerField AddField(FieldInfo fi);
+        public abstract void  Abort();
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocFieldConsumers.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldConsumers.cs b/src/core/Index/DocFieldConsumers.cs
index 61b9b1d..11db7ec 100644
--- a/src/core/Index/DocFieldConsumers.cs
+++ b/src/core/Index/DocFieldConsumers.cs
@@ -22,200 +22,200 @@ using ArrayUtil = Lucene.Net.Util.ArrayUtil;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>This is just a "splitter" class: it lets you wrap two
-	/// DocFieldConsumer instances as a single consumer. 
-	/// </summary>
-	
-	sealed class DocFieldConsumers : DocFieldConsumer
-	{
-		private void  InitBlock()
-		{
-			docFreeList = new PerDoc[1];
-		}
-		internal DocFieldConsumer one;
-		internal DocFieldConsumer two;
-		
-		public DocFieldConsumers(DocFieldConsumer one, DocFieldConsumer two)
-		{
-			InitBlock();
-			this.one = one;
-			this.two = two;
-		}
-		
-		internal override void  SetFieldInfos(FieldInfos fieldInfos)
-		{
-			base.SetFieldInfos(fieldInfos);
-			one.SetFieldInfos(fieldInfos);
-			two.SetFieldInfos(fieldInfos);
-		}
+    
+    /// <summary>This is just a "splitter" class: it lets you wrap two
+    /// DocFieldConsumer instances as a single consumer. 
+    /// </summary>
+    
+    sealed class DocFieldConsumers : DocFieldConsumer
+    {
+        private void  InitBlock()
+        {
+            docFreeList = new PerDoc[1];
+        }
+        internal DocFieldConsumer one;
+        internal DocFieldConsumer two;
+        
+        public DocFieldConsumers(DocFieldConsumer one, DocFieldConsumer two)
+        {
+            InitBlock();
+            this.one = one;
+            this.two = two;
+        }
+        
+        internal override void  SetFieldInfos(FieldInfos fieldInfos)
+        {
+            base.SetFieldInfos(fieldInfos);
+            one.SetFieldInfos(fieldInfos);
+            two.SetFieldInfos(fieldInfos);
+        }
 
         public override void Flush(IDictionary<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state)
-		{
+        {
 
             var oneThreadsAndFields = new HashMap<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>>();
-			var twoThreadsAndFields = new HashMap<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>>();
-			
-			foreach(var entry in threadsAndFields)
-			{
-				DocFieldConsumersPerThread perThread = (DocFieldConsumersPerThread) entry.Key;
+            var twoThreadsAndFields = new HashMap<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>>();
+            
+            foreach(var entry in threadsAndFields)
+            {
+                DocFieldConsumersPerThread perThread = (DocFieldConsumersPerThread) entry.Key;
                 ICollection<DocFieldConsumerPerField> fields = entry.Value;
 
                 IEnumerator<DocFieldConsumerPerField> fieldsIt = fields.GetEnumerator();
                 ICollection<DocFieldConsumerPerField> oneFields = new HashSet<DocFieldConsumerPerField>();
                 ICollection<DocFieldConsumerPerField> twoFields = new HashSet<DocFieldConsumerPerField>();
-				while (fieldsIt.MoveNext())
-				{
-					DocFieldConsumersPerField perField = (DocFieldConsumersPerField) fieldsIt.Current;
-					oneFields.Add(perField.one);
-					twoFields.Add(perField.two);
-				}
-				
-				oneThreadsAndFields[perThread.one] = oneFields;
-				twoThreadsAndFields[perThread.two] = twoFields;
-			}
-			
-			
-			one.Flush(oneThreadsAndFields, state);
-			two.Flush(twoThreadsAndFields, state);
-		}
+                while (fieldsIt.MoveNext())
+                {
+                    DocFieldConsumersPerField perField = (DocFieldConsumersPerField) fieldsIt.Current;
+                    oneFields.Add(perField.one);
+                    twoFields.Add(perField.two);
+                }
+                
+                oneThreadsAndFields[perThread.one] = oneFields;
+                twoThreadsAndFields[perThread.two] = twoFields;
+            }
+            
+            
+            one.Flush(oneThreadsAndFields, state);
+            two.Flush(twoThreadsAndFields, state);
+        }
 
-	    public override void  CloseDocStore(SegmentWriteState state)
-		{
-			try
-			{
-				one.CloseDocStore(state);
-			}
-			finally
-			{
-				two.CloseDocStore(state);
-			}
-		}
-		
-		public override void  Abort()
-		{
-			try
-			{
-				one.Abort();
-			}
-			finally
-			{
-				two.Abort();
-			}
-		}
-		
-		public override bool FreeRAM()
-		{
-			bool any = one.FreeRAM();
-			any |= two.FreeRAM();
-			return any;
-		}
-		
-		public override DocFieldConsumerPerThread AddThread(DocFieldProcessorPerThread docFieldProcessorPerThread)
-		{
-			return new DocFieldConsumersPerThread(docFieldProcessorPerThread, this, one.AddThread(docFieldProcessorPerThread), two.AddThread(docFieldProcessorPerThread));
-		}
-		
-		internal PerDoc[] docFreeList;
-		internal int freeCount;
-		internal int allocCount;
-		
-		internal PerDoc GetPerDoc()
-		{
-			lock (this)
-			{
-				if (freeCount == 0)
-				{
-					allocCount++;
-					if (allocCount > docFreeList.Length)
-					{
-						// Grow our free list up front to make sure we have
-						// enough space to recycle all outstanding PerDoc
-						// instances
-						System.Diagnostics.Debug.Assert(allocCount == 1 + docFreeList.Length);
-						docFreeList = new PerDoc[ArrayUtil.GetNextSize(allocCount)];
-					}
-					return new PerDoc(this);
-				}
-				else
-					return docFreeList[--freeCount];
-			}
-		}
-		
-		internal void  FreePerDoc(PerDoc perDoc)
-		{
-			lock (this)
-			{
-				System.Diagnostics.Debug.Assert(freeCount < docFreeList.Length);
-				docFreeList[freeCount++] = perDoc;
-			}
-		}
-		
-		internal class PerDoc:DocumentsWriter.DocWriter
-		{
-			public PerDoc(DocFieldConsumers enclosingInstance)
-			{
-				InitBlock(enclosingInstance);
-			}
-			private void  InitBlock(DocFieldConsumers enclosingInstance)
-			{
-				this.enclosingInstance = enclosingInstance;
-			}
-			private DocFieldConsumers enclosingInstance;
-			public DocFieldConsumers Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			
-			internal DocumentsWriter.DocWriter one;
-			internal DocumentsWriter.DocWriter two;
-			
-			public override long SizeInBytes()
-			{
-				return one.SizeInBytes() + two.SizeInBytes();
-			}
-			
-			public override void  Finish()
-			{
-				try
-				{
-					try
-					{
-						one.Finish();
-					}
-					finally
-					{
-						two.Finish();
-					}
-				}
-				finally
-				{
-					Enclosing_Instance.FreePerDoc(this);
-				}
-			}
-			
-			public override void  Abort()
-			{
-				try
-				{
-					try
-					{
-						one.Abort();
-					}
-					finally
-					{
-						two.Abort();
-					}
-				}
-				finally
-				{
-					Enclosing_Instance.FreePerDoc(this);
-				}
-			}
-		}
-	}
+        public override void  CloseDocStore(SegmentWriteState state)
+        {
+            try
+            {
+                one.CloseDocStore(state);
+            }
+            finally
+            {
+                two.CloseDocStore(state);
+            }
+        }
+        
+        public override void  Abort()
+        {
+            try
+            {
+                one.Abort();
+            }
+            finally
+            {
+                two.Abort();
+            }
+        }
+        
+        public override bool FreeRAM()
+        {
+            bool any = one.FreeRAM();
+            any |= two.FreeRAM();
+            return any;
+        }
+        
+        public override DocFieldConsumerPerThread AddThread(DocFieldProcessorPerThread docFieldProcessorPerThread)
+        {
+            return new DocFieldConsumersPerThread(docFieldProcessorPerThread, this, one.AddThread(docFieldProcessorPerThread), two.AddThread(docFieldProcessorPerThread));
+        }
+        
+        internal PerDoc[] docFreeList;
+        internal int freeCount;
+        internal int allocCount;
+        
+        internal PerDoc GetPerDoc()
+        {
+            lock (this)
+            {
+                if (freeCount == 0)
+                {
+                    allocCount++;
+                    if (allocCount > docFreeList.Length)
+                    {
+                        // Grow our free list up front to make sure we have
+                        // enough space to recycle all outstanding PerDoc
+                        // instances
+                        System.Diagnostics.Debug.Assert(allocCount == 1 + docFreeList.Length);
+                        docFreeList = new PerDoc[ArrayUtil.GetNextSize(allocCount)];
+                    }
+                    return new PerDoc(this);
+                }
+                else
+                    return docFreeList[--freeCount];
+            }
+        }
+        
+        internal void  FreePerDoc(PerDoc perDoc)
+        {
+            lock (this)
+            {
+                System.Diagnostics.Debug.Assert(freeCount < docFreeList.Length);
+                docFreeList[freeCount++] = perDoc;
+            }
+        }
+        
+        internal class PerDoc:DocumentsWriter.DocWriter
+        {
+            public PerDoc(DocFieldConsumers enclosingInstance)
+            {
+                InitBlock(enclosingInstance);
+            }
+            private void  InitBlock(DocFieldConsumers enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
+            private DocFieldConsumers enclosingInstance;
+            public DocFieldConsumers Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+                
+            }
+            
+            internal DocumentsWriter.DocWriter one;
+            internal DocumentsWriter.DocWriter two;
+            
+            public override long SizeInBytes()
+            {
+                return one.SizeInBytes() + two.SizeInBytes();
+            }
+            
+            public override void  Finish()
+            {
+                try
+                {
+                    try
+                    {
+                        one.Finish();
+                    }
+                    finally
+                    {
+                        two.Finish();
+                    }
+                }
+                finally
+                {
+                    Enclosing_Instance.FreePerDoc(this);
+                }
+            }
+            
+            public override void  Abort()
+            {
+                try
+                {
+                    try
+                    {
+                        one.Abort();
+                    }
+                    finally
+                    {
+                        two.Abort();
+                    }
+                }
+                finally
+                {
+                    Enclosing_Instance.FreePerDoc(this);
+                }
+            }
+        }
+    }
 }
\ No newline at end of file
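
GetPerDoc/FreePerDoc in the DocFieldConsumers diff above form a small
synchronized free list: allocate when the cache is empty, grow the list ahead
of demand so every outstanding PerDoc can later be recycled, and push finished
instances back. A compact sketch of the same idea; Pool and Pooled are
hypothetical names, and simple doubling stands in for ArrayUtil.GetNextSize.

using System;
using System.Diagnostics;

class Pooled { }

class Pool
{
    private Pooled[] freeList = new Pooled[1];
    private int freeCount;
    private int allocCount;

    public Pooled Get()
    {
        lock (this) // mirrors the original's lock (this)
        {
            if (freeCount == 0)
            {
                allocCount++;
                if (allocCount > freeList.Length)
                {
                    // Grow up front so every outstanding instance can be
                    // recycled later without a second resize.
                    Debug.Assert(allocCount == 1 + freeList.Length);
                    Array.Resize(ref freeList, allocCount * 2);
                }
                return new Pooled();
            }
            return freeList[--freeCount];
        }
    }

    public void Free(Pooled item)
    {
        lock (this)
        {
            Debug.Assert(freeCount < freeList.Length);
            freeList[freeCount++] = item;
        }
    }
}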

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocFieldConsumersPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldConsumersPerField.cs b/src/core/Index/DocFieldConsumersPerField.cs
index 71e96e0..e8ae3ea 100644
--- a/src/core/Index/DocFieldConsumersPerField.cs
+++ b/src/core/Index/DocFieldConsumersPerField.cs
@@ -20,37 +20,37 @@ using Lucene.Net.Documents;
 
 namespace Lucene.Net.Index
 {
-	
-	sealed class DocFieldConsumersPerField:DocFieldConsumerPerField
-	{
-		
-		internal DocFieldConsumerPerField one;
-		internal DocFieldConsumerPerField two;
-		internal DocFieldConsumersPerThread perThread;
-		
-		public DocFieldConsumersPerField(DocFieldConsumersPerThread perThread, DocFieldConsumerPerField one, DocFieldConsumerPerField two)
-		{
-			this.perThread = perThread;
-			this.one = one;
-			this.two = two;
-		}
-		
-		public override void  ProcessFields(IFieldable[] fields, int count)
-		{
-			one.ProcessFields(fields, count);
-			two.ProcessFields(fields, count);
-		}
-		
-		public override void  Abort()
-		{
-			try
-			{
-				one.Abort();
-			}
-			finally
-			{
-				two.Abort();
-			}
-		}
-	}
+    
+    sealed class DocFieldConsumersPerField:DocFieldConsumerPerField
+    {
+        
+        internal DocFieldConsumerPerField one;
+        internal DocFieldConsumerPerField two;
+        internal DocFieldConsumersPerThread perThread;
+        
+        public DocFieldConsumersPerField(DocFieldConsumersPerThread perThread, DocFieldConsumerPerField one, DocFieldConsumerPerField two)
+        {
+            this.perThread = perThread;
+            this.one = one;
+            this.two = two;
+        }
+        
+        public override void  ProcessFields(IFieldable[] fields, int count)
+        {
+            one.ProcessFields(fields, count);
+            two.ProcessFields(fields, count);
+        }
+        
+        public override void  Abort()
+        {
+            try
+            {
+                one.Abort();
+            }
+            finally
+            {
+                two.Abort();
+            }
+        }
+    }
 }
\ No newline at end of file
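
Abort in the splitter classes above uses try/finally so the second wrapped
consumer is always aborted even if the first one throws. The pattern in
isolation, with hypothetical IChild/AbortBoth names:

interface IChild
{
    void Abort();
}

static class Splitter
{
    public static void AbortBoth(IChild one, IChild two)
    {
        try
        {
            one.Abort();
        }
        finally
        {
            // runs even if one.Abort() threw, so both children get aborted
            two.Abort();
        }
    }
}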

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocFieldConsumersPerThread.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldConsumersPerThread.cs b/src/core/Index/DocFieldConsumersPerThread.cs
index 7098966..eea1378 100644
--- a/src/core/Index/DocFieldConsumersPerThread.cs
+++ b/src/core/Index/DocFieldConsumersPerThread.cs
@@ -19,64 +19,64 @@ using System;
 
 namespace Lucene.Net.Index
 {
-	
-	sealed class DocFieldConsumersPerThread:DocFieldConsumerPerThread
-	{
-		
-		internal DocFieldConsumerPerThread one;
-		internal DocFieldConsumerPerThread two;
-		internal DocFieldConsumers parent;
-		internal DocumentsWriter.DocState docState;
-		
-		public DocFieldConsumersPerThread(DocFieldProcessorPerThread docFieldProcessorPerThread, DocFieldConsumers parent, DocFieldConsumerPerThread one, DocFieldConsumerPerThread two)
-		{
-			this.parent = parent;
-			this.one = one;
-			this.two = two;
-			docState = docFieldProcessorPerThread.docState;
-		}
-		
-		public override void  StartDocument()
-		{
-			one.StartDocument();
-			two.StartDocument();
-		}
-		
-		public override void  Abort()
-		{
-			try
-			{
-				one.Abort();
-			}
-			finally
-			{
-				two.Abort();
-			}
-		}
-		
-		public override DocumentsWriter.DocWriter FinishDocument()
-		{
-			DocumentsWriter.DocWriter oneDoc = one.FinishDocument();
-			DocumentsWriter.DocWriter twoDoc = two.FinishDocument();
-			if (oneDoc == null)
-				return twoDoc;
-			else if (twoDoc == null)
-				return oneDoc;
-			else
-			{
-				DocFieldConsumers.PerDoc both = parent.GetPerDoc();
-				both.docID = docState.docID;
-				System.Diagnostics.Debug.Assert(oneDoc.docID == docState.docID);
-				System.Diagnostics.Debug.Assert(twoDoc.docID == docState.docID);
-				both.one = oneDoc;
-				both.two = twoDoc;
-				return both;
-			}
-		}
-		
-		public override DocFieldConsumerPerField AddField(FieldInfo fi)
-		{
-			return new DocFieldConsumersPerField(this, one.AddField(fi), two.AddField(fi));
-		}
-	}
+    
+    sealed class DocFieldConsumersPerThread:DocFieldConsumerPerThread
+    {
+        
+        internal DocFieldConsumerPerThread one;
+        internal DocFieldConsumerPerThread two;
+        internal DocFieldConsumers parent;
+        internal DocumentsWriter.DocState docState;
+        
+        public DocFieldConsumersPerThread(DocFieldProcessorPerThread docFieldProcessorPerThread, DocFieldConsumers parent, DocFieldConsumerPerThread one, DocFieldConsumerPerThread two)
+        {
+            this.parent = parent;
+            this.one = one;
+            this.two = two;
+            docState = docFieldProcessorPerThread.docState;
+        }
+        
+        public override void  StartDocument()
+        {
+            one.StartDocument();
+            two.StartDocument();
+        }
+        
+        public override void  Abort()
+        {
+            try
+            {
+                one.Abort();
+            }
+            finally
+            {
+                two.Abort();
+            }
+        }
+        
+        public override DocumentsWriter.DocWriter FinishDocument()
+        {
+            DocumentsWriter.DocWriter oneDoc = one.FinishDocument();
+            DocumentsWriter.DocWriter twoDoc = two.FinishDocument();
+            if (oneDoc == null)
+                return twoDoc;
+            else if (twoDoc == null)
+                return oneDoc;
+            else
+            {
+                DocFieldConsumers.PerDoc both = parent.GetPerDoc();
+                both.docID = docState.docID;
+                System.Diagnostics.Debug.Assert(oneDoc.docID == docState.docID);
+                System.Diagnostics.Debug.Assert(twoDoc.docID == docState.docID);
+                both.one = oneDoc;
+                both.two = twoDoc;
+                return both;
+            }
+        }
+        
+        public override DocFieldConsumerPerField AddField(FieldInfo fi)
+        {
+            return new DocFieldConsumersPerField(this, one.AddField(fi), two.AddField(fi));
+        }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocFieldProcessor.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldProcessor.cs b/src/core/Index/DocFieldProcessor.cs
index 4289118..0fce156 100644
--- a/src/core/Index/DocFieldProcessor.cs
+++ b/src/core/Index/DocFieldProcessor.cs
@@ -22,71 +22,71 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> This is a DocConsumer that gathers all fields under the
-	/// same name, and calls per-field consumers to process field
-	/// by field.  This class doesn't doesn't do any "real" work
-	/// of its own: it just forwards the fields to a
-	/// DocFieldConsumer.
-	/// </summary>
-	
-	sealed class DocFieldProcessor : DocConsumer
-	{
-		
-		internal DocumentsWriter docWriter;
-		internal FieldInfos fieldInfos = new FieldInfos();
-		internal DocFieldConsumer consumer;
-		internal StoredFieldsWriter fieldsWriter;
-		
-		public DocFieldProcessor(DocumentsWriter docWriter, DocFieldConsumer consumer)
-		{
-			this.docWriter = docWriter;
-			this.consumer = consumer;
-			consumer.SetFieldInfos(fieldInfos);
-			fieldsWriter = new StoredFieldsWriter(docWriter, fieldInfos);
-		}
-		
-		public override void  CloseDocStore(SegmentWriteState state)
-		{
-			consumer.CloseDocStore(state);
-			fieldsWriter.CloseDocStore(state);
-		}
-		
-		public override void Flush(ICollection<DocConsumerPerThread> threads, SegmentWriteState state)
-		{
-			var childThreadsAndFields = new HashMap<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>>();
-			foreach(DocConsumerPerThread thread in threads)
-			{
+    
+    /// <summary> This is a DocConsumer that gathers all fields under the
+    /// same name, and calls per-field consumers to process field
+    /// by field.  This class doesn't doesn't do any "real" work
+    /// of its own: it just forwards the fields to a
+    /// DocFieldConsumer.
+    /// </summary>
+    
+    sealed class DocFieldProcessor : DocConsumer
+    {
+        
+        internal DocumentsWriter docWriter;
+        internal FieldInfos fieldInfos = new FieldInfos();
+        internal DocFieldConsumer consumer;
+        internal StoredFieldsWriter fieldsWriter;
+        
+        public DocFieldProcessor(DocumentsWriter docWriter, DocFieldConsumer consumer)
+        {
+            this.docWriter = docWriter;
+            this.consumer = consumer;
+            consumer.SetFieldInfos(fieldInfos);
+            fieldsWriter = new StoredFieldsWriter(docWriter, fieldInfos);
+        }
+        
+        public override void  CloseDocStore(SegmentWriteState state)
+        {
+            consumer.CloseDocStore(state);
+            fieldsWriter.CloseDocStore(state);
+        }
+        
+        public override void Flush(ICollection<DocConsumerPerThread> threads, SegmentWriteState state)
+        {
+            var childThreadsAndFields = new HashMap<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>>();
+            foreach(DocConsumerPerThread thread in threads)
+            {
                 DocFieldProcessorPerThread perThread = (DocFieldProcessorPerThread)thread;
-				childThreadsAndFields[perThread.consumer] = perThread.Fields();
-				perThread.TrimFields(state);
-			}
-			fieldsWriter.Flush(state);
-			consumer.Flush(childThreadsAndFields, state);
-			
-			// Important to save after asking consumer to flush so
-			// consumer can alter the FieldInfo* if necessary.  EG,
-			// FreqProxTermsWriter does this with
-			// FieldInfo.storePayload.
-			System.String fileName = state.SegmentFileName(IndexFileNames.FIELD_INFOS_EXTENSION);
-			fieldInfos.Write(state.directory, fileName);
+                childThreadsAndFields[perThread.consumer] = perThread.Fields();
+                perThread.TrimFields(state);
+            }
+            fieldsWriter.Flush(state);
+            consumer.Flush(childThreadsAndFields, state);
+            
+            // Important to save after asking consumer to flush so
+            // consumer can alter the FieldInfo* if necessary.  EG,
+            // FreqProxTermsWriter does this with
+            // FieldInfo.storePayload.
+            System.String fileName = state.SegmentFileName(IndexFileNames.FIELD_INFOS_EXTENSION);
+            fieldInfos.Write(state.directory, fileName);
             state.flushedFiles.Add(fileName);
-		}
-		
-		public override void  Abort()
-		{
-			fieldsWriter.Abort();
-			consumer.Abort();
-		}
-		
-		public override bool FreeRAM()
-		{
-			return consumer.FreeRAM();
-		}
-		
-		public override DocConsumerPerThread AddThread(DocumentsWriterThreadState threadState)
-		{
-			return new DocFieldProcessorPerThread(threadState, this);
-		}
-	}
+        }
+        
+        public override void  Abort()
+        {
+            fieldsWriter.Abort();
+            consumer.Abort();
+        }
+        
+        public override bool FreeRAM()
+        {
+            return consumer.FreeRAM();
+        }
+        
+        public override DocConsumerPerThread AddThread(DocumentsWriterThreadState threadState)
+        {
+            return new DocFieldProcessorPerThread(threadState, this);
+        }
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocFieldProcessorPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldProcessorPerField.cs b/src/core/Index/DocFieldProcessorPerField.cs
index 1078988..86a03e7 100644
--- a/src/core/Index/DocFieldProcessorPerField.cs
+++ b/src/core/Index/DocFieldProcessorPerField.cs
@@ -20,30 +20,30 @@ using Lucene.Net.Documents;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> Holds all per thread, per field state.</summary>
-	
-	sealed class DocFieldProcessorPerField
-	{
-		
-		internal DocFieldConsumerPerField consumer;
-		internal FieldInfo fieldInfo;
-		
-		internal DocFieldProcessorPerField next;
-		internal int lastGen = - 1;
-		
-		internal int fieldCount;
-		internal IFieldable[] fields = new IFieldable[1];
-		
-		public DocFieldProcessorPerField(DocFieldProcessorPerThread perThread, FieldInfo fieldInfo)
-		{
-			this.consumer = perThread.consumer.AddField(fieldInfo);
-			this.fieldInfo = fieldInfo;
-		}
-		
-		public void  Abort()
-		{
-			consumer.Abort();
-		}
-	}
+    
+    /// <summary> Holds all per thread, per field state.</summary>
+    
+    sealed class DocFieldProcessorPerField
+    {
+        
+        internal DocFieldConsumerPerField consumer;
+        internal FieldInfo fieldInfo;
+        
+        internal DocFieldProcessorPerField next;
+        internal int lastGen = - 1;
+        
+        internal int fieldCount;
+        internal IFieldable[] fields = new IFieldable[1];
+        
+        public DocFieldProcessorPerField(DocFieldProcessorPerThread perThread, FieldInfo fieldInfo)
+        {
+            this.consumer = perThread.consumer.AddField(fieldInfo);
+            this.fieldInfo = fieldInfo;
+        }
+        
+        public void  Abort()
+        {
+            consumer.Abort();
+        }
+    }
 }
\ No newline at end of file
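
The next field above is the collision chain for the fieldHash table kept by
DocFieldProcessorPerThread in the following diff: buckets are indexed by
GetHashCode() & hashMask over a power-of-two table, and Rehash doubles the
table and re-links each entry. A self-contained sketch of that
lookup-or-insert; Entry and FieldTable are hypothetical names, and the
load-factor trigger here is chosen for illustration only.

class Entry
{
    internal string name;
    internal Entry next; // chain link, like DocFieldProcessorPerField.next
    internal Entry(string name) { this.name = name; }
}

class FieldTable
{
    private Entry[] hash = new Entry[2];
    private int hashMask = 1; // table length is always a power of two
    private int count;

    public Entry FindOrAdd(string fieldName)
    {
        int pos = fieldName.GetHashCode() & hashMask;
        Entry e = hash[pos];
        while (e != null && !e.name.Equals(fieldName))
            e = e.next;
        if (e == null)
        {
            // link the new entry at the head of its bucket's chain
            e = new Entry(fieldName) { next = hash[pos] };
            hash[pos] = e;
            if (++count >= hash.Length / 2)
                Rehash(); // illustrative threshold to keep chains short
        }
        return e;
    }

    private void Rehash()
    {
        int newSize = hash.Length * 2;
        var newHash = new Entry[newSize];
        int newMask = newSize - 1;
        foreach (Entry bucket in hash)
        {
            // re-link every chained entry into the doubled table
            for (Entry e = bucket; e != null; )
            {
                Entry next = e.next;
                int pos = e.name.GetHashCode() & newMask;
                e.next = newHash[pos];
                newHash[pos] = e;
                e = next;
            }
        }
        hash = newHash;
        hashMask = newMask;
    }
}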

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocFieldProcessorPerThread.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldProcessorPerThread.cs b/src/core/Index/DocFieldProcessorPerThread.cs
index d108116..45eaff3 100644
--- a/src/core/Index/DocFieldProcessorPerThread.cs
+++ b/src/core/Index/DocFieldProcessorPerThread.cs
@@ -23,186 +23,186 @@ using ArrayUtil = Lucene.Net.Util.ArrayUtil;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary> Gathers all Fieldables for a document under the same
-	/// name, updates FieldInfos, and calls per-field consumers
-	/// to process field by field.
-	/// 
-	/// Currently, only a single thread visits the fields,
-	/// sequentially, for processing.
-	/// </summary>
-	
-	sealed class DocFieldProcessorPerThread:DocConsumerPerThread
-	{
-		private void  InitBlock()
-		{
-			docFreeList = new PerDoc[1];
-		}
-		
-		internal float docBoost;
-		internal int fieldGen;
-		internal DocFieldProcessor docFieldProcessor;
-		internal FieldInfos fieldInfos;
-		internal DocFieldConsumerPerThread consumer;
-		
-		// Holds all fields seen in current doc
-		internal DocFieldProcessorPerField[] fields = new DocFieldProcessorPerField[1];
-		internal int fieldCount;
-		
-		// Hash table for all fields ever seen
-		internal DocFieldProcessorPerField[] fieldHash = new DocFieldProcessorPerField[2];
-		internal int hashMask = 1;
-		internal int totalFieldCount;
-		
-		internal StoredFieldsWriterPerThread fieldsWriter;
-		
-		internal DocumentsWriter.DocState docState;
-		
-		public DocFieldProcessorPerThread(DocumentsWriterThreadState threadState, DocFieldProcessor docFieldProcessor)
-		{
-			InitBlock();
-			this.docState = threadState.docState;
-			this.docFieldProcessor = docFieldProcessor;
-			this.fieldInfos = docFieldProcessor.fieldInfos;
-			this.consumer = docFieldProcessor.consumer.AddThread(this);
-			fieldsWriter = docFieldProcessor.fieldsWriter.AddThread(docState);
-		}
-		
-		public override void  Abort()
-		{
-			for (int i = 0; i < fieldHash.Length; i++)
-			{
-				DocFieldProcessorPerField field = fieldHash[i];
-				while (field != null)
-				{
-					DocFieldProcessorPerField next = field.next;
-					field.Abort();
-					field = next;
-				}
-			}
-			fieldsWriter.Abort();
-			consumer.Abort();
-		}
-		
-		public System.Collections.Generic.ICollection<DocFieldConsumerPerField> Fields()
-		{
-		    System.Collections.Generic.ICollection<DocFieldConsumerPerField> fields =
-		        new System.Collections.Generic.HashSet<DocFieldConsumerPerField>();
-			for (int i = 0; i < fieldHash.Length; i++)
-			{
-				DocFieldProcessorPerField field = fieldHash[i];
-				while (field != null)
-				{
-					fields.Add(field.consumer);
-					field = field.next;
-				}
-			}
-			System.Diagnostics.Debug.Assert(fields.Count == totalFieldCount);
-			return fields;
-		}
-		
-		/// <summary>If there are fields we've seen but did not see again
-		/// in the last run, then free them up. 
-		/// </summary>
-		
-		internal void  TrimFields(SegmentWriteState state)
-		{
-			
-			for (int i = 0; i < fieldHash.Length; i++)
-			{
-				DocFieldProcessorPerField perField = fieldHash[i];
-				DocFieldProcessorPerField lastPerField = null;
-				
-				while (perField != null)
-				{
-					
-					if (perField.lastGen == - 1)
-					{
-						
-						// This field was not seen since the previous
-						// flush, so, free up its resources now
-						
-						// Unhash
-						if (lastPerField == null)
-							fieldHash[i] = perField.next;
-						else
-							lastPerField.next = perField.next;
-						
-						if (state.docWriter.infoStream != null)
-							state.docWriter.infoStream.WriteLine("  purge field=" + perField.fieldInfo.name);
-						
-						totalFieldCount--;
-					}
-					else
-					{
-						// Reset
-						perField.lastGen = - 1;
-						lastPerField = perField;
-					}
-					
-					perField = perField.next;
-				}
-			}
-		}
-		
-		private void  Rehash()
-		{
-			int newHashSize = (fieldHash.Length * 2);
-			System.Diagnostics.Debug.Assert(newHashSize > fieldHash.Length);
-			
-			DocFieldProcessorPerField[] newHashArray = new DocFieldProcessorPerField[newHashSize];
-			
-			// Rehash
-			int newHashMask = newHashSize - 1;
-			for (int j = 0; j < fieldHash.Length; j++)
-			{
-				DocFieldProcessorPerField fp0 = fieldHash[j];
-				while (fp0 != null)
-				{
-					int hashPos2 = fp0.fieldInfo.name.GetHashCode() & newHashMask;
-					DocFieldProcessorPerField nextFP0 = fp0.next;
-					fp0.next = newHashArray[hashPos2];
-					newHashArray[hashPos2] = fp0;
-					fp0 = nextFP0;
-				}
-			}
-			
-			fieldHash = newHashArray;
-			hashMask = newHashMask;
-		}
-		
-		public override DocumentsWriter.DocWriter ProcessDocument()
-		{
-			
-			consumer.StartDocument();
-			fieldsWriter.StartDocument();
-			
-			Document doc = docState.doc;
-			
-			System.Diagnostics.Debug.Assert(docFieldProcessor.docWriter.writer.TestPoint("DocumentsWriter.ThreadState.init start"));
-			
-			fieldCount = 0;
-			
-			int thisFieldGen = fieldGen++;
-			
-			System.Collections.Generic.IList<IFieldable> docFields = doc.GetFields();
-			int numDocFields = docFields.Count;
-			
-			// Absorb any new fields first seen in this document.
-			// Also absorb any changes to fields we had already
-			// seen before (eg suddenly turning on norms or
-			// vectors, etc.):
-			
-			for (int i = 0; i < numDocFields; i++)
-			{
-				IFieldable field = docFields[i];
-				System.String fieldName = field.Name;
-				
-				// Make sure we have a PerField allocated
-				int hashPos = fieldName.GetHashCode() & hashMask;
-				DocFieldProcessorPerField fp = fieldHash[hashPos];
-				while (fp != null && !fp.fieldInfo.name.Equals(fieldName))
-					fp = fp.next;
+    
+    /// <summary> Gathers all Fieldables for a document under the same
+    /// name, updates FieldInfos, and calls per-field consumers
+    /// to process field by field.
+    /// 
+    /// Currently, only a single thread visits the fields,
+    /// sequentially, for processing.
+    /// </summary>
+    
+    sealed class DocFieldProcessorPerThread:DocConsumerPerThread
+    {
+        private void  InitBlock()
+        {
+            docFreeList = new PerDoc[1];
+        }
+        
+        internal float docBoost;
+        internal int fieldGen;
+        internal DocFieldProcessor docFieldProcessor;
+        internal FieldInfos fieldInfos;
+        internal DocFieldConsumerPerThread consumer;
+        
+        // Holds all fields seen in current doc
+        internal DocFieldProcessorPerField[] fields = new DocFieldProcessorPerField[1];
+        internal int fieldCount;
+        
+        // Hash table for all fields ever seen
+        internal DocFieldProcessorPerField[] fieldHash = new DocFieldProcessorPerField[2];
+        internal int hashMask = 1;
+        internal int totalFieldCount;
+        
+        internal StoredFieldsWriterPerThread fieldsWriter;
+        
+        internal DocumentsWriter.DocState docState;
+        
+        public DocFieldProcessorPerThread(DocumentsWriterThreadState threadState, DocFieldProcessor docFieldProcessor)
+        {
+            InitBlock();
+            this.docState = threadState.docState;
+            this.docFieldProcessor = docFieldProcessor;
+            this.fieldInfos = docFieldProcessor.fieldInfos;
+            this.consumer = docFieldProcessor.consumer.AddThread(this);
+            fieldsWriter = docFieldProcessor.fieldsWriter.AddThread(docState);
+        }
+        
+        public override void  Abort()
+        {
+            for (int i = 0; i < fieldHash.Length; i++)
+            {
+                DocFieldProcessorPerField field = fieldHash[i];
+                while (field != null)
+                {
+                    DocFieldProcessorPerField next = field.next;
+                    field.Abort();
+                    field = next;
+                }
+            }
+            fieldsWriter.Abort();
+            consumer.Abort();
+        }
+        
+        public System.Collections.Generic.ICollection<DocFieldConsumerPerField> Fields()
+        {
+            System.Collections.Generic.ICollection<DocFieldConsumerPerField> fields =
+                new System.Collections.Generic.HashSet<DocFieldConsumerPerField>();
+            for (int i = 0; i < fieldHash.Length; i++)
+            {
+                DocFieldProcessorPerField field = fieldHash[i];
+                while (field != null)
+                {
+                    fields.Add(field.consumer);
+                    field = field.next;
+                }
+            }
+            System.Diagnostics.Debug.Assert(fields.Count == totalFieldCount);
+            return fields;
+        }
+        
+        /// <summary>If there are fields we've seen but did not see again
+        /// in the last run, then free them up. 
+        /// </summary>
+        
+        internal void  TrimFields(SegmentWriteState state)
+        {
+            
+            for (int i = 0; i < fieldHash.Length; i++)
+            {
+                DocFieldProcessorPerField perField = fieldHash[i];
+                DocFieldProcessorPerField lastPerField = null;
+                
+                while (perField != null)
+                {
+                    
+                    if (perField.lastGen == - 1)
+                    {
+                        
+                        // This field was not seen since the previous
+                        // flush, so, free up its resources now
+                        
+                        // Unhash
+                        if (lastPerField == null)
+                            fieldHash[i] = perField.next;
+                        else
+                            lastPerField.next = perField.next;
+                        
+                        if (state.docWriter.infoStream != null)
+                            state.docWriter.infoStream.WriteLine("  purge field=" + perField.fieldInfo.name);
+                        
+                        totalFieldCount--;
+                    }
+                    else
+                    {
+                        // Reset
+                        perField.lastGen = - 1;
+                        lastPerField = perField;
+                    }
+                    
+                    perField = perField.next;
+                }
+            }
+        }
+        
+        private void  Rehash()
+        {
+            int newHashSize = (fieldHash.Length * 2);
+            System.Diagnostics.Debug.Assert(newHashSize > fieldHash.Length);
+            
+            DocFieldProcessorPerField[] newHashArray = new DocFieldProcessorPerField[newHashSize];
+            
+            // Rehash
+            int newHashMask = newHashSize - 1;
+            for (int j = 0; j < fieldHash.Length; j++)
+            {
+                DocFieldProcessorPerField fp0 = fieldHash[j];
+                while (fp0 != null)
+                {
+                    int hashPos2 = fp0.fieldInfo.name.GetHashCode() & newHashMask;
+                    DocFieldProcessorPerField nextFP0 = fp0.next;
+                    fp0.next = newHashArray[hashPos2];
+                    newHashArray[hashPos2] = fp0;
+                    fp0 = nextFP0;
+                }
+            }
+            
+            fieldHash = newHashArray;
+            hashMask = newHashMask;
+        }
+        
+        public override DocumentsWriter.DocWriter ProcessDocument()
+        {
+            
+            consumer.StartDocument();
+            fieldsWriter.StartDocument();
+            
+            Document doc = docState.doc;
+            
+            System.Diagnostics.Debug.Assert(docFieldProcessor.docWriter.writer.TestPoint("DocumentsWriter.ThreadState.init start"));
+            
+            fieldCount = 0;
+            
+            int thisFieldGen = fieldGen++;
+            
+            System.Collections.Generic.IList<IFieldable> docFields = doc.GetFields();
+            int numDocFields = docFields.Count;
+            
+            // Absorb any new fields first seen in this document.
+            // Also absorb any changes to fields we had already
+            // seen before (eg suddenly turning on norms or
+            // vectors, etc.):
+            
+            for (int i = 0; i < numDocFields; i++)
+            {
+                IFieldable field = docFields[i];
+                System.String fieldName = field.Name;
+                
+                // Make sure we have a PerField allocated
+                int hashPos = fieldName.GetHashCode() & hashMask;
+                DocFieldProcessorPerField fp = fieldHash[hashPos];
+                while (fp != null && !fp.fieldInfo.name.Equals(fieldName))
+                    fp = fp.next;
 
                 if (fp == null)
                 {
@@ -231,248 +231,248 @@ namespace Lucene.Net.Index
                                         field.OmitNorms, false, field.OmitTermFreqAndPositions);
                 }
 
-			    if (thisFieldGen != fp.lastGen)
-				{
-					
-					// First time we're seeing this field for this doc
-					fp.fieldCount = 0;
-					
-					if (fieldCount == fields.Length)
-					{
-						int newSize = fields.Length * 2;
-						DocFieldProcessorPerField[] newArray = new DocFieldProcessorPerField[newSize];
-						Array.Copy(fields, 0, newArray, 0, fieldCount);
-						fields = newArray;
-					}
-					
-					fields[fieldCount++] = fp;
-					fp.lastGen = thisFieldGen;
-				}
-				
-				if (fp.fieldCount == fp.fields.Length)
-				{
-					IFieldable[] newArray = new IFieldable[fp.fields.Length * 2];
-					Array.Copy(fp.fields, 0, newArray, 0, fp.fieldCount);
-					fp.fields = newArray;
-				}
-				
-				fp.fields[fp.fieldCount++] = field;
-				if (field.IsStored)
-				{
-					fieldsWriter.AddField(field, fp.fieldInfo);
-				}
-			}
-			
-			// If we are writing vectors then we must visit
-			// fields in sorted order so they are written in
-			// sorted order.  TODO: we actually only need to
-			// sort the subset of fields that have vectors
-			// enabled; we could save [small amount of] CPU
-			// here.
-			QuickSort(fields, 0, fieldCount - 1);
-			
-			for (int i = 0; i < fieldCount; i++)
-				fields[i].consumer.ProcessFields(fields[i].fields, fields[i].fieldCount);
+                if (thisFieldGen != fp.lastGen)
+                {
+                    
+                    // First time we're seeing this field for this doc
+                    fp.fieldCount = 0;
+                    
+                    if (fieldCount == fields.Length)
+                    {
+                        int newSize = fields.Length * 2;
+                        DocFieldProcessorPerField[] newArray = new DocFieldProcessorPerField[newSize];
+                        Array.Copy(fields, 0, newArray, 0, fieldCount);
+                        fields = newArray;
+                    }
+                    
+                    fields[fieldCount++] = fp;
+                    fp.lastGen = thisFieldGen;
+                }
+                
+                if (fp.fieldCount == fp.fields.Length)
+                {
+                    IFieldable[] newArray = new IFieldable[fp.fields.Length * 2];
+                    Array.Copy(fp.fields, 0, newArray, 0, fp.fieldCount);
+                    fp.fields = newArray;
+                }
+                
+                fp.fields[fp.fieldCount++] = field;
+                if (field.IsStored)
+                {
+                    fieldsWriter.AddField(field, fp.fieldInfo);
+                }
+            }
+            
+            // If we are writing vectors then we must visit
+            // fields in sorted order so they are written in
+            // sorted order.  TODO: we actually only need to
+            // sort the subset of fields that have vectors
+            // enabled; we could save [small amount of] CPU
+            // here.
+            QuickSort(fields, 0, fieldCount - 1);
+            
+            for (int i = 0; i < fieldCount; i++)
+                fields[i].consumer.ProcessFields(fields[i].fields, fields[i].fieldCount);
 
             if (docState.maxTermPrefix != null && docState.infoStream != null)
             {
                 docState.infoStream.WriteLine("WARNING: document contains at least one immense term (longer than the max length " + DocumentsWriter.MAX_TERM_LENGTH + "), all of which were skipped.  Please correct the analyzer to not produce such terms.  The prefix of the first immense term is: '" + docState.maxTermPrefix + "...'");
                 docState.maxTermPrefix = null;
             }
-			
-			DocumentsWriter.DocWriter one = fieldsWriter.FinishDocument();
-			DocumentsWriter.DocWriter two = consumer.FinishDocument();
-			if (one == null)
-			{
-				return two;
-			}
-			else if (two == null)
-			{
-				return one;
-			}
-			else
-			{
-				PerDoc both = GetPerDoc();
-				both.docID = docState.docID;
-				System.Diagnostics.Debug.Assert(one.docID == docState.docID);
-				System.Diagnostics.Debug.Assert(two.docID == docState.docID);
-				both.one = one;
-				both.two = two;
-				return both;
-			}
-		}
-		
-		internal void  QuickSort(DocFieldProcessorPerField[] array, int lo, int hi)
-		{
-			if (lo >= hi)
-				return ;
-			else if (hi == 1 + lo)
-			{
-				if (String.CompareOrdinal(array[lo].fieldInfo.name, array[hi].fieldInfo.name) > 0)
-				{
-					DocFieldProcessorPerField tmp = array[lo];
-					array[lo] = array[hi];
-					array[hi] = tmp;
-				}
-				return ;
-			}
-			
-			int mid = Number.URShift((lo + hi), 1);
-			
-			if (String.CompareOrdinal(array[lo].fieldInfo.name, array[mid].fieldInfo.name) > 0)
-			{
-				DocFieldProcessorPerField tmp = array[lo];
-				array[lo] = array[mid];
-				array[mid] = tmp;
-			}
-			
-			if (String.CompareOrdinal(array[mid].fieldInfo.name, array[hi].fieldInfo.name) > 0)
-			{
-				DocFieldProcessorPerField tmp = array[mid];
-				array[mid] = array[hi];
-				array[hi] = tmp;
-				
-				if (String.CompareOrdinal(array[lo].fieldInfo.name, array[mid].fieldInfo.name) > 0)
-				{
-					DocFieldProcessorPerField tmp2 = array[lo];
-					array[lo] = array[mid];
-					array[mid] = tmp2;
-				}
-			}
-			
-			int left = lo + 1;
-			int right = hi - 1;
-			
-			if (left >= right)
-				return ;
-			
-			DocFieldProcessorPerField partition = array[mid];
-			
-			for (; ; )
-			{
-				while (String.CompareOrdinal(array[right].fieldInfo.name, partition.fieldInfo.name) > 0)
-					--right;
-				
-				while (left < right && String.CompareOrdinal(array[left].fieldInfo.name, partition.fieldInfo.name) <= 0)
-					++left;
-				
-				if (left < right)
-				{
-					DocFieldProcessorPerField tmp = array[left];
-					array[left] = array[right];
-					array[right] = tmp;
-					--right;
-				}
-				else
-				{
-					break;
-				}
-			}
-			
-			QuickSort(array, lo, left);
-			QuickSort(array, left + 1, hi);
-		}
-		
-		internal PerDoc[] docFreeList;
-		internal int freeCount;
-		internal int allocCount;
-		
-		internal PerDoc GetPerDoc()
-		{
-			lock (this)
-			{
-				if (freeCount == 0)
-				{
-					allocCount++;
-					if (allocCount > docFreeList.Length)
-					{
-						// Grow our free list up front to make sure we have
-						// enough space to recycle all outstanding PerDoc
-						// instances
-						System.Diagnostics.Debug.Assert(allocCount == 1 + docFreeList.Length);
-						docFreeList = new PerDoc[ArrayUtil.GetNextSize(allocCount)];
-					}
-					return new PerDoc(this);
-				}
-				else
-					return docFreeList[--freeCount];
-			}
-		}
-		
-		internal void  FreePerDoc(PerDoc perDoc)
-		{
-			lock (this)
-			{
-				System.Diagnostics.Debug.Assert(freeCount < docFreeList.Length);
-				docFreeList[freeCount++] = perDoc;
-			}
-		}
-		
-		internal class PerDoc:DocumentsWriter.DocWriter
-		{
-			public PerDoc(DocFieldProcessorPerThread enclosingInstance)
-			{
-				InitBlock(enclosingInstance);
-			}
-			private void  InitBlock(DocFieldProcessorPerThread enclosingInstance)
-			{
-				this.enclosingInstance = enclosingInstance;
-			}
-			private DocFieldProcessorPerThread enclosingInstance;
-			public DocFieldProcessorPerThread Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			
-			internal DocumentsWriter.DocWriter one;
-			internal DocumentsWriter.DocWriter two;
-			
-			public override long SizeInBytes()
-			{
-				return one.SizeInBytes() + two.SizeInBytes();
-			}
-			
-			public override void  Finish()
-			{
-				try
-				{
-					try
-					{
-						one.Finish();
-					}
-					finally
-					{
-						two.Finish();
-					}
-				}
-				finally
-				{
-					Enclosing_Instance.FreePerDoc(this);
-				}
-			}
-			
-			public override void  Abort()
-			{
-				try
-				{
-					try
-					{
-						one.Abort();
-					}
-					finally
-					{
-						two.Abort();
-					}
-				}
-				finally
-				{
-					Enclosing_Instance.FreePerDoc(this);
-				}
-			}
-		}
-	}
+            
+            DocumentsWriter.DocWriter one = fieldsWriter.FinishDocument();
+            DocumentsWriter.DocWriter two = consumer.FinishDocument();
+            if (one == null)
+            {
+                return two;
+            }
+            else if (two == null)
+            {
+                return one;
+            }
+            else
+            {
+                PerDoc both = GetPerDoc();
+                both.docID = docState.docID;
+                System.Diagnostics.Debug.Assert(one.docID == docState.docID);
+                System.Diagnostics.Debug.Assert(two.docID == docState.docID);
+                both.one = one;
+                both.two = two;
+                return both;
+            }
+        }
+        
+        internal void  QuickSort(DocFieldProcessorPerField[] array, int lo, int hi)
+        {
+            if (lo >= hi)
+                return ;
+            else if (hi == 1 + lo)
+            {
+                if (String.CompareOrdinal(array[lo].fieldInfo.name, array[hi].fieldInfo.name) > 0)
+                {
+                    DocFieldProcessorPerField tmp = array[lo];
+                    array[lo] = array[hi];
+                    array[hi] = tmp;
+                }
+                return ;
+            }
+            
+            int mid = Number.URShift((lo + hi), 1);
+            
+            if (String.CompareOrdinal(array[lo].fieldInfo.name, array[mid].fieldInfo.name) > 0)
+            {
+                DocFieldProcessorPerField tmp = array[lo];
+                array[lo] = array[mid];
+                array[mid] = tmp;
+            }
+            
+            if (String.CompareOrdinal(array[mid].fieldInfo.name, array[hi].fieldInfo.name) > 0)
+            {
+                DocFieldProcessorPerField tmp = array[mid];
+                array[mid] = array[hi];
+                array[hi] = tmp;
+                
+                if (String.CompareOrdinal(array[lo].fieldInfo.name, array[mid].fieldInfo.name) > 0)
+                {
+                    DocFieldProcessorPerField tmp2 = array[lo];
+                    array[lo] = array[mid];
+                    array[mid] = tmp2;
+                }
+            }
+            
+            int left = lo + 1;
+            int right = hi - 1;
+            
+            if (left >= right)
+                return ;
+            
+            DocFieldProcessorPerField partition = array[mid];
+            
+            for (; ; )
+            {
+                while (String.CompareOrdinal(array[right].fieldInfo.name, partition.fieldInfo.name) > 0)
+                    --right;
+                
+                while (left < right && String.CompareOrdinal(array[left].fieldInfo.name, partition.fieldInfo.name) <= 0)
+                    ++left;
+                
+                if (left < right)
+                {
+                    DocFieldProcessorPerField tmp = array[left];
+                    array[left] = array[right];
+                    array[right] = tmp;
+                    --right;
+                }
+                else
+                {
+                    break;
+                }
+            }
+            
+            QuickSort(array, lo, left);
+            QuickSort(array, left + 1, hi);
+        }
+        
+        internal PerDoc[] docFreeList;
+        internal int freeCount;
+        internal int allocCount;
+        
+        internal PerDoc GetPerDoc()
+        {
+            lock (this)
+            {
+                if (freeCount == 0)
+                {
+                    allocCount++;
+                    if (allocCount > docFreeList.Length)
+                    {
+                        // Grow our free list up front to make sure we have
+                        // enough space to recycle all outstanding PerDoc
+                        // instances
+                        System.Diagnostics.Debug.Assert(allocCount == 1 + docFreeList.Length);
+                        docFreeList = new PerDoc[ArrayUtil.GetNextSize(allocCount)];
+                    }
+                    return new PerDoc(this);
+                }
+                else
+                    return docFreeList[--freeCount];
+            }
+        }
+        
+        internal void  FreePerDoc(PerDoc perDoc)
+        {
+            lock (this)
+            {
+                System.Diagnostics.Debug.Assert(freeCount < docFreeList.Length);
+                docFreeList[freeCount++] = perDoc;
+            }
+        }
+        
+        internal class PerDoc:DocumentsWriter.DocWriter
+        {
+            public PerDoc(DocFieldProcessorPerThread enclosingInstance)
+            {
+                InitBlock(enclosingInstance);
+            }
+            private void  InitBlock(DocFieldProcessorPerThread enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
+            private DocFieldProcessorPerThread enclosingInstance;
+            public DocFieldProcessorPerThread Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+                
+            }
+            
+            internal DocumentsWriter.DocWriter one;
+            internal DocumentsWriter.DocWriter two;
+            
+            public override long SizeInBytes()
+            {
+                return one.SizeInBytes() + two.SizeInBytes();
+            }
+            
+            public override void  Finish()
+            {
+                try
+                {
+                    try
+                    {
+                        one.Finish();
+                    }
+                    finally
+                    {
+                        two.Finish();
+                    }
+                }
+                finally
+                {
+                    Enclosing_Instance.FreePerDoc(this);
+                }
+            }
+            
+            public override void  Abort()
+            {
+                try
+                {
+                    try
+                    {
+                        one.Abort();
+                    }
+                    finally
+                    {
+                        two.Abort();
+                    }
+                }
+                finally
+                {
+                    Enclosing_Instance.FreePerDoc(this);
+                }
+            }
+        }
+    }
 }
\ No newline at end of file
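
The Rehash() method in the converted block above grows the per-field hash table by doubling it and relinking every chained entry under a new power-of-two mask, so a single bitwise AND with `length - 1` selects a bucket in place of a modulo. Below is a minimal standalone sketch of that doubling-and-remask pattern; ChainNode and ChainedHash are illustrative names for this sketch, not Lucene.NET types:

    using System.Diagnostics;

    class ChainNode
    {
        public string Name;
        public ChainNode Next;
    }

    class ChainedHash
    {
        ChainNode[] buckets = new ChainNode[2]; // length must stay a power of two
        int mask = 1;                           // always buckets.Length - 1

        public void Add(string name)
        {
            int pos = name.GetHashCode() & mask;
            buckets[pos] = new ChainNode { Name = name, Next = buckets[pos] };
        }

        public void Rehash()
        {
            int newSize = buckets.Length * 2;
            Debug.Assert(newSize > buckets.Length);
            var newBuckets = new ChainNode[newSize];
            int newMask = newSize - 1;
            foreach (ChainNode head in buckets)
            {
                ChainNode node = head;
                while (node != null)
                {
                    ChainNode next = node.Next;      // save before relinking
                    int pos = node.Name.GetHashCode() & newMask;
                    node.Next = newBuckets[pos];     // push onto the new chain
                    newBuckets[pos] = node;
                    node = next;
                }
            }
            buckets = newBuckets;
            mask = newMask;
        }
    }

Keeping the length a power of two means the new mask is simply `newSize - 1`, and each node is redistributed with one hash, one AND, and two pointer writes, exactly as DocFieldProcessorPerThread.Rehash() does for its per-field chains.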

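GetPerDoc() and FreePerDoc() above implement a small synchronized object pool: the free list is grown before a new PerDoc is handed out, so the array can always hold every outstanding instance when it is returned. A self-contained sketch of the same pattern follows, assuming a simple doubling policy as a stand-in for ArrayUtil.GetNextSize; PooledDoc and DocPool are illustrative names only:

    using System.Diagnostics;

    class PooledDoc
    {
        public int DocID;
    }

    class DocPool
    {
        PooledDoc[] free = new PooledDoc[1];
        int freeCount;
        int allocCount; // total instances ever created

        public PooledDoc Get()
        {
            lock (this)
            {
                if (freeCount == 0)
                {
                    allocCount++;
                    if (allocCount > free.Length)
                    {
                        // freeCount is 0 here, so the old array holds no live
                        // entries and can simply be replaced, not copied.
                        Debug.Assert(allocCount == 1 + free.Length);
                        free = new PooledDoc[allocCount * 2]; // stand-in for ArrayUtil.GetNextSize
                    }
                    return new PooledDoc();
                }
                return free[--freeCount];
            }
        }

        public void Release(PooledDoc doc)
        {
            lock (this)
            {
                // Growth in Get() guarantees there is always room.
                Debug.Assert(freeCount < free.Length);
                free[freeCount++] = doc;
            }
        }
    }

The nested try/finally blocks in PerDoc.Finish() and Abort() pair with this pool: the second child writer is finished (or aborted) even if the first one throws, and the outer finally always returns the instance through FreePerDoc.
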
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/62f018ab/src/core/Index/DocInverter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocInverter.cs b/src/core/Index/DocInverter.cs
index 4153465..9a058aa 100644
--- a/src/core/Index/DocInverter.cs
+++ b/src/core/Index/DocInverter.cs
@@ -20,78 +20,78 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Index
 {
-	
-	/// <summary>This is a DocFieldConsumer that inverts each field,
-	/// separately, from a Document, and accepts a
-	/// InvertedTermsConsumer to process those terms. 
-	/// </summary>
-	
-	sealed class DocInverter : DocFieldConsumer
-	{
-		
-		internal InvertedDocConsumer consumer;
-		internal InvertedDocEndConsumer endConsumer;
-		
-		public DocInverter(InvertedDocConsumer consumer, InvertedDocEndConsumer endConsumer)
-		{
-			this.consumer = consumer;
-			this.endConsumer = endConsumer;
-		}
-		
-		internal override void  SetFieldInfos(FieldInfos fieldInfos)
-		{
-			base.SetFieldInfos(fieldInfos);
-			consumer.SetFieldInfos(fieldInfos);
-			endConsumer.SetFieldInfos(fieldInfos);
-		}
+    
+    /// <summary>This is a DocFieldConsumer that inverts each field,
+    /// separately, from a Document, and accepts an
+    /// InvertedTermsConsumer to process those terms.
+    /// </summary>
+    
+    sealed class DocInverter : DocFieldConsumer
+    {
+        
+        internal InvertedDocConsumer consumer;
+        internal InvertedDocEndConsumer endConsumer;
+        
+        public DocInverter(InvertedDocConsumer consumer, InvertedDocEndConsumer endConsumer)
+        {
+            this.consumer = consumer;
+            this.endConsumer = endConsumer;
+        }
+        
+        internal override void  SetFieldInfos(FieldInfos fieldInfos)
+        {
+            base.SetFieldInfos(fieldInfos);
+            consumer.SetFieldInfos(fieldInfos);
+            endConsumer.SetFieldInfos(fieldInfos);
+        }
 
         public override void Flush(IDictionary<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state)
-		{
+        {
 
             var childThreadsAndFields = new HashMap<InvertedDocConsumerPerThread, ICollection<InvertedDocConsumerPerField>>();
             var endChildThreadsAndFields = new HashMap<InvertedDocEndConsumerPerThread, ICollection<InvertedDocEndConsumerPerField>>();
 
             foreach (var entry in threadsAndFields)
-			{
-				var perThread = (DocInverterPerThread) entry.Key;
+            {
+                var perThread = (DocInverterPerThread) entry.Key;
 
-				ICollection<InvertedDocConsumerPerField> childFields = new HashSet<InvertedDocConsumerPerField>();
-				ICollection<InvertedDocEndConsumerPerField> endChildFields = new HashSet<InvertedDocEndConsumerPerField>();
-				foreach(DocFieldConsumerPerField field in entry.Value)
-				{
+                ICollection<InvertedDocConsumerPerField> childFields = new HashSet<InvertedDocConsumerPerField>();
+                ICollection<InvertedDocEndConsumerPerField> endChildFields = new HashSet<InvertedDocEndConsumerPerField>();
+                foreach(DocFieldConsumerPerField field in entry.Value)
+                {
                     var perField = (DocInverterPerField)field;
-					childFields.Add(perField.consumer);
-					endChildFields.Add(perField.endConsumer);
-				}
-				
-				childThreadsAndFields[perThread.consumer] = childFields;
-				endChildThreadsAndFields[perThread.endConsumer] = endChildFields;
-			}
-			
-			consumer.Flush(childThreadsAndFields, state);
-			endConsumer.Flush(endChildThreadsAndFields, state);
-		}
+                    childFields.Add(perField.consumer);
+                    endChildFields.Add(perField.endConsumer);
+                }
+                
+                childThreadsAndFields[perThread.consumer] = childFields;
+                endChildThreadsAndFields[perThread.endConsumer] = endChildFields;
+            }
+            
+            consumer.Flush(childThreadsAndFields, state);
+            endConsumer.Flush(endChildThreadsAndFields, state);
+        }
 
-	    public override void  CloseDocStore(SegmentWriteState state)
-		{
-			consumer.CloseDocStore(state);
-			endConsumer.CloseDocStore(state);
-		}
-		
-		public override void  Abort()
-		{
-			consumer.Abort();
-			endConsumer.Abort();
-		}
-		
-		public override bool FreeRAM()
-		{
-			return consumer.FreeRAM();
-		}
-		
-		public override DocFieldConsumerPerThread AddThread(DocFieldProcessorPerThread docFieldProcessorPerThread)
-		{
-			return new DocInverterPerThread(docFieldProcessorPerThread, this);
-		}
-	}
+        public override void  CloseDocStore(SegmentWriteState state)
+        {
+            consumer.CloseDocStore(state);
+            endConsumer.CloseDocStore(state);
+        }
+        
+        public override void  Abort()
+        {
+            consumer.Abort();
+            endConsumer.Abort();
+        }
+        
+        public override bool FreeRAM()
+        {
+            return consumer.FreeRAM();
+        }
+        
+        public override DocFieldConsumerPerThread AddThread(DocFieldProcessorPerThread docFieldProcessorPerThread)
+        {
+            return new DocInverterPerThread(docFieldProcessorPerThread, this);
+        }
+    }
 }
\ No newline at end of file
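
DocInverter is a composite consumer: most lifecycle calls are forwarded to both children, and Flush() first regroups each (thread, fields) entry into per-child maps before delegating. A hedged sketch of that forwarding shape is below; IConsumer and CompositeConsumer are illustrative stand-ins for the Lucene.NET types, not the actual API:

    interface IConsumer
    {
        void Abort();
        bool FreeRAM();
    }

    sealed class CompositeConsumer : IConsumer
    {
        private readonly IConsumer consumer;     // term-side child
        private readonly IConsumer endConsumer;  // end-of-document child

        public CompositeConsumer(IConsumer consumer, IConsumer endConsumer)
        {
            this.consumer = consumer;
            this.endConsumer = endConsumer;
        }

        public void Abort()
        {
            // Forward to both children, in the same order DocInverter uses,
            // so neither is left holding partial state.
            consumer.Abort();
            endConsumer.Abort();
        }

        public bool FreeRAM()
        {
            // Mirrors DocInverter.FreeRAM() above: only the first child
            // is asked to release memory.
            return consumer.FreeRAM();
        }
    }

The asymmetry is deliberate: Abort() and CloseDocStore() must reach both children, while FreeRAM() consults only the inverted-terms side, which is where the bulk of the RAM is held.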