You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucenenet.apache.org by cc...@apache.org on 2012/02/28 23:43:28 UTC

[Lucene.Net] svn commit: r1294875 [12/45] - in /incubator/lucene.net/trunk: ./ build/ build/vs2010/contrib/ build/vs2010/test/ doc/ src/ src/contrib/Analyzers/ src/contrib/Analyzers/AR/ src/contrib/Analyzers/BR/ src/contrib/Analyzers/CJK/ src/contrib/Analyzers/Cn/ ...

Modified: incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/TermAttributeImpl.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/TermAttributeImpl.cs?rev=1294875&r1=1294874&r2=1294875&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/TermAttributeImpl.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Tokenattributes/TermAttributeImpl.cs Tue Feb 28 22:43:08 2012
@@ -16,7 +16,7 @@
  */
 
 using System;
-
+using Lucene.Net.Support;
 using ArrayUtil = Lucene.Net.Util.ArrayUtil;
 using AttributeImpl = Lucene.Net.Util.AttributeImpl;
 
@@ -70,7 +70,7 @@ namespace Lucene.Net.Analysis.Tokenattri
 		{
 			int length = buffer.Length;
 			GrowTermBuffer(length);
-			SupportClass.TextSupport.GetCharsFromString(buffer, 0, length, termBuffer, 0);
+			TextSupport.GetCharsFromString(buffer, 0, length, termBuffer, 0);
 			termLength = length;
 		}
 		
@@ -88,7 +88,7 @@ namespace Lucene.Net.Analysis.Tokenattri
 			System.Diagnostics.Debug.Assert(offset <= buffer.Length);
 			System.Diagnostics.Debug.Assert(offset + length <= buffer.Length);
 			GrowTermBuffer(length);
-			SupportClass.TextSupport.GetCharsFromString(buffer, offset, offset + length, termBuffer, 0);
+			TextSupport.GetCharsFromString(buffer, offset, offset + length, termBuffer, 0);
 			termLength = length;
 		}
 		

Modified: incubator/lucene.net/trunk/src/core/Analysis/Tokenizer.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/Tokenizer.cs?rev=1294875&r1=1294874&r2=1294875&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/Tokenizer.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/Tokenizer.cs Tue Feb 28 22:43:08 2012
@@ -26,16 +26,16 @@ namespace Lucene.Net.Analysis
 	/// <p/>
 	/// This is an abstract class; subclasses must override <see cref="TokenStream.IncrementToken()" />
 	/// <p/>
-    /// NOTE: Subclasses overriding <see cref="TokenStream.Next(Token)" /> must call
+    /// NOTE: Subclasses overriding <see cref="TokenStream.IncrementToken()" /> must call
 	/// <see cref="AttributeSource.ClearAttributes()" /> before setting attributes.
-    /// Subclasses overriding <see cref="TokenStream.IncrementToken()" /> must call
-	/// <see cref="Token.Clear()" /> before setting Token attributes.
 	/// </summary>
 	
 	public abstract class Tokenizer:TokenStream
 	{
 		/// <summary>The text source for this Tokenizer. </summary>
 		protected internal System.IO.TextReader input;
+
+	    private bool isDisposed;
 		
 		/// <summary>Construct a tokenizer with null input. </summary>
 		protected internal Tokenizer()
@@ -70,17 +70,23 @@ namespace Lucene.Net.Analysis
 			this.input = CharReader.Get(input);
 		}
 		
-		/// <summary>By default, closes the input Reader. </summary>
-		public override void  Close()
-		{
-            if (input != null) {
-                input.Close();
-                // LUCENE-2387: don't hold onto Reader after close, so
-                // GC can reclaim
-                input = null;
+        protected override void Dispose(bool disposing)
+        {
+            if (isDisposed) return;
+
+            if (disposing)
+            {
+                if (input != null)
+                {
+                    input.Close();
+                }
             }
 
-		}
+            // LUCENE-2387: don't hold onto Reader after close, so
+            // GC can reclaim
+            input = null;
+            isDisposed = true;
+        }
   
 		/// <summary>Return the corrected offset. If <see cref="input" /> is a <see cref="CharStream" /> subclass
 		/// this method calls <see cref="CharStream.CorrectOffset" />, else returns <c>currentOff</c>.

Modified: incubator/lucene.net/trunk/src/core/Analysis/WordlistLoader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Analysis/WordlistLoader.cs?rev=1294875&r1=1294874&r2=1294875&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Analysis/WordlistLoader.cs (original)
+++ incubator/lucene.net/trunk/src/core/Analysis/WordlistLoader.cs Tue Feb 28 22:43:08 2012
@@ -16,16 +16,12 @@
  */
 
 using System;
+using System.Collections.Generic;
 
 namespace Lucene.Net.Analysis
 {
 	
-	/// <summary> Loader for text files that represent a list of stopwords.
-	/// 
-	/// 
-	/// </summary>
-	/// <version>  $Id: WordlistLoader.java 706342 2008-10-20 17:19:29Z gsingers $
-	/// </version>
+	/// <summary> Loader for text files that represent a list of stopwords.</summary>
 	public class WordlistLoader
 	{
 		
@@ -33,15 +29,12 @@ namespace Lucene.Net.Analysis
 		/// leading and trailing whitespace). Every line of the file should contain only
 		/// one word. The words need to be in lowercase if you make use of an
 		/// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
-		/// 
 		/// </summary>
-		/// <param name="wordfile">File containing the wordlist
-		/// </param>
-		/// <returns> A HashSet with the file's words
-		/// </returns>
-		public static System.Collections.Hashtable GetWordSet(System.IO.FileInfo wordfile)
+		/// <param name="wordfile">File containing the wordlist</param>
+		/// <returns> A HashSet with the file's words</returns>
+		public static ISet<string> GetWordSet(System.IO.FileInfo wordfile)
 		{
-			System.Collections.Hashtable result = new System.Collections.Hashtable();
+			ISet<string> result = new HashSet<string>();
 			System.IO.StreamReader reader = null;
 			try
 			{
@@ -60,17 +53,13 @@ namespace Lucene.Net.Analysis
 		/// leading and trailing whitespace). Every line of the file should contain only
 		/// one word. The words need to be in lowercase if you make use of an
 		/// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
-		/// 
 		/// </summary>
-		/// <param name="wordfile">File containing the wordlist
-		/// </param>
-		/// <param name="comment">The comment string to ignore
-		/// </param>
-		/// <returns> A HashSet with the file's words
-		/// </returns>
-		public static System.Collections.Hashtable GetWordSet(System.IO.FileInfo wordfile, System.String comment)
+		/// <param name="wordfile">File containing the wordlist</param>
+		/// <param name="comment">The comment string to ignore</param>
+		/// <returns> A HashSet with the file's words</returns>
+		public static HashSet<string> GetWordSet(System.IO.FileInfo wordfile, System.String comment)
 		{
-			System.Collections.Hashtable result = new System.Collections.Hashtable();
+			HashSet<string> result = new HashSet<string>();
 			System.IO.StreamReader reader = null;
 			try
 			{
@@ -90,22 +79,19 @@ namespace Lucene.Net.Analysis
 		/// leading and trailing whitespace). Every line of the Reader should contain only
 		/// one word. The words need to be in lowercase if you make use of an
 		/// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
-		/// 
 		/// </summary>
-		/// <param name="reader">Reader containing the wordlist
-		/// </param>
-		/// <returns> A HashSet with the reader's words
-		/// </returns>
-		public static System.Collections.Hashtable GetWordSet(System.IO.TextReader reader)
+		/// <param name="reader">Reader containing the wordlist</param>
+		/// <returns>A HashSet with the reader's words</returns>
+		public static HashSet<string> GetWordSet(System.IO.TextReader reader)
 		{
-			System.Collections.Hashtable result = new System.Collections.Hashtable();
+			HashSet<string> result = new HashSet<string>();
 			System.IO.TextReader br = null;
 			try
 			{
 				System.String word = null;
 				while ((word = reader.ReadLine()) != null)
 				{
-					SupportClass.CollectionsHelper.Add(result, word.Trim());
+				    result.Add(word.Trim());
 				}
 			}
 			finally
@@ -128,9 +114,9 @@ namespace Lucene.Net.Analysis
 		/// </param>
 		/// <returns> A HashSet with the reader's words
 		/// </returns>
-        public static System.Collections.Hashtable GetWordSet(System.IO.TextReader reader, System.String comment)
+        public static HashSet<string> GetWordSet(System.IO.TextReader reader, System.String comment)
 		{
-			System.Collections.Hashtable result = new System.Collections.Hashtable();
+            HashSet<string> result = new HashSet<string>();
 			System.IO.StreamReader br = null;
 			try
 			{
@@ -139,7 +125,7 @@ namespace Lucene.Net.Analysis
 				{
 					if (word.StartsWith(comment) == false)
 					{
-						SupportClass.CollectionsHelper.Add(result, word.Trim());
+					    result.Add(word.Trim());
 					}
 				}
 			}
@@ -161,11 +147,11 @@ namespace Lucene.Net.Analysis
 		/// <returns> stem dictionary that overrules the stemming algorithm
 		/// </returns>
 		/// <throws>  IOException  </throws>
-		public static System.Collections.Hashtable GetStemDict(System.IO.FileInfo wordstemfile)
+		public static Dictionary<string, string> GetStemDict(System.IO.FileInfo wordstemfile)
 		{
 			if (wordstemfile == null)
 				throw new System.NullReferenceException("wordstemfile may not be null");
-			System.Collections.Hashtable result = new System.Collections.Hashtable();
+            Dictionary<string, string> result = new Dictionary<string, string>();
 			System.IO.StreamReader br = null;
 			System.IO.StreamReader fr = null;
 			try

Modified: incubator/lucene.net/trunk/src/core/AssemblyInfo.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/AssemblyInfo.cs?rev=1294875&r1=1294874&r2=1294875&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/AssemblyInfo.cs (original)
+++ incubator/lucene.net/trunk/src/core/AssemblyInfo.cs Tue Feb 28 22:43:08 2012
@@ -36,8 +36,14 @@ using System.Runtime.CompilerServices;
 
 [assembly: CLSCompliant(true)]
 
-[assembly: AssemblyInformationalVersionAttribute("2.9.4")]
+[assembly: AssemblyInformationalVersionAttribute("3.0.3")]
 
+// for testing
+[assembly: InternalsVisibleTo("Lucene.Net.Test, PublicKey=002400000480000094000000060200000024000052534131000400000100010075a07ce602f88e" +
+                                                         "f263c7db8cb342c58ebd49ecdcc210fac874260b0213fb929ac3dcaf4f5b39744b800f99073eca" +
+                                                         "72aebfac5f7284e1d5f2c82012a804a140f06d7d043d83e830cdb606a04da2ad5374cc92c0a495" +
+                                                         "08437802fb4f8fb80a05e59f80afb99f4ccd0dfe44065743543c4b053b669509d29d332cd32a0c" +
+                                                         "b1e97e84")]
 
 //
 // Version information for an assembly consists of the following four values:
@@ -50,7 +56,7 @@ using System.Runtime.CompilerServices;
 // You can specify all the values or you can default the Revision and Build Numbers 
 // by using the '*' as shown below:
 
-[assembly: AssemblyVersion("2.9.4.001")]
+[assembly: AssemblyVersion("3.0.3")]
 
 
 //

Modified: incubator/lucene.net/trunk/src/core/Document/AbstractField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Document/AbstractField.cs?rev=1294875&r1=1294874&r2=1294875&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Document/AbstractField.cs (original)
+++ incubator/lucene.net/trunk/src/core/Document/AbstractField.cs Tue Feb 28 22:43:08 2012
@@ -24,8 +24,6 @@ using SpanQuery = Lucene.Net.Search.Span
 
 namespace Lucene.Net.Documents
 {
-	
-	
 	/// <summary> 
 	/// 
 	/// 
@@ -43,7 +41,6 @@ namespace Lucene.Net.Documents
 		protected internal bool isIndexed = true;
 		protected internal bool isTokenized = true;
 		protected internal bool isBinary = false;
-		protected internal bool isCompressed = false;
 		protected internal bool lazy = false;
 		protected internal bool omitTermFreqAndPositions = false;
 		protected internal float boost = 1.0f;
@@ -64,58 +61,11 @@ namespace Lucene.Net.Documents
 			if (name == null)
 				throw new System.NullReferenceException("name cannot be null");
 			this.name = StringHelper.Intern(name); // field names are interned
-			
-			if (store == Field.Store.YES)
-			{
-				this.isStored = true;
-				this.isCompressed = false;
-			}
-			else if (store == Field.Store.COMPRESS)
-			{
-				this.isStored = true;
-				this.isCompressed = true;
-			}
-			else if (store == Field.Store.NO)
-			{
-				this.isStored = false;
-				this.isCompressed = false;
-			}
-			else
-			{
-				throw new System.ArgumentException("unknown store parameter " + store);
-			}
-			
-			if (index == Field.Index.NO)
-			{
-				this.isIndexed = false;
-				this.isTokenized = false;
-			}
-			else if (index == Field.Index.ANALYZED)
-			{
-				this.isIndexed = true;
-				this.isTokenized = true;
-			}
-			else if (index == Field.Index.NOT_ANALYZED)
-			{
-				this.isIndexed = true;
-				this.isTokenized = false;
-			}
-			else if (index == Field.Index.NOT_ANALYZED_NO_NORMS)
-			{
-				this.isIndexed = true;
-				this.isTokenized = false;
-				this.omitNorms = true;
-			}
-			else if (index == Field.Index.ANALYZED_NO_NORMS)
-			{
-				this.isIndexed = true;
-				this.isTokenized = true;
-				this.omitNorms = true;
-			}
-			else
-			{
-				throw new System.ArgumentException("unknown index parameter " + index);
-			}
+
+		    this.isStored = store.IsStored();
+		    this.isIndexed = index.IsIndexed();
+		    this.isTokenized = index.IsAnalyzed();
+		    this.omitNorms = index.OmitNorms();
 			
 			this.isBinary = false;
 			
@@ -155,7 +105,7 @@ namespace Lucene.Net.Documents
 		/// 
 		/// <p/>Note: this value is not stored directly with the document in the index.
 		/// Documents returned from <see cref="Lucene.Net.Index.IndexReader.Document(int)" /> and
-		/// <see cref="Lucene.Net.Search.Hits.Doc(int)" /> may thus not have the same value present as when
+		/// <see cref="Lucene.Net.Search.Searcher.Doc(int)" /> may thus not have the same value present as when
 		/// this field was indexed.
 		/// 
 		/// </summary>
@@ -176,40 +126,9 @@ namespace Lucene.Net.Documents
 		
 		protected internal virtual void  SetStoreTermVector(Field.TermVector termVector)
 		{
-			if (termVector == Field.TermVector.NO)
-			{
-				this.storeTermVector = false;
-				this.storePositionWithTermVector = false;
-				this.storeOffsetWithTermVector = false;
-			}
-			else if (termVector == Field.TermVector.YES)
-			{
-				this.storeTermVector = true;
-				this.storePositionWithTermVector = false;
-				this.storeOffsetWithTermVector = false;
-			}
-			else if (termVector == Field.TermVector.WITH_POSITIONS)
-			{
-				this.storeTermVector = true;
-				this.storePositionWithTermVector = true;
-				this.storeOffsetWithTermVector = false;
-			}
-			else if (termVector == Field.TermVector.WITH_OFFSETS)
-			{
-				this.storeTermVector = true;
-				this.storePositionWithTermVector = false;
-				this.storeOffsetWithTermVector = true;
-			}
-			else if (termVector == Field.TermVector.WITH_POSITIONS_OFFSETS)
-			{
-				this.storeTermVector = true;
-				this.storePositionWithTermVector = true;
-				this.storeOffsetWithTermVector = true;
-			}
-			else
-			{
-				throw new System.ArgumentException("unknown termVector parameter " + termVector);
-			}
+		    this.storeTermVector = termVector.IsStored();
+		    this.storePositionWithTermVector = termVector.WithPositions();
+		    this.storeOffsetWithTermVector = termVector.WithOffsets();
 		}
 		
 		/// <summary>True iff the value of the field is to be stored in the index for return
@@ -238,12 +157,6 @@ namespace Lucene.Net.Documents
 			return isTokenized;
 		}
 		
-		/// <summary>True if the value of the field is stored and compressed within the index </summary>
-		public bool IsCompressed()
-		{
-			return isCompressed;
-		}
-		
 		/// <summary>True iff the term or terms used to index this field are stored as a term
 		/// vector, available from <see cref="Lucene.Net.Index.IndexReader.GetTermFreqVector(int,String)" />.
 		/// These methods do not provide access to the original content of the field,
@@ -308,10 +221,7 @@ namespace Lucene.Net.Documents
 		{
 			if (isBinary)
 			{
-				if (!isCompressed)
-					return binaryLength;
-				else
-					return ((byte[]) fieldsData).Length;
+                return binaryLength;
 			}
 			else if (fieldsData is byte[])
 				return ((byte[]) fieldsData).Length;
@@ -335,14 +245,6 @@ namespace Lucene.Net.Documents
 			return omitNorms;
 		}
 		
-		/// <deprecated> Renamed to <see cref="GetOmitTermFreqAndPositions" /> 
-		/// </deprecated>
-        [Obsolete("Renamed to GetOmitTermFreqAndPositions")]
-		public virtual bool GetOmitTf()
-		{
-			return omitTermFreqAndPositions;
-		}
-		
 		/// <seealso cref="SetOmitTermFreqAndPositions">
 		/// </seealso>
 		public virtual bool GetOmitTermFreqAndPositions()
@@ -360,14 +262,6 @@ namespace Lucene.Net.Documents
 			this.omitNorms = omitNorms;
 		}
 		
-		/// <deprecated> Renamed to <see cref="SetOmitTermFreqAndPositions" /> 
-		/// </deprecated>
-        [Obsolete("Renamed to SetOmitTermFreqAndPositions")]
-		public virtual void  SetOmitTf(bool omitTermFreqAndPositions)
-		{
-			this.omitTermFreqAndPositions = omitTermFreqAndPositions;
-		}
-		
 		/// <summary>Expert:
 		/// 
 		/// If set, omit term freq, positions and payloads from
@@ -396,10 +290,6 @@ namespace Lucene.Net.Documents
 			if (isStored)
 			{
 				result.Append("stored");
-				if (isCompressed)
-					result.Append("/compressed");
-				else
-					result.Append("/uncompressed");
 			}
 			if (isIndexed)
 			{
@@ -461,9 +351,8 @@ namespace Lucene.Net.Documents
 			result.Append('>');
 			return result.ToString();
 		}
-		public abstract Lucene.Net.Analysis.TokenStream TokenStreamValue();
-		public abstract System.IO.TextReader ReaderValue();
-		public abstract System.String StringValue();
-		public abstract byte[] BinaryValue();
+        public abstract Lucene.Net.Analysis.TokenStream TokenStreamValue();
+        public abstract System.IO.TextReader ReaderValue();
+        public abstract System.String StringValue();
 	}
 }
\ No newline at end of file

Modified: incubator/lucene.net/trunk/src/core/Document/CompressionTools.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Document/CompressionTools.cs?rev=1294875&r1=1294874&r2=1294875&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Document/CompressionTools.cs (original)
+++ incubator/lucene.net/trunk/src/core/Document/CompressionTools.cs Tue Feb 28 22:43:08 2012
@@ -22,7 +22,7 @@
 // http://www.icsharpcode.net/OpenSource/SharpZipLib/
 
 using System;
-
+using Lucene.Net.Support;
 using UnicodeUtil = Lucene.Net.Util.UnicodeUtil;
 
 namespace Lucene.Net.Documents
@@ -31,9 +31,7 @@ namespace Lucene.Net.Documents
 	/// <summary>Simple utility class providing static methods to
 	/// compress and decompress binary data for stored fields.
 	/// This class uses java.util.zip.Deflater and Inflater
-	/// classes to compress and decompress, which is the same
-	/// format previously used by the now deprecated
-	/// Field.Store.COMPRESS.
+	/// classes to compress and decompress.
 	/// </summary>
 	
 	public class CompressionTools
@@ -56,7 +54,7 @@ namespace Lucene.Net.Documents
 			* the uncompressed data. */
 			System.IO.MemoryStream bos = new System.IO.MemoryStream(length);
 
-            SupportClass.SharpZipLib.Deflater compressor = SupportClass.SharpZipLib.CreateDeflater();
+            Deflater compressor = SharpZipLib.CreateDeflater();
 			
 			try
 			{
@@ -82,19 +80,19 @@ namespace Lucene.Net.Documents
 		/// <summary>Compresses the specified byte range, with default BEST_COMPRESSION level </summary>
 		public static byte[] Compress(byte[] value_Renamed, int offset, int length)
         {
-			return Compress(value_Renamed, offset, length, SupportClass.SharpZipLib.Deflater.BEST_COMPRESSION);
+			return Compress(value_Renamed, offset, length, Deflater.BEST_COMPRESSION);
 		}
 		
 		/// <summary>Compresses all bytes in the array, with default BEST_COMPRESSION level </summary>
 		public static byte[] Compress(byte[] value_Renamed)
 		{
-            return Compress(value_Renamed, 0, value_Renamed.Length, SupportClass.SharpZipLib.Deflater.BEST_COMPRESSION);
+            return Compress(value_Renamed, 0, value_Renamed.Length, Deflater.BEST_COMPRESSION);
 		}
 		
 		/// <summary>Compresses the String value, with default BEST_COMPRESSION level </summary>
 		public static byte[] CompressString(System.String value_Renamed)
 		{
-            return CompressString(value_Renamed, SupportClass.SharpZipLib.Deflater.BEST_COMPRESSION);
+            return CompressString(value_Renamed, Deflater.BEST_COMPRESSION);
 		}
 		
 		/// <summary>Compresses the String value using the specified
@@ -116,7 +114,7 @@ namespace Lucene.Net.Documents
 			// Create an expandable byte array to hold the decompressed data
 			System.IO.MemoryStream bos = new System.IO.MemoryStream(value_Renamed.Length);
 			
-			SupportClass.SharpZipLib.Inflater decompressor = SupportClass.SharpZipLib.CreateInflater();
+			Inflater decompressor = SharpZipLib.CreateInflater();
 			
 			try
 			{

Modified: incubator/lucene.net/trunk/src/core/Document/DateField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Document/DateField.cs?rev=1294875&r1=1294874&r2=1294875&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Document/DateField.cs (original)
+++ incubator/lucene.net/trunk/src/core/Document/DateField.cs Tue Feb 28 22:43:08 2012
@@ -16,9 +16,9 @@
  */
 
 using System;
-
+using Lucene.Net.Search;
+using Lucene.Net.Support;
 using NumericUtils = Lucene.Net.Util.NumericUtils;
-using NumericRangeQuery = Lucene.Net.Search.NumericRangeQuery;
 using PrefixQuery = Lucene.Net.Search.PrefixQuery;
 using TermRangeQuery = Lucene.Net.Search.TermRangeQuery;
 // for javadoc
@@ -51,15 +51,15 @@ namespace Lucene.Net.Documents
 	/// For indexing a <see cref="DateTime" />, convert it to unix timestamp as
 	/// <c>long</c> and
 	/// index this as a numeric value with <see cref="NumericField" />
-	/// and use <see cref="NumericRangeQuery" /> to query it.
+	/// and use <see cref="NumericRangeQuery{T}" /> to query it.
 	/// 
 	/// </summary>
 	/// <deprecated> If you build a new index, use <see cref="DateTools" /> or 
 	/// <see cref="NumericField" /> instead.
 	/// This class is included for use with existing
-	/// indices and will be removed in a future release.
+	/// indices and will be removed in a future (possibly Lucene 4.0)
 	/// </deprecated>
-    [Obsolete("If you build a new index, use DateTools or NumericField instead.This class is included for use with existing indices and will be removed in a future release.")]
+    [Obsolete("If you build a new index, use DateTools or NumericField instead.This class is included for use with existing indices and will be removed in a future release (possibly Lucene 4.0).")]
 	public class DateField
 	{
 		
@@ -68,7 +68,7 @@ namespace Lucene.Net.Documents
 		}
 		
 		// make date strings long enough to last a millenium
-        private static int DATE_LEN = SupportClass.Number.ToString(1000L * 365 * 24 * 60 * 60 * 1000, SupportClass.Number.MAX_RADIX).Length;
+        private static int DATE_LEN = Number.ToString(1000L * 365 * 24 * 60 * 60 * 1000, Number.MAX_RADIX).Length;
 
 		public static System.String MIN_DATE_STRING()
 		{
@@ -78,7 +78,7 @@ namespace Lucene.Net.Documents
 		public static System.String MAX_DATE_STRING()
 		{
 			char[] buffer = new char[DATE_LEN];
-            char c = SupportClass.Character.ForDigit(SupportClass.Character.MAX_RADIX - 1, SupportClass.Character.MAX_RADIX);
+            char c = Character.ForDigit(Character.MAX_RADIX - 1, Character.MAX_RADIX);
 			for (int i = 0; i < DATE_LEN; i++)
 				buffer[i] = c;
 			return new System.String(buffer);
@@ -103,7 +103,7 @@ namespace Lucene.Net.Documents
 			if (time < 0)
 				throw new System.SystemException("time '" + time + "' is too early, must be >= 0");
 
-            System.String s = SupportClass.Number.ToString(time, SupportClass.Character.MAX_RADIX);
+            System.String s = Number.ToString(time, Character.MAX_RADIX);
 			
 			if (s.Length > DATE_LEN)
 				throw new System.SystemException("time '" + time + "' is too late, length of string " + "representation must be <= " + DATE_LEN);
@@ -123,7 +123,7 @@ namespace Lucene.Net.Documents
 		/// <summary>Converts a string-encoded date into a millisecond time. </summary>
 		public static long StringToTime(System.String s)
 		{
-            return SupportClass.Number.Parse(s, SupportClass.Number.MAX_RADIX);
+            return Number.Parse(s, Number.MAX_RADIX);
 		}
 		/// <summary>Converts a string-encoded date into a Date object. </summary>
         public static System.DateTime StringToDate(System.String s)

Modified: incubator/lucene.net/trunk/src/core/Document/DateTools.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Document/DateTools.cs?rev=1294875&r1=1294874&r2=1294875&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Document/DateTools.cs (original)
+++ incubator/lucene.net/trunk/src/core/Document/DateTools.cs Tue Feb 28 22:43:08 2012
@@ -16,9 +16,8 @@
  */
 
 using System;
-
+using Lucene.Net.Search;
 using NumericUtils = Lucene.Net.Util.NumericUtils;
-using NumericRangeQuery = Lucene.Net.Search.NumericRangeQuery;
 
 namespace Lucene.Net.Documents
 {
@@ -43,7 +42,7 @@ namespace Lucene.Net.Documents
     /// For indexing a <see cref="DateTime" />, convert it to unix timestamp as
 	/// <c>long</c> and
 	/// index this as a numeric value with <see cref="NumericField" />
-	/// and use <see cref="NumericRangeQuery" /> to query it.
+	/// and use <see cref="NumericRangeQuery{T}" /> to query it.
 	/// </summary>
 	public class DateTools
 	{

Modified: incubator/lucene.net/trunk/src/core/Document/Document.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Document/Document.cs?rev=1294875&r1=1294874&r2=1294875&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Document/Document.cs (original)
+++ incubator/lucene.net/trunk/src/core/Document/Document.cs Tue Feb 28 22:43:08 2012
@@ -93,7 +93,7 @@ namespace Lucene.Net.Documents
 				return iter.Current;
 			}
 		}
-		internal System.Collections.IList fields = new System.Collections.ArrayList();
+		internal System.Collections.Generic.IList<Fieldable> fields = new System.Collections.Generic.List<Fieldable>();
 		private float boost = 1.0f;
 		
 		/// <summary>Constructs a new document with no fields. </summary>
@@ -162,10 +162,10 @@ namespace Lucene.Net.Documents
 		/// </summary>
 		public void  RemoveField(System.String name)
 		{
-			System.Collections.IEnumerator it = fields.GetEnumerator();
+			System.Collections.Generic.IEnumerator<Fieldable> it = fields.GetEnumerator();
 			while (it.MoveNext())
 			{
-				Fieldable field = (Fieldable) it.Current;
+				Fieldable field = it.Current;
 				if (field.Name().Equals(name))
 				{
                     fields.Remove(field);
@@ -186,7 +186,7 @@ namespace Lucene.Net.Documents
 		{
             for (int i = fields.Count - 1; i >= 0; i--)
             {
-                Fieldable field = (Fieldable) fields[i];
+                Fieldable field = fields[i];
                 if (field.Name().Equals(name))
                 {
                     fields.RemoveAt(i);
@@ -201,13 +201,7 @@ namespace Lucene.Net.Documents
 		/// </summary>
 		public Field GetField(System.String name)
 		{
-			for (int i = 0; i < fields.Count; i++)
-			{
-				Field field = (Field) fields[i];
-				if (field.Name().Equals(name))
-					return field;
-			}
-			return null;
+		    return (Field) GetFieldable(name);
 		}
 		
 		
@@ -217,9 +211,8 @@ namespace Lucene.Net.Documents
 		/// </summary>
 		public Fieldable GetFieldable(System.String name)
 		{
-			for (int i = 0; i < fields.Count; i++)
-			{
-				Fieldable field = (Fieldable) fields[i];
+			foreach(Fieldable field in fields)
+            {
 				if (field.Name().Equals(name))
 					return field;
 			}
@@ -233,30 +226,20 @@ namespace Lucene.Net.Documents
 		/// </summary>
 		public System.String Get(System.String name)
 		{
-			for (int i = 0; i < fields.Count; i++)
+			foreach(Fieldable field in fields)
 			{
-				Fieldable field = (Fieldable) fields[i];
 				if (field.Name().Equals(name) && (!field.IsBinary()))
 					return field.StringValue();
 			}
 			return null;
 		}
 		
-		/// <summary>Returns an Enumeration of all the fields in a document.</summary>
-		/// <deprecated> use <see cref="GetFields()" /> instead
-		/// </deprecated>
-        [Obsolete("Use GetFields() instead")]
-		public System.Collections.IEnumerator Fields()
-		{
-			return new AnonymousClassEnumeration(this);
-		}
-		
 		/// <summary>Returns a List of all the fields in a document.
 		/// <p/>Note that fields which are <i>not</i> <see cref="Fieldable.IsStored()">stored</see> are
 		/// <i>not</i> available in documents retrieved from the
 		/// index, e.g. <see cref="Searcher.Doc(int)" /> or <see cref="IndexReader.Document(int)" />.
 		/// </summary>
-		public System.Collections.IList GetFields()
+		public System.Collections.Generic.IList<Fieldable> GetFields()
 		{
 			return fields;
 		}
@@ -275,20 +258,19 @@ namespace Lucene.Net.Documents
 		/// </returns>
 		public Field[] GetFields(System.String name)
 		{
-			System.Collections.ArrayList result = new System.Collections.ArrayList();
-			for (int i = 0; i < fields.Count; i++)
+			var result = new System.Collections.Generic.List<Field>();
+			foreach(Fieldable field in fields)
 			{
-				Field field = (Field) fields[i];
 				if (field.Name().Equals(name))
 				{
-					result.Add(field);
+					result.Add((Field)field);
 				}
 			}
 			
 			if (result.Count == 0)
 				return NO_FIELDS;
 			
-			return (Field[]) result.ToArray(typeof(Field));
+			return result.ToArray();
 		}
 		
 		
@@ -305,10 +287,9 @@ namespace Lucene.Net.Documents
 		/// </returns>
 		public Fieldable[] GetFieldables(System.String name)
 		{
-			System.Collections.ArrayList result = new System.Collections.ArrayList();
-			for (int i = 0; i < fields.Count; i++)
+			var result = new System.Collections.Generic.List<Fieldable>();
+			foreach(Fieldable field in fields)
 			{
-				Fieldable field = (Fieldable) fields[i];
 				if (field.Name().Equals(name))
 				{
 					result.Add(field);
@@ -318,7 +299,7 @@ namespace Lucene.Net.Documents
 			if (result.Count == 0)
 				return NO_FIELDABLES;
 			
-			return (Fieldable[]) result.ToArray(typeof(Fieldable));
+			return result.ToArray();
 		}
 		
 		
@@ -334,10 +315,9 @@ namespace Lucene.Net.Documents
 		/// </returns>
 		public System.String[] GetValues(System.String name)
 		{
-			System.Collections.ArrayList result = new System.Collections.ArrayList();
-			for (int i = 0; i < fields.Count; i++)
+			var result = new System.Collections.Generic.List<string>();
+			foreach(Fieldable field in fields)
 			{
-				Fieldable field = (Fieldable) fields[i];
 				if (field.Name().Equals(name) && (!field.IsBinary()))
 					result.Add(field.StringValue());
 			}
@@ -345,7 +325,7 @@ namespace Lucene.Net.Documents
 			if (result.Count == 0)
 				return NO_STRINGS;
 			
-			return (System.String[]) result.ToArray(typeof(System.String));
+			return result.ToArray();
 		}
 		
 		private static readonly byte[][] NO_BYTES = new byte[0][];
@@ -362,34 +342,17 @@ namespace Lucene.Net.Documents
 		/// </returns>
 		public byte[][] GetBinaryValues(System.String name)
 		{
-			System.Collections.IList result = new System.Collections.ArrayList();
-			for (int i = 0; i < fields.Count; i++)
+			var result = new System.Collections.Generic.List<byte[]>();
+			foreach(Fieldable field in fields)
 			{
-				Fieldable field = (Fieldable) fields[i];
 				if (field.Name().Equals(name) && (field.IsBinary()))
-					result.Add(field.BinaryValue());
+					result.Add(field.GetBinaryValue());
 			}
 			
 			if (result.Count == 0)
 				return NO_BYTES;
-			
-            System.Collections.ICollection c = result;
-            object[] objects = new byte[result.Count][];
-
-            System.Type type = objects.GetType().GetElementType();
-            object[] objs = (object[])Array.CreateInstance(type, c.Count);
-
-            System.Collections.IEnumerator e = c.GetEnumerator();
-            int ii = 0;
 
-            while (e.MoveNext())
-                objs[ii++] = e.Current;
-
-            // If objects is smaller than c then do not return the new array in the parameter
-            if (objects.Length >= c.Count)
-                objs.CopyTo(objects, 0);
-
-            return (byte[][])objs;
+            return result.ToArray();
         }
 		
 		/// <summary> Returns an array of bytes for the first (or only) field that has the name
@@ -404,11 +367,10 @@ namespace Lucene.Net.Documents
 		/// </returns>
 		public byte[] GetBinaryValue(System.String name)
 		{
-			for (int i = 0; i < fields.Count; i++)
+			foreach(Fieldable field in fields)
 			{
-				Fieldable field = (Fieldable) fields[i];
 				if (field.Name().Equals(name) && (field.IsBinary()))
-					return field.BinaryValue();
+					return field.GetBinaryValue();
 			}
 			return null;
 		}
@@ -420,7 +382,7 @@ namespace Lucene.Net.Documents
 			buffer.Append("Document<");
 			for (int i = 0; i < fields.Count; i++)
 			{
-				Fieldable field = (Fieldable) fields[i];
+				Fieldable field = fields[i];
 				buffer.Append(field.ToString());
 				if (i != fields.Count - 1)
 					buffer.Append(" ");
@@ -429,7 +391,7 @@ namespace Lucene.Net.Documents
 			return buffer.ToString();
 		}
 
-        public System.Collections.IList fields_ForNUnit
+        public System.Collections.Generic.IList<Fieldable> fields_ForNUnit
         {
             get { return fields; }
         }

Modified: incubator/lucene.net/trunk/src/core/Document/Field.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Document/Field.cs?rev=1294875&r1=1294874&r2=1294875&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Document/Field.cs (original)
+++ incubator/lucene.net/trunk/src/core/Document/Field.cs Tue Feb 28 22:43:08 2012
@@ -19,630 +19,649 @@ using System;
 
 using TokenStream = Lucene.Net.Analysis.TokenStream;
 using IndexWriter = Lucene.Net.Index.IndexWriter;
-using Parameter = Lucene.Net.Util.Parameter;
 using StringHelper = Lucene.Net.Util.StringHelper;
 
 namespace Lucene.Net.Documents
 {
-	
-	/// <summary>A field is a section of a Document.  Each field has two parts, a name and a
-	/// value.  Values may be free text, provided as a String or as a Reader, or they
-	/// may be atomic keywords, which are not further processed.  Such keywords may
-	/// be used to represent dates, urls, etc.  Fields are optionally stored in the
-	/// index, so that they may be returned with hits on the document.
-	/// </summary>
-	
-	[Serializable]
-	public sealed class Field:AbstractField, Fieldable
-	{
-		
-		/// <summary>Specifies whether and how a field should be stored. </summary>
-		[Serializable]
-		public sealed class Store:Parameter
-		{
-			
-			internal Store(System.String name):base(name)
-			{
-			}
-			
-			/// <summary>Store the original field value in the index in a compressed form. This is
-			/// useful for long documents and for binary valued fields.
-			/// </summary>
-			/// <deprecated> Please use <see cref="CompressionTools" /> instead.
-			/// For string fields that were previously indexed and stored using compression,
-			/// the new way to achieve this is: First add the field indexed-only (no store)
-			/// and additionally using the same field name as a binary, stored field
-			/// with <see cref="CompressionTools.CompressString(string)" />.
-			/// </deprecated>
-			public static readonly Store COMPRESS = new Store("COMPRESS");
-			
-			/// <summary>Store the original field value in the index. This is useful for short texts
-			/// like a document's title which should be displayed with the results. The
-			/// value is stored in its original form, i.e. no analyzer is used before it is
-			/// stored.
-			/// </summary>
-			public static readonly Store YES = new Store("YES");
-			
-			/// <summary>Do not store the field value in the index. </summary>
-			public static readonly Store NO = new Store("NO");
-		}
-		
-		/// <summary>Specifies whether and how a field should be indexed. </summary>
-		[Serializable]
-		public sealed class Index:Parameter
-		{
-			
-			internal Index(System.String name):base(name)
-			{
-			}
-			
-			/// <summary>Do not index the field value. This field can thus not be searched,
-			/// but one can still access its contents provided it is
-			/// <see cref="Field.Store">stored</see>. 
-			/// </summary>
-			public static readonly Index NO = new Index("NO");
-			
-			/// <summary>Index the tokens produced by running the field's
-			/// value through an Analyzer.  This is useful for
-			/// common text. 
-			/// </summary>
-			public static readonly Index ANALYZED = new Index("ANALYZED");
-			
-			/// <deprecated> this has been renamed to <see cref="ANALYZED" /> 
-			/// </deprecated>
-            [Obsolete("this has been renamed to ANALYZED")]
-			public static readonly Index TOKENIZED;
-			
-			/// <summary>Index the field's value without using an Analyzer, so it can be searched.
-			/// As no analyzer is used the value will be stored as a single term. This is
-			/// useful for unique Ids like product numbers.
-			/// </summary>
-			public static readonly Index NOT_ANALYZED = new Index("NOT_ANALYZED");
-			
-			/// <deprecated> This has been renamed to <see cref="NOT_ANALYZED" /> 
-			/// </deprecated>
-            [Obsolete("This has been renamed to NOT_ANALYZED")]
-			public static readonly Index UN_TOKENIZED;
-			
-			/// <summary>Expert: Index the field's value without an Analyzer,
-			/// and also disable the storing of norms.  Note that you
-			/// can also separately enable/disable norms by calling
+    
+    /// <summary>A field is a section of a Document.  Each field has two parts, a name and a
+    /// value.  Values may be free text, provided as a String or as a Reader, or they
+    /// may be atomic keywords, which are not further processed.  Such keywords may
+    /// be used to represent dates, urls, etc.  Fields are optionally stored in the
+    /// index, so that they may be returned with hits on the document.
+    /// </summary>
+    
+    [Serializable]
+    public sealed class Field:AbstractField, Fieldable
+    {
+        /// <summary>Specifies whether and how a field should be stored. </summary>
+        public enum Store
+        {
+            /// <summary>Store the original field value in the index. This is useful for short texts
+            /// like a document's title which should be displayed with the results. The
+            /// value is stored in its original form, i.e. no analyzer is used before it is
+            /// stored.
+            /// </summary>
+            YES,
+
+            /// <summary>Do not store the field value in the index. </summary>
+            NO
+        }
+        
+        /// <summary>Specifies whether and how a field should be indexed. </summary>
+
+        public enum Index
+        {
+            /// <summary>Do not index the field value. This field can thus not be searched,
+            /// but one can still access its contents provided it is
+            /// <see cref="Field.Store">stored</see>. 
+            /// </summary>
+            NO,
+            
+            /// <summary>Index the tokens produced by running the field's
+            /// value through an Analyzer.  This is useful for
+            /// common text. 
+            /// </summary>
+            ANALYZED,
+            
+            /// <summary>Index the field's value without using an Analyzer, so it can be searched.
+            /// As no analyzer is used the value will be stored as a single term. This is
+            /// useful for unique Ids like product numbers.
+            /// </summary>
+            NOT_ANALYZED,
+            
+            /// <summary>Expert: Index the field's value without an Analyzer,
+            /// and also disable the storing of norms.  Note that you
+            /// can also separately enable/disable norms by calling
             /// <see cref="AbstractField.SetOmitNorms" />.  No norms means that
-			/// index-time field and document boosting and field
-			/// length normalization are disabled.  The benefit is
-			/// less memory usage as norms take up one byte of RAM
-			/// per indexed field for every document in the index,
-			/// during searching.  Note that once you index a given
-			/// field <i>with</i> norms enabled, disabling norms will
-			/// have no effect.  In other words, for this to have the
-			/// above described effect on a field, all instances of
-			/// that field must be indexed with NOT_ANALYZED_NO_NORMS
-			/// from the beginning. 
-			/// </summary>
-			public static readonly Index NOT_ANALYZED_NO_NORMS = new Index("NOT_ANALYZED_NO_NORMS");
-			
-			/// <deprecated> This has been renamed to
-			/// <see cref="NOT_ANALYZED_NO_NORMS" /> 
-			/// </deprecated>
-            [Obsolete("This has been renamed to NOT_ANALYZED_NO_NORMS")]
-			public static readonly Index NO_NORMS;
-			
-			/// <summary>Expert: Index the tokens produced by running the
-			/// field's value through an Analyzer, and also
-			/// separately disable the storing of norms.  See
-			/// <see cref="NOT_ANALYZED_NO_NORMS" /> for what norms are
-			/// and why you may want to disable them. 
-			/// </summary>
-			public static readonly Index ANALYZED_NO_NORMS = new Index("ANALYZED_NO_NORMS");
-			static Index()
-			{
-				TOKENIZED = ANALYZED;
-				UN_TOKENIZED = NOT_ANALYZED;
-				NO_NORMS = NOT_ANALYZED_NO_NORMS;
-			}
-		}
-		
-		/// <summary>Specifies whether and how a field should have term vectors. </summary>
-		[Serializable]
-		public sealed class TermVector:Parameter
-		{
-			
-			internal TermVector(System.String name):base(name)
-			{
-			}
-			
-			/// <summary>Do not store term vectors. </summary>
-			public static readonly TermVector NO = new TermVector("NO");
-			
-			/// <summary>Store the term vectors of each document. A term vector is a list
-			/// of the document's terms and their number of occurrences in that document. 
-			/// </summary>
-			public static readonly TermVector YES = new TermVector("YES");
-			
-			/// <summary> Store the term vector + token position information
-			/// 
-			/// </summary>
-			/// <seealso cref="YES">
-			/// </seealso>
-			public static readonly TermVector WITH_POSITIONS = new TermVector("WITH_POSITIONS");
-			
-			/// <summary> Store the term vector + Token offset information
-			/// 
-			/// </summary>
-			/// <seealso cref="YES">
-			/// </seealso>
-			public static readonly TermVector WITH_OFFSETS = new TermVector("WITH_OFFSETS");
-			
-			/// <summary> Store the term vector + Token position and offset information
-			/// 
-			/// </summary>
-			/// <seealso cref="YES">
-			/// </seealso>
-			/// <seealso cref="WITH_POSITIONS">
-			/// </seealso>
-			/// <seealso cref="WITH_OFFSETS">
-			/// </seealso>
-			public static readonly TermVector WITH_POSITIONS_OFFSETS = new TermVector("WITH_POSITIONS_OFFSETS");
-		}
-		
-		
-		/// <summary>The value of the field as a String, or null.  If null, the Reader value or
-		/// binary value is used.  Exactly one of stringValue(),
-		/// readerValue(), and getBinaryValue() must be set. 
-		/// </summary>
-		public override System.String StringValue()
-		{
-			return fieldsData is System.String?(System.String) fieldsData:null;
-		}
-		
-		/// <summary>The value of the field as a Reader, or null.  If null, the String value or
-		/// binary value is used.  Exactly one of stringValue(),
-		/// readerValue(), and getBinaryValue() must be set. 
-		/// </summary>
-		public override System.IO.TextReader ReaderValue()
-		{
-			return fieldsData is System.IO.TextReader?(System.IO.TextReader) fieldsData:null;
-		}
-		
-		/// <summary>The value of the field in Binary, or null.  If null, the Reader value,
-		/// or String value is used. Exactly one of stringValue(),
-		/// readerValue(), and getBinaryValue() must be set.
-		/// </summary>
-		/// <deprecated> This method must allocate a new byte[] if
-		/// the <see cref="AbstractField.GetBinaryOffset()" /> is non-zero
-		/// or <see cref="AbstractField.GetBinaryLength()" /> is not the
-		/// full length of the byte[]. Please use <see cref="AbstractField.GetBinaryValue()" />
-		/// instead, which simply
-		/// returns the byte[].
-		/// </deprecated>
-        [Obsolete("This method must allocate a new byte[] if the AbstractField.GetBinaryOffset() is non-zero or AbstractField.GetBinaryLength() is not the full length of the byte[]. Please use AbstractField.GetBinaryValue() instead, which simply returns the byte[].")]
-		public override byte[] BinaryValue()
-		{
-			if (!isBinary)
-				return null;
-			byte[] data = (byte[]) fieldsData;
-			if (binaryOffset == 0 && data.Length == binaryLength)
-				return data; //Optimization
-			
-			byte[] ret = new byte[binaryLength];
-			Array.Copy(data, binaryOffset, ret, 0, binaryLength);
-			return ret;
-		}
-		
-		/// <summary>The TokesStream for this field to be used when indexing, or null.  If null, the Reader value
-		/// or String value is analyzed to produce the indexed tokens. 
-		/// </summary>
-		public override TokenStream TokenStreamValue()
-		{
-			return tokenStream;
-		}
-		
-		
-		/// <summary><p/>Expert: change the value of this field.  This can
-		/// be used during indexing to re-use a single Field
-		/// instance to improve indexing speed by avoiding GC cost
-		/// of new'ing and reclaiming Field instances.  Typically
-		/// a single <see cref="Document" /> instance is re-used as
-		/// well.  This helps most on small documents.<p/>
-		/// 
-		/// <p/>Each Field instance should only be used once
-		/// within a single <see cref="Document" /> instance.  See <a
-		/// href="http://wiki.apache.org/lucene-java/ImproveIndexingSpeed">ImproveIndexingSpeed</a>
-		/// for details.<p/> 
-		/// </summary>
-		public void  SetValue(System.String value_Renamed)
-		{
-			if (isBinary)
-			{
-				throw new System.ArgumentException("cannot set a String value on a binary field");
-			}
-			fieldsData = value_Renamed;
-		}
-		
-		/// <summary>Expert: change the value of this field.  See <a href="#setValue(java.lang.String)">setValue(String)</a>. </summary>
-		public void  SetValue(System.IO.TextReader value_Renamed)
-		{
-			if (isBinary)
-			{
-				throw new System.ArgumentException("cannot set a Reader value on a binary field");
-			}
-			if (isStored)
-			{
-				throw new System.ArgumentException("cannot set a Reader value on a stored field");
-			}
-			fieldsData = value_Renamed;
-		}
-		
-		/// <summary>Expert: change the value of this field.  See <a href="#setValue(java.lang.String)">setValue(String)</a>. </summary>
-		public void  SetValue(byte[] value_Renamed)
-		{
-			if (!isBinary)
-			{
-				throw new System.ArgumentException("cannot set a byte[] value on a non-binary field");
-			}
-			fieldsData = value_Renamed;
-			binaryLength = value_Renamed.Length;
-			binaryOffset = 0;
-		}
-		
-		/// <summary>Expert: change the value of this field.  See <a href="#setValue(java.lang.String)">setValue(String)</a>. </summary>
-		public void  SetValue(byte[] value_Renamed, int offset, int length)
-		{
-			if (!isBinary)
-			{
-				throw new System.ArgumentException("cannot set a byte[] value on a non-binary field");
-			}
-			fieldsData = value_Renamed;
-			binaryLength = length;
-			binaryOffset = offset;
-		}
-		
-		
-		/// <summary>Expert: change the value of this field.  See <a href="#setValue(java.lang.String)">setValue(String)</a>.</summary>
-		/// <deprecated> use <see cref="SetTokenStream" /> 
-		/// </deprecated>
-        [Obsolete("use SetTokenStream ")]
-		public void SetValue(TokenStream value_Renamed)
-		{
-			if (isBinary)
-			{
-				throw new System.ArgumentException("cannot set a TokenStream value on a binary field");
-			}
-			if (isStored)
-			{
-				throw new System.ArgumentException("cannot set a TokenStream value on a stored field");
-			}
-			fieldsData = null;
-			tokenStream = value_Renamed;
-		}
-		
-		/// <summary>Expert: sets the token stream to be used for indexing and causes isIndexed() and isTokenized() to return true.
-		/// May be combined with stored values from stringValue() or binaryValue() 
-		/// </summary>
-		public void  SetTokenStream(TokenStream tokenStream)
-		{
-			this.isIndexed = true;
-			this.isTokenized = true;
-			this.tokenStream = tokenStream;
-		}
-		
-		/// <summary> Create a field by specifying its name, value and how it will
-		/// be saved in the index. Term vectors will not be stored in the index.
-		/// 
-		/// </summary>
-		/// <param name="name">The name of the field
-		/// </param>
-		/// <param name="value_Renamed">The string to process
-		/// </param>
-		/// <param name="store">Whether <c>value</c> should be stored in the index
-		/// </param>
-		/// <param name="index">Whether the field should be indexed, and if so, if it should
-		/// be tokenized before indexing 
-		/// </param>
-		/// <throws>  NullPointerException if name or value is <c>null</c> </throws>
-		/// <throws>  IllegalArgumentException if the field is neither stored nor indexed  </throws>
-		public Field(System.String name, System.String value_Renamed, Store store, Index index):this(name, value_Renamed, store, index, TermVector.NO)
-		{
-		}
-		
-		/// <summary> Create a field by specifying its name, value and how it will
-		/// be saved in the index.
-		/// 
-		/// </summary>
-		/// <param name="name">The name of the field
-		/// </param>
-		/// <param name="value_Renamed">The string to process
-		/// </param>
-		/// <param name="store">Whether <c>value</c> should be stored in the index
-		/// </param>
-		/// <param name="index">Whether the field should be indexed, and if so, if it should
-		/// be tokenized before indexing 
-		/// </param>
-		/// <param name="termVector">Whether term vector should be stored
-		/// </param>
-		/// <throws>  NullPointerException if name or value is <c>null</c> </throws>
-		/// <throws>  IllegalArgumentException in any of the following situations: </throws>
-		/// <summary> <list> 
-		/// <item>the field is neither stored nor indexed</item> 
-		/// <item>the field is not indexed but termVector is <c>TermVector.YES</c></item>
-		/// </list> 
-		/// </summary>
-		public Field(System.String name, System.String value_Renamed, Store store, Index index, TermVector termVector):this(name, true, value_Renamed, store, index, termVector)
-		{
-		}
-		
-		/// <summary> Create a field by specifying its name, value and how it will
-		/// be saved in the index.
-		/// 
-		/// </summary>
-		/// <param name="name">The name of the field
-		/// </param>
-		/// <param name="internName">Whether to .intern() name or not
-		/// </param>
-		/// <param name="value_Renamed">The string to process
-		/// </param>
-		/// <param name="store">Whether <c>value</c> should be stored in the index
-		/// </param>
-		/// <param name="index">Whether the field should be indexed, and if so, if it should
-		/// be tokenized before indexing 
-		/// </param>
-		/// <param name="termVector">Whether term vector should be stored
-		/// </param>
-		/// <throws>  NullPointerException if name or value is <c>null</c> </throws>
-		/// <throws>  IllegalArgumentException in any of the following situations: </throws>
-		/// <summary> <list> 
-		/// <item>the field is neither stored nor indexed</item> 
-		/// <item>the field is not indexed but termVector is <c>TermVector.YES</c></item>
-		/// </list> 
-		/// </summary>
-		public Field(System.String name, bool internName, System.String value_Renamed, Store store, Index index, TermVector termVector)
-		{
-			if (name == null)
-				throw new System.NullReferenceException("name cannot be null");
-			if (value_Renamed == null)
-				throw new System.NullReferenceException("value cannot be null");
-			if (name.Length == 0 && value_Renamed.Length == 0)
-				throw new System.ArgumentException("name and value cannot both be empty");
-			if (index == Index.NO && store == Store.NO)
-				throw new System.ArgumentException("it doesn't make sense to have a field that " + "is neither indexed nor stored");
-			if (index == Index.NO && termVector != TermVector.NO)
-				throw new System.ArgumentException("cannot store term vector information " + "for a field that is not indexed");
-			
-			if (internName)
-			// field names are optionally interned
-				name = StringHelper.Intern(name);
-			
-			this.name = name;
-			
-			this.fieldsData = value_Renamed;
-			
-			if (store == Store.YES)
-			{
-				this.isStored = true;
-				this.isCompressed = false;
-			}
-			else if (store == Store.COMPRESS)
-			{
-				this.isStored = true;
-				this.isCompressed = true;
-			}
-			else if (store == Store.NO)
-			{
-				this.isStored = false;
-				this.isCompressed = false;
-			}
-			else
-			{
-				throw new System.ArgumentException("unknown store parameter " + store);
-			}
-			
-			if (index == Index.NO)
-			{
-				this.isIndexed = false;
-				this.isTokenized = false;
-				this.omitTermFreqAndPositions = false;
-				this.omitNorms = true;
-			}
-			else if (index == Index.ANALYZED)
-			{
-				this.isIndexed = true;
-				this.isTokenized = true;
-			}
-			else if (index == Index.NOT_ANALYZED)
-			{
-				this.isIndexed = true;
-				this.isTokenized = false;
-			}
-			else if (index == Index.NOT_ANALYZED_NO_NORMS)
-			{
-				this.isIndexed = true;
-				this.isTokenized = false;
-				this.omitNorms = true;
-			}
-			else if (index == Index.ANALYZED_NO_NORMS)
-			{
-				this.isIndexed = true;
-				this.isTokenized = true;
-				this.omitNorms = true;
-			}
-			else
-			{
-				throw new System.ArgumentException("unknown index parameter " + index);
-			}
-			
-			this.isBinary = false;
-			
-			SetStoreTermVector(termVector);
-		}
-		
-		/// <summary> Create a tokenized and indexed field that is not stored. Term vectors will
-		/// not be stored.  The Reader is read only when the Document is added to the index,
-		/// i.e. you may not close the Reader until <see cref="IndexWriter.AddDocument(Document)" />
-		/// has been called.
-		/// 
-		/// </summary>
-		/// <param name="name">The name of the field
-		/// </param>
-		/// <param name="reader">The reader with the content
-		/// </param>
-		/// <throws>  NullPointerException if name or reader is <c>null</c> </throws>
-		public Field(System.String name, System.IO.TextReader reader):this(name, reader, TermVector.NO)
-		{
-		}
-		
-		/// <summary> Create a tokenized and indexed field that is not stored, optionally with 
-		/// storing term vectors.  The Reader is read only when the Document is added to the index,
-		/// i.e. you may not close the Reader until <see cref="IndexWriter.AddDocument(Document)" />
-		/// has been called.
-		/// 
-		/// </summary>
-		/// <param name="name">The name of the field
-		/// </param>
-		/// <param name="reader">The reader with the content
-		/// </param>
-		/// <param name="termVector">Whether term vector should be stored
-		/// </param>
-		/// <throws>  NullPointerException if name or reader is <c>null</c> </throws>
-		public Field(System.String name, System.IO.TextReader reader, TermVector termVector)
-		{
-			if (name == null)
-				throw new System.NullReferenceException("name cannot be null");
-			if (reader == null)
-				throw new System.NullReferenceException("reader cannot be null");
-			
-			this.name = StringHelper.Intern(name); // field names are interned
-			this.fieldsData = reader;
-			
-			this.isStored = false;
-			this.isCompressed = false;
-			
-			this.isIndexed = true;
-			this.isTokenized = true;
-			
-			this.isBinary = false;
-			
-			SetStoreTermVector(termVector);
-		}
-		
-		/// <summary> Create a tokenized and indexed field that is not stored. Term vectors will
-		/// not be stored. This is useful for pre-analyzed fields.
-		/// The TokenStream is read only when the Document is added to the index,
-		/// i.e. you may not close the TokenStream until <see cref="IndexWriter.AddDocument(Document)" />
-		/// has been called.
-		/// 
-		/// </summary>
-		/// <param name="name">The name of the field
-		/// </param>
-		/// <param name="tokenStream">The TokenStream with the content
-		/// </param>
-		/// <throws>  NullPointerException if name or tokenStream is <c>null</c> </throws>
-		public Field(System.String name, TokenStream tokenStream):this(name, tokenStream, TermVector.NO)
-		{
-		}
-		
-		/// <summary> Create a tokenized and indexed field that is not stored, optionally with 
-		/// storing term vectors.  This is useful for pre-analyzed fields.
-		/// The TokenStream is read only when the Document is added to the index,
-		/// i.e. you may not close the TokenStream until <see cref="IndexWriter.AddDocument(Document)" />
-		/// has been called.
-		/// 
-		/// </summary>
-		/// <param name="name">The name of the field
-		/// </param>
-		/// <param name="tokenStream">The TokenStream with the content
-		/// </param>
-		/// <param name="termVector">Whether term vector should be stored
-		/// </param>
-		/// <throws>  NullPointerException if name or tokenStream is <c>null</c> </throws>
-		public Field(System.String name, TokenStream tokenStream, TermVector termVector)
-		{
-			if (name == null)
-				throw new System.NullReferenceException("name cannot be null");
-			if (tokenStream == null)
-				throw new System.NullReferenceException("tokenStream cannot be null");
-			
-			this.name = StringHelper.Intern(name); // field names are interned
-			this.fieldsData = null;
-			this.tokenStream = tokenStream;
-			
-			this.isStored = false;
-			this.isCompressed = false;
-			
-			this.isIndexed = true;
-			this.isTokenized = true;
-			
-			this.isBinary = false;
-			
-			SetStoreTermVector(termVector);
-		}
-		
-		
-		/// <summary> Create a stored field with binary value. Optionally the value may be compressed.
-		/// 
-		/// </summary>
-		/// <param name="name">The name of the field
-		/// </param>
-		/// <param name="value_Renamed">The binary value
-		/// </param>
-		/// <param name="store">How <c>value</c> should be stored (compressed or not)
-		/// </param>
-		/// <throws>  IllegalArgumentException if store is <c>Store.NO</c>  </throws>
-		public Field(System.String name, byte[] value_Renamed, Store store):this(name, value_Renamed, 0, value_Renamed.Length, store)
-		{
-		}
-		
-		/// <summary> Create a stored field with binary value. Optionally the value may be compressed.
-		/// 
-		/// </summary>
-		/// <param name="name">The name of the field
-		/// </param>
-		/// <param name="value_Renamed">The binary value
-		/// </param>
-		/// <param name="offset">Starting offset in value where this Field's bytes are
-		/// </param>
-		/// <param name="length">Number of bytes to use for this Field, starting at offset
-		/// </param>
-		/// <param name="store">How <c>value</c> should be stored (compressed or not)
-		/// </param>
-		/// <throws>  IllegalArgumentException if store is <c>Store.NO</c>  </throws>
-		public Field(System.String name, byte[] value_Renamed, int offset, int length, Store store)
-		{
-			
-			if (name == null)
-				throw new System.ArgumentException("name cannot be null");
-			if (value_Renamed == null)
-				throw new System.ArgumentException("value cannot be null");
-			
-			this.name = StringHelper.Intern(name); // field names are interned
-			fieldsData = value_Renamed;
-			
-			if (store == Store.YES)
-			{
-				isStored = true;
-				isCompressed = false;
-			}
-			else if (store == Store.COMPRESS)
-			{
-				isStored = true;
-				isCompressed = true;
-			}
-			else if (store == Store.NO)
-				throw new System.ArgumentException("binary values can't be unstored");
-			else
-			{
-				throw new System.ArgumentException("unknown store parameter " + store);
-			}
-			
-			isIndexed = false;
-			isTokenized = false;
-			omitTermFreqAndPositions = false;
-			omitNorms = true;
-			
-			isBinary = true;
-			binaryLength = length;
-			binaryOffset = offset;
-			
-			SetStoreTermVector(TermVector.NO);
-		}
-	}
+            /// index-time field and document boosting and field
+            /// length normalization are disabled.  The benefit is
+            /// less memory usage as norms take up one byte of RAM
+            /// per indexed field for every document in the index,
+            /// during searching.  Note that once you index a given
+            /// field <i>with</i> norms enabled, disabling norms will
+            /// have no effect.  In other words, for this to have the
+            /// above described effect on a field, all instances of
+            /// that field must be indexed with NOT_ANALYZED_NO_NORMS
+            /// from the beginning. 
+            /// </summary>
+            NOT_ANALYZED_NO_NORMS,
+            
+            /// <summary>Expert: Index the tokens produced by running the
+            /// field's value through an Analyzer, and also
+            /// separately disable the storing of norms.  See
+            /// <see cref="NOT_ANALYZED_NO_NORMS" /> for what norms are
+            /// and why you may want to disable them. 
+            /// </summary>
+            ANALYZED_NO_NORMS,
+        }
+        
+        /// <summary>Specifies whether and how a field should have term vectors. </summary>
+        public enum TermVector
+        {
+            /// <summary>Do not store term vectors. </summary>
+            NO,
+            
+            /// <summary>Store the term vectors of each document. A term vector is a list
+            /// of the document's terms and their number of occurrences in that document. 
+            /// </summary>
+            YES,
+            
+            /// <summary> Store the term vector + token position information
+            /// 
+            /// </summary>
+            /// <seealso cref="YES">
+            /// </seealso>
+            WITH_POSITIONS,
+            
+            /// <summary> Store the term vector + Token offset information
+            /// 
+            /// </summary>
+            /// <seealso cref="YES">
+            /// </seealso>
+            WITH_OFFSETS,
+            
+            /// <summary> Store the term vector + Token position and offset information
+            /// 
+            /// </summary>
+            /// <seealso cref="YES">
+            /// </seealso>
+            /// <seealso cref="WITH_POSITIONS">
+            /// </seealso>
+            /// <seealso cref="WITH_OFFSETS">
+            /// </seealso>
+            WITH_POSITIONS_OFFSETS,
+        }
+        
+        
+        /// <summary>The value of the field as a String, or null.  If null, the Reader value or
+        /// binary value is used.  Exactly one of stringValue(),
+        /// readerValue(), and getBinaryValue() must be set. 
+        /// </summary>
+        public override System.String StringValue()
+        {
+            // 'as' returns the string when fieldsData holds one, otherwise null.
+            return fieldsData as System.String;
+        }
+        
+        /// <summary>The value of the field as a Reader, or null.  If null, the String value or
+        /// binary value is used.  Exactly one of stringValue(),
+        /// readerValue(), and getBinaryValue() must be set. 
+        /// </summary>
+        public override System.IO.TextReader ReaderValue()
+        {
+            // 'as' returns the reader when fieldsData holds one, otherwise null.
+            return fieldsData as System.IO.TextReader;
+        }
+
+        /// <summary>The TokenStream for this field to be used when indexing, or null.  If null, the Reader value
+        /// or String value is analyzed to produce the indexed tokens. 
+        /// </summary>
+        public override TokenStream TokenStreamValue()
+        {
+            return tokenStream;
+        }
+        
+        
+        /// <summary><p/>Expert: change the value of this field.  This can
+        /// be used during indexing to re-use a single Field
+        /// instance to improve indexing speed by avoiding GC cost
+        /// of new'ing and reclaiming Field instances.  Typically
+        /// a single <see cref="Document" /> instance is re-used as
+        /// well.  This helps most on small documents.<p/>
+        /// 
+        /// <p/>Each Field instance should only be used once
+        /// within a single <see cref="Document" /> instance.  See <a
+        /// href="http://wiki.apache.org/lucene-java/ImproveIndexingSpeed">ImproveIndexingSpeed</a>
+        /// for details.<p/> 
+        /// </summary>
+        public void SetValue(System.String value_Renamed)
+        {
+            // A binary field can only hold a byte[] payload.
+            if (isBinary)
+                throw new System.ArgumentException("cannot set a String value on a binary field");
+
+            fieldsData = value_Renamed;
+        }
+        
+        /// <summary>Expert: change the value of this field to a Reader.  See <a href="#setValue(java.lang.String)">setValue(String)</a>. </summary>
+        public void SetValue(System.IO.TextReader value_Renamed)
+        {
+            // A Reader is incompatible with binary fields and with stored
+            // fields (a stored value must be materialized, a Reader is not).
+            if (isBinary)
+                throw new System.ArgumentException("cannot set a Reader value on a binary field");
+            if (isStored)
+                throw new System.ArgumentException("cannot set a Reader value on a stored field");
+
+            fieldsData = value_Renamed;
+        }
+        
+        /// <summary>Expert: change the value of this field to a binary value,
+        /// using the whole array.  See <a href="#setValue(java.lang.String)">setValue(String)</a>. </summary>
+        /// <throws>  ArgumentNullException if value is <c>null</c> </throws>
+        /// <throws>  ArgumentException if this is not a binary field </throws>
+        public void  SetValue(byte[] value_Renamed)
+        {
+            // Guard explicitly: otherwise value_Renamed.Length below raises a
+            // raw NullReferenceException, which user code should never surface.
+            if (value_Renamed == null)
+            {
+                throw new System.ArgumentNullException("value_Renamed", "value cannot be null");
+            }
+            if (!isBinary)
+            {
+                throw new System.ArgumentException("cannot set a byte[] value on a non-binary field");
+            }
+            fieldsData = value_Renamed;
+            binaryLength = value_Renamed.Length;
+            binaryOffset = 0;
+        }
+        
+        /// <summary>Expert: change the value of this field to a slice of a binary value.
+        /// See <a href="#setValue(java.lang.String)">setValue(String)</a>. </summary>
+        /// <throws>  ArgumentNullException if value is <c>null</c> </throws>
+        /// <throws>  ArgumentOutOfRangeException if offset/length do not describe a valid slice </throws>
+        /// <throws>  ArgumentException if this is not a binary field </throws>
+        public void  SetValue(byte[] value_Renamed, int offset, int length)
+        {
+            if (value_Renamed == null)
+            {
+                throw new System.ArgumentNullException("value_Renamed", "value cannot be null");
+            }
+            // Validate the slice now; an invalid offset/length would otherwise
+            // only fail later, far from the caller, when the bytes are read.
+            if (offset < 0 || length < 0 || offset + length > value_Renamed.Length)
+            {
+                throw new System.ArgumentOutOfRangeException("offset", "offset/length describe a range outside the given byte array");
+            }
+            if (!isBinary)
+            {
+                throw new System.ArgumentException("cannot set a byte[] value on a non-binary field");
+            }
+            fieldsData = value_Renamed;
+            binaryLength = length;
+            binaryOffset = offset;
+        }
+        
+        /// <summary>Expert: sets the token stream to be used for indexing and causes isIndexed() and isTokenized() to return true.
+        /// May be combined with stored values from stringValue() or GetBinaryValue() 
+        /// </summary>
+        public void  SetTokenStream(TokenStream tokenStream)
+        {
+            // Force indexed+tokenized so the supplied stream is consumed at index time.
+            this.isIndexed = true;
+            this.isTokenized = true;
+            this.tokenStream = tokenStream;
+        }
+        
+        /// <summary> Create a field by specifying its name, value and how it will
+        /// be saved in the index. Term vectors will not be stored in the index.
+        /// 
+        /// </summary>
+        /// <param name="name">The name of the field
+        /// </param>
+        /// <param name="value_Renamed">The string to process
+        /// </param>
+        /// <param name="store">Whether <c>value</c> should be stored in the index
+        /// </param>
+        /// <param name="index">Whether the field should be indexed, and if so, if it should
+        /// be tokenized before indexing 
+        /// </param>
+        /// <throws>  NullReferenceException if name or value is <c>null</c> </throws>
+        /// <throws>  ArgumentException if the field is neither stored nor indexed  </throws>
+        public Field(System.String name, System.String value_Renamed, Store store, Index index)
+            : this(name, value_Renamed, store, index, TermVector.NO)
+        {
+        }
+        
+        /// <summary> Create a field by specifying its name, value and how it will
+        /// be saved in the index. Delegates to the full constructor with name
+        /// interning enabled.
+        /// 
+        /// </summary>
+        /// <param name="name">The name of the field
+        /// </param>
+        /// <param name="value_Renamed">The string to process
+        /// </param>
+        /// <param name="store">Whether <c>value</c> should be stored in the index
+        /// </param>
+        /// <param name="index">Whether the field should be indexed, and if so, if it should
+        /// be tokenized before indexing 
+        /// </param>
+        /// <param name="termVector">Whether term vector should be stored
+        /// </param>
+        /// <throws>  NullReferenceException if name or value is <c>null</c> </throws>
+        /// <throws>  ArgumentException in any of the following situations: </throws>
+        /// <summary> <list> 
+        /// <item>the field is neither stored nor indexed</item> 
+        /// <item>the field is not indexed but termVector is <c>TermVector.YES</c></item>
+        /// </list> 
+        /// </summary>
+        public Field(System.String name, System.String value_Renamed, Store store, Index index, TermVector termVector)
+            : this(name, true, value_Renamed, store, index, termVector)
+        {
+        }
+        
+        /// <summary> Create a field by specifying its name, value and how it will
+        /// be saved in the index.
+        /// 
+        /// </summary>
+        /// <param name="name">The name of the field
+        /// </param>
+        /// <param name="internName">Whether to .intern() name or not
+        /// </param>
+        /// <param name="value_Renamed">The string to process
+        /// </param>
+        /// <param name="store">Whether <c>value</c> should be stored in the index
+        /// </param>
+        /// <param name="index">Whether the field should be indexed, and if so, if it should
+        /// be tokenized before indexing 
+        /// </param>
+        /// <param name="termVector">Whether term vector should be stored
+        /// </param>
+        /// <throws>  ArgumentNullException if name or value is <c>null</c> </throws>
+        /// <throws>  ArgumentException in any of the following situations: </throws>
+        /// <summary> <list> 
+        /// <item>the field is neither stored nor indexed</item> 
+        /// <item>the field is not indexed but termVector is <c>TermVector.YES</c></item>
+        /// </list> 
+        /// </summary>
+        public Field(System.String name, bool internName, System.String value_Renamed, Store store, Index index, TermVector termVector)
+        {
+            // Throw ArgumentNullException rather than NullReferenceException:
+            // NRE is reserved for the runtime and should never be thrown by user code.
+            if (name == null)
+                throw new System.ArgumentNullException("name", "name cannot be null");
+            if (value_Renamed == null)
+                throw new System.ArgumentNullException("value_Renamed", "value cannot be null");
+            if (name.Length == 0 && value_Renamed.Length == 0)
+                throw new System.ArgumentException("name and value cannot both be empty");
+            if (index == Index.NO && store == Store.NO)
+                throw new System.ArgumentException("it doesn't make sense to have a field that " + "is neither indexed nor stored");
+            if (index == Index.NO && termVector != TermVector.NO)
+                throw new System.ArgumentException("cannot store term vector information " + "for a field that is not indexed");
+            
+            if (internName)
+            // field names are optionally interned
+                name = StringHelper.Intern(name);
+            
+            this.name = name;
+            
+            this.fieldsData = value_Renamed;
+
+            this.isStored = store.IsStored();
+
+            this.isIndexed = index.IsIndexed();
+            this.isTokenized = index.IsAnalyzed();
+            this.omitNorms = index.OmitNorms();
+
+            if (index == Index.NO)
+            {
+                // Term frequency/position data is meaningless for an unindexed field.
+                this.omitTermFreqAndPositions = false;
+            }
+            
+            this.isBinary = false;
+            
+            SetStoreTermVector(termVector);
+        }
+        
+        /// <summary> Create a tokenized and indexed field that is not stored. Term vectors will
+        /// not be stored.  The Reader is read only when the Document is added to the index,
+        /// i.e. you may not close the Reader until <see cref="IndexWriter.AddDocument(Document)" />
+        /// has been called.
+        /// 
+        /// </summary>
+        /// <param name="name">The name of the field
+        /// </param>
+        /// <param name="reader">The reader with the content
+        /// </param>
+        /// <throws>  NullReferenceException if name or reader is <c>null</c> </throws>
+        public Field(System.String name, System.IO.TextReader reader):this(name, reader, TermVector.NO)
+        {
+        }
+        
+        /// <summary> Create a tokenized and indexed field that is not stored, optionally with 
+        /// storing term vectors.  The Reader is read only when the Document is added to the index,
+        /// i.e. you may not close the Reader until <see cref="IndexWriter.AddDocument(Document)" />
+        /// has been called.
+        /// 
+        /// </summary>
+        /// <param name="name">The name of the field
+        /// </param>
+        /// <param name="reader">The reader with the content
+        /// </param>
+        /// <param name="termVector">Whether term vector should be stored
+        /// </param>
+        /// <throws>  ArgumentNullException if name or reader is <c>null</c> </throws>
+        public Field(System.String name, System.IO.TextReader reader, TermVector termVector)
+        {
+            // ArgumentNullException (not NullReferenceException) is the correct
+            // exception for a null argument supplied by the caller.
+            if (name == null)
+                throw new System.ArgumentNullException("name", "name cannot be null");
+            if (reader == null)
+                throw new System.ArgumentNullException("reader", "reader cannot be null");
+            
+            this.name = StringHelper.Intern(name); // field names are interned
+            this.fieldsData = reader;
+            
+            this.isStored = false;
+            
+            this.isIndexed = true;
+            this.isTokenized = true;
+            
+            this.isBinary = false;
+            
+            SetStoreTermVector(termVector);
+        }
+        
+        /// <summary> Create a tokenized and indexed field that is not stored. Term vectors will
+        /// not be stored. This is useful for pre-analyzed fields.
+        /// The TokenStream is read only when the Document is added to the index,
+        /// i.e. you may not close the TokenStream until <see cref="IndexWriter.AddDocument(Document)" />
+        /// has been called.
+        /// 
+        /// </summary>
+        /// <param name="name">The name of the field
+        /// </param>
+        /// <param name="tokenStream">The TokenStream with the content
+        /// </param>
+        /// <throws>  NullReferenceException if name or tokenStream is <c>null</c> </throws>
+        public Field(System.String name, TokenStream tokenStream):this(name, tokenStream, TermVector.NO)
+        {
+        }
+        
+        /// <summary> Create a tokenized and indexed field that is not stored, optionally with 
+        /// storing term vectors.  This is useful for pre-analyzed fields.
+        /// The TokenStream is read only when the Document is added to the index,
+        /// i.e. you may not close the TokenStream until <see cref="IndexWriter.AddDocument(Document)" />
+        /// has been called.
+        /// 
+        /// </summary>
+        /// <param name="name">The name of the field
+        /// </param>
+        /// <param name="tokenStream">The TokenStream with the content
+        /// </param>
+        /// <param name="termVector">Whether term vector should be stored
+        /// </param>
+        /// <throws>  ArgumentNullException if name or tokenStream is <c>null</c> </throws>
+        public Field(System.String name, TokenStream tokenStream, TermVector termVector)
+        {
+            // ArgumentNullException (not NullReferenceException) is the correct
+            // exception for a null argument supplied by the caller.
+            if (name == null)
+                throw new System.ArgumentNullException("name", "name cannot be null");
+            if (tokenStream == null)
+                throw new System.ArgumentNullException("tokenStream", "tokenStream cannot be null");
+            
+            this.name = StringHelper.Intern(name); // field names are interned
+            this.fieldsData = null;
+            this.tokenStream = tokenStream;
+            
+            this.isStored = false;
+            
+            this.isIndexed = true;
+            this.isTokenized = true;
+            
+            this.isBinary = false;
+            
+            SetStoreTermVector(termVector);
+        }
+        
+        
+        /// <summary> Create a stored field with binary value. Optionally the value may be compressed.
+        /// 
+        /// </summary>
+        /// <param name="name">The name of the field
+        /// </param>
+        /// <param name="value_Renamed">The binary value
+        /// </param>
+        /// <param name="store">How <c>value</c> should be stored (compressed or not)
+        /// </param>
+        /// <throws>  ArgumentException if store is <c>Store.NO</c>  </throws>
+        // NOTE(review): the constructor-chain argument dereferences
+        // value_Renamed.Length BEFORE any null check runs, so a null value
+        // surfaces as a NullReferenceException here — consider guarding.
+        public Field(System.String name, byte[] value_Renamed, Store store):this(name, value_Renamed, 0, value_Renamed.Length, store)
+        {
+        }
+        
+        /// <summary> Create a stored field with binary value. Optionally the value may be compressed.
+        /// 
+        /// </summary>
+        /// <param name="name">The name of the field
+        /// </param>
+        /// <param name="value_Renamed">The binary value
+        /// </param>
+        /// <param name="offset">Starting offset in value where this Field's bytes are
+        /// </param>
+        /// <param name="length">Number of bytes to use for this Field, starting at offset
+        /// </param>
+        /// <param name="store">How <c>value</c> should be stored (compressed or not)
+        /// </param>
+        /// <throws>  ArgumentNullException if name or value is <c>null</c> </throws>
+        /// <throws>  ArgumentOutOfRangeException if offset/length do not describe a valid slice </throws>
+        /// <throws>  ArgumentException if store is <c>Store.NO</c>  </throws>
+        public Field(System.String name, byte[] value_Renamed, int offset, int length, Store store)
+        {
+            
+            // ArgumentNullException for null args, consistent with the other constructors.
+            if (name == null)
+                throw new System.ArgumentNullException("name", "name cannot be null");
+            if (value_Renamed == null)
+                throw new System.ArgumentNullException("value_Renamed", "value cannot be null");
+            // Validate the slice up front rather than failing later when the bytes are read.
+            if (offset < 0 || length < 0 || offset + length > value_Renamed.Length)
+                throw new System.ArgumentOutOfRangeException("offset", "offset/length describe a range outside the given byte array");
+            // Reject Store.NO before mutating any state.
+            if (store == Store.NO)
+                throw new System.ArgumentException("binary values can't be unstored");
+            
+            this.name = StringHelper.Intern(name); // field names are interned
+            fieldsData = value_Renamed;
+
+            isStored = store.IsStored();
+            isIndexed = false;
+            isTokenized = false;
+            omitTermFreqAndPositions = false;
+            omitNorms = true;
+            
+            isBinary = true;
+            binaryLength = length;
+            binaryOffset = offset;
+            
+            SetStoreTermVector(TermVector.NO);
+        }
+    }
+
+    /// <summary>Extension and helper methods that map the <c>Field.Store</c>,
+    /// <c>Field.Index</c> and <c>Field.TermVector</c> enums to and from the
+    /// boolean flags they encode.</summary>
+    public static class FieldExtensions
+    {
+        /// <summary>True if this Store value stores the field value in the index.</summary>
+        public static bool IsStored(this Field.Store store)
+        {
+            switch(store)
+            {
+                case Field.Store.YES:
+                    return true;
+                case Field.Store.NO:
+                    return false;
+                default:
+                    throw new ArgumentOutOfRangeException("store", "Invalid value for Field.Store");
+            }
+        }
+
+        /// <summary>True if this Index value causes the field to be indexed at all.</summary>
+        public static bool IsIndexed(this Field.Index index)
+        {
+            switch(index)
+            {
+                case Field.Index.NO:
+                    return false;
+                case Field.Index.ANALYZED:
+                case Field.Index.NOT_ANALYZED:
+                case Field.Index.NOT_ANALYZED_NO_NORMS:
+                case Field.Index.ANALYZED_NO_NORMS:
+                    return true;
+                default:
+                    throw new ArgumentOutOfRangeException("index", "Invalid value for Field.Index");
+            }
+        }
+
+        /// <summary>True if this Index value runs the field value through an analyzer.</summary>
+        public static bool IsAnalyzed(this Field.Index index)
+        {
+            switch (index)
+            {
+                case Field.Index.NO:
+                case Field.Index.NOT_ANALYZED:
+                case Field.Index.NOT_ANALYZED_NO_NORMS:
+                    return false;
+                case Field.Index.ANALYZED:
+                case Field.Index.ANALYZED_NO_NORMS:
+                    return true;
+                default:
+                    throw new ArgumentOutOfRangeException("index", "Invalid value for Field.Index");
+            }
+        }
+
+        /// <summary>True if this Index value disables norms for the field.</summary>
+        public static bool OmitNorms(this Field.Index index)
+        {
+            switch (index)
+            {
+                case Field.Index.ANALYZED:
+                case Field.Index.NOT_ANALYZED:
+                    return false;
+                case Field.Index.NO:
+                case Field.Index.NOT_ANALYZED_NO_NORMS:
+                case Field.Index.ANALYZED_NO_NORMS:
+                    return true;
+                default:
+                    throw new ArgumentOutOfRangeException("index", "Invalid value for Field.Index");
+            }
+        }
+
+        /// <summary>True if this TermVector value stores a term vector at all.</summary>
+        public static bool IsStored(this Field.TermVector tv)
+        {
+            switch(tv)
+            {
+                case Field.TermVector.NO:
+                    return false;
+                case Field.TermVector.YES:
+                case Field.TermVector.WITH_OFFSETS:
+                case Field.TermVector.WITH_POSITIONS:
+                case Field.TermVector.WITH_POSITIONS_OFFSETS:
+                    return true;
+                default:
+                    throw new ArgumentOutOfRangeException("tv", "Invalid value for Field.TermVector");
+            }
+        }
+
+        /// <summary>True if this TermVector value includes token position information.</summary>
+        public static bool WithPositions(this Field.TermVector tv)
+        {
+            switch (tv)
+            {
+                case Field.TermVector.NO:
+                case Field.TermVector.YES:
+                case Field.TermVector.WITH_OFFSETS:
+                    return false;
+                case Field.TermVector.WITH_POSITIONS:
+                case Field.TermVector.WITH_POSITIONS_OFFSETS:
+                    return true;
+                default:
+                    throw new ArgumentOutOfRangeException("tv", "Invalid value for Field.TermVector");
+            }
+        }
+
+        /// <summary>True if this TermVector value includes character offset information.</summary>
+        public static bool WithOffsets(this Field.TermVector tv)
+        {
+            switch (tv)
+            {
+                case Field.TermVector.NO:
+                case Field.TermVector.YES:
+                case Field.TermVector.WITH_POSITIONS:
+                    return false;
+                case Field.TermVector.WITH_OFFSETS:
+                case Field.TermVector.WITH_POSITIONS_OFFSETS:
+                    return true;
+                default:
+                    throw new ArgumentOutOfRangeException("tv", "Invalid value for Field.TermVector");
+            }
+        }
+
+        /// <summary>Get the Index value equivalent to the given flags, with norms kept.</summary>
+        public static Field.Index ToIndex(bool indexed, bool analyzed)
+        {
+            return ToIndex(indexed, analyzed, false);
+        }
+
+        /// <summary>Get the Index value equivalent to the given indexed/analyzed/omitNorms flags.</summary>
+        public static Field.Index ToIndex(bool indexed, bool analyzed, bool omitNorms)
+        {
+
+            // If it is not indexed nothing else matters
+            if (!indexed)
+            {
+                return Field.Index.NO;
+            }
+
+            // typical, non-expert
+            if (!omitNorms)
+            {
+                if (analyzed)
+                {
+                    return Field.Index.ANALYZED;
+                }
+                return Field.Index.NOT_ANALYZED;
+            }
+
+            // Expert: Norms omitted
+            if (analyzed)
+            {
+                return Field.Index.ANALYZED_NO_NORMS;
+            }
+            return Field.Index.NOT_ANALYZED_NO_NORMS;
+        }
+
+        /// <summary>
+        /// Get the best representation of a TermVector given the flags.
+        /// </summary>
+        public static Field.TermVector ToTermVector(bool stored, bool withOffsets, bool withPositions)
+        {
+            // If it is not stored, nothing else matters.
+            if (!stored)
+            {
+                return Field.TermVector.NO;
+            }
+
+            if (withOffsets)
+            {
+                if (withPositions)
+                {
+                    return Field.TermVector.WITH_POSITIONS_OFFSETS;
+                }
+                return Field.TermVector.WITH_OFFSETS;
+            }
+
+            if (withPositions)
+            {
+                return Field.TermVector.WITH_POSITIONS;
+            }
+            return Field.TermVector.YES;
+        }
+    }
 }
\ No newline at end of file