You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucenenet.apache.org by cc...@apache.org on 2011/11/09 22:03:52 UTC
[Lucene.Net] svn commit: r1199962 [2/14] - in
/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk: src/core/
src/core/Analysis/ src/core/Analysis/Standard/ src/core/Document/
src/core/Index/ src/core/QueryParser/ src/core/Search/
src/core/Search/Function/ src/cor...
Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Document/Field.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Document/Field.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Document/Field.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Document/Field.cs Wed Nov 9 21:03:47 2011
@@ -24,625 +24,645 @@ using StringHelper = Lucene.Net.Util.Str
namespace Lucene.Net.Documents
{
-
- /// <summary>A field is a section of a Document. Each field has two parts, a name and a
- /// value. Values may be free text, provided as a String or as a Reader, or they
- /// may be atomic keywords, which are not further processed. Such keywords may
- /// be used to represent dates, urls, etc. Fields are optionally stored in the
- /// index, so that they may be returned with hits on the document.
- /// </summary>
-
- [Serializable]
- public sealed class Field:AbstractField, Fieldable
- {
-
- /// <summary>Specifies whether and how a field should be stored. </summary>
- [Serializable]
- public sealed class Store:Parameter
- {
-
- internal Store(System.String name):base(name)
- {
- }
-
- /// <summary>Store the original field value in the index in a compressed form. This is
- /// useful for long documents and for binary valued fields.
- /// </summary>
- /// <deprecated> Please use <see cref="CompressionTools" /> instead.
- /// For string fields that were previously indexed and stored using compression,
- /// the new way to achieve this is: First add the field indexed-only (no store)
- /// and additionally using the same field name as a binary, stored field
- /// with <see cref="CompressionTools.CompressString(string)" />.
- /// </deprecated>
- public static readonly Store COMPRESS = new Store("COMPRESS");
-
- /// <summary>Store the original field value in the index. This is useful for short texts
- /// like a document's title which should be displayed with the results. The
- /// value is stored in its original form, i.e. no analyzer is used before it is
- /// stored.
- /// </summary>
- public static readonly Store YES = new Store("YES");
-
- /// <summary>Do not store the field value in the index. </summary>
- public static readonly Store NO = new Store("NO");
- }
-
- /// <summary>Specifies whether and how a field should be indexed. </summary>
- [Serializable]
- public sealed class Index:Parameter
- {
-
- internal Index(System.String name):base(name)
- {
- }
-
- /// <summary>Do not index the field value. This field can thus not be searched,
- /// but one can still access its contents provided it is
- /// <see cref="Field.Store">stored</see>.
- /// </summary>
- public static readonly Index NO = new Index("NO");
-
- /// <summary>Index the tokens produced by running the field's
- /// value through an Analyzer. This is useful for
- /// common text.
- /// </summary>
- public static readonly Index ANALYZED = new Index("ANALYZED");
-
- /// <deprecated> this has been renamed to <see cref="ANALYZED" />
- /// </deprecated>
- [Obsolete("this has been renamed to ANALYZED")]
- public static readonly Index TOKENIZED;
-
- /// <summary>Index the field's value without using an Analyzer, so it can be searched.
- /// As no analyzer is used the value will be stored as a single term. This is
- /// useful for unique Ids like product numbers.
- /// </summary>
- public static readonly Index NOT_ANALYZED = new Index("NOT_ANALYZED");
-
- /// <deprecated> This has been renamed to <see cref="NOT_ANALYZED" />
- /// </deprecated>
- [Obsolete("This has been renamed to NOT_ANALYZED")]
- public static readonly Index UN_TOKENIZED;
-
- /// <summary>Expert: Index the field's value without an Analyzer,
- /// and also disable the storing of norms. Note that you
- /// can also separately enable/disable norms by calling
+
+ /// <summary>A field is a section of a Document. Each field has two parts, a name and a
+ /// value. Values may be free text, provided as a String or as a Reader, or they
+ /// may be atomic keywords, which are not further processed. Such keywords may
+ /// be used to represent dates, urls, etc. Fields are optionally stored in the
+ /// index, so that they may be returned with hits on the document.
+ /// </summary>
+
+ [Serializable]
+ public sealed class Field:AbstractField, Fieldable
+ {
+ /// <summary>Specifies whether and how a field should be stored. </summary>
+ public enum Store
+ {
+ /// <summary>Store the original field value in the index. This is useful for short texts
+ /// like a document's title which should be displayed with the results. The
+ /// value is stored in its original form, i.e. no analyzer is used before it is
+ /// stored.
+ /// </summary>
+ YES,
+
+ /// <summary>Do not store the field value in the index. </summary>
+ NO
+ }
+
+ /// <summary>Specifies whether and how a field should be indexed. </summary>
+
+ public enum Index
+ {
+ /// <summary>Do not index the field value. This field can thus not be searched,
+ /// but one can still access its contents provided it is
+ /// <see cref="Field.Store">stored</see>.
+ /// </summary>
+ NO,
+
+ /// <summary>Index the tokens produced by running the field's
+ /// value through an Analyzer. This is useful for
+ /// common text.
+ /// </summary>
+ ANALYZED,
+
+ /// <summary>Index the field's value without using an Analyzer, so it can be searched.
+ /// As no analyzer is used the value will be stored as a single term. This is
+ /// useful for unique Ids like product numbers.
+ /// </summary>
+ NOT_ANALYZED,
+
+ /// <summary>Expert: Index the field's value without an Analyzer,
+ /// and also disable the storing of norms. Note that you
+ /// can also separately enable/disable norms by calling
/// <see cref="AbstractField.SetOmitNorms" />. No norms means that
- /// index-time field and document boosting and field
- /// length normalization are disabled. The benefit is
- /// less memory usage as norms take up one byte of RAM
- /// per indexed field for every document in the index,
- /// during searching. Note that once you index a given
- /// field <i>with</i> norms enabled, disabling norms will
- /// have no effect. In other words, for this to have the
- /// above described effect on a field, all instances of
- /// that field must be indexed with NOT_ANALYZED_NO_NORMS
- /// from the beginning.
- /// </summary>
- public static readonly Index NOT_ANALYZED_NO_NORMS = new Index("NOT_ANALYZED_NO_NORMS");
-
- /// <deprecated> This has been renamed to
- /// <see cref="NOT_ANALYZED_NO_NORMS" />
- /// </deprecated>
- [Obsolete("This has been renamed to NOT_ANALYZED_NO_NORMS")]
- public static readonly Index NO_NORMS;
-
- /// <summary>Expert: Index the tokens produced by running the
- /// field's value through an Analyzer, and also
- /// separately disable the storing of norms. See
- /// <see cref="NOT_ANALYZED_NO_NORMS" /> for what norms are
- /// and why you may want to disable them.
- /// </summary>
- public static readonly Index ANALYZED_NO_NORMS = new Index("ANALYZED_NO_NORMS");
- static Index()
- {
- TOKENIZED = ANALYZED;
- UN_TOKENIZED = NOT_ANALYZED;
- NO_NORMS = NOT_ANALYZED_NO_NORMS;
- }
- }
-
- /// <summary>Specifies whether and how a field should have term vectors. </summary>
- [Serializable]
- public sealed class TermVector:Parameter
- {
-
- internal TermVector(System.String name):base(name)
- {
- }
-
- /// <summary>Do not store term vectors. </summary>
- public static readonly TermVector NO = new TermVector("NO");
-
- /// <summary>Store the term vectors of each document. A term vector is a list
- /// of the document's terms and their number of occurrences in that document.
- /// </summary>
- public static readonly TermVector YES = new TermVector("YES");
-
- /// <summary> Store the term vector + token position information
- ///
- /// </summary>
- /// <seealso cref="YES">
- /// </seealso>
- public static readonly TermVector WITH_POSITIONS = new TermVector("WITH_POSITIONS");
-
- /// <summary> Store the term vector + Token offset information
- ///
- /// </summary>
- /// <seealso cref="YES">
- /// </seealso>
- public static readonly TermVector WITH_OFFSETS = new TermVector("WITH_OFFSETS");
-
- /// <summary> Store the term vector + Token position and offset information
- ///
- /// </summary>
- /// <seealso cref="YES">
- /// </seealso>
- /// <seealso cref="WITH_POSITIONS">
- /// </seealso>
- /// <seealso cref="WITH_OFFSETS">
- /// </seealso>
- public static readonly TermVector WITH_POSITIONS_OFFSETS = new TermVector("WITH_POSITIONS_OFFSETS");
- }
-
-
- /// <summary>The value of the field as a String, or null. If null, the Reader value or
- /// binary value is used. Exactly one of stringValue(),
- /// readerValue(), and getBinaryValue() must be set.
- /// </summary>
- public override System.String StringValue()
- {
- return fieldsData is System.String?(System.String) fieldsData:null;
- }
-
- /// <summary>The value of the field as a Reader, or null. If null, the String value or
- /// binary value is used. Exactly one of stringValue(),
- /// readerValue(), and getBinaryValue() must be set.
- /// </summary>
- public override System.IO.TextReader ReaderValue()
- {
- return fieldsData is System.IO.TextReader?(System.IO.TextReader) fieldsData:null;
- }
-
- /// <summary>The value of the field in Binary, or null. If null, the Reader value,
- /// or String value is used. Exactly one of stringValue(),
- /// readerValue(), and getBinaryValue() must be set.
- /// </summary>
- /// <deprecated> This method must allocate a new byte[] if
- /// the <see cref="AbstractField.GetBinaryOffset()" /> is non-zero
- /// or <see cref="AbstractField.GetBinaryLength()" /> is not the
- /// full length of the byte[]. Please use <see cref="AbstractField.GetBinaryValue()" />
- /// instead, which simply
- /// returns the byte[].
- /// </deprecated>
- [Obsolete("This method must allocate a new byte[] if the AbstractField.GetBinaryOffset() is non-zero or AbstractField.GetBinaryLength() is not the full length of the byte[]. Please use AbstractField.GetBinaryValue() instead, which simply returns the byte[].")]
- public override byte[] BinaryValue()
- {
- if (!isBinary)
- return null;
- byte[] data = (byte[]) fieldsData;
- if (binaryOffset == 0 && data.Length == binaryLength)
- return data; //Optimization
-
- byte[] ret = new byte[binaryLength];
- Array.Copy(data, binaryOffset, ret, 0, binaryLength);
- return ret;
- }
-
- /// <summary>The TokesStream for this field to be used when indexing, or null. If null, the Reader value
- /// or String value is analyzed to produce the indexed tokens.
- /// </summary>
- public override TokenStream TokenStreamValue()
- {
- return tokenStream;
- }
-
-
- /// <summary><p/>Expert: change the value of this field. This can
- /// be used during indexing to re-use a single Field
- /// instance to improve indexing speed by avoiding GC cost
- /// of new'ing and reclaiming Field instances. Typically
- /// a single <see cref="Document" /> instance is re-used as
- /// well. This helps most on small documents.<p/>
- ///
- /// <p/>Each Field instance should only be used once
- /// within a single <see cref="Document" /> instance. See <a
- /// href="http://wiki.apache.org/lucene-java/ImproveIndexingSpeed">ImproveIndexingSpeed</a>
- /// for details.<p/>
- /// </summary>
- public void SetValue(System.String value_Renamed)
- {
- if (isBinary)
- {
- throw new System.ArgumentException("cannot set a String value on a binary field");
- }
- fieldsData = value_Renamed;
- }
-
- /// <summary>Expert: change the value of this field. See <a href="#setValue(java.lang.String)">setValue(String)</a>. </summary>
- public void SetValue(System.IO.TextReader value_Renamed)
- {
- if (isBinary)
- {
- throw new System.ArgumentException("cannot set a Reader value on a binary field");
- }
- if (isStored)
- {
- throw new System.ArgumentException("cannot set a Reader value on a stored field");
- }
- fieldsData = value_Renamed;
- }
-
- /// <summary>Expert: change the value of this field. See <a href="#setValue(java.lang.String)">setValue(String)</a>. </summary>
- public void SetValue(byte[] value_Renamed)
- {
- if (!isBinary)
- {
- throw new System.ArgumentException("cannot set a byte[] value on a non-binary field");
- }
- fieldsData = value_Renamed;
- binaryLength = value_Renamed.Length;
- binaryOffset = 0;
- }
-
- /// <summary>Expert: change the value of this field. See <a href="#setValue(java.lang.String)">setValue(String)</a>. </summary>
- public void SetValue(byte[] value_Renamed, int offset, int length)
- {
- if (!isBinary)
- {
- throw new System.ArgumentException("cannot set a byte[] value on a non-binary field");
- }
- fieldsData = value_Renamed;
- binaryLength = length;
- binaryOffset = offset;
- }
-
-
- /// <summary>Expert: change the value of this field. See <a href="#setValue(java.lang.String)">setValue(String)</a>.</summary>
- /// <deprecated> use <see cref="SetTokenStream" />
- /// </deprecated>
- [Obsolete("use SetTokenStream ")]
- public void SetValue(TokenStream value_Renamed)
- {
- if (isBinary)
- {
- throw new System.ArgumentException("cannot set a TokenStream value on a binary field");
- }
- if (isStored)
- {
- throw new System.ArgumentException("cannot set a TokenStream value on a stored field");
- }
- fieldsData = null;
- tokenStream = value_Renamed;
- }
-
- /// <summary>Expert: sets the token stream to be used for indexing and causes isIndexed() and isTokenized() to return true.
- /// May be combined with stored values from stringValue() or binaryValue()
- /// </summary>
- public void SetTokenStream(TokenStream tokenStream)
- {
- this.isIndexed = true;
- this.isTokenized = true;
- this.tokenStream = tokenStream;
- }
-
- /// <summary> Create a field by specifying its name, value and how it will
- /// be saved in the index. Term vectors will not be stored in the index.
- ///
- /// </summary>
- /// <param name="name">The name of the field
- /// </param>
- /// <param name="value_Renamed">The string to process
- /// </param>
- /// <param name="store">Whether <c>value</c> should be stored in the index
- /// </param>
- /// <param name="index">Whether the field should be indexed, and if so, if it should
- /// be tokenized before indexing
- /// </param>
- /// <throws> NullPointerException if name or value is <c>null</c> </throws>
- /// <throws> IllegalArgumentException if the field is neither stored nor indexed </throws>
- public Field(System.String name, System.String value_Renamed, Store store, Index index):this(name, value_Renamed, store, index, TermVector.NO)
- {
- }
-
- /// <summary> Create a field by specifying its name, value and how it will
- /// be saved in the index.
- ///
- /// </summary>
- /// <param name="name">The name of the field
- /// </param>
- /// <param name="value_Renamed">The string to process
- /// </param>
- /// <param name="store">Whether <c>value</c> should be stored in the index
- /// </param>
- /// <param name="index">Whether the field should be indexed, and if so, if it should
- /// be tokenized before indexing
- /// </param>
- /// <param name="termVector">Whether term vector should be stored
- /// </param>
- /// <throws> NullPointerException if name or value is <c>null</c> </throws>
- /// <throws> IllegalArgumentException in any of the following situations: </throws>
- /// <summary> <list>
- /// <item>the field is neither stored nor indexed</item>
- /// <item>the field is not indexed but termVector is <c>TermVector.YES</c></item>
- /// </list>
- /// </summary>
- public Field(System.String name, System.String value_Renamed, Store store, Index index, TermVector termVector):this(name, true, value_Renamed, store, index, termVector)
- {
- }
-
- /// <summary> Create a field by specifying its name, value and how it will
- /// be saved in the index.
- ///
- /// </summary>
- /// <param name="name">The name of the field
- /// </param>
- /// <param name="internName">Whether to .intern() name or not
- /// </param>
- /// <param name="value_Renamed">The string to process
- /// </param>
- /// <param name="store">Whether <c>value</c> should be stored in the index
- /// </param>
- /// <param name="index">Whether the field should be indexed, and if so, if it should
- /// be tokenized before indexing
- /// </param>
- /// <param name="termVector">Whether term vector should be stored
- /// </param>
- /// <throws> NullPointerException if name or value is <c>null</c> </throws>
- /// <throws> IllegalArgumentException in any of the following situations: </throws>
- /// <summary> <list>
- /// <item>the field is neither stored nor indexed</item>
- /// <item>the field is not indexed but termVector is <c>TermVector.YES</c></item>
- /// </list>
- /// </summary>
- public Field(System.String name, bool internName, System.String value_Renamed, Store store, Index index, TermVector termVector)
- {
- if (name == null)
- throw new System.NullReferenceException("name cannot be null");
- if (value_Renamed == null)
- throw new System.NullReferenceException("value cannot be null");
- if (name.Length == 0 && value_Renamed.Length == 0)
- throw new System.ArgumentException("name and value cannot both be empty");
- if (index == Index.NO && store == Store.NO)
- throw new System.ArgumentException("it doesn't make sense to have a field that " + "is neither indexed nor stored");
- if (index == Index.NO && termVector != TermVector.NO)
- throw new System.ArgumentException("cannot store term vector information " + "for a field that is not indexed");
-
- if (internName)
- // field names are optionally interned
- name = StringHelper.Intern(name);
-
- this.name = name;
-
- this.fieldsData = value_Renamed;
-
- if (store == Store.YES)
- {
- this.isStored = true;
- this.isCompressed = false;
- }
- else if (store == Store.COMPRESS)
- {
- this.isStored = true;
- this.isCompressed = true;
- }
- else if (store == Store.NO)
- {
- this.isStored = false;
- this.isCompressed = false;
- }
- else
- {
- throw new System.ArgumentException("unknown store parameter " + store);
- }
-
- if (index == Index.NO)
- {
- this.isIndexed = false;
- this.isTokenized = false;
- this.omitTermFreqAndPositions = false;
- this.omitNorms = true;
- }
- else if (index == Index.ANALYZED)
- {
- this.isIndexed = true;
- this.isTokenized = true;
- }
- else if (index == Index.NOT_ANALYZED)
- {
- this.isIndexed = true;
- this.isTokenized = false;
- }
- else if (index == Index.NOT_ANALYZED_NO_NORMS)
- {
- this.isIndexed = true;
- this.isTokenized = false;
- this.omitNorms = true;
- }
- else if (index == Index.ANALYZED_NO_NORMS)
- {
- this.isIndexed = true;
- this.isTokenized = true;
- this.omitNorms = true;
- }
- else
- {
- throw new System.ArgumentException("unknown index parameter " + index);
- }
-
- this.isBinary = false;
-
- SetStoreTermVector(termVector);
- }
-
- /// <summary> Create a tokenized and indexed field that is not stored. Term vectors will
- /// not be stored. The Reader is read only when the Document is added to the index,
- /// i.e. you may not close the Reader until <see cref="IndexWriter.AddDocument(Document)" />
- /// has been called.
- ///
- /// </summary>
- /// <param name="name">The name of the field
- /// </param>
- /// <param name="reader">The reader with the content
- /// </param>
- /// <throws> NullPointerException if name or reader is <c>null</c> </throws>
- public Field(System.String name, System.IO.TextReader reader):this(name, reader, TermVector.NO)
- {
- }
-
- /// <summary> Create a tokenized and indexed field that is not stored, optionally with
- /// storing term vectors. The Reader is read only when the Document is added to the index,
- /// i.e. you may not close the Reader until <see cref="IndexWriter.AddDocument(Document)" />
- /// has been called.
- ///
- /// </summary>
- /// <param name="name">The name of the field
- /// </param>
- /// <param name="reader">The reader with the content
- /// </param>
- /// <param name="termVector">Whether term vector should be stored
- /// </param>
- /// <throws> NullPointerException if name or reader is <c>null</c> </throws>
- public Field(System.String name, System.IO.TextReader reader, TermVector termVector)
- {
- if (name == null)
- throw new System.NullReferenceException("name cannot be null");
- if (reader == null)
- throw new System.NullReferenceException("reader cannot be null");
-
- this.name = StringHelper.Intern(name); // field names are interned
- this.fieldsData = reader;
-
- this.isStored = false;
- this.isCompressed = false;
-
- this.isIndexed = true;
- this.isTokenized = true;
-
- this.isBinary = false;
-
- SetStoreTermVector(termVector);
- }
-
- /// <summary> Create a tokenized and indexed field that is not stored. Term vectors will
- /// not be stored. This is useful for pre-analyzed fields.
- /// The TokenStream is read only when the Document is added to the index,
- /// i.e. you may not close the TokenStream until <see cref="IndexWriter.AddDocument(Document)" />
- /// has been called.
- ///
- /// </summary>
- /// <param name="name">The name of the field
- /// </param>
- /// <param name="tokenStream">The TokenStream with the content
- /// </param>
- /// <throws> NullPointerException if name or tokenStream is <c>null</c> </throws>
- public Field(System.String name, TokenStream tokenStream):this(name, tokenStream, TermVector.NO)
- {
- }
-
- /// <summary> Create a tokenized and indexed field that is not stored, optionally with
- /// storing term vectors. This is useful for pre-analyzed fields.
- /// The TokenStream is read only when the Document is added to the index,
- /// i.e. you may not close the TokenStream until <see cref="IndexWriter.AddDocument(Document)" />
- /// has been called.
- ///
- /// </summary>
- /// <param name="name">The name of the field
- /// </param>
- /// <param name="tokenStream">The TokenStream with the content
- /// </param>
- /// <param name="termVector">Whether term vector should be stored
- /// </param>
- /// <throws> NullPointerException if name or tokenStream is <c>null</c> </throws>
- public Field(System.String name, TokenStream tokenStream, TermVector termVector)
- {
- if (name == null)
- throw new System.NullReferenceException("name cannot be null");
- if (tokenStream == null)
- throw new System.NullReferenceException("tokenStream cannot be null");
-
- this.name = StringHelper.Intern(name); // field names are interned
- this.fieldsData = null;
- this.tokenStream = tokenStream;
-
- this.isStored = false;
- this.isCompressed = false;
-
- this.isIndexed = true;
- this.isTokenized = true;
-
- this.isBinary = false;
-
- SetStoreTermVector(termVector);
- }
-
-
- /// <summary> Create a stored field with binary value. Optionally the value may be compressed.
- ///
- /// </summary>
- /// <param name="name">The name of the field
- /// </param>
- /// <param name="value_Renamed">The binary value
- /// </param>
- /// <param name="store">How <c>value</c> should be stored (compressed or not)
- /// </param>
- /// <throws> IllegalArgumentException if store is <c>Store.NO</c> </throws>
- public Field(System.String name, byte[] value_Renamed, Store store):this(name, value_Renamed, 0, value_Renamed.Length, store)
- {
- }
-
- /// <summary> Create a stored field with binary value. Optionally the value may be compressed.
- ///
- /// </summary>
- /// <param name="name">The name of the field
- /// </param>
- /// <param name="value_Renamed">The binary value
- /// </param>
- /// <param name="offset">Starting offset in value where this Field's bytes are
- /// </param>
- /// <param name="length">Number of bytes to use for this Field, starting at offset
- /// </param>
- /// <param name="store">How <c>value</c> should be stored (compressed or not)
- /// </param>
- /// <throws> IllegalArgumentException if store is <c>Store.NO</c> </throws>
- public Field(System.String name, byte[] value_Renamed, int offset, int length, Store store)
- {
-
- if (name == null)
- throw new System.ArgumentException("name cannot be null");
- if (value_Renamed == null)
- throw new System.ArgumentException("value cannot be null");
-
- this.name = StringHelper.Intern(name); // field names are interned
- fieldsData = value_Renamed;
-
- if (store == Store.YES)
- {
- isStored = true;
- isCompressed = false;
- }
- else if (store == Store.COMPRESS)
- {
- isStored = true;
- isCompressed = true;
- }
- else if (store == Store.NO)
- throw new System.ArgumentException("binary values can't be unstored");
- else
- {
- throw new System.ArgumentException("unknown store parameter " + store);
- }
-
- isIndexed = false;
- isTokenized = false;
- omitTermFreqAndPositions = false;
- omitNorms = true;
-
- isBinary = true;
- binaryLength = length;
- binaryOffset = offset;
-
- SetStoreTermVector(TermVector.NO);
- }
- }
+ /// index-time field and document boosting and field
+ /// length normalization are disabled. The benefit is
+ /// less memory usage as norms take up one byte of RAM
+ /// per indexed field for every document in the index,
+ /// during searching. Note that once you index a given
+ /// field <i>with</i> norms enabled, disabling norms will
+ /// have no effect. In other words, for this to have the
+ /// above described effect on a field, all instances of
+ /// that field must be indexed with NOT_ANALYZED_NO_NORMS
+ /// from the beginning.
+ /// </summary>
+ NOT_ANALYZED_NO_NORMS,
+
+ /// <summary>Expert: Index the tokens produced by running the
+ /// field's value through an Analyzer, and also
+ /// separately disable the storing of norms. See
+ /// <see cref="NOT_ANALYZED_NO_NORMS" /> for what norms are
+ /// and why you may want to disable them.
+ /// </summary>
+ ANALYZED_NO_NORMS,
+ }
+
+ /// <summary>Specifies whether and how a field should have term vectors. </summary>
+ public enum TermVector
+ {
+ /// <summary>Do not store term vectors. </summary>
+ NO,
+
+ /// <summary>Store the term vectors of each document. A term vector is a list
+ /// of the document's terms and their number of occurrences in that document.
+ /// </summary>
+ YES,
+
+ /// <summary> Store the term vector + token position information
+ ///
+ /// </summary>
+ /// <seealso cref="YES">
+ /// </seealso>
+ WITH_POSITIONS,
+
+ /// <summary> Store the term vector + Token offset information
+ ///
+ /// </summary>
+ /// <seealso cref="YES">
+ /// </seealso>
+ WITH_OFFSETS,
+
+ /// <summary> Store the term vector + Token position and offset information
+ ///
+ /// </summary>
+ /// <seealso cref="YES">
+ /// </seealso>
+ /// <seealso cref="WITH_POSITIONS">
+ /// </seealso>
+ /// <seealso cref="WITH_OFFSETS">
+ /// </seealso>
+ WITH_POSITIONS_OFFSETS,
+ }
+
+
+ /// <summary>The value of the field as a String, or null. If null, the Reader value or
+ /// binary value is used. Exactly one of stringValue(),
+ /// readerValue(), and getBinaryValue() must be set.
+ /// </summary>
+ public override System.String StringValue()
+ {
+ return fieldsData is System.String?(System.String) fieldsData:null;
+ }
+
+ /// <summary>The value of the field as a Reader, or null. If null, the String value or
+ /// binary value is used. Exactly one of stringValue(),
+ /// readerValue(), and getBinaryValue() must be set.
+ /// </summary>
+ public override System.IO.TextReader ReaderValue()
+ {
+ return fieldsData is System.IO.TextReader?(System.IO.TextReader) fieldsData:null;
+ }
+
+ /// <summary>The TokesStream for this field to be used when indexing, or null. If null, the Reader value
+ /// or String value is analyzed to produce the indexed tokens.
+ /// </summary>
+ public override TokenStream TokenStreamValue()
+ {
+ return tokenStream;
+ }
+
+
+ /// <summary><p/>Expert: change the value of this field. This can
+ /// be used during indexing to re-use a single Field
+ /// instance to improve indexing speed by avoiding GC cost
+ /// of new'ing and reclaiming Field instances. Typically
+ /// a single <see cref="Document" /> instance is re-used as
+ /// well. This helps most on small documents.<p/>
+ ///
+ /// <p/>Each Field instance should only be used once
+ /// within a single <see cref="Document" /> instance. See <a
+ /// href="http://wiki.apache.org/lucene-java/ImproveIndexingSpeed">ImproveIndexingSpeed</a>
+ /// for details.<p/>
+ /// </summary>
+ public void SetValue(System.String value_Renamed)
+ {
+ if (isBinary)
+ {
+ throw new System.ArgumentException("cannot set a String value on a binary field");
+ }
+ fieldsData = value_Renamed;
+ }
+
+ /// <summary>Expert: change the value of this field. See <a href="#setValue(java.lang.String)">setValue(String)</a>. </summary>
+ public void SetValue(System.IO.TextReader value_Renamed)
+ {
+ if (isBinary)
+ {
+ throw new System.ArgumentException("cannot set a Reader value on a binary field");
+ }
+ if (isStored)
+ {
+ throw new System.ArgumentException("cannot set a Reader value on a stored field");
+ }
+ fieldsData = value_Renamed;
+ }
+
+ /// <summary>Expert: change the value of this field. See <a href="#setValue(java.lang.String)">setValue(String)</a>. </summary>
+ public void SetValue(byte[] value_Renamed)
+ {
+ if (!isBinary)
+ {
+ throw new System.ArgumentException("cannot set a byte[] value on a non-binary field");
+ }
+ fieldsData = value_Renamed;
+ binaryLength = value_Renamed.Length;
+ binaryOffset = 0;
+ }
+
+ /// <summary>Expert: change the value of this field. See <a href="#setValue(java.lang.String)">setValue(String)</a>. </summary>
+ public void SetValue(byte[] value_Renamed, int offset, int length)
+ {
+ if (!isBinary)
+ {
+ throw new System.ArgumentException("cannot set a byte[] value on a non-binary field");
+ }
+ fieldsData = value_Renamed;
+ binaryLength = length;
+ binaryOffset = offset;
+ }
+
+ /// <summary>Expert: sets the token stream to be used for indexing and causes isIndexed() and isTokenized() to return true.
+ /// May be combined with stored values from stringValue() or GetBinaryValue()
+ /// </summary>
+ public void SetTokenStream(TokenStream tokenStream)
+ {
+ this.isIndexed = true;
+ this.isTokenized = true;
+ this.tokenStream = tokenStream;
+ }
+
+ /// <summary> Create a field by specifying its name, value and how it will
+ /// be saved in the index. Term vectors will not be stored in the index.
+ ///
+ /// </summary>
+ /// <param name="name">The name of the field
+ /// </param>
+ /// <param name="value_Renamed">The string to process
+ /// </param>
+ /// <param name="store">Whether <c>value</c> should be stored in the index
+ /// </param>
+ /// <param name="index">Whether the field should be indexed, and if so, if it should
+ /// be tokenized before indexing
+ /// </param>
+ /// <throws> NullPointerException if name or value is <c>null</c> </throws>
+ /// <throws> IllegalArgumentException if the field is neither stored nor indexed </throws>
+ public Field(System.String name, System.String value_Renamed, Store store, Index index)
+ : this(name, value_Renamed, store, index, TermVector.NO)
+ {
+ }
+
+ /// <summary> Create a field by specifying its name, value and how it will
+ /// be saved in the index.
+ ///
+ /// </summary>
+ /// <param name="name">The name of the field
+ /// </param>
+ /// <param name="value_Renamed">The string to process
+ /// </param>
+ /// <param name="store">Whether <c>value</c> should be stored in the index
+ /// </param>
+ /// <param name="index">Whether the field should be indexed, and if so, if it should
+ /// be tokenized before indexing
+ /// </param>
+ /// <param name="termVector">Whether term vector should be stored
+ /// </param>
+ /// <throws> NullPointerException if name or value is <c>null</c> </throws>
+ /// <throws> IllegalArgumentException in any of the following situations: </throws>
+ /// <summary> <list>
+ /// <item>the field is neither stored nor indexed</item>
+ /// <item>the field is not indexed but termVector is <c>TermVector.YES</c></item>
+ /// </list>
+ /// </summary>
+ public Field(System.String name, System.String value_Renamed, Store store, Index index, TermVector termVector)
+ : this(name, true, value_Renamed, store, index, termVector)
+ {
+ }
+
+ /// <summary> Create a field by specifying its name, value and how it will
+ /// be saved in the index.
+ ///
+ /// </summary>
+ /// <param name="name">The name of the field
+ /// </param>
+ /// <param name="internName">Whether to .intern() name or not
+ /// </param>
+ /// <param name="value_Renamed">The string to process
+ /// </param>
+ /// <param name="store">Whether <c>value</c> should be stored in the index
+ /// </param>
+ /// <param name="index">Whether the field should be indexed, and if so, if it should
+ /// be tokenized before indexing
+ /// </param>
+ /// <param name="termVector">Whether term vector should be stored
+ /// </param>
+ /// <throws> NullReferenceException if name or value is <c>null</c> </throws>
+ /// <throws> IllegalArgumentException in any of the following situations: </throws>
+ /// <summary> <list>
+ /// <item>the field is neither stored nor indexed</item>
+ /// <item>the field is not indexed but termVector is <c>TermVector.YES</c></item>
+ /// </list>
+ /// </summary>
+ public Field(System.String name, bool internName, System.String value_Renamed, Store store, Index index, TermVector termVector)
+ {
+ if (name == null)
+ throw new System.NullReferenceException("name cannot be null");
+ if (value_Renamed == null)
+ throw new System.NullReferenceException("value cannot be null");
+ if (name.Length == 0 && value_Renamed.Length == 0)
+ throw new System.ArgumentException("name and value cannot both be empty");
+ if (index == Index.NO && store == Store.NO)
+ throw new System.ArgumentException("it doesn't make sense to have a field that " + "is neither indexed nor stored");
+ if (index == Index.NO && termVector != TermVector.NO)
+ throw new System.ArgumentException("cannot store term vector information " + "for a field that is not indexed");
+
+ if (internName)
+ // field names are optionally interned
+ name = StringHelper.Intern(name);
+
+ this.name = name;
+
+ this.fieldsData = value_Renamed;
+
+ this.isStored = store.IsStored();
+
+ this.isIndexed = index.IsIndexed();
+ this.isTokenized = index.IsAnalyzed();
+ this.omitNorms = index.OmitNorms();
+
+ if (index == Index.NO)
+ {
+ this.omitTermFreqAndPositions = false;
+ }
+
+ this.isBinary = false;
+
+ SetStoreTermVector(termVector);
+ }
+
+ /// <summary> Create a tokenized and indexed field that is not stored. Term vectors will
+ /// not be stored. The Reader is read only when the Document is added to the index,
+ /// i.e. you may not close the Reader until <see cref="IndexWriter.AddDocument(Document)" />
+ /// has been called.
+ ///
+ /// </summary>
+ /// <param name="name">The name of the field
+ /// </param>
+ /// <param name="reader">The reader with the content
+ /// </param>
+ /// <throws> NullReferenceException if name or reader is <c>null</c> </throws>
+ public Field(System.String name, System.IO.TextReader reader):this(name, reader, TermVector.NO)
+ {
+ }
+
+ /// <summary> Create a tokenized and indexed field that is not stored, optionally with
+ /// storing term vectors. The Reader is read only when the Document is added to the index,
+ /// i.e. you may not close the Reader until <see cref="IndexWriter.AddDocument(Document)" />
+ /// has been called.
+ ///
+ /// </summary>
+ /// <param name="name">The name of the field
+ /// </param>
+ /// <param name="reader">The reader with the content
+ /// </param>
+ /// <param name="termVector">Whether term vector should be stored
+ /// </param>
+ /// <throws> NullReferenceException if name or reader is <c>null</c> </throws>
+ public Field(System.String name, System.IO.TextReader reader, TermVector termVector)
+ {
+ if (name == null)
+ throw new System.NullReferenceException("name cannot be null");
+ if (reader == null)
+ throw new System.NullReferenceException("reader cannot be null");
+
+ this.name = StringHelper.Intern(name); // field names are interned
+ this.fieldsData = reader;
+
+ this.isStored = false;
+
+ this.isIndexed = true;
+ this.isTokenized = true;
+
+ this.isBinary = false;
+
+ SetStoreTermVector(termVector);
+ }
+
+ /// <summary> Create a tokenized and indexed field that is not stored. Term vectors will
+ /// not be stored. This is useful for pre-analyzed fields.
+ /// The TokenStream is read only when the Document is added to the index,
+ /// i.e. you may not close the TokenStream until <see cref="IndexWriter.AddDocument(Document)" />
+ /// has been called.
+ ///
+ /// </summary>
+ /// <param name="name">The name of the field
+ /// </param>
+ /// <param name="tokenStream">The TokenStream with the content
+ /// </param>
+ /// <throws> NullReferenceException if name or tokenStream is <c>null</c> </throws>
+ public Field(System.String name, TokenStream tokenStream):this(name, tokenStream, TermVector.NO)
+ {
+ }
+
+ /// <summary> Create a tokenized and indexed field that is not stored, optionally with
+ /// storing term vectors. This is useful for pre-analyzed fields.
+ /// The TokenStream is read only when the Document is added to the index,
+ /// i.e. you may not close the TokenStream until <see cref="IndexWriter.AddDocument(Document)" />
+ /// has been called.
+ ///
+ /// </summary>
+ /// <param name="name">The name of the field
+ /// </param>
+ /// <param name="tokenStream">The TokenStream with the content
+ /// </param>
+ /// <param name="termVector">Whether term vector should be stored
+ /// </param>
+ /// <throws> NullReferenceException if name or tokenStream is <c>null</c> </throws>
+ public Field(System.String name, TokenStream tokenStream, TermVector termVector)
+ {
+ if (name == null)
+ throw new System.NullReferenceException("name cannot be null");
+ if (tokenStream == null)
+ throw new System.NullReferenceException("tokenStream cannot be null");
+
+ this.name = StringHelper.Intern(name); // field names are interned
+ this.fieldsData = null;
+ this.tokenStream = tokenStream;
+
+ this.isStored = false;
+
+ this.isIndexed = true;
+ this.isTokenized = true;
+
+ this.isBinary = false;
+
+ SetStoreTermVector(termVector);
+ }
+
+
+ /// <summary> Create a stored field with binary value. Optionally the value may be compressed.
+ ///
+ /// </summary>
+ /// <param name="name">The name of the field
+ /// </param>
+ /// <param name="value_Renamed">The binary value
+ /// </param>
+ /// <param name="store">How <c>value</c> should be stored (compressed or not)
+ /// </param>
+ /// <throws> IllegalArgumentException if store is <c>Store.NO</c> </throws>
+ public Field(System.String name, byte[] value_Renamed, Store store):this(name, value_Renamed, 0, value_Renamed.Length, store)
+ {
+ }
+
+ /// <summary> Create a stored field with binary value. Optionally the value may be compressed.
+ ///
+ /// </summary>
+ /// <param name="name">The name of the field
+ /// </param>
+ /// <param name="value_Renamed">The binary value
+ /// </param>
+ /// <param name="offset">Starting offset in value where this Field's bytes are
+ /// </param>
+ /// <param name="length">Number of bytes to use for this Field, starting at offset
+ /// </param>
+ /// <param name="store">How <c>value</c> should be stored (compressed or not)
+ /// </param>
+ /// <throws> IllegalArgumentException if store is <c>Store.NO</c> </throws>
+ public Field(System.String name, byte[] value_Renamed, int offset, int length, Store store)
+ {
+
+ if (name == null)
+ throw new System.ArgumentException("name cannot be null");
+ if (value_Renamed == null)
+ throw new System.ArgumentException("value cannot be null");
+
+ this.name = StringHelper.Intern(name); // field names are interned
+ fieldsData = value_Renamed;
+
+ if (store == Store.NO)
+ throw new System.ArgumentException("binary values can't be unstored");
+
+ isStored = store.IsStored();
+ isIndexed = false;
+ isTokenized = false;
+ omitTermFreqAndPositions = false;
+ omitNorms = true;
+
+ isBinary = true;
+ binaryLength = length;
+ binaryOffset = offset;
+
+ SetStoreTermVector(TermVector.NO);
+ }
+ }
+
+ public static class FieldExtensions
+ {
+ public static bool IsStored(this Field.Store store)
+ {
+ switch(store)
+ {
+ case Field.Store.YES:
+ return true;
+ case Field.Store.NO:
+ return false;
+ default:
+ throw new ArgumentOutOfRangeException("store", "Invalid value for Field.Store");
+ }
+ }
+
+ public static bool IsIndexed(this Field.Index index)
+ {
+ switch(index)
+ {
+ case Field.Index.NO:
+ return false;
+ case Field.Index.ANALYZED:
+ case Field.Index.NOT_ANALYZED:
+ case Field.Index.NOT_ANALYZED_NO_NORMS:
+ case Field.Index.ANALYZED_NO_NORMS:
+ return true;
+ default:
+ throw new ArgumentOutOfRangeException("index", "Invalid value for Field.Index");
+ }
+ }
+
+ public static bool IsAnalyzed(this Field.Index index)
+ {
+ switch (index)
+ {
+ case Field.Index.NO:
+ case Field.Index.NOT_ANALYZED:
+ case Field.Index.NOT_ANALYZED_NO_NORMS:
+ return false;
+ case Field.Index.ANALYZED:
+ case Field.Index.ANALYZED_NO_NORMS:
+ return true;
+ default:
+ throw new ArgumentOutOfRangeException("index", "Invalid value for Field.Index");
+ }
+ }
+
+ public static bool OmitNorms(this Field.Index index)
+ {
+ switch (index)
+ {
+ case Field.Index.ANALYZED:
+ case Field.Index.NOT_ANALYZED:
+ return false;
+ case Field.Index.NO:
+ case Field.Index.NOT_ANALYZED_NO_NORMS:
+ case Field.Index.ANALYZED_NO_NORMS:
+ return true;
+ default:
+ throw new ArgumentOutOfRangeException("index", "Invalid value for Field.Index");
+ }
+ }
+
+ public static bool IsStored(this Field.TermVector tv)
+ {
+ switch(tv)
+ {
+ case Field.TermVector.NO:
+ return false;
+ case Field.TermVector.YES:
+ case Field.TermVector.WITH_OFFSETS:
+ case Field.TermVector.WITH_POSITIONS:
+ case Field.TermVector.WITH_POSITIONS_OFFSETS:
+ return true;
+ default:
+ throw new ArgumentOutOfRangeException("tv", "Invalid value for Field.TermVector");
+ }
+ }
+
+ public static bool WithPositions(this Field.TermVector tv)
+ {
+ switch (tv)
+ {
+ case Field.TermVector.NO:
+ case Field.TermVector.YES:
+ case Field.TermVector.WITH_OFFSETS:
+ return false;
+ case Field.TermVector.WITH_POSITIONS:
+ case Field.TermVector.WITH_POSITIONS_OFFSETS:
+ return true;
+ default:
+ throw new ArgumentOutOfRangeException("tv", "Invalid value for Field.TermVector");
+ }
+ }
+
+ public static bool WithOffsets(this Field.TermVector tv)
+ {
+ switch (tv)
+ {
+ case Field.TermVector.NO:
+ case Field.TermVector.YES:
+ case Field.TermVector.WITH_POSITIONS:
+ return false;
+ case Field.TermVector.WITH_OFFSETS:
+ case Field.TermVector.WITH_POSITIONS_OFFSETS:
+ return true;
+ default:
+ throw new ArgumentOutOfRangeException("tv", "Invalid value for Field.TermVector");
+ }
+ }
+
+ public static Field.Index ToIndex(bool indexed, bool analyzed)
+ {
+ return ToIndex(indexed, analyzed, false);
+ }
+
+ public static Field.Index ToIndex(bool indexed, bool analyzed, bool omitNorms)
+ {
+
+ // If it is not indexed nothing else matters
+ if (!indexed)
+ {
+ return Field.Index.NO;
+ }
+
+ // typical, non-expert
+ if (!omitNorms)
+ {
+ if (analyzed)
+ {
+ return Field.Index.ANALYZED;
+ }
+ return Field.Index.NOT_ANALYZED;
+ }
+
+ // Expert: Norms omitted
+ if (analyzed)
+ {
+ return Field.Index.ANALYZED_NO_NORMS;
+ }
+ return Field.Index.NOT_ANALYZED_NO_NORMS;
+ }
+
+ /// <summary>
+ /// Get the best representation of a TermVector given the flags.
+ /// </summary>
+ public static Field.TermVector ToTermVector(bool stored, bool withOffsets, bool withPositions)
+ {
+ // If it is not stored, nothing else matters.
+ if (!stored)
+ {
+ return Field.TermVector.NO;
+ }
+
+ if (withOffsets)
+ {
+ if (withPositions)
+ {
+ return Field.TermVector.WITH_POSITIONS_OFFSETS;
+ }
+ return Field.TermVector.WITH_OFFSETS;
+ }
+
+ if (withPositions)
+ {
+ return Field.TermVector.WITH_POSITIONS;
+ }
+ return Field.TermVector.YES;
+ }
+ }
}
\ No newline at end of file
Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Document/Fieldable.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Document/Fieldable.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Document/Fieldable.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Document/Fieldable.cs Wed Nov 9 21:03:47 2011
@@ -22,7 +22,6 @@ using FieldInvertState = Lucene.Net.Inde
namespace Lucene.Net.Documents
{
-
/// <summary> Synonymous with <see cref="Field" />.
///
/// <p/><bold>WARNING</bold>: This interface may change within minor versions, despite Lucene's backward compatibility requirements.
@@ -65,7 +64,7 @@ namespace Lucene.Net.Documents
///
/// <p/>Note: this value is not stored directly with the document in the index.
/// Documents returned from <see cref="Lucene.Net.Index.IndexReader.Document(int)" /> and
- /// <see cref="Lucene.Net.Search.Hits.Doc(int)" /> may thus not have the same value present as when
+ /// <see cref="Lucene.Net.Search.Searcher.Doc(int)" /> may thus not have the same value present as when
/// this field was indexed.
///
/// </summary>
@@ -81,7 +80,7 @@ namespace Lucene.Net.Documents
/// <summary>The value of the field as a String, or null.
/// <p/>
/// For indexing, if isStored()==true, the stringValue() will be used as the stored field value
- /// unless isBinary()==true, in which case binaryValue() will be used.
+ /// unless isBinary()==true, in which case GetBinaryValue() will be used.
///
/// If isIndexed()==true and isTokenized()==false, this String value will be indexed as a single token.
/// If isIndexed()==true and isTokenized()==true, then tokenStreamValue() will be used to generate indexed tokens if not null,
@@ -94,11 +93,6 @@ namespace Lucene.Net.Documents
/// </seealso>
System.IO.TextReader ReaderValue();
- /// <summary>The value of the field in Binary, or null.</summary>
- /// <seealso cref="StringValue()">
- /// </seealso>
- byte[] BinaryValue();
-
/// <summary>The TokenStream for this field to be used when indexing, or null.</summary>
/// <seealso cref="StringValue()">
/// </seealso>
@@ -120,9 +114,6 @@ namespace Lucene.Net.Documents
/// </summary>
bool IsTokenized();
- /// <summary>True if the value of the field is stored and compressed within the index </summary>
- bool IsCompressed();
-
/// <summary>True if the term or terms used to index this field are stored as a term
/// vector, available from <see cref="Lucene.Net.Index.IndexReader.GetTermFreqVector(int,String)" />.
/// These methods do not provide access to the original content of the field,
@@ -155,18 +146,8 @@ namespace Lucene.Net.Documents
/// </summary>
void SetOmitNorms(bool omitNorms);
- /// <deprecated> Renamed to <see cref="AbstractField.SetOmitTermFreqAndPositions" />
- /// </deprecated>
- [Obsolete("Renamed to AbstractField.SetOmitTermFreqAndPositions")]
- void SetOmitTf(bool omitTf);
-
- /// <deprecated> Renamed to <see cref="AbstractField.GetOmitTermFreqAndPositions" />
- /// </deprecated>
- [Obsolete("Renamed to AbstractField.GetOmitTermFreqAndPositions")]
- bool GetOmitTf();
-
/// <summary> Indicates whether a Field is Lazy or not. The semantics of Lazy loading are such that if a Field is lazily loaded, retrieving
- /// it's values via <see cref="StringValue()" /> or <see cref="BinaryValue()" /> is only valid as long as the <see cref="Lucene.Net.Index.IndexReader" /> that
+ /// its values via <see cref="StringValue()" /> or <see cref="GetBinaryValue()" /> is only valid as long as the <see cref="Lucene.Net.Index.IndexReader" /> that
/// retrieved the <see cref="Document" /> is still open.
///
/// </summary>
@@ -204,7 +185,7 @@ namespace Lucene.Net.Documents
/// About reuse: if you pass in the result byte[] and it is
/// used, likely the underlying implementation will hold
/// onto this byte[] and return it in future calls to
- /// <see cref="BinaryValue()" /> or <see cref="GetBinaryValue()" />.
+ /// <see cref="GetBinaryValue()" />.
/// So if you subsequently re-use the same byte[] elsewhere
/// it will alter this Fieldable's value.
/// </summary>
@@ -215,5 +196,23 @@ namespace Lucene.Net.Documents
/// <returns> reference to the Field value as byte[].
/// </returns>
byte[] GetBinaryValue(byte[] result);
+
+ /// <seealso cref="SetOmitTermFreqAndPositions"/>
+ bool GetOmitTermFreqAndPositions();
+
+ /// Expert:
+ /// <para>
+ /// If set, omit term freq, positions and payloads from
+ /// postings for this field.
+ /// </para>
+ /// <para>
+ /// <b>NOTE</b>: While this option reduces storage space
+ /// required in the index, it also means any query
+ /// requiring positional information, such as
+ /// <see cref="Lucene.Net.Search.PhraseQuery"/> or
+ /// <see cref="Lucene.Net.Search.Spans.SpanQuery"/>
+ /// subclasses will silently fail to find results.
+ /// </para>
+ void SetOmitTermFreqAndPositions(bool omitTermFreqAndPositions);
}
}
\ No newline at end of file
Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Document/NumericField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Document/NumericField.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Document/NumericField.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Document/NumericField.cs Wed Nov 9 21:03:47 2011
@@ -70,8 +70,7 @@ namespace Lucene.Net.Documents
/// <c>NumericField</c>, use <see cref="NumericRangeQuery" /> or <see cref="NumericRangeFilter" />
///. To sort according to a
/// <c>NumericField</c>, use the normal numeric sort types, eg
- /// <see cref="SortField.INT" /> (note that <see cref="SortField.AUTO" />
- /// will not work with these fields). <c>NumericField</c> values
+ /// <see cref="SortField.INT" />. <c>NumericField</c> values
/// can also be loaded directly from <see cref="FieldCache" />.<p/>
///
/// <p/>By default, a <c>NumericField</c>'s value is not stored but
@@ -218,12 +217,6 @@ namespace Lucene.Net.Documents
}
/// <summary>Returns always <c>null</c> for numeric fields </summary>
- public override byte[] BinaryValue()
- {
- return null;
- }
-
- /// <summary>Returns always <c>null</c> for numeric fields </summary>
public override byte[] GetBinaryValue(byte[] result)
{
return null;
@@ -256,7 +249,7 @@ namespace Lucene.Net.Documents
public NumericField SetLongValue(long value_Renamed)
{
tokenStream.SetLongValue(value_Renamed);
- fieldsData = (long) value_Renamed;
+ fieldsData = value_Renamed;
return this;
}
@@ -269,7 +262,7 @@ namespace Lucene.Net.Documents
public NumericField SetIntValue(int value_Renamed)
{
tokenStream.SetIntValue(value_Renamed);
- fieldsData = (System.Int32) value_Renamed;
+ fieldsData = value_Renamed;
return this;
}
@@ -282,7 +275,7 @@ namespace Lucene.Net.Documents
public NumericField SetDoubleValue(double value_Renamed)
{
tokenStream.SetDoubleValue(value_Renamed);
- fieldsData = (double) value_Renamed;
+ fieldsData = value_Renamed;
return this;
}
@@ -295,7 +288,7 @@ namespace Lucene.Net.Documents
public NumericField SetFloatValue(float value_Renamed)
{
tokenStream.SetFloatValue(value_Renamed);
- fieldsData = (float) value_Renamed;
+ fieldsData = value_Renamed;
return this;
}
}
Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/BufferedDeletes.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/BufferedDeletes.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/BufferedDeletes.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/BufferedDeletes.cs Wed Nov 9 21:03:47 2011
@@ -16,6 +16,8 @@
*/
using System;
+using System.Collections.Generic;
+using Lucene.Net.Search;
namespace Lucene.Net.Index
{
@@ -31,9 +33,9 @@ namespace Lucene.Net.Index
class BufferedDeletes
{
internal int numTerms;
- internal System.Collections.IDictionary terms = null;
- internal System.Collections.Hashtable queries = new System.Collections.Hashtable();
- internal System.Collections.ArrayList docIDs = new System.Collections.ArrayList();
+ internal IDictionary<Term,Num> terms = null;
+ internal IDictionary<Query, int> queries = new SupportClass.HashMap<Query, int>();
+ internal List<int> docIDs = new List<int>();
internal long bytesUsed;
internal bool doTermSort;
@@ -42,11 +44,12 @@ namespace Lucene.Net.Index
this.doTermSort = doTermSort;
if (doTermSort)
{
- terms = new System.Collections.Generic.SortedDictionary<object, object>();
+ //TODO: This isn't quite the same as a TreeMap
+ terms = new SortedDictionary<Term, Num>();
}
else
{
- terms = new System.Collections.Hashtable();
+ terms = new SupportClass.HashMap<Term, Num>();
}
}
@@ -92,16 +95,14 @@ namespace Lucene.Net.Index
{
numTerms += in_Renamed.numTerms;
bytesUsed += in_Renamed.bytesUsed;
-
- System.Collections.ArrayList keys = new System.Collections.ArrayList(in_Renamed.terms.Keys);
- System.Collections.ArrayList values = new System.Collections.ArrayList(in_Renamed.terms.Values);
- for (int i=0; i < keys.Count; i++)
- terms[keys[i]] = values[i];
-
- keys = new System.Collections.ArrayList(in_Renamed.queries.Keys);
- values = new System.Collections.ArrayList(in_Renamed.queries.Values);
- for (int i=0; i < keys.Count; i++)
- queries[keys[i]] = values[i];
+ foreach (KeyValuePair<Term, Num> term in in_Renamed.terms)
+ {
+ terms.Add(term);
+ }
+ foreach (KeyValuePair<Query, int> term in in_Renamed.queries)
+ {
+ queries.Add(term);
+ }
docIDs.AddRange(in_Renamed.docIDs);
in_Renamed.Clear();
@@ -133,24 +134,22 @@ namespace Lucene.Net.Index
lock (this)
{
- System.Collections.IDictionary newDeleteTerms;
+ IDictionary<Term, Num> newDeleteTerms;
// Remap delete-by-term
if (terms.Count > 0)
{
if (doTermSort)
{
- newDeleteTerms = new System.Collections.Generic.SortedDictionary<object, object>();
+ newDeleteTerms = new SortedDictionary<Term, Num>();
}
else
{
- newDeleteTerms = new System.Collections.Hashtable();
+ newDeleteTerms = new SupportClass.HashMap<Term, Num>();
}
- System.Collections.IEnumerator iter = new System.Collections.Hashtable(terms).GetEnumerator();
- while (iter.MoveNext())
+ foreach(var entry in terms)
{
- System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) iter.Current;
- Num num = (Num) entry.Value;
+ Num num = entry.Value;
newDeleteTerms[entry.Key] = new Num(mapper.Remap(num.GetNum()));
}
}
@@ -158,33 +157,29 @@ namespace Lucene.Net.Index
newDeleteTerms = null;
// Remap delete-by-docID
- System.Collections.ArrayList newDeleteDocIDs;
+ List<int> newDeleteDocIDs;
if (docIDs.Count > 0)
{
- newDeleteDocIDs = new System.Collections.ArrayList(docIDs.Count);
- System.Collections.IEnumerator iter = docIDs.GetEnumerator();
- while (iter.MoveNext())
+ newDeleteDocIDs = new List<int>(docIDs.Count);
+ foreach(int num in docIDs)
{
- System.Int32 num = (System.Int32) iter.Current;
- newDeleteDocIDs.Add((System.Int32) mapper.Remap(num));
+ newDeleteDocIDs.Add(mapper.Remap(num));
}
}
else
newDeleteDocIDs = null;
// Remap delete-by-query
- System.Collections.Hashtable newDeleteQueries;
+ SupportClass.HashMap<Query, int> newDeleteQueries;
if (queries.Count > 0)
{
- newDeleteQueries = new System.Collections.Hashtable(queries.Count);
- System.Collections.IEnumerator iter = new System.Collections.Hashtable(queries).GetEnumerator();
- while (iter.MoveNext())
+ newDeleteQueries = new SupportClass.HashMap<Query, int>(queries.Count);
+ foreach(var entry in queries)
{
- System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) iter.Current;
- System.Int32 num = (System.Int32) entry.Value;
- newDeleteQueries[entry.Key] = (System.Int32) mapper.Remap(num);
+ int num = (int)entry.Value;
+ newDeleteQueries[entry.Key] = mapper.Remap(num);
}
}
else
Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ByteBlockPool.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ByteBlockPool.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ByteBlockPool.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ByteBlockPool.cs Wed Nov 9 21:03:47 2011
@@ -48,7 +48,7 @@ namespace Lucene.Net.Index
public /*internal*/ abstract class Allocator
{
public /*internal*/ abstract void RecycleByteBlocks(byte[][] blocks, int start, int end);
- public /*internal*/ abstract void RecycleByteBlocks(System.Collections.ArrayList blocks);
+ public /*internal*/ abstract void RecycleByteBlocks(IList<byte[]> blocks);
public /*internal*/ abstract byte[] GetByteBlock(bool trackAllocations);
}
Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ByteSliceReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ByteSliceReader.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ByteSliceReader.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ByteSliceReader.cs Wed Nov 9 21:03:47 2011
@@ -28,7 +28,7 @@ namespace Lucene.Net.Index
* each slice until we hit the end of that slice at which
* point we read the forwarding address of the next slice
* and then jump to it.*/
- public sealed class ByteSliceReader:IndexInput
+ public sealed class ByteSliceReader : IndexInput
{
internal ByteBlockPool pool;
internal int bufferUpto;
Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/CheckIndex.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/CheckIndex.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/CheckIndex.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/CheckIndex.cs Wed Nov 9 21:03:47 2011
@@ -16,7 +16,7 @@
*/
using System;
-
+using System.Collections.Generic;
using AbstractField = Lucene.Net.Documents.AbstractField;
using Document = Lucene.Net.Documents.Document;
using Directory = Lucene.Net.Store.Directory;
@@ -40,14 +40,6 @@ namespace Lucene.Net.Index
/// </summary>
public class CheckIndex
{
-
- /// <summary>Default PrintStream for all CheckIndex instances.</summary>
- /// <deprecated> Use <see cref="SetInfoStream" /> per instance,
- /// instead.
- /// </deprecated>
- [Obsolete("Use SetInfoStream per instance,instead.")]
- public static System.IO.StreamWriter out_Renamed = null;
-
private System.IO.StreamWriter infoStream;
private Directory dir;
@@ -85,13 +77,13 @@ namespace Lucene.Net.Index
/// <summary>Empty unless you passed specific segments list to check as optional 3rd argument.</summary>
/// <seealso cref="CheckIndex.CheckIndex_Renamed_Method(System.Collections.IList)">
/// </seealso>
- public System.Collections.IList segmentsChecked = new System.Collections.ArrayList();
+ public List<string> segmentsChecked = new List<string>();
/// <summary>True if the index was created with a newer version of Lucene than the CheckIndex tool. </summary>
public bool toolOutOfDate;
/// <summary>List of <see cref="SegmentInfoStatus" /> instances, detailing status of each segment. </summary>
- public System.Collections.IList segmentInfos = new System.Collections.ArrayList();
+ public IList<SegmentInfoStatus> segmentInfos = new List<SegmentInfoStatus>();
/// <summary>Directory index is in. </summary>
public Directory dir;
@@ -115,7 +107,7 @@ namespace Lucene.Net.Index
public bool partial;
/// <summary>Holds the userData of the last commit in the index </summary>
- public System.Collections.Generic.IDictionary<string, string> userData;
+ public IDictionary<string, string> userData;
/// <summary>Holds the status of each segment in the index.
/// See <see cref="SegmentInfos" />.
@@ -186,7 +178,7 @@ namespace Lucene.Net.Index
/// debugging details that IndexWriter records into
/// each segment it creates
/// </summary>
- public System.Collections.Generic.IDictionary<string, string> diagnostics;
+ public IDictionary<string, string> diagnostics;
/// <summary>Status for testing of field norms (null if field norms could not be tested). </summary>
public FieldNormStatus fieldNormStatus;
@@ -260,7 +252,7 @@ namespace Lucene.Net.Index
public CheckIndex(Directory dir)
{
this.dir = dir;
- infoStream = out_Renamed;
+ infoStream = null;
}
/// <summary>Set infoStream where messages should go. If null, no
@@ -298,29 +290,6 @@ namespace Lucene.Net.Index
}
}
- /// <summary>Returns true if index is clean, else false. </summary>
- /// <deprecated> Please instantiate a CheckIndex and then use <see cref="CheckIndex_Renamed_Method()" /> instead
- /// </deprecated>
- [Obsolete("Please instantiate a CheckIndex and then use CheckIndex() instead")]
- public static bool Check(Directory dir, bool doFix)
- {
- return Check(dir, doFix, null);
- }
-
- /// <summary>Returns true if index is clean, else false.</summary>
- /// <deprecated> Please instantiate a CheckIndex and then use <see cref="CheckIndex_Renamed_Method(System.Collections.IList)" /> instead
- /// </deprecated>
- [Obsolete("Please instantiate a CheckIndex and then use CheckIndex(List) instead")]
- public static bool Check(Directory dir, bool doFix, System.Collections.IList onlySegments)
- {
- CheckIndex checker = new CheckIndex(dir);
- Status status = checker.CheckIndex_Renamed_Method(onlySegments);
- if (doFix && !status.clean)
- checker.FixIndex(status);
-
- return status.clean;
- }
-
/// <summary>Returns a <see cref="Status" /> instance detailing
/// the state of the index.
///
@@ -350,7 +319,7 @@ namespace Lucene.Net.Index
/// you only call this when the index is not opened by any
/// writer.
/// </param>
- public virtual Status CheckIndex_Renamed_Method(System.Collections.IList onlySegments)
+ public virtual Status CheckIndex_Renamed_Method(List<string> onlySegments)
{
System.Globalization.NumberFormatInfo nf = System.Globalization.CultureInfo.CurrentCulture.NumberFormat;
SegmentInfos sis = new SegmentInfos();
@@ -458,19 +427,14 @@ namespace Lucene.Net.Index
result.partial = true;
if (infoStream != null)
infoStream.Write("\nChecking only these segments:");
- System.Collections.IEnumerator it = onlySegments.GetEnumerator();
- while (it.MoveNext())
+ foreach(string s in onlySegments)
{
if (infoStream != null)
{
- infoStream.Write(" " + it.Current);
+ infoStream.Write(" " + s);
}
}
- System.Collections.IEnumerator e = onlySegments.GetEnumerator();
- while (e.MoveNext() == true)
- {
- result.segmentsChecked.Add(e.Current);
- }
+ result.segmentsChecked.AddRange(onlySegments);
Msg(":");
}
@@ -510,7 +474,7 @@ namespace Lucene.Net.Index
segInfoStat.numFiles = info.Files().Count;
Msg(System.String.Format(nf, " size (MB)={0:f}", new System.Object[] { (info.SizeInBytes() / (1024.0 * 1024.0)) }));
segInfoStat.sizeMB = info.SizeInBytes() / (1024.0 * 1024.0);
- System.Collections.Generic.IDictionary<string, string> diagnostics = info.GetDiagnostics();
+ IDictionary<string, string> diagnostics = info.GetDiagnostics();
segInfoStat.diagnostics = diagnostics;
if (diagnostics.Count > 0)
{
@@ -541,7 +505,7 @@ namespace Lucene.Net.Index
}
if (infoStream != null)
infoStream.Write(" test: open reader.........");
- reader = SegmentReader.Get(info);
+ reader = SegmentReader.Get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
segInfoStat.openReaderPassed = true;
@@ -580,7 +544,7 @@ namespace Lucene.Net.Index
{
infoStream.Write(" test: fields..............");
}
- System.Collections.Generic.ICollection<string> fieldNames = reader.GetFieldNames(IndexReader.FieldOption.ALL);
+ ICollection<string> fieldNames = reader.GetFieldNames(IndexReader.FieldOption.ALL);
Msg("OK [" + fieldNames.Count + " fields]");
segInfoStat.numFields = fieldNames.Count;
@@ -652,7 +616,7 @@ namespace Lucene.Net.Index
}
/// <summary> Test field norms.</summary>
- private Status.FieldNormStatus TestFieldNorms(System.Collections.Generic.ICollection<string> fieldNames, SegmentReader reader)
+ private Status.FieldNormStatus TestFieldNorms(ICollection<string> fieldNames, SegmentReader reader)
{
Status.FieldNormStatus status = new Status.FieldNormStatus();
@@ -663,11 +627,10 @@ namespace Lucene.Net.Index
{
infoStream.Write(" test: field norms.........");
}
- System.Collections.IEnumerator it = fieldNames.GetEnumerator();
+
byte[] b = new byte[reader.MaxDoc()];
- while (it.MoveNext())
+ foreach(string fieldName in fieldNames)
{
- System.String fieldName = (System.String) it.Current;
if (reader.HasNorms(fieldName))
{
reader.Norms(fieldName, b, 0);
@@ -752,6 +715,7 @@ namespace Lucene.Net.Index
{
throw new System.SystemException("term " + term + ": doc " + doc + ": pos " + pos + " < lastPos " + lastPos);
}
+ lastPos = pos;
}
}
@@ -943,7 +907,7 @@ namespace Lucene.Net.Index
{
bool doFix = false;
- System.Collections.IList onlySegments = new System.Collections.ArrayList();
+ List<string> onlySegments = new List<string>();
System.String indexPath = null;
int i = 0;
while (i < args.Length)
@@ -997,7 +961,7 @@ namespace Lucene.Net.Index
Directory dir = null;
try
{
- dir = FSDirectory.Open(new System.IO.FileInfo(indexPath));
+ dir = FSDirectory.Open(new System.IO.DirectoryInfo(indexPath));
}
catch (System.Exception t)
{
Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/CompoundFileReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/CompoundFileReader.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/CompoundFileReader.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/CompoundFileReader.cs Wed Nov 9 21:03:47 2011
@@ -16,7 +16,7 @@
*/
using System;
-
+using System.Linq;
using BufferedIndexInput = Lucene.Net.Store.BufferedIndexInput;
using Directory = Lucene.Net.Store.Directory;
using IndexInput = Lucene.Net.Store.IndexInput;
@@ -30,12 +30,8 @@ namespace Lucene.Net.Index
/// <summary> Class for accessing a compound stream.
/// This class implements a directory, but is limited to only read operations.
/// Directory methods that would normally modify data throw an exception.
- ///
- ///
/// </summary>
- /// <version> $Id: CompoundFileReader.java 673371 2008-07-02 11:57:27Z mikemccand $
- /// </version>
- public class CompoundFileReader:Directory
+ public class CompoundFileReader : Directory
{
private int readBufferSize;
@@ -52,7 +48,7 @@ namespace Lucene.Net.Index
private System.String fileName;
private IndexInput stream;
- private System.Collections.Hashtable entries = new System.Collections.Hashtable();
+ private SupportClass.HashMap<string, FileEntry> entries = new SupportClass.HashMap<string, FileEntry>();
public CompoundFileReader(Directory dir, System.String name):this(dir, name, BufferedIndexInput.BUFFER_SIZE)
@@ -160,7 +156,7 @@ namespace Lucene.Net.Index
if (stream == null)
throw new System.IO.IOException("Stream closed");
- FileEntry entry = (FileEntry) entries[id];
+ FileEntry entry = entries[id];
if (entry == null)
throw new System.IO.IOException("No sub-file with id " + id + " found");
@@ -169,12 +165,9 @@ namespace Lucene.Net.Index
}
/// <summary>Returns an array of strings, one for each file in the directory. </summary>
- [Obsolete("Lucene.Net-2.9.1. This method overrides obsolete member Lucene.Net.Store.Directory.List()")]
- public override System.String[] List()
+ public override System.String[] ListAll()
{
- System.String[] res = new System.String[entries.Count];
- entries.Keys.CopyTo(res, 0);
- return res;
+ return entries.Keys.ToArray();
}
/// <summary>Returns true iff a file with the given name exists. </summary>
@@ -204,8 +197,7 @@ namespace Lucene.Net.Index
/// <summary>Not implemented</summary>
/// <throws> UnsupportedOperationException </throws>
- [Obsolete("Lucene.Net-2.9.1. This method overrides obsolete member Lucene.Net.Store.Directory.RenameFile(string, string)")]
- public override void RenameFile(System.String from, System.String to)
+ public void RenameFile(System.String from, System.String to)
{
throw new System.NotSupportedException();
}
@@ -214,7 +206,7 @@ namespace Lucene.Net.Index
/// <throws> IOException if the file does not exist </throws>
public override long FileLength(System.String name)
{
- FileEntry e = (FileEntry) entries[name];
+ FileEntry e = entries[name];
if (e == null)
throw new System.IO.IOException("File " + name + " does not exist");
return e.length;
Modified: incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ConcurrentMergeScheduler.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ConcurrentMergeScheduler.cs?rev=1199962&r1=1199961&r2=1199962&view=diff
==============================================================================
--- incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ConcurrentMergeScheduler.cs (original)
+++ incubator/lucene.net/branches/Lucene.Net.3.0.3/trunk/src/core/Index/ConcurrentMergeScheduler.cs Wed Nov 9 21:03:47 2011
@@ -16,7 +16,7 @@
*/
using System;
-
+using System.Collections.Generic;
using Directory = Lucene.Net.Store.Directory;
namespace Lucene.Net.Index
@@ -36,8 +36,8 @@ namespace Lucene.Net.Index
{
private int mergeThreadPriority = - 1;
-
- protected internal System.Collections.IList mergeThreads = new System.Collections.ArrayList();
+
+ protected internal IList<MergeThread> mergeThreads = new List<MergeThread>();
// Max number of threads allowed to be merging at once
private int maxThreadCount = 1;
@@ -92,7 +92,7 @@ namespace Lucene.Net.Index
}
}
- /// <summary>Return the priority that merge threads run at. </summary>
+ /// <summary>Set the priority that merge threads run at. </summary>
public virtual void SetMergeThreadPriority(int pri)
{
lock (this)
@@ -104,7 +104,7 @@ namespace Lucene.Net.Index
int numThreads = MergeThreadCount();
for (int i = 0; i < numThreads; i++)
{
- MergeThread merge = (MergeThread) mergeThreads[i];
+ MergeThread merge = mergeThreads[i];
merge.SetThreadPriority(pri);
}
}
@@ -153,7 +153,7 @@ namespace Lucene.Net.Index
if (Verbose())
{
for (int i = 0; i < count; i++)
- Message(" " + i + ": " + ((MergeThread) mergeThreads[i]));
+ Message(" " + i + ": " + mergeThreads[i]);
}
try
@@ -162,10 +162,11 @@ namespace Lucene.Net.Index
}
catch (System.Threading.ThreadInterruptedException ie)
{
- // In 3.0 we will change this to throw
- // InterruptedException instead
- SupportClass.ThreadClass.Current().Interrupt();
- throw new System.SystemException(ie.Message, ie);
+ //// In 3.0 we will change this to throw
+ //// InterruptedException instead
+ //SupportClass.ThreadClass.Current().Interrupt();
+ //throw new System.SystemException(ie.Message, ie);
+ throw;
}
}
}
@@ -175,26 +176,13 @@ namespace Lucene.Net.Index
{
lock (this)
{
- return MergeThreadCount(false);
- }
- }
-
- private int MergeThreadCount(bool excludeDone)
- {
- lock (this)
- {
int count = 0;
int numThreads = mergeThreads.Count;
for (int i = 0; i < numThreads; i++)
{
- MergeThread t = (MergeThread)mergeThreads[i];
- if (t.IsAlive)
+ if (mergeThreads[i].IsAlive)
{
- MergePolicy.OneMerge runningMerge = t.GetRunningMerge();
- if (!excludeDone || (runningMerge != null && !runningMerge.mergeDone))
- {
count++;
- }
}
}
return count;
@@ -204,7 +192,7 @@ namespace Lucene.Net.Index
public override void Merge(IndexWriter writer)
{
- // TODO: enable this once we are on JRE 1.5
+ // TODO: .NET doesn't support this - Look into it
// assert !Thread.holdsLock(writer);
this.writer = writer;
@@ -230,7 +218,6 @@ namespace Lucene.Net.Index
// pending merges, until it's empty:
while (true)
{
-
// TODO: we could be careful about which merges to do in
// the BG (eg maybe the "biggest" ones) vs FG, which
// merges to do first (the easiest ones?), etc.
@@ -253,7 +240,7 @@ namespace Lucene.Net.Index
lock (this)
{
MergeThread merger;
- while (MergeThreadCount(true) >= maxThreadCount)
+ while (MergeThreadCount() >= maxThreadCount)
{
if (Verbose())
Message(" too many merge threads running; stalling...");
@@ -263,16 +250,18 @@ namespace Lucene.Net.Index
}
catch (System.Threading.ThreadInterruptedException ie)
{
- // In 3.0 we will change this to throw
- // InterruptedException instead
- SupportClass.ThreadClass.Current().Interrupt();
- throw new System.SystemException(ie.Message, ie);
+ //// In 3.0 we will change this to throw
+ //// InterruptedException instead
+ //SupportClass.ThreadClass.Current().Interrupt();
+ //throw new System.SystemException(ie.Message, ie);
+ throw;
}
}
if (Verbose())
Message(" consider merge " + merge.SegString(dir));
-
+
+ System.Diagnostics.Debug.Assert(mergeThreadCount < maxThreadCount);
// OK to spawn a new merge thread to handle this
// merge:
@@ -462,9 +451,10 @@ namespace Lucene.Net.Index
}
catch (System.Threading.ThreadInterruptedException ie)
{
- SupportClass.ThreadClass.Current().Interrupt();
- // In 3.0 this will throw InterruptedException
- throw new System.SystemException(ie.Message, ie);
+ //SupportClass.ThreadClass.Current().Interrupt();
+ //// In 3.0 this will throw InterruptedException
+ //throw new System.SystemException(ie.Message, ie);
+ throw;
}
throw new MergePolicy.MergeException(exc, dir);
}
@@ -478,13 +468,13 @@ namespace Lucene.Net.Index
{
throw new System.SystemException("setTestMode() was not called; often this is because your test case's setUp method fails to call super.setUp in LuceneTestCase");
}
- lock (allInstances.SyncRoot)
+ lock (allInstances)
{
int count = allInstances.Count;
// Make sure all outstanding threads are done so we see
// any exceptions they may produce:
for (int i = 0; i < count; i++)
- ((ConcurrentMergeScheduler) allInstances[i]).Sync();
+ allInstances[i].Sync();
bool v = anyExceptions;
anyExceptions = false;
return v;
@@ -493,7 +483,7 @@ namespace Lucene.Net.Index
public static void ClearUnhandledExceptions()
{
- lock (allInstances.SyncRoot)
+ lock (allInstances)
{
anyExceptions = false;
}
@@ -502,19 +492,19 @@ namespace Lucene.Net.Index
/// <summary>Used for testing </summary>
private void AddMyself()
{
- lock (allInstances.SyncRoot)
+ lock (allInstances)
{
int size = allInstances.Count;
int upto = 0;
for (int i = 0; i < size; i++)
{
- ConcurrentMergeScheduler other = (ConcurrentMergeScheduler) allInstances[i];
+ ConcurrentMergeScheduler other = allInstances[i];
if (!(other.closed && 0 == other.MergeThreadCount()))
// Keep this one for now: it still has threads or
// may spawn new threads
allInstances[upto++] = other;
}
- ((System.Collections.IList) ((System.Collections.ArrayList) allInstances).GetRange(upto, allInstances.Count - upto)).Clear();
+ allInstances.RemoveRange(upto, allInstances.Count - upto);
allInstances.Add(this);
}
}
@@ -534,10 +524,10 @@ namespace Lucene.Net.Index
}
/// <summary>Used for testing </summary>
- private static System.Collections.IList allInstances;
+ private static List<ConcurrentMergeScheduler> allInstances;
public static void SetTestMode()
{
- allInstances = new System.Collections.ArrayList();
+ allInstances = new List<ConcurrentMergeScheduler>();
}
}
}
\ No newline at end of file