You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucenenet.apache.org by cc...@apache.org on 2012/03/12 23:29:37 UTC
svn commit: r1299911 [2/14] - in /incubator/lucene.net/trunk: src/core/
src/core/Analysis/ src/core/Analysis/Standard/
src/core/Analysis/Tokenattributes/ src/core/Document/ src/core/Index/
src/core/Messages/ src/core/QueryParser/ src/core/Search/ src/c...
Modified: incubator/lucene.net/trunk/src/core/Document/Fieldable.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Document/Fieldable.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Document/Fieldable.cs (original)
+++ incubator/lucene.net/trunk/src/core/Document/Fieldable.cs Mon Mar 12 22:29:26 2012
@@ -16,7 +16,7 @@
*/
using System;
-
+using System.IO;
using TokenStream = Lucene.Net.Analysis.TokenStream;
using FieldInvertState = Lucene.Net.Index.FieldInvertState;
@@ -32,154 +32,144 @@ namespace Lucene.Net.Documents
///
///
/// </summary>
- public interface Fieldable
+ public interface IFieldable
{
- /// <summary>Sets the boost factor hits on this field. This value will be
- /// multiplied into the score of all hits on this this field of this
- /// document.
- ///
- /// <p/>The boost is multiplied by <see cref="Lucene.Net.Documents.Document.GetBoost()" /> of the document
- /// containing this field. If a document has multiple fields with the same
- /// name, all such values are multiplied together. This product is then
- /// used to compute the norm factor for the field. By
- /// default, in the <see cref="Lucene.Net.Search.Similarity.ComputeNorm(String,Lucene.Net.Index.FieldInvertState)"/>
- /// method, the boost value is multiplied
- /// by the <see cref="Lucene.Net.Search.Similarity.LengthNorm(String,int)"/>
- /// and then rounded by <see cref="Lucene.Net.Search.Similarity.EncodeNorm(float)" /> before it is stored in the
- /// index. One should attempt to ensure that this product does not overflow
- /// the range of that encoding.
- ///
- /// </summary>
- /// <seealso cref="Lucene.Net.Documents.Document.SetBoost(float)">
- /// </seealso>
- /// <seealso cref="Lucene.Net.Search.Similarity.ComputeNorm(String, FieldInvertState)">
- /// </seealso>
- /// <seealso cref="Lucene.Net.Search.Similarity.EncodeNorm(float)">
- /// </seealso>
- void SetBoost(float boost);
-
- /// <summary>Returns the boost factor for hits for this field.
- ///
- /// <p/>The default value is 1.0.
- ///
- /// <p/>Note: this value is not stored directly with the document in the index.
- /// Documents returned from <see cref="Lucene.Net.Index.IndexReader.Document(int)" /> and
- /// <see cref="Lucene.Net.Search.Searcher.Doc(int)" /> may thus not have the same value present as when
- /// this field was indexed.
- ///
- /// </summary>
- /// <seealso cref="SetBoost(float)">
- /// </seealso>
- float GetBoost();
-
- /// <summary>Returns the name of the field as an interned string.
- /// For example "date", "title", "body", ...
- /// </summary>
- System.String Name();
-
- /// <summary>The value of the field as a String, or null.
- /// <p/>
- /// For indexing, if isStored()==true, the stringValue() will be used as the stored field value
- /// unless isBinary()==true, in which case GetBinaryValue() will be used.
- ///
- /// If isIndexed()==true and isTokenized()==false, this String value will be indexed as a single token.
- /// If isIndexed()==true and isTokenized()==true, then tokenStreamValue() will be used to generate indexed tokens if not null,
- /// else readerValue() will be used to generate indexed tokens if not null, else stringValue() will be used to generate tokens.
- /// </summary>
- System.String StringValue();
-
- /// <summary>The value of the field as a Reader, which can be used at index time to generate indexed tokens.</summary>
- /// <seealso cref="StringValue()">
- /// </seealso>
- System.IO.TextReader ReaderValue();
-
- /// <summary>The TokenStream for this field to be used when indexing, or null.</summary>
- /// <seealso cref="StringValue()">
- /// </seealso>
- TokenStream TokenStreamValue();
-
- /// <summary>True if the value of the field is to be stored in the index for return
- /// with search hits.
- /// </summary>
- bool IsStored();
-
- /// <summary>True if the value of the field is to be indexed, so that it may be
- /// searched on.
- /// </summary>
- bool IsIndexed();
-
- /// <summary>True if the value of the field should be tokenized as text prior to
- /// indexing. Un-tokenized fields are indexed as a single word and may not be
- /// Reader-valued.
- /// </summary>
- bool IsTokenized();
-
- /// <summary>True if the term or terms used to index this field are stored as a term
- /// vector, available from <see cref="Lucene.Net.Index.IndexReader.GetTermFreqVector(int,String)" />.
- /// These methods do not provide access to the original content of the field,
- /// only to terms used to index it. If the original content must be
- /// preserved, use the <c>stored</c> attribute instead.
- ///
- /// </summary>
- /// <seealso cref="Lucene.Net.Index.IndexReader.GetTermFreqVector(int, String)">
- /// </seealso>
- bool IsTermVectorStored();
-
- /// <summary> True if terms are stored as term vector together with their offsets
- /// (start and end positon in source text).
- /// </summary>
- bool IsStoreOffsetWithTermVector();
-
- /// <summary> True if terms are stored as term vector together with their token positions.</summary>
- bool IsStorePositionWithTermVector();
-
- /// <summary>True if the value of the field is stored as binary </summary>
- bool IsBinary();
-
- /// <summary>True if norms are omitted for this indexed field </summary>
- bool GetOmitNorms();
-
- /// <summary>Expert:
- ///
- /// If set, omit normalization factors associated with this indexed field.
- /// This effectively disables indexing boosts and length normalization for this field.
- /// </summary>
- void SetOmitNorms(bool omitNorms);
-
- /// <summary> Indicates whether a Field is Lazy or not. The semantics of Lazy loading are such that if a Field is lazily loaded, retrieving
- /// it's values via <see cref="StringValue()" /> or <see cref="GetBinaryValue()" /> is only valid as long as the <see cref="Lucene.Net.Index.IndexReader" /> that
- /// retrieved the <see cref="Document" /> is still open.
- ///
- /// </summary>
- /// <returns> true if this field can be loaded lazily
- /// </returns>
- bool IsLazy();
-
- /// <summary> Returns offset into byte[] segment that is used as value, if Field is not binary
- /// returned value is undefined
- /// </summary>
- /// <returns> index of the first character in byte[] segment that represents this Field value
- /// </returns>
- int GetBinaryOffset();
-
- /// <summary> Returns length of byte[] segment that is used as value, if Field is not binary
- /// returned value is undefined
- /// </summary>
- /// <returns> length of byte[] segment that represents this Field value
- /// </returns>
- int GetBinaryLength();
-
- /// <summary> Return the raw byte[] for the binary field. Note that
- /// you must also call <see cref="GetBinaryLength" /> and <see cref="GetBinaryOffset" />
- /// to know which range of bytes in this
- /// returned array belong to the field.
- /// </summary>
- /// <returns> reference to the Field value as byte[].
- /// </returns>
- byte[] GetBinaryValue();
-
- /// <summary> Return the raw byte[] for the binary field. Note that
- /// you must also call <see cref="GetBinaryLength" /> and <see cref="GetBinaryOffset" />
+ /// <summary>Gets or sets the boost factor for hits for this field. This value will be
+ /// multiplied into the score of all hits on this field of this
+ /// document.
+ ///
+ /// <p/>The boost is multiplied by <see cref="Lucene.Net.Documents.Document.Boost" /> of the document
+ /// containing this field. If a document has multiple fields with the same
+ /// name, all such values are multiplied together. This product is then
+ /// used to compute the norm factor for the field. By
+ /// default, in the <see cref="Lucene.Net.Search.Similarity.ComputeNorm(String,Lucene.Net.Index.FieldInvertState)"/>
+ /// method, the boost value is multiplied
+ /// by the <see cref="Lucene.Net.Search.Similarity.LengthNorm(String,int)"/>
+ /// and then rounded by <see cref="Lucene.Net.Search.Similarity.EncodeNorm(float)" /> before it is stored in the
+ /// index. One should attempt to ensure that this product does not overflow
+ /// the range of that encoding.
+ ///
+ /// <p/>The default value is 1.0.
+ ///
+ /// <p/>Note: this value is not stored directly with the document in the index.
+ /// Documents returned from <see cref="Lucene.Net.Index.IndexReader.Document(int)" /> and
+ /// <see cref="Lucene.Net.Search.Searcher.Doc(int)" /> may thus not have the same value present as when
+ /// this field was indexed.
+ ///
+ /// </summary>
+ /// <seealso cref="Lucene.Net.Documents.Document.Boost">
+ /// </seealso>
+ /// <seealso cref="Lucene.Net.Search.Similarity.ComputeNorm(String, FieldInvertState)">
+ /// </seealso>
+ /// <seealso cref="Lucene.Net.Search.Similarity.EncodeNorm(float)">
+ /// </seealso>
+ float Boost { get; set; }
+
+ /// <summary>Returns the name of the field as an interned string.
+ /// For example "date", "title", "body", ...
+ /// </summary>
+ string Name { get; }
+
+ /// <summary>The value of the field as a String, or null.
+ /// <p/>
+ /// For indexing, if IsStored==true, the StringValue will be used as the stored field value
+ /// unless IsBinary==true, in which case BinaryValue will be used.
+ ///
+ /// If IsIndexed==true and IsTokenized==false, this String value will be indexed as a single token.
+ /// If IsIndexed==true and IsTokenized==true, then TokenStreamValue will be used to generate indexed tokens if not null,
+ /// else ReaderValue will be used to generate indexed tokens if not null, else StringValue will be used to generate tokens.
+ /// </summary>
+ string StringValue { get; }
+
+ /// <summary>The value of the field as a Reader, which can be used at index time to generate indexed tokens.</summary>
+ /// <seealso cref="StringValue">
+ /// </seealso>
+ TextReader ReaderValue { get; }
+
+ /// <summary>The TokenStream for this field to be used when indexing, or null.</summary>
+ /// <seealso cref="StringValue">
+ /// </seealso>
+ TokenStream TokenStreamValue { get; }
+
+ /// <summary>True if the value of the field is to be stored in the index for return
+ /// with search hits.
+ /// </summary>
+ bool IsStored { get; }
+
+ /// <summary>True if the value of the field is to be indexed, so that it may be
+ /// searched on.
+ /// </summary>
+ bool IsIndexed { get; }
+
+ /// <summary>True if the value of the field should be tokenized as text prior to
+ /// indexing. Un-tokenized fields are indexed as a single word and may not be
+ /// Reader-valued.
+ /// </summary>
+ bool IsTokenized { get; }
+
+ /// <summary>True if the term or terms used to index this field are stored as a term
+ /// vector, available from <see cref="Lucene.Net.Index.IndexReader.GetTermFreqVector(int,String)" />.
+ /// These methods do not provide access to the original content of the field,
+ /// only to terms used to index it. If the original content must be
+ /// preserved, use the <c>stored</c> attribute instead.
+ ///
+ /// </summary>
+ /// <seealso cref="Lucene.Net.Index.IndexReader.GetTermFreqVector(int, String)">
+ /// </seealso>
+ bool IsTermVectorStored { get; }
+
+ /// <summary> True if terms are stored as term vector together with their offsets
+ /// (start and end position in source text).
+ /// </summary>
+ bool IsStoreOffsetWithTermVector { get; }
+
+ /// <summary> True if terms are stored as term vector together with their token positions.</summary>
+ bool IsStorePositionWithTermVector { get; }
+
+ /// <summary>True if the value of the field is stored as binary </summary>
+ bool IsBinary { get; }
+
+ /// <summary>
+ /// True if norms are omitted for this indexed field.
+ /// <para>
+ /// Expert:
+ /// If set, omit normalization factors associated with this indexed field.
+ /// This effectively disables indexing boosts and length normalization for this field.
+ /// </para>
+ /// </summary>
+ bool OmitNorms { get; set; }
+
+
+ /// <summary> Indicates whether a Field is Lazy or not. The semantics of Lazy loading are such that if a Field is lazily loaded, retrieving
+ /// its values via <see cref="StringValue" /> or <see cref="BinaryValue" /> is only valid as long as the <see cref="Lucene.Net.Index.IndexReader" /> that
+ /// retrieved the <see cref="Document" /> is still open.
+ ///
+ /// </summary>
+ /// <value> true if this field can be loaded lazily </value>
+ bool IsLazy { get; }
+
+ /// <summary> Returns offset into byte[] segment that is used as value, if Field is not binary
+ /// returned value is undefined
+ /// </summary>
+ /// <value> index of the first character in byte[] segment that represents this Field value </value>
+ int BinaryOffset { get; }
+
+ /// <summary> Returns length of byte[] segment that is used as value, if Field is not binary
+ /// returned value is undefined
+ /// </summary>
+ /// <value> length of byte[] segment that represents this Field value </value>
+ int BinaryLength { get; }
+
+ /// <summary> Return the raw byte[] for the binary field. Note that
+ /// you must also call <see cref="BinaryLength" /> and <see cref="BinaryOffset" />
+ /// to know which range of bytes in this
+ /// returned array belong to the field.
+ /// </summary>
+ /// <value> reference to the Field value as byte[]. </value>
+ byte[] BinaryValue { get; }
+
+ /// <summary> Return the raw byte[] for the binary field. Note that
+ /// you must also call <see cref="BinaryLength" /> and <see cref="BinaryOffset" />
/// to know which range of bytes in this
/// returned array belong to the field.<p/>
/// About reuse: if you pass in the result byte[] and it is
@@ -197,22 +187,19 @@ namespace Lucene.Net.Documents
/// </returns>
byte[] GetBinaryValue(byte[] result);
- /// <seealso cref="SetOmitTermFreqAndPositions"/>
- bool GetOmitTermFreqAndPositions();
-
- /// Expert:
- /// <para>
- /// If set, omit term freq, positions and payloads from
- /// postings for this field.
- /// </para>
- /// <para>
- /// <b>NOTE</b>: While this option reduces storage space
- /// required in the index, it also means any query
- /// requiring positional information, such as
- /// <see cref="Lucene.Net.Search.PhraseQuery"/> or
- /// <see cref="Lucene.Net.Search.Spans.SpanQuery"/>
- /// subclasses will silently fail to find results.
- /// </para>
- void SetOmitTermFreqAndPositions(bool omitTermFreqAndPositions);
+ /// Expert:
+ /// <para>
+ /// If set, omit term freq, positions and payloads from
+ /// postings for this field.
+ /// </para>
+ /// <para>
+ /// <b>NOTE</b>: While this option reduces storage space
+ /// required in the index, it also means any query
+ /// requiring positional information, such as
+ /// <see cref="Lucene.Net.Search.PhraseQuery"/> or
+ /// <see cref="Lucene.Net.Search.Spans.SpanQuery"/>
+ /// subclasses will silently fail to find results.
+ /// </para>
+ bool OmitTermFreqAndPositions { set; get; }
}
}
\ No newline at end of file
Modified: incubator/lucene.net/trunk/src/core/Document/NumericField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Document/NumericField.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Document/NumericField.cs (original)
+++ incubator/lucene.net/trunk/src/core/Document/NumericField.cs Mon Mar 12 22:29:26 2012
@@ -16,6 +16,7 @@
*/
using System;
+using System.IO;
using Lucene.Net.Search;
using NumericTokenStream = Lucene.Net.Analysis.NumericTokenStream;
using TokenStream = Lucene.Net.Analysis.TokenStream;
@@ -129,7 +130,7 @@ namespace Lucene.Net.Documents
/// <p/><b>NOTE:</b> This class is only used during
/// indexing. When retrieving the stored field value from a
/// <see cref="Document" /> instance after search, you will get a
- /// conventional <see cref="Fieldable" /> instance where the numeric
+ /// conventional <see cref="IFieldable" /> instance where the numeric
/// values are returned as <see cref="String" />s (according to
/// <c>toString(value)</c> of the used data type).
///
@@ -204,41 +205,41 @@ namespace Lucene.Net.Documents
/// </param>
public NumericField(System.String name, int precisionStep, Field.Store store, bool index):base(name, store, index?Field.Index.ANALYZED_NO_NORMS:Field.Index.NO, Field.TermVector.NO)
{
- SetOmitTermFreqAndPositions(true);
+ OmitTermFreqAndPositions = true;
tokenStream = new NumericTokenStream(precisionStep);
}
-
- /// <summary>Returns a <see cref="NumericTokenStream" /> for indexing the numeric value. </summary>
- public override TokenStream TokenStreamValue()
- {
- return IsIndexed()?tokenStream:null;
- }
-
- /// <summary>Returns always <c>null</c> for numeric fields </summary>
+
+ /// <summary>Returns a <see cref="NumericTokenStream" /> for indexing the numeric value. </summary>
+ public override TokenStream TokenStreamValue
+ {
+ get { return IsIndexed ? tokenStream : null; }
+ }
+
+ /// <summary>Returns always <c>null</c> for numeric fields </summary>
public override byte[] GetBinaryValue(byte[] result)
{
return null;
}
-
- /// <summary>Returns always <c>null</c> for numeric fields </summary>
- public override System.IO.TextReader ReaderValue()
- {
- return null;
- }
-
- /// <summary>Returns the numeric value as a string (how it is stored, when <see cref="Field.Store.YES" /> is chosen). </summary>
- public override System.String StringValue()
- {
- return (fieldsData == null)?null:fieldsData.ToString();
- }
-
- /// <summary>Returns the current numeric value as a subclass of <see cref="Number" />, <c>null</c> if not yet initialized. </summary>
- public System.ValueType GetNumericValue()
- {
- return (System.ValueType) fieldsData;
- }
-
- /// <summary> Initializes the field with the supplied <c>long</c> value.</summary>
+
+ /// <summary>Returns always <c>null</c> for numeric fields </summary>
+ public override TextReader ReaderValue
+ {
+ get { return null; }
+ }
+
+ /// <summary>Returns the numeric value as a string (how it is stored, when <see cref="Field.Store.YES" /> is chosen). </summary>
+ public override string StringValue
+ {
+ get { return (fieldsData == null) ? null : fieldsData.ToString(); }
+ }
+
+ /// <summary>Returns the current numeric value as a subclass of <see cref="Number" />, <c>null</c> if not yet initialized. </summary>
+ public ValueType NumericValue
+ {
+ get { return (System.ValueType) fieldsData; }
+ }
+
+ /// <summary> Initializes the field with the supplied <c>long</c> value.</summary>
/// <param name="value_Renamed">the numeric value
/// </param>
/// <returns> this instance, because of this you can use it the following way:
Modified: incubator/lucene.net/trunk/src/core/FileDiffs.txt
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/FileDiffs.txt?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/FileDiffs.txt (original)
+++ incubator/lucene.net/trunk/src/core/FileDiffs.txt Mon Mar 12 22:29:26 2012
@@ -9,80 +9,4 @@ store\NIOFSDirectory.java - Text files a
util\DummyConcurrentLock.java - New in 3.x (NOT NEEDED IN .NET? Can just use new Object() for a dummy lock) (used in MultiSearcher)
util\NamedThreadFactory.java - New in 3.x
-util\ThreadInterruptedException.java - new in 3.x (NOT NEEDED IN .NET?)
-
-LUCENENET-468
-
-Analyzer - Implemented Dispose
-CharFilter - Implemented Dispose
-CharReader
-TokenFilter
-Tokenizer
-TokenStream
-AbstractAllTermDocs
-ByteSliceReader
-CompoundFileReader
-CompundFileWriter
-ConcurrentMergeScheduler
-DirectoryReader
-DocumentsWriter
-FieldsReader
-FieldsWriter
-FilterIndexReader
-FormatPostingsDocsWriter
-FormatPostingsPositionsWriter
-FormatPostingsTermsWriter
-IndexFileDeleter
-IndexReader
-IdnexWriter
-InvertedDocConsumer
-InvertedDocEndConsumer
-LogMergePolicy
-MergePolicy
-MergeScheduler
-MultiLevelSkipListReader
-MultipleTermPositions
-NormsWriter
-ParallelReader
-ReusableStringReader
-SegmentMergeInfo
-SegmentMergeQueue
-SegmentMerger
-SegmentReader
-SegmentTermDocs
-SegmentTermEnum
-SegmentTermPositions
-SerialMergeScheduler
-StoredFieldsWriter
-TermDocs
-TermEnum
-TermInfosReader
-TermInfosWriter
-TermVectorsReader
-TermVectorsWriter
-FilteredTermEnum
-FuzzyTermEnum
-indexSearcher
-MultiSearcher
-NumericRangeQuery
-Searchable
-BufferedIndexOutput
-CheckSumIndexInput
-CheckSumIndexOutput
-Directory
-FileSwitchDirectory
-FSDirectory
-FileSwitchDirectory
-FSDirectory
-IndexInput
-IndexOutput
-MMapDirectory
-NIOFSDirectory
-RAMDirectory
-RAMInputStream
-RAMOutputStream
-SimpleFSDirectory
-Cache
-SimpleMapCache
-CloseableThreadLocal
-
+util\ThreadInterruptedException.java - new in 3.x (NOT NEEDED IN .NET?)
\ No newline at end of file
Modified: incubator/lucene.net/trunk/src/core/Index/AllTermDocs.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/AllTermDocs.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/AllTermDocs.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/AllTermDocs.cs Mon Mar 12 22:29:26 2012
@@ -26,7 +26,7 @@ namespace Lucene.Net.Index
{
protected internal BitVector deletedDocs;
- protected internal AllTermDocs(SegmentReader parent) : base(parent.MaxDoc())
+ protected internal AllTermDocs(SegmentReader parent) : base(parent.MaxDoc)
{
lock (parent)
{
Modified: incubator/lucene.net/trunk/src/core/Index/ByteSliceReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/ByteSliceReader.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/ByteSliceReader.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/ByteSliceReader.cs Mon Mar 12 22:29:26 2012
@@ -156,12 +156,13 @@ namespace Lucene.Net.Index
}
}
}
-
- public override long GetFilePointer()
- {
- throw new System.SystemException("not implemented");
- }
- public override long Length()
+
+ public override long FilePointer
+ {
+ get { throw new System.SystemException("not implemented"); }
+ }
+
+ public override long Length()
{
throw new System.SystemException("not implemented");
}
Modified: incubator/lucene.net/trunk/src/core/Index/CheckIndex.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/CheckIndex.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/CheckIndex.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/CheckIndex.cs Mon Mar 12 22:29:26 2012
@@ -410,11 +410,11 @@ namespace Lucene.Net.Index
result.segmentsFileName = segmentsFileName;
result.numSegments = numSegments;
result.segmentFormat = sFormat;
- result.userData = sis.GetUserData();
+ result.userData = sis.UserData;
System.String userDataString;
- if (sis.GetUserData().Count > 0)
+ if (sis.UserData.Count > 0)
{
- userDataString = " userData=" + CollectionsHelper.CollectionToString(sis.GetUserData());
+ userDataString = " userData=" + CollectionsHelper.CollectionToString(sis.UserData);
}
else
{
@@ -467,30 +467,30 @@ namespace Lucene.Net.Index
try
{
- Msg(" compound=" + info.GetUseCompoundFile());
- segInfoStat.compound = info.GetUseCompoundFile();
- Msg(" hasProx=" + info.GetHasProx());
- segInfoStat.hasProx = info.GetHasProx();
+ Msg(" compound=" + info.UseCompoundFile);
+ segInfoStat.compound = info.UseCompoundFile;
+ Msg(" hasProx=" + info.HasProx);
+ segInfoStat.hasProx = info.HasProx;
Msg(" numFiles=" + info.Files().Count);
segInfoStat.numFiles = info.Files().Count;
Msg(System.String.Format(nf, " size (MB)={0:f}", new System.Object[] { (info.SizeInBytes() / (1024.0 * 1024.0)) }));
segInfoStat.sizeMB = info.SizeInBytes() / (1024.0 * 1024.0);
- IDictionary<string, string> diagnostics = info.GetDiagnostics();
+ IDictionary<string, string> diagnostics = info.Diagnostics;
segInfoStat.diagnostics = diagnostics;
if (diagnostics.Count > 0)
{
Msg(" diagnostics = " + CollectionsHelper.CollectionToString(diagnostics));
}
- int docStoreOffset = info.GetDocStoreOffset();
+ int docStoreOffset = info.DocStoreOffset;
if (docStoreOffset != - 1)
{
Msg(" docStoreOffset=" + docStoreOffset);
segInfoStat.docStoreOffset = docStoreOffset;
- Msg(" docStoreSegment=" + info.GetDocStoreSegment());
- segInfoStat.docStoreSegment = info.GetDocStoreSegment();
- Msg(" docStoreIsCompoundFile=" + info.GetDocStoreIsCompoundFile());
- segInfoStat.docStoreCompoundFile = info.GetDocStoreIsCompoundFile();
+ Msg(" docStoreSegment=" + info.DocStoreSegment);
+ segInfoStat.docStoreSegment = info.DocStoreSegment;
+ Msg(" docStoreIsCompoundFile=" + info.DocStoreIsCompoundFile);
+ segInfoStat.docStoreCompoundFile = info.DocStoreIsCompoundFile;
}
System.String delFileName = info.GetDelFileName();
if (delFileName == null)
@@ -510,17 +510,17 @@ namespace Lucene.Net.Index
segInfoStat.openReaderPassed = true;
- int numDocs = reader.NumDocs();
+ int numDocs = reader.NumDocs;
toLoseDocCount = numDocs;
- if (reader.HasDeletions())
+ if (reader.HasDeletions)
{
if (reader.deletedDocs.Count() != info.GetDelCount())
{
throw new System.SystemException("delete count mismatch: info=" + info.GetDelCount() + " vs deletedDocs.count()=" + reader.deletedDocs.Count());
}
- if (reader.deletedDocs.Count() > reader.MaxDoc())
+ if (reader.deletedDocs.Count() > reader.MaxDoc)
{
- throw new System.SystemException("too many deleted docs: maxDoc()=" + reader.MaxDoc() + " vs deletedDocs.count()=" + reader.deletedDocs.Count());
+ throw new System.SystemException("too many deleted docs: maxDoc()=" + reader.MaxDoc + " vs deletedDocs.count()=" + reader.deletedDocs.Count());
}
if (info.docCount - numDocs != info.GetDelCount())
{
@@ -537,8 +537,8 @@ namespace Lucene.Net.Index
}
Msg("OK");
}
- if (reader.MaxDoc() != info.docCount)
- throw new System.SystemException("SegmentReader.maxDoc() " + reader.MaxDoc() + " != SegmentInfos.docCount " + info.docCount);
+ if (reader.MaxDoc != info.docCount)
+ throw new System.SystemException("SegmentReader.maxDoc() " + reader.MaxDoc + " != SegmentInfos.docCount " + info.docCount);
// Test getFieldNames()
if (infoStream != null)
@@ -629,7 +629,7 @@ namespace Lucene.Net.Index
infoStream.Write(" test: field norms.........");
}
- byte[] b = new byte[reader.MaxDoc()];
+ byte[] b = new byte[reader.MaxDoc];
foreach(string fieldName in fieldNames)
{
if (reader.HasNorms(fieldName))
@@ -672,7 +672,7 @@ namespace Lucene.Net.Index
// Used only to count up # deleted docs for this term
MySegmentTermDocs myTermDocs = new MySegmentTermDocs(reader);
- int maxDoc = reader.MaxDoc();
+ int maxDoc = reader.MaxDoc;
while (termEnum.Next())
{
@@ -723,7 +723,7 @@ namespace Lucene.Net.Index
// Now count how many deleted docs occurred in
// this term:
int delCount;
- if (reader.HasDeletions())
+ if (reader.HasDeletions)
{
myTermDocs.Seek(term);
while (myTermDocs.Next())
@@ -781,7 +781,7 @@ namespace Lucene.Net.Index
}
// Validate docCount
- if (status.docCount != reader.NumDocs())
+ if (status.docCount != reader.NumDocs)
{
throw new System.SystemException("docCount=" + status.docCount + " but saw " + status.docCount + " undeleted docs");
}
Modified: incubator/lucene.net/trunk/src/core/Index/CompoundFileReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/CompoundFileReader.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/CompoundFileReader.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/CompoundFileReader.cs Mon Mar 12 22:29:26 2012
@@ -286,7 +286,7 @@ namespace Lucene.Net.Index
/// </param>
public override void ReadInternal(byte[] b, int offset, int len)
{
- long start = GetFilePointer();
+ long start = FilePointer;
if (start + len > length)
throw new System.IO.IOException("read past EOF");
base_Renamed.Seek(fileOffset + start);
Modified: incubator/lucene.net/trunk/src/core/Index/CompoundFileWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/CompoundFileWriter.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/CompoundFileWriter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/CompoundFileWriter.cs Mon Mar 12 22:29:26 2012
@@ -187,7 +187,7 @@ namespace Lucene.Net.Index
long totalSize = 0;
foreach (FileEntry fe in entries)
{
- fe.directoryOffset = os.GetFilePointer();
+ fe.directoryOffset = os.FilePointer;
os.WriteLong(0); // for now
os.WriteString(fe.file);
totalSize += directory.FileLength(fe.file);
@@ -199,7 +199,7 @@ namespace Lucene.Net.Index
// searching. It also uncovers a disk-full
// situation earlier and hopefully without
// actually filling disk to 100%:
- long finalLength = totalSize + os.GetFilePointer();
+ long finalLength = totalSize + os.FilePointer;
os.SetLength(finalLength);
// Open the files and copy their data into the stream.
@@ -207,7 +207,7 @@ namespace Lucene.Net.Index
byte[] buffer = new byte[16384];
foreach (FileEntry fe in entries)
{
- fe.dataOffset = os.GetFilePointer();
+ fe.dataOffset = os.FilePointer;
CopyFile(fe, os, buffer);
}
@@ -218,7 +218,7 @@ namespace Lucene.Net.Index
os.WriteLong(fe.dataOffset);
}
- System.Diagnostics.Debug.Assert(finalLength == os.Length());
+ System.Diagnostics.Debug.Assert(finalLength == os.Length);
// Close the output stream. Set the os to null before trying to
// close so that if an exception occurs during the close, the
@@ -251,7 +251,7 @@ namespace Lucene.Net.Index
IndexInput is_Renamed = null;
try
{
- long startPtr = os.GetFilePointer();
+ long startPtr = os.FilePointer;
is_Renamed = directory.OpenInput(source.file);
long length = is_Renamed.Length();
@@ -275,7 +275,7 @@ namespace Lucene.Net.Index
throw new System.IO.IOException("Non-zero remainder length after copying: " + remainder + " (id: " + source.file + ", length: " + length + ", buffer size: " + chunk + ")");
// Verify that the output length diff is equal to original file
- long endPtr = os.GetFilePointer();
+ long endPtr = os.FilePointer;
long diff = endPtr - startPtr;
if (diff != length)
throw new System.IO.IOException("Difference in the output file offsets " + diff + " does not match the original file length " + length);
Modified: incubator/lucene.net/trunk/src/core/Index/ConcurrentMergeScheduler.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/ConcurrentMergeScheduler.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/ConcurrentMergeScheduler.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/ConcurrentMergeScheduler.cs Mon Mar 12 22:29:26 2012
@@ -15,7 +15,6 @@
* limitations under the License.
*/
-using System;
using System.Collections.Generic;
using Lucene.Net.Support;
using Directory = Lucene.Net.Store.Directory;
@@ -74,26 +73,6 @@ namespace Lucene.Net.Index
}
get { return _maxThreadCount; }
}
-
- /// <summary>Sets the max # simultaneous threads that may be
- /// running. If a merge is necessary yet we already have
- /// this many threads running, the incoming thread (that
- /// is calling add/updateDocument) will block until
- /// a merge thread has completed.
- /// </summary>
- [Obsolete("Use MaxThreadCount property instead.")]
- public virtual void SetMaxThreadCount(int count)
- {
- MaxThreadCount = count;
- }
-
- /// <summary>Get the max # simultaneous threads that may be</summary>
- /// <seealso cref="SetMaxThreadCount" />
- [Obsolete("Use MaxThreadCount property instead.")]
- public virtual int GetMaxThreadCount()
- {
- return MaxThreadCount;
- }
/// <summary>Return the priority that merge threads run at. By
/// default the priority is 1 plus the priority of (ie,
@@ -356,16 +335,19 @@ namespace Lucene.Net.Index
runningMerge = merge;
}
}
-
- public virtual MergePolicy.OneMerge GetRunningMerge()
- {
- lock (this)
- {
- return runningMerge;
- }
- }
-
- public virtual void SetThreadPriority(int pri)
+
+ public virtual MergePolicy.OneMerge RunningMerge
+ {
+ get
+ {
+ lock (this)
+ {
+ return runningMerge;
+ }
+ }
+ }
+
+ public virtual void SetThreadPriority(int pri)
{
try
{
@@ -446,7 +428,7 @@ namespace Lucene.Net.Index
public override System.String ToString()
{
- MergePolicy.OneMerge merge = GetRunningMerge();
+ MergePolicy.OneMerge merge = RunningMerge;
if (merge == null)
merge = startMerge;
return "merge thread: " + merge.SegString(Enclosing_Instance.dir);
Modified: incubator/lucene.net/trunk/src/core/Index/DefaultSkipListWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/DefaultSkipListWriter.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/DefaultSkipListWriter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/DefaultSkipListWriter.cs Mon Mar 12 22:29:26 2012
@@ -70,9 +70,9 @@ namespace Lucene.Net.Index
this.curDoc = doc;
this.curStorePayloads = storePayloads;
this.curPayloadLength = payloadLength;
- this.curFreqPointer = freqOutput.GetFilePointer();
+ this.curFreqPointer = freqOutput.FilePointer;
if (proxOutput != null)
- this.curProxPointer = proxOutput.GetFilePointer();
+ this.curProxPointer = proxOutput.FilePointer;
}
protected internal override void ResetSkip()
@@ -80,9 +80,9 @@ namespace Lucene.Net.Index
base.ResetSkip();
for (int i = 0; i < lastSkipDoc.Length; i++) lastSkipDoc[i] = 0;
for (int i = 0; i < lastSkipPayloadLength.Length; i++) lastSkipPayloadLength[i] = -1; // we don't have to write the first length in the skip list
- for (int i = 0; i < lastSkipFreqPointer.Length; i++) lastSkipFreqPointer[i] = freqOutput.GetFilePointer();
+ for (int i = 0; i < lastSkipFreqPointer.Length; i++) lastSkipFreqPointer[i] = freqOutput.FilePointer;
if (proxOutput != null)
- for (int i = 0; i < lastSkipProxPointer.Length; i++) lastSkipProxPointer[i] = proxOutput.GetFilePointer();
+ for (int i = 0; i < lastSkipProxPointer.Length; i++) lastSkipProxPointer[i] = proxOutput.FilePointer;
}
protected internal override void WriteSkipData(int level, IndexOutput skipBuffer)
Modified: incubator/lucene.net/trunk/src/core/Index/DirectoryReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/DirectoryReader.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/DirectoryReader.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/DirectoryReader.cs Mon Mar 12 22:29:26 2012
@@ -16,6 +16,7 @@
*/
using System;
+using System.Collections.Generic;
using Lucene.Net.Support;
using Document = Lucene.Net.Documents.Document;
using FieldSelector = Lucene.Net.Documents.FieldSelector;
@@ -261,7 +262,7 @@ namespace Lucene.Net.Index
// create a Map SegmentName->SegmentReader
for (int i = 0; i < oldReaders.Length; i++)
{
- segmentReaders[oldReaders[i].GetSegmentName()] = i;
+ segmentReaders[oldReaders[i].SegmentName] = i;
}
}
@@ -289,7 +290,7 @@ namespace Lucene.Net.Index
try
{
SegmentReader newReader;
- if (newReaders[i] == null || infos.Info(i).GetUseCompoundFile() != newReaders[i].GetSegmentInfo().GetUseCompoundFile())
+ if (newReaders[i] == null || infos.Info(i).UseCompoundFile != newReaders[i].SegmentInfo.UseCompoundFile)
{
// We should never see a totally new segment during cloning
@@ -365,14 +366,14 @@ namespace Lucene.Net.Index
byte[] oldBytes = entry.Value;
- byte[] bytes = new byte[MaxDoc()];
+ byte[] bytes = new byte[MaxDoc];
for (int i = 0; i < subReaders.Length; i++)
{
- int oldReaderIndex = segmentReaders[subReaders[i].GetSegmentName()];
+ int oldReaderIndex = segmentReaders[subReaders[i].SegmentName];
// this SegmentReader was not re-opened, we can copy all of its norms
- if (segmentReaders.ContainsKey(subReaders[i].GetSegmentName()) &&
+ if (segmentReaders.ContainsKey(subReaders[i].SegmentName) &&
(oldReaders[oldReaderIndex] == subReaders[i]
|| oldReaders[oldReaderIndex].norms[field] == subReaders[i].norms[field]))
{
@@ -399,9 +400,9 @@ namespace Lucene.Net.Index
for (int i = 0; i < subReaders.Length; i++)
{
starts[i] = maxDoc;
- maxDoc += subReaders[i].MaxDoc(); // compute maxDocs
+ maxDoc += subReaders[i].MaxDoc; // compute maxDocs
- if (subReaders[i].HasDeletions())
+ if (subReaders[i].HasDeletions)
hasDeletions = true;
}
starts[subReaders.Length] = maxDoc;
@@ -523,7 +524,7 @@ namespace Lucene.Net.Index
System.Diagnostics.Debug.Assert(writeLock != null);
// so no other writer holds the write lock, which
// means no changes could have been done to the index:
- System.Diagnostics.Debug.Assert(IsCurrent());
+ System.Diagnostics.Debug.Assert(IsCurrent);
if (openReadOnly)
{
@@ -534,7 +535,7 @@ namespace Lucene.Net.Index
return this;
}
}
- else if (IsCurrent())
+ else if (IsCurrent)
{
if (openReadOnly != readOnly)
{
@@ -549,9 +550,9 @@ namespace Lucene.Net.Index
}
else
{
- if (directory != commit.GetDirectory())
+ if (directory != commit.Directory)
throw new System.IO.IOException("the specified commit does not match the specified Directory");
- if (segmentInfos != null && commit.GetSegmentsFileName().Equals(segmentInfos.GetCurrentSegmentFileName()))
+ if (segmentInfos != null && commit.SegmentsFileName.Equals(segmentInfos.GetCurrentSegmentFileName()))
{
if (readOnly != openReadOnly)
{
@@ -607,14 +608,16 @@ namespace Lucene.Net.Index
}
-
/// <summary>Version number when this IndexReader was opened. </summary>
- public override long GetVersion()
+ public override long Version
{
- EnsureOpen();
- return segmentInfos.GetVersion();
+ get
+ {
+ EnsureOpen();
+ return segmentInfos.Version;
+ }
}
-
+
public override TermFreqVector[] GetTermFreqVectors(int n)
{
EnsureOpen();
@@ -643,38 +646,46 @@ namespace Lucene.Net.Index
int i = ReaderIndex(docNumber); // find segment num
subReaders[i].GetTermFreqVector(docNumber - starts[i], mapper);
}
-
+
/// <summary> Checks is the index is optimized (if it has a single segment and no deletions)</summary>
- /// <returns> <c>true</c> if the index is optimized; <c>false</c> otherwise
- /// </returns>
- public override bool IsOptimized()
+ /// <value> <c>true</c> if the index is optimized; <c>false</c> otherwise </value>
+ public override bool IsOptimized
{
- EnsureOpen();
- return segmentInfos.Count == 1 && !HasDeletions();
+ get
+ {
+ EnsureOpen();
+ return segmentInfos.Count == 1 && !HasDeletions;
+ }
}
-
- public override int NumDocs()
+
+ public override int NumDocs
{
- // Don't call ensureOpen() here (it could affect performance)
- // NOTE: multiple threads may wind up init'ing
- // numDocs... but that's harmless
- if (numDocs == - 1)
+ get
{
- // check cache
- int n = 0; // cache miss--recompute
- for (int i = 0; i < subReaders.Length; i++)
- n += subReaders[i].NumDocs(); // sum from readers
- numDocs = n;
+ // Don't call ensureOpen() here (it could affect performance)
+ // NOTE: multiple threads may wind up init'ing
+ // numDocs... but that's harmless
+ if (numDocs == - 1)
+ {
+ // check cache
+ int n = 0; // cache miss--recompute
+ for (int i = 0; i < subReaders.Length; i++)
+ n += subReaders[i].NumDocs; // sum from readers
+ numDocs = n;
+ }
+ return numDocs;
}
- return numDocs;
}
-
- public override int MaxDoc()
+
+ public override int MaxDoc
{
- // Don't call ensureOpen() here (it could affect performance)
- return maxDoc;
+ get
+ {
+ // Don't call ensureOpen() here (it could affect performance)
+ return maxDoc;
+ }
}
-
+
// inherit javadoc
public override Document Document(int n, FieldSelector fieldSelector)
{
@@ -689,13 +700,16 @@ namespace Lucene.Net.Index
int i = ReaderIndex(n); // find segment num
return subReaders[i].IsDeleted(n - starts[i]); // dispatch to segment reader
}
-
- public override bool HasDeletions()
+
+ public override bool HasDeletions
{
- // Don't call ensureOpen() here (it could affect performance)
- return hasDeletions;
+ get
+ {
+ // Don't call ensureOpen() here (it could affect performance)
+ return hasDeletions;
+ }
}
-
+
protected internal override void DoDelete(int n)
{
numDocs = - 1; // invalidate cache
@@ -768,7 +782,7 @@ namespace Lucene.Net.Index
if (!HasNorms(field))
return null;
- bytes = new byte[MaxDoc()];
+ bytes = new byte[MaxDoc];
for (int i = 0; i < subReaders.Length; i++)
subReaders[i].Norms(field, bytes, starts[i]);
normsCache[field] = bytes; // update cache
@@ -791,7 +805,7 @@ namespace Lucene.Net.Index
else if (bytes != null)
{
// cache hit
- Array.Copy(bytes, 0, result, offset, MaxDoc());
+ Array.Copy(bytes, 0, result, offset, MaxDoc);
}
else
{
@@ -910,12 +924,12 @@ namespace Lucene.Net.Index
{
if (hasChanges)
{
- segmentInfos.SetUserData(commitUserData);
+ segmentInfos.UserData = commitUserData;
// Default deleter (for backwards compatibility) is
// KeepOnlyLastCommitDeleter:
IndexFileDeleter deleter = new IndexFileDeleter(directory, deletionPolicy == null?new KeepOnlyLastCommitDeletionPolicy():deletionPolicy, segmentInfos, null, null, synced);
- segmentInfos.UpdateGeneration(deleter.GetLastSegmentInfos());
+ segmentInfos.UpdateGeneration(deleter.LastSegmentInfos);
// Checkpoint the state we are about to change, in
// case we have to roll back:
@@ -966,7 +980,7 @@ namespace Lucene.Net.Index
deleter.Checkpoint(segmentInfos, true);
deleter.Dispose();
- maxIndexVersion = segmentInfos.GetVersion();
+ maxIndexVersion = segmentInfos.Version;
if (writeLock != null)
{
@@ -995,26 +1009,32 @@ namespace Lucene.Net.Index
}
}
- public override System.Collections.Generic.IDictionary<string, string> GetCommitUserData()
+ public override IDictionary<string, string> CommitUserData
{
- EnsureOpen();
- return segmentInfos.GetUserData();
- }
-
- public override bool IsCurrent()
- {
- EnsureOpen();
- if (writer == null || writer.IsClosed())
+ get
{
- // we loaded SegmentInfos from the directory
- return SegmentInfos.ReadCurrentVersion(directory) == segmentInfos.GetVersion();
+ EnsureOpen();
+ return segmentInfos.UserData;
}
- else
+ }
+
+ public override bool IsCurrent
+ {
+ get
{
- return writer.NrtIsCurrent(segmentInfosStart);
+ EnsureOpen();
+ if (writer == null || writer.IsClosed())
+ {
+ // we loaded SegmentInfos from the directory
+ return SegmentInfos.ReadCurrentVersion(directory) == segmentInfos.Version;
+ }
+ else
+ {
+ return writer.NrtIsCurrent(segmentInfosStart);
+ }
}
}
-
+
protected internal override void DoClose()
{
lock (this)
@@ -1062,12 +1082,12 @@ namespace Lucene.Net.Index
}
return fieldSet;
}
-
- public override IndexReader[] GetSequentialSubReaders()
+
+ public override IndexReader[] SequentialSubReaders
{
- return subReaders;
+ get { return subReaders; }
}
-
+
/// <summary>Returns the directory this index resides in. </summary>
public override Directory Directory()
{
@@ -1076,21 +1096,21 @@ namespace Lucene.Net.Index
// this method on the closed original reader
return directory;
}
-
- public override int GetTermInfosIndexDivisor()
+
+ public override int TermInfosIndexDivisor
{
- return termInfosIndexDivisor;
+ get { return termInfosIndexDivisor; }
}
-
+
/// <summary> Expert: return the IndexCommit that this reader has opened.
/// <p/>
/// <p/><b>WARNING</b>: this API is new and experimental and may suddenly change.<p/>
/// </summary>
- public override IndexCommit GetIndexCommit()
+ public override IndexCommit IndexCommit
{
- return new ReaderCommit(segmentInfos, directory);
+ get { return new ReaderCommit(segmentInfos, directory); }
}
-
+
/// <seealso cref="Lucene.Net.Index.IndexReader.ListCommits">
/// </seealso>
public static new System.Collections.Generic.ICollection<IndexCommit> ListCommits(Directory dir)
@@ -1101,7 +1121,7 @@ namespace Lucene.Net.Index
SegmentInfos latest = new SegmentInfos();
latest.Read(dir);
- long currentGen = latest.GetGeneration();
+ long currentGen = latest.Generation;
commits.Add(new ReaderCommit(latest, dir));
@@ -1154,10 +1174,10 @@ namespace Lucene.Net.Index
{
segmentsFileName = infos.GetCurrentSegmentFileName();
this.dir = dir;
- userData = infos.GetUserData();
+ userData = infos.UserData;
files = infos.Files(dir, true);
- version = infos.GetVersion();
- generation = infos.GetGeneration();
+ version = infos.Version;
+ generation = infos.Generation;
isOptimized = infos.Count == 1 && !infos.Info(0).HasDeletions();
}
public override string ToString()
@@ -1169,40 +1189,40 @@ namespace Lucene.Net.Index
{
return isOptimized;
}
-
- public override System.String GetSegmentsFileName()
+
+ public override string SegmentsFileName
{
- return segmentsFileName;
+ get { return segmentsFileName; }
}
- public override System.Collections.Generic.ICollection<string> GetFileNames()
+ public override ICollection<string> FileNames
{
- return files;
+ get { return files; }
}
-
- public override Directory GetDirectory()
+
+ public override Directory Directory
{
- return dir;
+ get { return dir; }
}
-
- public override long GetVersion()
+
+ public override long Version
{
- return version;
+ get { return version; }
}
-
- public override long GetGeneration()
+
+ public override long Generation
{
- return generation;
+ get { return generation; }
}
-
- public override bool IsDeleted()
+
+ public override bool IsDeleted
{
- return false;
+ get { return false; }
}
- public override System.Collections.Generic.IDictionary<string, string> GetUserData()
+ public override IDictionary<string, string> UserData
{
- return userData;
+ get { return userData; }
}
public override void Delete()
@@ -1532,12 +1552,12 @@ namespace Lucene.Net.Index
{
return ((TermPositions) current).NextPosition();
}
-
- public virtual int GetPayloadLength()
+
+ public virtual int PayloadLength
{
- return ((TermPositions) current).GetPayloadLength();
+ get { return ((TermPositions) current).PayloadLength; }
}
-
+
public virtual byte[] GetPayload(byte[] data, int offset)
{
return ((TermPositions) current).GetPayload(data, offset);
@@ -1545,9 +1565,10 @@ namespace Lucene.Net.Index
// TODO: Remove warning after API has been finalized
- public virtual bool IsPayloadAvailable()
+
+ public virtual bool IsPayloadAvailable
{
- return ((TermPositions) current).IsPayloadAvailable();
+ get { return ((TermPositions) current).IsPayloadAvailable; }
}
}
}
Modified: incubator/lucene.net/trunk/src/core/Index/DocFieldConsumerPerField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/DocFieldConsumerPerField.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/DocFieldConsumerPerField.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/DocFieldConsumerPerField.cs Mon Mar 12 22:29:26 2012
@@ -16,8 +16,7 @@
*/
using System;
-
-using Fieldable = Lucene.Net.Documents.Fieldable;
+using Lucene.Net.Documents;
namespace Lucene.Net.Index
{
@@ -25,7 +24,7 @@ namespace Lucene.Net.Index
abstract class DocFieldConsumerPerField
{
/// <summary>Processes all occurrences of a single field </summary>
- public abstract void ProcessFields(Fieldable[] fields, int count);
+ public abstract void ProcessFields(IFieldable[] fields, int count);
public abstract void Abort();
}
}
\ No newline at end of file
Modified: incubator/lucene.net/trunk/src/core/Index/DocFieldConsumersPerField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/DocFieldConsumersPerField.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/DocFieldConsumersPerField.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/DocFieldConsumersPerField.cs Mon Mar 12 22:29:26 2012
@@ -16,8 +16,7 @@
*/
using System;
-
-using Fieldable = Lucene.Net.Documents.Fieldable;
+using Lucene.Net.Documents;
namespace Lucene.Net.Index
{
@@ -36,7 +35,7 @@ namespace Lucene.Net.Index
this.two = two;
}
- public override void ProcessFields(Fieldable[] fields, int count)
+ public override void ProcessFields(IFieldable[] fields, int count)
{
one.ProcessFields(fields, count);
two.ProcessFields(fields, count);
Modified: incubator/lucene.net/trunk/src/core/Index/DocFieldProcessorPerField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/DocFieldProcessorPerField.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/DocFieldProcessorPerField.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/DocFieldProcessorPerField.cs Mon Mar 12 22:29:26 2012
@@ -16,8 +16,7 @@
*/
using System;
-
-using Fieldable = Lucene.Net.Documents.Fieldable;
+using Lucene.Net.Documents;
namespace Lucene.Net.Index
{
@@ -34,7 +33,7 @@ namespace Lucene.Net.Index
internal int lastGen = - 1;
internal int fieldCount;
- internal Fieldable[] fields = new Fieldable[1];
+ internal IFieldable[] fields = new IFieldable[1];
public DocFieldProcessorPerField(DocFieldProcessorPerThread perThread, FieldInfo fieldInfo)
{
Modified: incubator/lucene.net/trunk/src/core/Index/DocFieldProcessorPerThread.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/DocFieldProcessorPerThread.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/DocFieldProcessorPerThread.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/DocFieldProcessorPerThread.cs Mon Mar 12 22:29:26 2012
@@ -16,9 +16,9 @@
*/
using System;
+using Lucene.Net.Documents;
using Lucene.Net.Support;
using Document = Lucene.Net.Documents.Document;
-using Fieldable = Lucene.Net.Documents.Fieldable;
using ArrayUtil = Lucene.Net.Util.ArrayUtil;
namespace Lucene.Net.Index
@@ -185,7 +185,7 @@ namespace Lucene.Net.Index
int thisFieldGen = fieldGen++;
- System.Collections.Generic.IList<Fieldable> docFields = doc.GetFields();
+ System.Collections.Generic.IList<IFieldable> docFields = doc.GetFields();
int numDocFields = docFields.Count;
// Absorb any new fields first seen in this document.
@@ -195,8 +195,8 @@ namespace Lucene.Net.Index
for (int i = 0; i < numDocFields; i++)
{
- Fieldable field = docFields[i];
- System.String fieldName = field.Name();
+ IFieldable field = docFields[i];
+ System.String fieldName = field.Name;
// Make sure we have a PerField allocated
int hashPos = fieldName.GetHashCode() & hashMask;
@@ -212,9 +212,9 @@ namespace Lucene.Net.Index
// needs to be more "pluggable" such that if I want
// to have a new "thing" my Fields can do, I can
// easily add it
- FieldInfo fi = fieldInfos.Add(fieldName, field.IsIndexed(), field.IsTermVectorStored(),
- field.IsStorePositionWithTermVector(), field.IsStoreOffsetWithTermVector(),
- field.GetOmitNorms(), false, field.GetOmitTermFreqAndPositions());
+ FieldInfo fi = fieldInfos.Add(fieldName, field.IsIndexed, field.IsTermVectorStored,
+ field.IsStorePositionWithTermVector, field.IsStoreOffsetWithTermVector,
+ field.OmitNorms, false, field.OmitTermFreqAndPositions);
fp = new DocFieldProcessorPerField(this, fi);
fp.next = fieldHash[hashPos];
@@ -226,9 +226,9 @@ namespace Lucene.Net.Index
}
else
{
- fp.fieldInfo.Update(field.IsIndexed(), field.IsTermVectorStored(),
- field.IsStorePositionWithTermVector(), field.IsStoreOffsetWithTermVector(),
- field.GetOmitNorms(), false, field.GetOmitTermFreqAndPositions());
+ fp.fieldInfo.Update(field.IsIndexed, field.IsTermVectorStored,
+ field.IsStorePositionWithTermVector, field.IsStoreOffsetWithTermVector,
+ field.OmitNorms, false, field.OmitTermFreqAndPositions);
}
if (thisFieldGen != fp.lastGen)
@@ -251,13 +251,13 @@ namespace Lucene.Net.Index
if (fp.fieldCount == fp.fields.Length)
{
- Fieldable[] newArray = new Fieldable[fp.fields.Length * 2];
+ IFieldable[] newArray = new IFieldable[fp.fields.Length * 2];
Array.Copy(fp.fields, 0, newArray, 0, fp.fieldCount);
fp.fields = newArray;
}
fp.fields[fp.fieldCount++] = field;
- if (field.IsStored())
+ if (field.IsStored)
{
fieldsWriter.AddField(field, fp.fieldInfo);
}
Modified: incubator/lucene.net/trunk/src/core/Index/DocInverterPerField.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/DocInverterPerField.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/DocInverterPerField.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/DocInverterPerField.cs Mon Mar 12 22:29:26 2012
@@ -16,11 +16,10 @@
*/
using System;
-
+using Lucene.Net.Documents;
using TokenStream = Lucene.Net.Analysis.TokenStream;
using OffsetAttribute = Lucene.Net.Analysis.Tokenattributes.OffsetAttribute;
using PositionIncrementAttribute = Lucene.Net.Analysis.Tokenattributes.PositionIncrementAttribute;
-using Fieldable = Lucene.Net.Documents.Fieldable;
namespace Lucene.Net.Index
{
@@ -59,10 +58,10 @@ namespace Lucene.Net.Index
endConsumer.Abort();
}
- public override void ProcessFields(Fieldable[] fields, int count)
+ public override void ProcessFields(IFieldable[] fields, int count)
{
- fieldState.Reset(docState.doc.GetBoost());
+ fieldState.Reset(docState.doc.Boost);
int maxFieldLength = docState.maxFieldLength;
@@ -71,12 +70,12 @@ namespace Lucene.Net.Index
for (int i = 0; i < count; i++)
{
- Fieldable field = fields[i];
+ IFieldable field = fields[i];
// TODO FI: this should be "genericized" to querying
// consumer if it wants to see this particular field
// tokenized.
- if (field.IsIndexed() && doInvert)
+ if (field.IsIndexed && doInvert)
{
bool anyToken;
@@ -84,10 +83,10 @@ namespace Lucene.Net.Index
if (fieldState.length > 0)
fieldState.position += docState.analyzer.GetPositionIncrementGap(fieldInfo.name);
- if (!field.IsTokenized())
+ if (!field.IsTokenized)
{
// un-tokenized field
- System.String stringValue = field.StringValue();
+ System.String stringValue = field.StringValue;
int valueLength = stringValue.Length;
perThread.singleToken.Reinit(stringValue, 0, valueLength);
fieldState.attributeSource = perThread.singleToken;
@@ -113,7 +112,7 @@ namespace Lucene.Net.Index
{
// tokenized field
TokenStream stream;
- TokenStream streamValue = field.TokenStreamValue();
+ TokenStream streamValue = field.TokenStreamValue;
if (streamValue != null)
stream = streamValue;
@@ -122,13 +121,13 @@ namespace Lucene.Net.Index
// the field does not have a TokenStream,
// so we have to obtain one from the analyzer
System.IO.TextReader reader; // find or make Reader
- System.IO.TextReader readerValue = field.ReaderValue();
+ System.IO.TextReader readerValue = field.ReaderValue;
if (readerValue != null)
reader = readerValue;
else
{
- System.String stringValue = field.StringValue();
+ System.String stringValue = field.StringValue;
if (stringValue == null)
throw new System.ArgumentException("field must have either TokenStream, String or Reader value");
perThread.stringReader.Init(stringValue);
@@ -170,7 +169,7 @@ namespace Lucene.Net.Index
if (!hasMoreTokens)
break;
- int posIncr = posIncrAttribute.GetPositionIncrement();
+ int posIncr = posIncrAttribute.PositionIncrement;
fieldState.position += posIncr;
if (fieldState.position > 0)
{
@@ -222,7 +221,7 @@ namespace Lucene.Net.Index
if (anyToken)
fieldState.offset += docState.analyzer.GetOffsetGap(field);
- fieldState.boost *= field.GetBoost();
+ fieldState.boost *= field.Boost;
}
// LUCENE-2387: don't hang onto the field, so GC can
Modified: incubator/lucene.net/trunk/src/core/Index/DocumentsWriter.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/DocumentsWriter.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/DocumentsWriter.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/DocumentsWriter.cs Mon Mar 12 22:29:26 2012
@@ -263,7 +263,7 @@ namespace Lucene.Net.Index
{
if (buffers.Count > 0)
{
- SetLength(0);
+ Length = 0;
// Recycle the blocks
enclosingInstance.perDocAllocator.RecycleByteBlocks(buffers);
@@ -347,7 +347,7 @@ namespace Lucene.Net.Index
InitBlock();
this.directory = directory;
this.writer = writer;
- this.similarity = writer.GetSimilarity();
+ this.similarity = writer.Similarity;
flushedDocCount = writer.MaxDoc();
consumer = indexingChain.GetChain(this);
@@ -434,52 +434,51 @@ namespace Lucene.Net.Index
}
}
}
-
- /// <summary>Set max buffered docs, which means we will flush by
- /// doc count instead of by RAM usage.
- /// </summary>
- internal void SetMaxBufferedDocs(int count)
- {
- maxBufferedDocs = count;
- }
-
- internal int GetMaxBufferedDocs()
- {
- return maxBufferedDocs;
- }
-
- /// <summary>Get current segment name we are writing. </summary>
- internal System.String GetSegment()
- {
- return segment;
- }
-
- /// <summary>Returns how many docs are currently buffered in RAM. </summary>
- internal int GetNumDocsInRAM()
- {
- return numDocsInRAM;
- }
-
- /// <summary>Returns the current doc store segment we are writing
- /// to.
- /// </summary>
- internal System.String GetDocStoreSegment()
- {
- lock (this)
- {
- return docStoreSegment;
- }
- }
-
- /// <summary>Returns the doc offset into the shared doc store for
- /// the current buffered docs.
- /// </summary>
- internal int GetDocStoreOffset()
- {
- return docStoreOffset;
- }
-
- /// <summary>Closes the current open doc stores an returns the doc
+
+ /// <summary>Gets or sets max buffered docs, which means we will flush by
+ /// doc count instead of by RAM usage.
+ /// </summary>
+ internal int MaxBufferedDocs
+ {
+ get { return maxBufferedDocs; }
+ set { maxBufferedDocs = value; }
+ }
+
+ /// <summary>Get current segment name we are writing. </summary>
+ internal string Segment
+ {
+ get { return segment; }
+ }
+
+ /// <summary>Returns how many docs are currently buffered in RAM. </summary>
+ internal int NumDocsInRAM
+ {
+ get { return numDocsInRAM; }
+ }
+
+ /// <summary>Returns the current doc store segment we are writing
+ /// to.
+ /// </summary>
+ internal string DocStoreSegment
+ {
+ get
+ {
+ lock (this)
+ {
+ return docStoreSegment;
+ }
+ }
+ }
+
+ /// <summary>Returns the doc offset into the shared doc store for
+ /// the current buffered docs.
+ /// </summary>
+ internal int DocStoreOffset
+ {
+ get { return docStoreOffset; }
+ }
+
+ /// <summary>Closes the current open doc stores and returns the doc
/// store segment name. This returns null if there are *
/// no buffered documents.
/// </summary>
@@ -733,21 +732,25 @@ namespace Lucene.Net.Index
return true;
}
}
-
- internal bool AnyChanges()
- {
- lock (this)
- {
- return numDocsInRAM != 0 || deletesInRAM.numTerms != 0 || deletesInRAM.docIDs.Count != 0 || deletesInRAM.queries.Count != 0;
- }
- }
-
- private void InitFlushState(bool onlyDocStore)
+
+ internal bool AnyChanges
+ {
+ get
+ {
+ lock (this)
+ {
+ return numDocsInRAM != 0 || deletesInRAM.numTerms != 0 || deletesInRAM.docIDs.Count != 0 ||
+ deletesInRAM.queries.Count != 0;
+ }
+ }
+ }
+
+ private void InitFlushState(bool onlyDocStore)
{
lock (this)
{
InitSegmentName(onlyDocStore);
- flushState = new SegmentWriteState(this, directory, segment, docStoreSegment, numDocsInRAM, numDocsInStore, writer.GetTermIndexInterval());
+ flushState = new SegmentWriteState(this, directory, segment, docStoreSegment, numDocsInRAM, numDocsInStore, writer.TermIndexInterval);
}
}
@@ -1289,7 +1292,7 @@ namespace Lucene.Net.Index
try
{
any |= ApplyDeletes(reader, docStart);
- docStart += reader.MaxDoc();
+ docStart += reader.MaxDoc;
}
finally
{
@@ -1322,7 +1325,7 @@ namespace Lucene.Net.Index
{
lock (this)
{
- int docEnd = docIDStart + reader.MaxDoc();
+ int docEnd = docIDStart + reader.MaxDoc;
bool any = false;
System.Diagnostics.Debug.Assert(CheckDeleteTerm(null));
Modified: incubator/lucene.net/trunk/src/core/Index/FieldInfos.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/FieldInfos.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/FieldInfos.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/FieldInfos.cs Mon Mar 12 22:29:26 2012
@@ -16,9 +16,9 @@
*/
using System;
+using Lucene.Net.Documents;
using Lucene.Net.Support;
using Document = Lucene.Net.Documents.Document;
-using Fieldable = Lucene.Net.Documents.Fieldable;
using Directory = Lucene.Net.Store.Directory;
using IndexInput = Lucene.Net.Store.IndexInput;
using IndexOutput = Lucene.Net.Store.IndexOutput;
@@ -134,12 +134,12 @@ namespace Lucene.Net.Index
{
lock (this)
{
- System.Collections.Generic.IList<Fieldable> fields = doc.GetFields();
- foreach(Fieldable field in fields)
+ System.Collections.Generic.IList<IFieldable> fields = doc.GetFields();
+ foreach(IFieldable field in fields)
{
- Add(field.Name(), field.IsIndexed(), field.IsTermVectorStored(),
- field.IsStorePositionWithTermVector(), field.IsStoreOffsetWithTermVector(), field.GetOmitNorms(),
- false, field.GetOmitTermFreqAndPositions());
+ Add(field.Name, field.IsIndexed, field.IsTermVectorStored,
+ field.IsStorePositionWithTermVector, field.IsStoreOffsetWithTermVector, field.OmitNorms,
+ false, field.OmitTermFreqAndPositions);
}
}
}
@@ -475,9 +475,9 @@ namespace Lucene.Net.Index
AddInternal(name, isIndexed, storeTermVector, storePositionsWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
}
- if (input.GetFilePointer() != input.Length())
+ if (input.FilePointer != input.Length())
{
- throw new CorruptIndexException("did not read all bytes from file \"" + fileName + "\": read " + input.GetFilePointer() + " vs size " + input.Length());
+ throw new CorruptIndexException("did not read all bytes from file \"" + fileName + "\": read " + input.FilePointer + " vs size " + input.Length());
}
}
}
Modified: incubator/lucene.net/trunk/src/core/Index/FieldInvertState.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/FieldInvertState.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/FieldInvertState.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/FieldInvertState.cs Mon Mar 12 22:29:26 2012
@@ -63,53 +63,48 @@ namespace Lucene.Net.Index
boost = docBoost;
attributeSource = null;
}
-
- /// <summary> Get the last processed term position.</summary>
- /// <returns> the position
- /// </returns>
- public int GetPosition()
- {
- return position;
- }
-
- /// <summary> Get total number of terms in this field.</summary>
- /// <returns> the length
- /// </returns>
- public int GetLength()
- {
- return length;
- }
-
- /// <summary> Get the number of terms with <c>positionIncrement == 0</c>.</summary>
- /// <returns> the numOverlap
- /// </returns>
- public int GetNumOverlap()
- {
- return numOverlap;
- }
-
- /// <summary> Get end offset of the last processed term.</summary>
- /// <returns> the offset
- /// </returns>
- public int GetOffset()
- {
- return offset;
- }
-
- /// <summary> Get boost value. This is the cumulative product of
- /// document boost and field boost for all field instances
- /// sharing the same field name.
- /// </summary>
- /// <returns> the boost
- /// </returns>
- public float GetBoost()
- {
- return boost;
- }
-
- public AttributeSource GetAttributeSource()
- {
- return attributeSource;
- }
+
+ /// <summary> Get the last processed term position.</summary>
+ /// <value> the position </value>
+ public int Position
+ {
+ get { return position; }
+ }
+
+ /// <summary> Get total number of terms in this field.</summary>
+ /// <value> the length </value>
+ public int Length
+ {
+ get { return length; }
+ }
+
+ /// <summary> Get the number of terms with <c>positionIncrement == 0</c>.</summary>
+ /// <value> the numOverlap </value>
+ public int NumOverlap
+ {
+ get { return numOverlap; }
+ }
+
+ /// <summary> Get end offset of the last processed term.</summary>
+ /// <value> the offset </value>
+ public int Offset
+ {
+ get { return offset; }
+ }
+
+ /// <summary> Get boost value. This is the cumulative product of
+ /// document boost and field boost for all field instances
+ /// sharing the same field name.
+ /// </summary>
+ /// <value> the boost </value>
+ public float Boost
+ {
+ get { return boost; }
+ }
+
+ public AttributeSource AttributeSource
+ {
+ get { return attributeSource; }
+ }
}
}
\ No newline at end of file
Modified: incubator/lucene.net/trunk/src/core/Index/FieldSortedTermVectorMapper.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/FieldSortedTermVectorMapper.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/FieldSortedTermVectorMapper.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/FieldSortedTermVectorMapper.cs Mon Mar 12 22:29:26 2012
@@ -60,21 +60,20 @@ namespace Lucene.Net.Index
currentField = field;
fieldToTerms[field] = currentSet;
}
-
- /// <summary> Get the mapping between fields and terms, sorted by the comparator
- ///
- /// </summary>
- /// <returns> A map between field names and <see cref="System.Collections.Generic.SortedDictionary{Object,Object}" />s per field. SortedSet entries are <see cref="TermVectorEntry" />
- /// </returns>
- public virtual IDictionary<string, SortedSet<TermVectorEntry>> GetFieldToTerms()
- {
- return fieldToTerms;
- }
+ /// <summary> Get the mapping between fields and terms, sorted by the comparator
+ ///
+ /// </summary>
+ /// <value> A map between field names and <see cref="System.Collections.Generic.SortedDictionary{Object,Object}" />s per field. SortedSet entries are <see cref="TermVectorEntry" /> </value>
+ public virtual IDictionary<string, SortedSet<TermVectorEntry>> FieldToTerms
+ {
+ get { return fieldToTerms; }
+ }
- public virtual IComparer<TermVectorEntry> GetComparator()
- {
- return comparator;
- }
+
+ public virtual IComparer<TermVectorEntry> Comparator
+ {
+ get { return comparator; }
+ }
}
}
\ No newline at end of file
Modified: incubator/lucene.net/trunk/src/core/Index/FieldsReader.cs
URL: http://svn.apache.org/viewvc/incubator/lucene.net/trunk/src/core/Index/FieldsReader.cs?rev=1299911&r1=1299910&r2=1299911&view=diff
==============================================================================
--- incubator/lucene.net/trunk/src/core/Index/FieldsReader.cs (original)
+++ incubator/lucene.net/trunk/src/core/Index/FieldsReader.cs Mon Mar 12 22:29:26 2012
@@ -16,6 +16,7 @@
*/
using System;
+using System.IO;
using Lucene.Net.Support;
using Lucene.Net.Util;
using TokenStream = Lucene.Net.Analysis.TokenStream;
@@ -323,7 +324,7 @@ namespace Lucene.Net.Index
{
if (format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES || binary || compressed)
{
- fieldsStream.Seek(fieldsStream.GetFilePointer() + toRead);
+ fieldsStream.Seek(fieldsStream.FilePointer + toRead);
}
else
{
@@ -337,7 +338,7 @@ namespace Lucene.Net.Index
if (binary)
{
int toRead = fieldsStream.ReadVInt();
- long pointer = fieldsStream.GetFilePointer();
+ long pointer = fieldsStream.FilePointer;
//was: doc.add(new Fieldable(fi.name, b, Fieldable.Store.YES));
doc.Add(new LazyField(this, fi.name, Field.Store.YES, toRead, pointer, binary, compressed));
@@ -354,17 +355,17 @@ namespace Lucene.Net.Index
if (compressed)
{
int toRead = fieldsStream.ReadVInt();
- long pointer = fieldsStream.GetFilePointer();
+ long pointer = fieldsStream.FilePointer;
f = new LazyField(this, fi.name, store, toRead, pointer, binary, compressed);
//skip over the part that we aren't loading
fieldsStream.Seek(pointer + toRead);
- f.SetOmitNorms(fi.omitNorms);
- f.SetOmitTermFreqAndPositions(fi.omitTermFreqAndPositions);
+ f.OmitNorms = fi.omitNorms;
+ f.OmitTermFreqAndPositions = fi.omitTermFreqAndPositions;
}
else
{
int length = fieldsStream.ReadVInt();
- long pointer = fieldsStream.GetFilePointer();
+ long pointer = fieldsStream.FilePointer;
//Skip ahead of where we are by the length of what is stored
if (format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
{
@@ -375,8 +376,8 @@ namespace Lucene.Net.Index
fieldsStream.SkipChars(length);
}
f = new LazyField(this, fi.name, store, index, termVector, length, pointer, binary, compressed);
- f.SetOmitNorms(fi.omitNorms);
- f.SetOmitTermFreqAndPositions(fi.omitTermFreqAndPositions);
+ f.OmitNorms = fi.omitNorms;
+ f.OmitTermFreqAndPositions = fi.omitTermFreqAndPositions;
}
doc.Add(f);
@@ -414,14 +415,14 @@ namespace Lucene.Net.Index
byte[] b = new byte[toRead];
fieldsStream.ReadBytes(b, 0, b.Length);
f = new Field(fi.name, false, System.Text.Encoding.GetEncoding("UTF-8").GetString(Uncompress(b)), store, index, termVector);
- f.SetOmitTermFreqAndPositions(fi.omitTermFreqAndPositions);
- f.SetOmitNorms(fi.omitNorms);
+ f.OmitTermFreqAndPositions = fi.omitTermFreqAndPositions;
+ f.OmitNorms = fi.omitNorms;
}
else
{
f = new Field(fi.name, false, fieldsStream.ReadString(), store, index, termVector);
- f.SetOmitTermFreqAndPositions(fi.omitTermFreqAndPositions);
- f.SetOmitNorms(fi.omitNorms);
+ f.OmitTermFreqAndPositions = fi.omitTermFreqAndPositions;
+ f.OmitNorms = fi.omitNorms;
}
doc.Add(f);
@@ -447,7 +448,7 @@ namespace Lucene.Net.Index
/// loaded.
/// </summary>
[Serializable]
- private class LazyField : AbstractField, Fieldable
+ private class LazyField : AbstractField, IFieldable
{
private void InitBlock(FieldsReader enclosingInstance)
{
@@ -501,77 +502,87 @@ namespace Lucene.Net.Index
}
return localFieldsStream;
}
-
- /// <summary>The value of the field as a Reader, or null. If null, the String value,
- /// binary value, or TokenStream value is used. Exactly one of StringValue(),
- /// ReaderValue(), GetBinaryValue(), and TokenStreamValue() must be set.
- /// </summary>
- public override System.IO.TextReader ReaderValue()
- {
- Enclosing_Instance.EnsureOpen();
- return null;
- }
-
- /// <summary>The value of the field as a TokenStream, or null. If null, the Reader value,
- /// String value, or binary value is used. Exactly one of StringValue(),
- /// ReaderValue(), GetBinaryValue(), and TokenStreamValue() must be set.
- /// </summary>
- public override TokenStream TokenStreamValue()
- {
- Enclosing_Instance.EnsureOpen();
- return null;
- }
-
- /// <summary>The value of the field as a String, or null. If null, the Reader value,
- /// binary value, or TokenStream value is used. Exactly one of StringValue(),
- /// ReaderValue(), GetBinaryValue(), and TokenStreamValue() must be set.
- /// </summary>
- public override System.String StringValue()
- {
- Enclosing_Instance.EnsureOpen();
- if (isBinary)
- return null;
- else
- {
- if (fieldsData == null)
- {
- IndexInput localFieldsStream = GetFieldStream();
- try
- {
- localFieldsStream.Seek(pointer);
- if (isCompressed)
- {
- byte[] b = new byte[toRead];
- localFieldsStream.ReadBytes(b, 0, b.Length);
- fieldsData = System.Text.Encoding.GetEncoding("UTF-8").GetString(Enclosing_Instance.Uncompress(b));
- }
- else
- {
- if (Enclosing_Instance.format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
- {
- byte[] bytes = new byte[toRead];
- localFieldsStream.ReadBytes(bytes, 0, toRead);
- fieldsData = System.Text.Encoding.GetEncoding("UTF-8").GetString(bytes);
- }
- else
- {
- //read in chars b/c we already know the length we need to read
- char[] chars = new char[toRead];
- localFieldsStream.ReadChars(chars, 0, toRead);
- fieldsData = new System.String(chars);
- }
- }
- }
- catch (System.IO.IOException e)
- {
- throw new FieldReaderException(e);
- }
- }
- return (System.String) fieldsData;
- }
- }
-
- public long GetPointer()
+
+ /// <summary>The value of the field as a Reader, or null. If null, the String value,
+ /// binary value, or TokenStream value is used. Exactly one of StringValue(),
+ /// ReaderValue(), GetBinaryValue(), and TokenStreamValue() must be set.
+ /// </summary>
+ public override TextReader ReaderValue
+ {
+ get
+ {
+ Enclosing_Instance.EnsureOpen();
+ return null;
+ }
+ }
+
+ /// <summary>The value of the field as a TokenStream, or null. If null, the Reader value,
+ /// String value, or binary value is used. Exactly one of StringValue(),
+ /// ReaderValue(), GetBinaryValue(), and TokenStreamValue() must be set.
+ /// </summary>
+ public override TokenStream TokenStreamValue
+ {
+ get
+ {
+ Enclosing_Instance.EnsureOpen();
+ return null;
+ }
+ }
+
+ /// <summary>The value of the field as a String, or null. If null, the Reader value,
+ /// binary value, or TokenStream value is used. Exactly one of StringValue(),
+ /// ReaderValue(), GetBinaryValue(), and TokenStreamValue() must be set.
+ /// </summary>
+ public override string StringValue
+ {
+ get
+ {
+ Enclosing_Instance.EnsureOpen();
+ if (isBinary)
+ return null;
+ else
+ {
+ if (fieldsData == null)
+ {
+ IndexInput localFieldsStream = GetFieldStream();
+ try
+ {
+ localFieldsStream.Seek(pointer);
+ if (isCompressed)
+ {
+ byte[] b = new byte[toRead];
+ localFieldsStream.ReadBytes(b, 0, b.Length);
+ fieldsData =
+ System.Text.Encoding.GetEncoding("UTF-8").GetString(Enclosing_Instance.Uncompress(b));
+ }
+ else
+ {
+ if (Enclosing_Instance.format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
+ {
+ byte[] bytes = new byte[toRead];
+ localFieldsStream.ReadBytes(bytes, 0, toRead);
+ fieldsData = System.Text.Encoding.GetEncoding("UTF-8").GetString(bytes);
+ }
+ else
+ {
+ //read in chars b/c we already know the length we need to read
+ char[] chars = new char[toRead];
+ localFieldsStream.ReadChars(chars, 0, toRead);
+ fieldsData = new System.String(chars);
+ }
+ }
+ }
+ catch (System.IO.IOException e)
+ {
+ throw new FieldReaderException(e);
+ }
+ }
+ return (System.String) fieldsData;
+ }
+ }
+ }
+
+ public long GetPointer()
{
Enclosing_Instance.EnsureOpen();
return pointer;