You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucenenet.apache.org by ni...@apache.org on 2017/06/06 00:12:08 UTC
[35/48] lucenenet git commit: Lucene.Net.Codecs: Fixed XML
documentation comments (excluding sub-namespaces)
Lucene.Net.Codecs: Fixed XML documentation comments (excluding sub-namespaces)
Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/a08ae945
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/a08ae945
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/a08ae945
Branch: refs/heads/master
Commit: a08ae9451435e960821b6a45c9134c40ce40cc3c
Parents: d4e4498
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Mon Jun 5 05:12:52 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Tue Jun 6 06:58:39 2017 +0700
----------------------------------------------------------------------
CONTRIBUTING.md | 20 +-
src/Lucene.Net/Codecs/BlockTermState.cs | 14 +-
src/Lucene.Net/Codecs/BlockTreeTermsReader.cs | 92 ++--
src/Lucene.Net/Codecs/BlockTreeTermsWriter.cs | 172 +++---
src/Lucene.Net/Codecs/Codec.cs | 28 +-
src/Lucene.Net/Codecs/CodecUtil.cs | 124 +++--
src/Lucene.Net/Codecs/DocValuesConsumer.cs | 525 ++-----------------
src/Lucene.Net/Codecs/DocValuesFormat.cs | 10 +-
src/Lucene.Net/Codecs/DocValuesProducer.cs | 49 +-
src/Lucene.Net/Codecs/FieldInfosFormat.cs | 13 +-
src/Lucene.Net/Codecs/FieldInfosReader.cs | 9 +-
src/Lucene.Net/Codecs/FieldInfosWriter.cs | 9 +-
src/Lucene.Net/Codecs/FieldsConsumer.cs | 35 +-
src/Lucene.Net/Codecs/FieldsProducer.cs | 18 +-
src/Lucene.Net/Codecs/FilterCodec.cs | 38 +-
src/Lucene.Net/Codecs/LiveDocsFormat.cs | 13 +-
.../Codecs/Lucene41/Lucene41PostingsFormat.cs | 4 +-
.../Codecs/MappingMultiDocsAndPositionsEnum.cs | 8 +-
src/Lucene.Net/Codecs/MappingMultiDocsEnum.cs | 8 +-
.../Codecs/MultiLevelSkipListReader.cs | 51 +-
.../Codecs/MultiLevelSkipListWriter.cs | 38 +-
src/Lucene.Net/Codecs/NormsFormat.cs | 14 +-
src/Lucene.Net/Codecs/PostingsBaseFormat.cs | 16 +-
src/Lucene.Net/Codecs/PostingsConsumer.cs | 48 +-
src/Lucene.Net/Codecs/PostingsFormat.cs | 16 +-
src/Lucene.Net/Codecs/PostingsReaderBase.cs | 44 +-
src/Lucene.Net/Codecs/PostingsWriterBase.cs | 55 +-
src/Lucene.Net/Codecs/SegmentInfoFormat.cs | 18 +-
src/Lucene.Net/Codecs/SegmentInfoReader.cs | 17 +-
src/Lucene.Net/Codecs/SegmentInfoWriter.cs | 10 +-
src/Lucene.Net/Codecs/StoredFieldsFormat.cs | 12 +-
src/Lucene.Net/Codecs/StoredFieldsReader.cs | 26 +-
src/Lucene.Net/Codecs/StoredFieldsWriter.cs | 67 +--
src/Lucene.Net/Codecs/TermStats.cs | 8 +-
src/Lucene.Net/Codecs/TermVectorsFormat.cs | 12 +-
src/Lucene.Net/Codecs/TermVectorsReader.cs | 25 +-
src/Lucene.Net/Codecs/TermVectorsWriter.cs | 109 ++--
src/Lucene.Net/Codecs/TermsConsumer.cs | 48 +-
38 files changed, 745 insertions(+), 1078 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ccfa22a..dafe5a8 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -51,9 +51,25 @@ helpers to help with that, see for examples see our [Java style methods to avoid
### Documentation Comments == up for grabs:
1. Lucene.Net.Core (project)
- 1. Codecs (namespace)
- 2. Util.Packed (namespace)
+ 1. Codecs.Compressing (namespace)
+ 2. Codecs.Lucene3x (namespace)
+ 3. Codecs.Lucene40 (namespace)
+ 4. Codecs.Lucene41 (namespace)
+ 5. Codecs.Lucene42 (namespace)
+ 6. Codecs.Lucene45 (namespace)
+ 7. Codecs.Lucene46 (namespace)
+ 8. Codecs.PerField (namespace)
+ 9. Util.Packed (namespace)
2. Lucene.Net.Codecs (project)
+ 1. Appending (namespace)
+ 2. BlockTerms (namespace)
+ 3. Bloom (namespace)
+ 4. DiskDV (namespace)
+ 5. IntBlock (namespace)
+ 6. Memory (namespace)
+ 7. Pulsing (namespace)
+ 8. Sep (namespace)
+ 9. SimpleText (namespace)
See [Documenting Lucene.Net](https://cwiki.apache.org/confluence/display/LUCENENET/Documenting+Lucene.Net) for instructions.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/BlockTermState.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/BlockTermState.cs b/src/Lucene.Net/Codecs/BlockTermState.cs
index acd8723..e799921 100644
--- a/src/Lucene.Net/Codecs/BlockTermState.cs
+++ b/src/Lucene.Net/Codecs/BlockTermState.cs
@@ -24,32 +24,32 @@ namespace Lucene.Net.Codecs
using TermState = Lucene.Net.Index.TermState;
/// <summary>
- /// Holds all state required for <seealso cref="PostingsReaderBase"/>
- /// to produce a <seealso cref="DocsEnum"/> without re-seeking the
+ /// Holds all state required for <see cref="PostingsReaderBase"/>
+ /// to produce a <see cref="DocsEnum"/> without re-seeking the
/// terms dict.
/// </summary>
public class BlockTermState : OrdTermState
{
/// <summary>
- /// how many docs have this term </summary>
+ /// How many docs have this term? </summary>
public int DocFreq { get; set; }
/// <summary>
- /// total number of occurrences of this term </summary>
+ /// Total number of occurrences of this term. </summary>
public long TotalTermFreq { get; set; }
/// <summary>
- /// the term's ord in the current block </summary>
+ /// The term's ord in the current block. </summary>
public int TermBlockOrd { get; set; }
/// <summary>
- /// fp into the terms dict primary file (_X.tim) that holds this term </summary>
+ /// File pointer into the terms dict primary file (_X.tim) that holds this term. </summary>
// TODO: update BTR to nuke this
public long BlockFilePointer { get; set; }
/// <summary>
/// Sole constructor. (For invocation by subclass
- /// constructors, typically implicit.)
+ /// constructors, typically implicit.)
/// </summary>
protected internal BlockTermState()
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/BlockTreeTermsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/BlockTreeTermsReader.cs b/src/Lucene.Net/Codecs/BlockTreeTermsReader.cs
index fedc3b0..b4a73bf 100644
--- a/src/Lucene.Net/Codecs/BlockTreeTermsReader.cs
+++ b/src/Lucene.Net/Codecs/BlockTreeTermsReader.cs
@@ -54,32 +54,32 @@ namespace Lucene.Net.Codecs
/// <summary>
/// A block-based terms index and dictionary that assigns
- /// terms to variable length blocks according to how they
- /// share prefixes. The terms index is a prefix trie
- /// whose leaves are term blocks. The advantage of this
- /// approach is that seekExact is often able to
- /// determine a term cannot exist without doing any IO, and
- /// intersection with Automata is very fast. Note that this
- /// terms dictionary has it's own fixed terms index (ie, it
- /// does not support a pluggable terms index
- /// implementation).
+ /// terms to variable length blocks according to how they
+ /// share prefixes. The terms index is a prefix trie
+ /// whose leaves are term blocks. The advantage of this
+ /// approach is that SeekExact() is often able to
+ /// determine a term cannot exist without doing any IO, and
+ /// intersection with Automata is very fast. Note that this
+ /// terms dictionary has its own fixed terms index (ie, it
+ /// does not support a pluggable terms index
+ /// implementation).
///
- /// <p><b>NOTE</b>: this terms dictionary does not support
- /// index divisor when opening an IndexReader. Instead, you
- /// can change the min/maxItemsPerBlock during indexing.</p>
+ /// <para><b>NOTE</b>: this terms dictionary does not support
+ /// index divisor when opening an IndexReader. Instead, you
+ /// can change the min/maxItemsPerBlock during indexing.</para>
///
- /// <p>The data structure used by this implementation is very
- /// similar to a burst trie
- /// (http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.18.3499),
- /// but with added logic to break up too-large blocks of all
- /// terms sharing a given prefix into smaller ones.</p>
+ /// <para>The data structure used by this implementation is very
+ /// similar to a burst trie
+ /// (http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.18.3499),
+ /// but with added logic to break up too-large blocks of all
+ /// terms sharing a given prefix into smaller ones.</para>
///
- /// <p>Use <seealso cref="Lucene.Net.Index.CheckIndex"/> with the <code>-verbose</code>
- /// option to see summary statistics on the blocks in the
- /// dictionary.
- ///
- /// See <seealso cref="BlockTreeTermsWriter"/>.
+ /// <para>Use <see cref="Lucene.Net.Index.CheckIndex"/> with the <c>-verbose</c>
+ /// option to see summary statistics on the blocks in the
+ /// dictionary.</para>
///
+ /// See <see cref="BlockTreeTermsWriter"/>.
+ /// <para/>
/// @lucene.experimental
/// </summary>
public class BlockTreeTermsReader : FieldsProducer
@@ -242,7 +242,7 @@ namespace Lucene.Net.Codecs
}
/// <summary>
- /// Seek {@code input} to the directory offset. </summary>
+ /// Seek <paramref name="input"/> to the directory offset. </summary>
protected internal virtual void SeekDir(IndexInput input, long dirOffset)
{
if (version >= BlockTreeTermsWriter.VERSION_CHECKSUM)
@@ -263,6 +263,9 @@ namespace Lucene.Net.Codecs
// return "0x" + Integer.toHexString(v);
// }
+ /// <summary>
+ /// Disposes all resources used by this object.
+ /// </summary>
protected override void Dispose(bool disposing)
{
if (disposing)
@@ -323,7 +326,7 @@ namespace Lucene.Net.Codecs
/// <summary>
/// BlockTree statistics for a single field
- /// returned by <seealso cref="FieldReader#computeStats()"/>.
+ /// returned by <see cref="FieldReader.ComputeStats()"/>.
/// </summary>
public class Stats
{
@@ -353,7 +356,7 @@ namespace Lucene.Net.Codecs
/// <summary>
/// The number of floor blocks (meta-blocks larger than the
- /// allowed {@code maxItemsPerBlock}) in the terms file.
+ /// allowed <c>maxItemsPerBlock</c>) in the terms file.
/// </summary>
public int FloorBlockCount { get; set; }
@@ -403,14 +406,14 @@ namespace Lucene.Net.Codecs
/// <summary>
/// Total number of bytes used to store term stats (not
- /// including what the <seealso cref="PostingsBaseFormat"/>
- /// stores.
+ /// including what the <see cref="PostingsBaseFormat"/>
+ /// stores.
/// </summary>
public long TotalBlockStatsBytes { get; set; }
/// <summary>
- /// Total bytes stored by the <seealso cref="PostingsBaseFormat"/>,
- /// plus the other few vInts stored in the frame.
+ /// Total bytes stored by the <see cref="PostingsBaseFormat"/>,
+ /// plus the other few vInts stored in the frame.
/// </summary>
public long TotalBlockOtherBytes { get; set; }
@@ -538,7 +541,7 @@ namespace Lucene.Net.Codecs
internal BytesRef NO_OUTPUT;
/// <summary>
- /// BlockTree's implementation of <seealso cref="GetTerms"/>. </summary>
+ /// BlockTree's implementation of <see cref="GetTerms(string)"/>. </summary>
public sealed class FieldReader : Terms
{
private readonly BlockTreeTermsReader outerInstance;
@@ -2808,16 +2811,17 @@ namespace Lucene.Net.Codecs
LoadBlock();
}
- /* Does initial decode of next block of terms; this
- doesn't actually decode the docFreq, totalTermFreq,
- postings details (frq/prx offset, etc.) metadata;
- it just loads them as byte[] blobs which are then
- decoded on-demand if the metadata is ever requested
- for any term in this block. this enables terms-only
- intensive consumes (eg certain MTQs, respelling) to
- not pay the price of decoding metadata they won't
- use. */
-
+ /// <summary>
+ /// Does initial decode of next block of terms; this
+ /// doesn't actually decode the docFreq, totalTermFreq,
+ /// postings details (frq/prx offset, etc.) metadata;
+ /// it just loads them as byte[] blobs which are then
+ /// decoded on-demand if the metadata is ever requested
+ /// for any term in this block. this enables terms-only
+ /// intensive consumes (eg certain MTQs, respelling) to
+ /// not pay the price of decoding metadata they won't
+ /// use.
+ /// </summary>
internal void LoadBlock()
{
// Clone the IndexInput lazily, so that consumers
@@ -3144,9 +3148,11 @@ namespace Lucene.Net.Codecs
return true;
}
- // Scans to sub-block that has this target fp; only
- // called by next(); NOTE: does not set
- // startBytePos/suffix as a side effect
+ /// <summary>
+ /// Scans to sub-block that has this target fp; only
+ /// called by Next(); NOTE: does not set
+ /// startBytePos/suffix as a side effect
+ /// </summary>
public void ScanToSubBlock(long subFP)
{
Debug.Assert(!isLeafBlock);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/BlockTreeTermsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/BlockTreeTermsWriter.cs b/src/Lucene.Net/Codecs/BlockTreeTermsWriter.cs
index 4b5df96..a0aafe4 100644
--- a/src/Lucene.Net/Codecs/BlockTreeTermsWriter.cs
+++ b/src/Lucene.Net/Codecs/BlockTreeTermsWriter.cs
@@ -71,120 +71,121 @@ namespace Lucene.Net.Codecs
/// <summary>
/// Block-based terms index and dictionary writer.
- /// <p>
+ /// <para/>
/// Writes terms dict and index, block-encoding (column
/// stride) each term's metadata for each set of terms
/// between two index terms.
- /// <p>
+ /// <para/>
/// Files:
- /// <ul>
- /// <li><tt>.tim</tt>: <a href="#Termdictionary">Term Dictionary</a></li>
- /// <li><tt>.tip</tt>: <a href="#Termindex">Term Index</a></li>
- /// </ul>
- /// <p>
+ /// <list type="bullet">
+ /// <item><term>.tim:</term> <description><a href="#Termdictionary">Term Dictionary</a></description></item>
+ /// <item><term>.tip:</term> <description><a href="#Termindex">Term Index</a></description></item>
+ /// </list>
+ /// <para/>
/// <a name="Termdictionary" id="Termdictionary"></a>
/// <h3>Term Dictionary</h3>
///
- /// <p>The .tim file contains the list of terms in each
+ /// <para>The .tim file contains the list of terms in each
/// field along with per-term statistics (such as docfreq)
/// and per-term metadata (typically pointers to the postings list
/// for that term in the inverted index).
- /// </p>
+ /// </para>
///
- /// <p>The .tim is arranged in blocks: with blocks containing
+ /// <para>The .tim is arranged in blocks: with blocks containing
/// a variable number of entries (by default 25-48), where
/// each entry is either a term or a reference to a
- /// sub-block.</p>
+ /// sub-block.</para>
///
- /// <p>NOTE: The term dictionary can plug into different postings implementations:
+ /// <para>NOTE: The term dictionary can plug into different postings implementations:
/// the postings writer/reader are actually responsible for encoding
- /// and decoding the Postings Metadata and Term Metadata sections.</p>
+ /// and decoding the Postings Metadata and Term Metadata sections.</para>
///
- /// <ul>
- /// <li>TermsDict (.tim) --> Header, <i>PostingsHeader</i>, NodeBlock<sup>NumBlocks</sup>,
- /// FieldSummary, DirOffset, Footer</li>
- /// <li>NodeBlock --> (OuterNode | InnerNode)</li>
- /// <li>OuterNode --> EntryCount, SuffixLength, Byte<sup>SuffixLength</sup>, StatsLength, < TermStats ><sup>EntryCount</sup>, MetaLength, <<i>TermMetadata</i>><sup>EntryCount</sup></li>
- /// <li>InnerNode --> EntryCount, SuffixLength[,Sub?], Byte<sup>SuffixLength</sup>, StatsLength, < TermStats ? ><sup>EntryCount</sup>, MetaLength, <<i>TermMetadata ? </i>><sup>EntryCount</sup></li>
- /// <li>TermStats --> DocFreq, TotalTermFreq </li>
- /// <li>FieldSummary --> NumFields, <FieldNumber, NumTerms, RootCodeLength, Byte<sup>RootCodeLength</sup>,
- /// SumTotalTermFreq?, SumDocFreq, DocCount><sup>NumFields</sup></li>
- /// <li>Header --> <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
- /// <li>DirOffset --> <seealso cref="DataOutput#writeLong Uint64"/></li>
- /// <li>EntryCount,SuffixLength,StatsLength,DocFreq,MetaLength,NumFields,
- /// FieldNumber,RootCodeLength,DocCount --> <seealso cref="DataOutput#writeVInt VInt"/></li>
- /// <li>TotalTermFreq,NumTerms,SumTotalTermFreq,SumDocFreq -->
- /// <seealso cref="DataOutput#writeVLong VLong"/></li>
- /// <li>Footer --> <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
- /// </ul>
- /// <p>Notes:</p>
- /// <ul>
- /// <li>Header is a <seealso cref="CodecUtil#writeHeader CodecHeader"/> storing the version information
- /// for the BlockTree implementation.</li>
- /// <li>DirOffset is a pointer to the FieldSummary section.</li>
- /// <li>DocFreq is the count of documents which contain the term.</li>
- /// <li>TotalTermFreq is the total number of occurrences of the term. this is encoded
- /// as the difference between the total number of occurrences and the DocFreq.</li>
- /// <li>FieldNumber is the fields number from <seealso cref="fieldInfos"/>. (.fnm)</li>
- /// <li>NumTerms is the number of unique terms for the field.</li>
- /// <li>RootCode points to the root block for the field.</li>
- /// <li>SumDocFreq is the total number of postings, the number of term-document pairs across
- /// the entire field.</li>
- /// <li>DocCount is the number of documents that have at least one posting for this field.</li>
- /// <li>PostingsHeader and TermMetadata are plugged into by the specific postings implementation:
+ /// <list type="bullet">
+ /// <item><description>TermsDict (.tim) --> Header, <i>PostingsHeader</i>, NodeBlock<sup>NumBlocks</sup>,
+ /// FieldSummary, DirOffset, Footer</description></item>
+ /// <item><description>NodeBlock --> (OuterNode | InnerNode)</description></item>
+ /// <item><description>OuterNode --> EntryCount, SuffixLength, Byte<sup>SuffixLength</sup>, StatsLength, < TermStats ><sup>EntryCount</sup>, MetaLength, <<i>TermMetadata</i>><sup>EntryCount</sup></description></item>
+ /// <item><description>InnerNode --> EntryCount, SuffixLength[,Sub?], Byte<sup>SuffixLength</sup>, StatsLength, < TermStats ? ><sup>EntryCount</sup>, MetaLength, <<i>TermMetadata ? </i>><sup>EntryCount</sup></description></item>
+ /// <item><description>TermStats --> DocFreq, TotalTermFreq </description></item>
+ /// <item><description>FieldSummary --> NumFields, <FieldNumber, NumTerms, RootCodeLength, Byte<sup>RootCodeLength</sup>,
+ /// SumTotalTermFreq?, SumDocFreq, DocCount><sup>NumFields</sup></description></item>
+ /// <item><description>Header --> CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>)</description></item>
+ /// <item><description>DirOffset --> Uint64 (<see cref="Store.DataOutput.WriteInt64(long)"/>)</description></item>
+ /// <item><description>EntryCount,SuffixLength,StatsLength,DocFreq,MetaLength,NumFields,
+ /// FieldNumber,RootCodeLength,DocCount --> VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>)</description></item>
+ /// <item><description>TotalTermFreq,NumTerms,SumTotalTermFreq,SumDocFreq -->
+ /// VLong (<see cref="Store.DataOutput.WriteVInt64(long)"/>)</description></item>
+ /// <item><description>Footer --> CodecFooter (<see cref="CodecUtil.WriteFooter(IndexOutput)"/>)</description></item>
+ /// </list>
+ /// <para>Notes:</para>
+ /// <list type="bullet">
+ /// <item><description>Header is a CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) storing the version information
+ /// for the BlockTree implementation.</description></item>
+ /// <item><description>DirOffset is a pointer to the FieldSummary section.</description></item>
+ /// <item><description>DocFreq is the count of documents which contain the term.</description></item>
+ /// <item><description>TotalTermFreq is the total number of occurrences of the term. this is encoded
+ /// as the difference between the total number of occurrences and the DocFreq.</description></item>
+ /// <item><description>FieldNumber is the fields number from <see cref="fieldInfos"/>. (.fnm)</description></item>
+ /// <item><description>NumTerms is the number of unique terms for the field.</description></item>
+ /// <item><description>RootCode points to the root block for the field.</description></item>
+ /// <item><description>SumDocFreq is the total number of postings, the number of term-document pairs across
+ /// the entire field.</description></item>
+ /// <item><description>DocCount is the number of documents that have at least one posting for this field.</description></item>
+ /// <item><description>PostingsHeader and TermMetadata are plugged into by the specific postings implementation:
/// these contain arbitrary per-file data (such as parameters or versioning information)
- /// and per-term data (such as pointers to inverted files).</li>
- /// <li>For inner nodes of the tree, every entry will steal one bit to mark whether it points
- /// to child nodes(sub-block). If so, the corresponding TermStats and TermMetaData are omitted </li>
- /// </ul>
+ /// and per-term data (such as pointers to inverted files).</description></item>
+ /// <item><description>For inner nodes of the tree, every entry will steal one bit to mark whether it points
+ /// to child nodes(sub-block). If so, the corresponding <see cref="TermStats"/> and TermMetadata are omitted </description></item>
+ /// </list>
/// <a name="Termindex" id="Termindex"></a>
/// <h3>Term Index</h3>
- /// <p>The .tip file contains an index into the term dictionary, so that it can be
+ /// <para>The .tip file contains an index into the term dictionary, so that it can be
/// accessed randomly. The index is also used to determine
- /// when a given term cannot exist on disk (in the .tim file), saving a disk seek.</p>
- /// <ul>
- /// <li>TermsIndex (.tip) --> Header, FSTIndex<sup>NumFields</sup>
- /// <IndexStartFP><sup>NumFields</sup>, DirOffset, Footer</li>
- /// <li>Header --> <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
- /// <li>DirOffset --> <seealso cref="DataOutput#writeLong Uint64"/></li>
- /// <li>IndexStartFP --> <seealso cref="DataOutput#writeVLong VLong"/></li>
+ /// when a given term cannot exist on disk (in the .tim file), saving a disk seek.</para>
+ /// <list type="bullet">
+ /// <item><description>TermsIndex (.tip) --> Header, FSTIndex<sup>NumFields</sup>
+ /// <IndexStartFP><sup>NumFields</sup>, DirOffset, Footer</description></item>
+ /// <item><description>Header --> CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>)</description></item>
+ /// <item><description>DirOffset --> Uint64 (<see cref="Store.DataOutput.WriteInt64(long)"/>)</description></item>
+ /// <item><description>IndexStartFP --> VLong (<see cref="Store.DataOutput.WriteVInt64(long)"/>)</description></item>
/// <!-- TODO: better describe FST output here -->
- /// <li>FSTIndex --> <seealso cref="FST FST<byte[]>"/></li>
- /// <li>Footer --> <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
- /// </ul>
- /// <p>Notes:</p>
- /// <ul>
- /// <li>The .tip file contains a separate FST for each
+ /// <item><description>FSTIndex --> <see cref="T:FST{byte[]}"/></description></item>
+ /// <item><description>Footer --> CodecFooter (<see cref="CodecUtil.WriteFooter(IndexOutput)"/>)</description></item>
+ /// </list>
+ /// <para>Notes:</para>
+ /// <list type="bullet">
+ /// <item><description>The .tip file contains a separate FST for each
/// field. The FST maps a term prefix to the on-disk
/// block that holds all terms starting with that
/// prefix. Each field's IndexStartFP points to its
- /// FST.</li>
- /// <li>DirOffset is a pointer to the start of the IndexStartFPs
- /// for all fields</li>
- /// <li>It's possible that an on-disk block would contain
+ /// FST.</description></item>
+ /// <item><description>DirOffset is a pointer to the start of the IndexStartFPs
+ /// for all fields</description></item>
+ /// <item><description>It's possible that an on-disk block would contain
/// too many terms (more than the allowed maximum
/// (default: 48)). When this happens, the block is
/// sub-divided into new blocks (called "floor
/// blocks"), and then the output in the FST for the
/// block's prefix encodes the leading byte of each
- /// sub-block, and its file pointer.
- /// </ul>
+ /// sub-block, and its file pointer.</description></item>
+ /// </list>
+ /// <para/>
+ /// @lucene.experimental
/// </summary>
- /// <seealso cref= BlockTreeTermsReader
- /// @lucene.experimental </seealso>
+ /// <seealso cref="BlockTreeTermsReader"/>
public class BlockTreeTermsWriter : FieldsConsumer
{
/// <summary>
- /// Suggested default value for the {@code
- /// minItemsInBlock} parameter to {@link
- /// #BlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int)}.
+ /// Suggested default value for the
+ /// <c>minItemsInBlock</c> parameter to
+ /// <see cref="BlockTreeTermsWriter(SegmentWriteState, PostingsWriterBase, int, int)"/>.
/// </summary>
public const int DEFAULT_MIN_BLOCK_SIZE = 25;
/// <summary>
- /// Suggested default value for the {@code
- /// maxItemsInBlock} parameter to {@link
- /// #BlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int)}.
+ /// Suggested default value for the
+ /// <c>maxItemsInBlock</c> parameter to
+ /// <see cref="BlockTreeTermsWriter(SegmentWriteState, PostingsWriterBase, int, int)"/>.
/// </summary>
public const int DEFAULT_MAX_BLOCK_SIZE = 48;
@@ -197,7 +198,7 @@ namespace Lucene.Net.Codecs
internal const int OUTPUT_FLAG_HAS_TERMS = 0x2;
/// <summary>
- /// Extension of terms file </summary>
+ /// Extension of terms file. </summary>
internal const string TERMS_EXTENSION = "tim";
internal const string TERMS_CODEC_NAME = "BLOCK_TREE_TERMS_DICT";
@@ -211,11 +212,11 @@ namespace Lucene.Net.Codecs
public const int VERSION_APPEND_ONLY = 1;
/// <summary>
- /// Meta data as array </summary>
+ /// Meta data as array. </summary>
public const int VERSION_META_ARRAY = 2;
/// <summary>
- /// checksums </summary>
+ /// Checksums. </summary>
public const int VERSION_CHECKSUM = 3;
/// <summary>
@@ -223,7 +224,7 @@ namespace Lucene.Net.Codecs
public const int VERSION_CURRENT = VERSION_CHECKSUM;
/// <summary>
- /// Extension of terms index file </summary>
+ /// Extension of terms index file. </summary>
internal const string TERMS_INDEX_EXTENSION = "tip";
internal const string TERMS_INDEX_CODEC_NAME = "BLOCK_TREE_TERMS_INDEX";
@@ -272,9 +273,9 @@ namespace Lucene.Net.Codecs
/// <summary>
/// Create a new writer. The number of items (terms or
- /// sub-blocks) per block will aim to be between
- /// minItemsPerBlock and maxItemsPerBlock, though in some
- /// cases the blocks may be smaller than the min.
+ /// sub-blocks) per block will aim to be between
+ /// <paramref name="minItemsInBlock"/> and <paramref name="maxItemsInBlock"/>, though in some
+ /// cases the blocks may be smaller than the min.
/// </summary>
public BlockTreeTermsWriter(SegmentWriteState state, PostingsWriterBase postingsWriter, int minItemsInBlock, int maxItemsInBlock)
{
@@ -1197,6 +1198,9 @@ namespace Lucene.Net.Codecs
internal readonly RAMOutputStream bytesWriter = new RAMOutputStream();
}
+ /// <summary>
+ /// Disposes all resources used by this object.
+ /// </summary>
protected override void Dispose(bool disposing)
{
if (disposing)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/Codec.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Codec.cs b/src/Lucene.Net/Codecs/Codec.cs
index a9f2448..680e490 100644
--- a/src/Lucene.Net/Codecs/Codec.cs
+++ b/src/Lucene.Net/Codecs/Codec.cs
@@ -98,7 +98,7 @@ namespace Lucene.Net.Codecs
}
/// <summary>
- /// Returns this codec's name </summary>
+ /// Returns this codec's name. </summary>
public string Name
{
get
@@ -108,46 +108,46 @@ namespace Lucene.Net.Codecs
}
/// <summary>
- /// Encodes/decodes postings </summary>
+ /// Encodes/decodes postings. </summary>
public abstract PostingsFormat PostingsFormat { get; }
/// <summary>
- /// Encodes/decodes docvalues </summary>
+ /// Encodes/decodes docvalues. </summary>
public abstract DocValuesFormat DocValuesFormat { get; }
/// <summary>
- /// Encodes/decodes stored fields </summary>
+ /// Encodes/decodes stored fields. </summary>
public abstract StoredFieldsFormat StoredFieldsFormat { get; }
/// <summary>
- /// Encodes/decodes term vectors </summary>
+ /// Encodes/decodes term vectors. </summary>
public abstract TermVectorsFormat TermVectorsFormat { get; }
/// <summary>
- /// Encodes/decodes field infos file </summary>
+ /// Encodes/decodes field infos file. </summary>
public abstract FieldInfosFormat FieldInfosFormat { get; }
/// <summary>
- /// Encodes/decodes segment info file </summary>
+ /// Encodes/decodes segment info file. </summary>
public abstract SegmentInfoFormat SegmentInfoFormat { get; }
/// <summary>
- /// Encodes/decodes document normalization values </summary>
+ /// Encodes/decodes document normalization values. </summary>
public abstract NormsFormat NormsFormat { get; }
/// <summary>
- /// Encodes/decodes live docs </summary>
+ /// Encodes/decodes live docs. </summary>
public abstract LiveDocsFormat LiveDocsFormat { get; }
/// <summary>
- /// looks up a codec by name </summary>
+ /// Looks up a codec by name. </summary>
public static Codec ForName(string name)
{
return codecFactory.GetCodec(name);
}
/// <summary>
- /// returns a list of all available codec names </summary>
+ /// Returns a list of all available codec names. </summary>
public static ICollection<string> AvailableCodecs()
{
if (codecFactory is IServiceListable)
@@ -166,8 +166,8 @@ namespace Lucene.Net.Codecs
private static Codec defaultCodec;
/// <summary>
- /// expert: returns the default codec used for newly created
- /// <seealso cref="IndexWriterConfig"/>s.
+ /// Expert: returns the default codec used for newly created
+ /// <see cref="Index.IndexWriterConfig"/>s.
/// </summary>
// TODO: should we use this, or maybe a system property is better?
public static Codec Default
@@ -189,7 +189,7 @@ namespace Lucene.Net.Codecs
}
/// <summary>
- /// returns the codec's name. Subclasses can override to provide
+ /// Returns the codec's name. Subclasses can override to provide
/// more detail (such as parameters).
/// </summary>
public override string ToString()
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/CodecUtil.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/CodecUtil.cs b/src/Lucene.Net/Codecs/CodecUtil.cs
index 221fd72..a6dd3af 100644
--- a/src/Lucene.Net/Codecs/CodecUtil.cs
+++ b/src/Lucene.Net/Codecs/CodecUtil.cs
@@ -24,13 +24,12 @@ namespace Lucene.Net.Codecs
/// <summary>
/// Utility class for reading and writing versioned headers.
- /// <p>
+ /// <para/>
/// Writing codec headers is useful to ensure that a file is in
/// the format you think it is.
- ///
+ /// <para/>
/// @lucene.experimental
/// </summary>
-
public sealed class CodecUtil
{
private CodecUtil() // no instance
@@ -49,29 +48,29 @@ namespace Lucene.Net.Codecs
/// <summary>
/// Writes a codec header, which records both a string to
- /// identify the file and a version number. this header can
+ /// identify the file and a version number. This header can
/// be parsed and validated with
- /// <seealso cref="#checkHeader(DataInput, String, int, int) checkHeader()"/>.
- /// <p>
+ /// <see cref="CheckHeader(DataInput, string, int, int)"/>.
+ /// <para/>
/// CodecHeader --> Magic,CodecName,Version
- /// <ul>
- /// <li>Magic --> <seealso cref="DataOutput#writeInt Uint32"/>. this
- /// identifies the start of the header. It is always {@value #CODEC_MAGIC}.
- /// <li>CodecName --> <seealso cref="DataOutput#writeString String"/>. this
- /// is a string to identify this file.
- /// <li>Version --> <seealso cref="DataOutput#writeInt Uint32"/>. Records
- /// the version of the file.
- /// </ul>
- /// <p>
+ /// <list type="bullet">
+ /// <item><description>Magic --> Uint32 (<see cref="DataOutput.WriteInt32(int)"/>). this
+ /// identifies the start of the header. It is always <see cref="CODEC_MAGIC"/>.</description></item>
+ /// <item><description>CodecName --> String (<see cref="DataOutput.WriteString(string)"/>). this
+ /// is a string to identify this file.</description></item>
+ /// <item><description>Version --> Uint32 (<see cref="DataOutput.WriteInt32(int)"/>). Records
+ /// the version of the file.</description></item>
+ /// </list>
+ /// <para/>
/// Note that the length of a codec header depends only upon the
/// name of the codec, so this length can be computed at any time
- /// with <seealso cref="#headerLength(String)"/>.
+ /// with <see cref="HeaderLength(string)"/>.
/// </summary>
/// <param name="out"> Output stream </param>
/// <param name="codec"> String to identify this file. It should be simple ASCII,
/// less than 128 characters in length. </param>
/// <param name="version"> Version number </param>
- /// <exception cref="IOException"> If there is an I/O error writing to the underlying medium. </exception>
+ /// <exception cref="System.IO.IOException"> If there is an I/O error writing to the underlying medium. </exception>
public static void WriteHeader(DataOutput @out, string codec, int version)
{
BytesRef bytes = new BytesRef(codec);
@@ -88,8 +87,8 @@ namespace Lucene.Net.Codecs
/// Computes the length of a codec header.
/// </summary>
/// <param name="codec"> Codec name. </param>
- /// <returns> length of the entire codec header. </returns>
- /// <seealso cref= #writeHeader(DataOutput, String, int) </seealso>
+ /// <returns> Length of the entire codec header. </returns>
+ /// <seealso cref="WriteHeader(DataOutput, string, int)"/>
public static int HeaderLength(string codec)
{
return 9 + codec.Length;
@@ -97,10 +96,10 @@ namespace Lucene.Net.Codecs
/// <summary>
/// Reads and validates a header previously written with
- /// <seealso cref="#writeHeader(DataOutput, String, int)"/>.
- /// <p>
- /// When reading a file, supply the expected <code>codec</code> and
- /// an expected version range (<code>minVersion to maxVersion</code>).
+ /// <see cref="WriteHeader(DataOutput, string, int)"/>.
+ /// <para/>
+ /// When reading a file, supply the expected <paramref name="codec"/> and
+ /// an expected version range (<paramref name="minVersion"/> to <paramref name="maxVersion"/>).
/// </summary>
/// <param name="in"> Input stream, positioned at the point where the
/// header was previously written. Typically this is located
@@ -109,18 +108,18 @@ namespace Lucene.Net.Codecs
/// <param name="minVersion"> The minimum supported expected version number. </param>
/// <param name="maxVersion"> The maximum supported expected version number. </param>
/// <returns> The actual version found, when a valid header is found
- /// that matches <code>codec</code>, with an actual version
- /// where <code>minVersion <= actual <= maxVersion</code>.
+ /// that matches <paramref name="codec"/>, with an actual version
+ /// where <c>minVersion &lt;= actual &lt;= maxVersion</c>.
/// Otherwise an exception is thrown. </returns>
- /// <exception cref="CorruptIndexException"> If the first four bytes are not
- /// <seealso cref="#CODEC_MAGIC"/>, or if the actual codec found is
- /// not <code>codec</code>. </exception>
- /// <exception cref="IndexFormatTooOldException"> If the actual version is less
- /// than <code>minVersion</code>. </exception>
- /// <exception cref="IndexFormatTooNewException"> If the actual version is greater
- /// than <code>maxVersion</code>. </exception>
- /// <exception cref="IOException"> If there is an I/O error reading from the underlying medium. </exception>
- /// <seealso cref= #writeHeader(DataOutput, String, int) </seealso>
+ /// <exception cref="Index.CorruptIndexException"> If the first four bytes are not
+ /// <see cref="CODEC_MAGIC"/>, or if the actual codec found is
+ /// not <paramref name="codec"/>. </exception>
+ /// <exception cref="Index.IndexFormatTooOldException"> If the actual version is less
+ /// than <paramref name="minVersion"/>. </exception>
+ /// <exception cref="Index.IndexFormatTooNewException"> If the actual version is greater
+ /// than <paramref name="maxVersion"/>. </exception>
+ /// <exception cref="System.IO.IOException"> If there is an I/O error reading from the underlying medium. </exception>
+ /// <seealso cref="WriteHeader(DataOutput, string, int)"/>
public static int CheckHeader(DataInput @in, string codec, int minVersion, int maxVersion)
{
// Safety to guard against reading a bogus string:
@@ -133,10 +132,10 @@ namespace Lucene.Net.Codecs
}
/// <summary>
- /// Like {@link
- /// #checkHeader(DataInput,String,int,int)} except this
- /// version assumes the first int has already been read
- /// and validated from the input.
+ /// Like
+ /// <see cref="CheckHeader(DataInput,string,int,int)"/> except this
+ /// version assumes the first <see cref="int"/> has already been read
+ /// and validated from the input.
/// </summary>
public static int CheckHeaderNoMagic(DataInput @in, string codec, int minVersion, int maxVersion)
{
@@ -161,24 +160,24 @@ namespace Lucene.Net.Codecs
/// <summary>
/// Writes a codec footer, which records both a checksum
- /// algorithm ID and a checksum. this footer can
+ /// algorithm ID and a checksum. This footer can
/// be parsed and validated with
- /// <seealso cref="#checkFooter(ChecksumIndexInput) checkFooter()"/>.
- /// <p>
+ /// <see cref="CheckFooter(ChecksumIndexInput)"/>.
+ /// <para/>
/// CodecFooter --> Magic,AlgorithmID,Checksum
- /// <ul>
- /// <li>Magic --> <seealso cref="DataOutput#writeInt Uint32"/>. this
- /// identifies the start of the footer. It is always {@value #FOOTER_MAGIC}.
- /// <li>AlgorithmID --> <seealso cref="DataOutput#writeInt Uint32"/>. this
+ /// <list type="bullet">
+ /// <item><description>Magic --> Uint32 (<see cref="DataOutput.WriteInt32(int)"/>). This
+ /// identifies the start of the footer. It is always <see cref="FOOTER_MAGIC"/>.</description></item>
+ /// <item><description>AlgorithmID --> Uint32 (<see cref="DataOutput.WriteInt32(int)"/>). This
/// indicates the checksum algorithm used. Currently this is always 0,
- /// for zlib-crc32.
- /// <li>Checksum --> <seealso cref="DataOutput#writeLong Uint32"/>. The
+ /// for zlib-crc32.</description></item>
+ /// <item><description>Checksum --> Uint64 (<see cref="DataOutput.WriteInt64(long)"/>). The
/// actual checksum value for all previous bytes in the stream, including
- /// the bytes from Magic and AlgorithmID.
- /// </ul>
+ /// the bytes from Magic and AlgorithmID.</description></item>
+ /// </list>
/// </summary>
/// <param name="out"> Output stream </param>
- /// <exception cref="IOException"> If there is an I/O error writing to the underlying medium. </exception>
+ /// <exception cref="System.IO.IOException"> If there is an I/O error writing to the underlying medium. </exception>
public static void WriteFooter(IndexOutput @out)
{
@out.WriteInt32(FOOTER_MAGIC);
@@ -189,18 +188,18 @@ namespace Lucene.Net.Codecs
/// <summary>
/// Computes the length of a codec footer.
/// </summary>
- /// <returns> length of the entire codec footer. </returns>
- /// <seealso cref= #writeFooter(IndexOutput) </seealso>
+ /// <returns> Length of the entire codec footer. </returns>
+ /// <seealso cref="WriteFooter(IndexOutput)"/>
public static int FooterLength()
{
return 16;
}
/// <summary>
- /// Validates the codec footer previously written by <seealso cref="#writeFooter"/>. </summary>
- /// <returns> actual checksum value </returns>
- /// <exception cref="IOException"> if the footer is invalid, if the checksum does not match,
- /// or if {@code in} is not properly positioned before the footer
+ /// Validates the codec footer previously written by <see cref="WriteFooter(IndexOutput)"/>. </summary>
+ /// <returns> Actual checksum value. </returns>
+ /// <exception cref="System.IO.IOException"> If the footer is invalid, if the checksum does not match,
+ /// or if <paramref name="in"/> is not properly positioned before the footer
/// at the end of the stream. </exception>
public static long CheckFooter(ChecksumIndexInput @in)
{
@@ -219,9 +218,9 @@ namespace Lucene.Net.Codecs
}
/// <summary>
- /// Returns (but does not validate) the checksum previously written by <seealso cref="#checkFooter"/>. </summary>
+ /// Returns (but does not validate) the checksum previously written by <see cref="CheckFooter(ChecksumIndexInput)"/>. </summary>
/// <returns> actual checksum value </returns>
- /// <exception cref="IOException"> if the footer is invalid </exception>
+ /// <exception cref="System.IO.IOException"> If the footer is invalid. </exception>
public static long RetrieveChecksum(IndexInput @in)
{
@in.Seek(@in.Length - FooterLength());
@@ -247,8 +246,7 @@ namespace Lucene.Net.Codecs
/// <summary>
/// Checks that the stream is positioned at the end, and throws exception
/// if it is not. </summary>
- /// @deprecated Use <seealso cref="#checkFooter"/> instead, this should only used for files without checksums
- [Obsolete("Use CheckFooter() instead")]
+ [Obsolete("Use CheckFooter(ChecksumIndexInput) instead, this should only be used for files without checksums.")]
public static void CheckEOF(IndexInput @in)
{
if (@in.GetFilePointer() != @in.Length)
@@ -258,10 +256,10 @@ namespace Lucene.Net.Codecs
}
/// <summary>
- /// Clones the provided input, reads all bytes from the file, and calls <seealso cref="#checkFooter"/>
- /// <p>
+ /// Clones the provided input, reads all bytes from the file, and calls <see cref="CheckFooter(ChecksumIndexInput)"/>
+ /// <para/>
/// Note that this method may be slow, as it must process the entire file.
- /// If you just need to extract the checksum value, call <seealso cref="#retrieveChecksum"/>.
+ /// If you just need to extract the checksum value, call <see cref="RetrieveChecksum(IndexInput)"/>.
/// </summary>
public static long ChecksumEntireFile(IndexInput input)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/DocValuesConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/DocValuesConsumer.cs b/src/Lucene.Net/Codecs/DocValuesConsumer.cs
index 999d719..f8b814c 100644
--- a/src/Lucene.Net/Codecs/DocValuesConsumer.cs
+++ b/src/Lucene.Net/Codecs/DocValuesConsumer.cs
@@ -42,27 +42,28 @@ namespace Lucene.Net.Codecs
/// sorted docvalues. Concrete implementations of this
/// actually do "something" with the docvalues (write it into
/// the index in a specific format).
- /// <p>
+ /// <para/>
/// The lifecycle is:
- /// <ol>
- /// <li>DocValuesConsumer is created by
- /// <seealso cref="DocValuesFormat#fieldsConsumer(SegmentWriteState)"/> or
- /// <seealso cref="NormsFormat#normsConsumer(SegmentWriteState)"/>.
- /// <li><seealso cref="#addNumericField"/>, <seealso cref="#addBinaryField"/>,
- /// or <seealso cref="#addSortedField"/> are called for each Numeric,
+ /// <list type="number">
+ /// <item><description>DocValuesConsumer is created by
+ /// <see cref="DocValuesFormat.FieldsConsumer(Index.SegmentWriteState)"/> or
+ /// <see cref="NormsFormat.NormsConsumer(Index.SegmentWriteState)"/>.</description></item>
+ /// <item><description><see cref="AddNumericField(FieldInfo, IEnumerable{long?})"/>,
+ /// <see cref="AddBinaryField(FieldInfo, IEnumerable{BytesRef})"/>,
+ /// or <see cref="AddSortedField(FieldInfo, IEnumerable{BytesRef}, IEnumerable{long?})"/> are called for each Numeric,
/// Binary, or Sorted docvalues field. The API is a "pull" rather
/// than "push", and the implementation is free to iterate over the
- /// values multiple times (<seealso cref="Iterable#iterator()"/>).
- /// <li>After all fields are added, the consumer is <seealso cref="#close"/>d.
- /// </ol>
- ///
+ /// values multiple times (<see cref="IEnumerable{T}.GetEnumerator()"/>).</description></item>
+ /// <item><description>After all fields are added, the consumer is <see cref="Dispose()"/>d.</description></item>
+ /// </list>
+ /// <para/>
/// @lucene.experimental
/// </summary>
public abstract class DocValuesConsumer : IDisposable
{
/// <summary>
/// Sole constructor. (For invocation by subclass
- /// constructors, typically implicit.)
+ /// constructors, typically implicit.)
/// </summary>
protected internal DocValuesConsumer()
{
@@ -70,44 +71,44 @@ namespace Lucene.Net.Codecs
/// <summary>
/// Writes numeric docvalues for a field. </summary>
- /// <param name="field"> field information </param>
- /// <param name="values"> Iterable of numeric values (one for each document). {@code null} indicates
+ /// <param name="field"> Field information. </param>
+ /// <param name="values"> <see cref="IEnumerable{T}"/> of numeric values (one for each document). <c>null</c> indicates
/// a missing value. </param>
- /// <exception cref="IOException"> if an I/O error occurred. </exception>
+ /// <exception cref="System.IO.IOException"> If an I/O error occurred. </exception>
public abstract void AddNumericField(FieldInfo field, IEnumerable<long?> values);
/// <summary>
/// Writes binary docvalues for a field. </summary>
- /// <param name="field"> field information </param>
- /// <param name="values"> Iterable of binary values (one for each document). {@code null} indicates
+ /// <param name="field"> Field information. </param>
+ /// <param name="values"> <see cref="IEnumerable{T}"/> of binary values (one for each document). <c>null</c> indicates
/// a missing value. </param>
- /// <exception cref="IOException"> if an I/O error occurred. </exception>
+ /// <exception cref="System.IO.IOException"> If an I/O error occurred. </exception>
public abstract void AddBinaryField(FieldInfo field, IEnumerable<BytesRef> values);
/// <summary>
/// Writes pre-sorted binary docvalues for a field. </summary>
- /// <param name="field"> field information </param>
- /// <param name="values"> Iterable of binary values in sorted order (deduplicated). </param>
- /// <param name="docToOrd"> Iterable of ordinals (one for each document). {@code -1} indicates
+ /// <param name="field"> Field information. </param>
+ /// <param name="values"> <see cref="IEnumerable{T}"/> of binary values in sorted order (deduplicated). </param>
+ /// <param name="docToOrd"> <see cref="IEnumerable{T}"/> of ordinals (one for each document). <c>-1</c> indicates
/// a missing value. </param>
- /// <exception cref="IOException"> if an I/O error occurred. </exception>
+ /// <exception cref="System.IO.IOException"> If an I/O error occurred. </exception>
public abstract void AddSortedField(FieldInfo field, IEnumerable<BytesRef> values, IEnumerable<long?> docToOrd);
/// <summary>
/// Writes pre-sorted set docvalues for a field </summary>
- /// <param name="field"> field information </param>
- /// <param name="values"> Iterable of binary values in sorted order (deduplicated). </param>
- /// <param name="docToOrdCount"> Iterable of the number of values for each document. A zero ordinal
+ /// <param name="field"> Field information. </param>
+ /// <param name="values"> <see cref="IEnumerable{T}"/> of binary values in sorted order (deduplicated). </param>
+ /// <param name="docToOrdCount"> <see cref="IEnumerable{T}"/> of the number of values for each document. A zero ordinal
/// count indicates a missing value. </param>
- /// <param name="ords"> Iterable of ordinal occurrences (docToOrdCount*maxDoc total). </param>
- /// <exception cref="IOException"> if an I/O error occurred. </exception>
+ /// <param name="ords"> <see cref="IEnumerable{T}"/> of ordinal occurrences (<paramref name="docToOrdCount"/>*maxDoc total). </param>
+ /// <exception cref="System.IO.IOException"> If an I/O error occurred. </exception>
public abstract void AddSortedSetField(FieldInfo field, IEnumerable<BytesRef> values, IEnumerable<long?> docToOrdCount, IEnumerable<long?> ords);
/// <summary>
- /// Merges the numeric docvalues from <code>toMerge</code>.
- /// <p>
- /// The default implementation calls <seealso cref="#addNumericField"/>, passing
- /// an Iterable that merges and filters deleted documents on the fly.</p>
+ /// Merges the numeric docvalues from <paramref name="toMerge"/>.
+ /// <para>
+ /// The default implementation calls <see cref="AddNumericField(FieldInfo, IEnumerable{long?})"/>, passing
+ /// an <see cref="IEnumerable{T}"/> that merges and filters deleted documents on the fly.</para>
/// </summary>
public virtual void MergeNumericField(FieldInfo fieldInfo, MergeState mergeState, IList<NumericDocValues> toMerge, IList<IBits> docsWithField)
{
@@ -166,10 +167,10 @@ namespace Lucene.Net.Codecs
}
/// <summary>
- /// Merges the binary docvalues from <code>toMerge</code>.
- /// <p>
- /// The default implementation calls <seealso cref="#addBinaryField"/>, passing
- /// an Iterable that merges and filters deleted documents on the fly.
+ /// Merges the binary docvalues from <paramref name="toMerge"/>.
+ /// <para>
+ /// The default implementation calls <see cref="AddBinaryField(FieldInfo, IEnumerable{BytesRef})"/>, passing
+ /// an <see cref="IEnumerable{T}"/> that merges and filters deleted documents on the fly.</para>
/// </summary>
public virtual void MergeBinaryField(FieldInfo fieldInfo, MergeState mergeState, IList<BinaryDocValues> toMerge, IList<IBits> docsWithField)
{
@@ -229,10 +230,10 @@ namespace Lucene.Net.Codecs
}
/// <summary>
- /// Merges the sorted docvalues from <code>toMerge</code>.
- /// <p>
- /// The default implementation calls <seealso cref="#addSortedField"/>, passing
- /// an Iterable that merges ordinals and values and filters deleted documents.</p>
+ /// Merges the sorted docvalues from <paramref name="toMerge"/>.
+ /// <para>
+ /// The default implementation calls <see cref="AddSortedField(FieldInfo, IEnumerable{BytesRef}, IEnumerable{long?})"/>, passing
+ /// an <see cref="IEnumerable{T}"/> that merges ordinals and values and filters deleted documents.</para>
/// </summary>
public virtual void MergeSortedField(FieldInfo fieldInfo, MergeState mergeState, IList<SortedDocValues> toMerge)
{
@@ -331,166 +332,11 @@ namespace Lucene.Net.Codecs
}
}
- /*
- private class IterableAnonymousInnerClassHelper3 : IEnumerable<BytesRef>
- {
- private readonly DocValuesConsumer OuterInstance;
-
- private SortedDocValues[] Dvs;
- private OrdinalMap Map;
-
- public IterableAnonymousInnerClassHelper3(DocValuesConsumer outerInstance, SortedDocValues[] dvs, OrdinalMap map)
- {
- this.OuterInstance = outerInstance;
- this.Dvs = dvs;
- this.Map = map;
- }
-
- // ord -> value
- public virtual IEnumerator<BytesRef> GetEnumerator()
- {
- return new IteratorAnonymousInnerClassHelper3(this);
- }
-
- private class IteratorAnonymousInnerClassHelper3 : IEnumerator<BytesRef>
- {
- private readonly IterableAnonymousInnerClassHelper3 OuterInstance;
-
- public IteratorAnonymousInnerClassHelper3(IterableAnonymousInnerClassHelper3 outerInstance)
- {
- this.OuterInstance = outerInstance;
- scratch = new BytesRef();
- }
-
- internal readonly BytesRef scratch;
- internal int currentOrd;
-
- public virtual bool HasNext()
- {
- return currentOrd < OuterInstance.Map.ValueCount;
- }
-
- public virtual BytesRef Next()
- {
- if (!HasNext())
- {
- throw new Exception();
- }
- int segmentNumber = OuterInstance.Map.GetFirstSegmentNumber(currentOrd);
- int segmentOrd = (int)OuterInstance.Map.GetFirstSegmentOrd(currentOrd);
- OuterInstance.Dvs[segmentNumber].LookupOrd(segmentOrd, scratch);
- currentOrd++;
- return scratch;
- }
-
- public virtual void Remove()
- {
- throw new System.NotSupportedException();
- }
- }
- }
-
- private class IterableAnonymousInnerClassHelper4 : IEnumerable<Number>
- {
- private readonly DocValuesConsumer OuterInstance;
-
- private AtomicReader[] Readers;
- private SortedDocValues[] Dvs;
- private OrdinalMap Map;
-
- public IterableAnonymousInnerClassHelper4(DocValuesConsumer outerInstance, AtomicReader[] readers, SortedDocValues[] dvs, OrdinalMap map)
- {
- this.OuterInstance = outerInstance;
- this.Readers = readers;
- this.Dvs = dvs;
- this.Map = map;
- }
-
- public virtual IEnumerator<Number> GetEnumerator()
- {
- return new IteratorAnonymousInnerClassHelper4(this);
- }
-
- private class IteratorAnonymousInnerClassHelper4 : IEnumerator<Number>
- {
- private readonly IterableAnonymousInnerClassHelper4 OuterInstance;
-
- public IteratorAnonymousInnerClassHelper4(IterableAnonymousInnerClassHelper4 outerInstance)
- {
- this.OuterInstance = outerInstance;
- readerUpto = -1;
- }
-
- internal int readerUpto;
- internal int docIDUpto;
- internal int nextValue;
- internal AtomicReader currentReader;
- internal Bits currentLiveDocs;
- internal bool nextIsSet;
-
- public virtual bool HasNext()
- {
- return nextIsSet || SetNext();
- }
-
- public virtual void Remove()
- {
- throw new System.NotSupportedException();
- }
-
- public virtual Number Next()
- {
- if (!HasNext())
- {
- throw new NoSuchElementException();
- }
- Debug.Assert(nextIsSet);
- nextIsSet = false;
- // TODO make a mutable number
- return nextValue;
- }
-
- private bool SetNext()
- {
- while (true)
- {
- if (readerUpto == OuterInstance.Readers.Length)
- {
- return false;
- }
-
- if (currentReader == null || docIDUpto == currentReader.MaxDoc)
- {
- readerUpto++;
- if (readerUpto < OuterInstance.Readers.Length)
- {
- currentReader = OuterInstance.Readers[readerUpto];
- currentLiveDocs = currentReader.LiveDocs;
- }
- docIDUpto = 0;
- continue;
- }
-
- if (currentLiveDocs == null || currentLiveDocs.get(docIDUpto))
- {
- nextIsSet = true;
- int segOrd = OuterInstance.Dvs[readerUpto].GetOrd(docIDUpto);
- nextValue = segOrd == -1 ? - 1 : (int) OuterInstance.Map.GetGlobalOrd(readerUpto, segOrd);
- docIDUpto++;
- return true;
- }
-
- docIDUpto++;
- }
- }
- }
- }*/
-
/// <summary>
- /// Merges the sortedset docvalues from <code>toMerge</code>.
- /// <p>
- /// The default implementation calls <seealso cref="#addSortedSetField"/>, passing
- /// an Iterable that merges ordinals and values and filters deleted documents .
+ /// Merges the sortedset docvalues from <paramref name="toMerge"/>.
+ /// <para>
+ /// The default implementation calls <see cref="AddSortedSetField(FieldInfo, IEnumerable{BytesRef}, IEnumerable{long?}, IEnumerable{long?})"/>, passing
+ /// an <see cref="IEnumerable{T}"/> that merges ordinals and values and filters deleted documents.</para>
/// </summary>
public virtual void MergeSortedSetField(FieldInfo fieldInfo, MergeState mergeState, IList<SortedSetDocValues> toMerge)
{
@@ -659,283 +505,6 @@ namespace Lucene.Net.Codecs
}
}
- /*
- private class IterableAnonymousInnerClassHelper5 : IEnumerable<BytesRef>
- {
- private readonly DocValuesConsumer OuterInstance;
-
- private SortedSetDocValues[] Dvs;
- private OrdinalMap Map;
-
- public IterableAnonymousInnerClassHelper5(DocValuesConsumer outerInstance, SortedSetDocValues[] dvs, OrdinalMap map)
- {
- this.OuterInstance = outerInstance;
- this.Dvs = dvs;
- this.Map = map;
- }
-
- // ord -> value
- public virtual IEnumerator<BytesRef> GetEnumerator()
- {
- return new IteratorAnonymousInnerClassHelper5(this);
- }
-
- private class IteratorAnonymousInnerClassHelper5 : IEnumerator<BytesRef>
- {
- private readonly IterableAnonymousInnerClassHelper5 OuterInstance;
-
- public IteratorAnonymousInnerClassHelper5(IterableAnonymousInnerClassHelper5 outerInstance)
- {
- this.OuterInstance = outerInstance;
- scratch = new BytesRef();
- }
-
- internal readonly BytesRef scratch;
- internal long currentOrd;
-
- public virtual bool HasNext()
- {
- return currentOrd < OuterInstance.Map.ValueCount;
- }
-
- public virtual BytesRef Next()
- {
- if (!HasNext())
- {
- throw new Exception();
- }
- int segmentNumber = OuterInstance.Map.GetFirstSegmentNumber(currentOrd);
- long segmentOrd = OuterInstance.Map.GetFirstSegmentOrd(currentOrd);
- OuterInstance.Dvs[segmentNumber].LookupOrd(segmentOrd, scratch);
- currentOrd++;
- return scratch;
- }
-
- public virtual void Remove()
- {
- throw new System.NotSupportedException();
- }
- }
- }
-
- private class IterableAnonymousInnerClassHelper6 : IEnumerable<Number>
- {
- private readonly DocValuesConsumer OuterInstance;
-
- private AtomicReader[] Readers;
- private SortedSetDocValues[] Dvs;
-
- public IterableAnonymousInnerClassHelper6(DocValuesConsumer outerInstance, AtomicReader[] readers, SortedSetDocValues[] dvs)
- {
- this.OuterInstance = outerInstance;
- this.Readers = readers;
- this.Dvs = dvs;
- }
-
- public virtual IEnumerator<Number> GetEnumerator()
- {
- return new IteratorAnonymousInnerClassHelper6(this);
- }
-
- private class IteratorAnonymousInnerClassHelper6 : IEnumerator<Number>
- {
- private readonly IterableAnonymousInnerClassHelper6 OuterInstance;
-
- public IteratorAnonymousInnerClassHelper6(IterableAnonymousInnerClassHelper6 outerInstance)
- {
- this.OuterInstance = outerInstance;
- readerUpto = -1;
- }
-
- internal int readerUpto;
- internal int docIDUpto;
- internal int nextValue;
- internal AtomicReader currentReader;
- internal Bits currentLiveDocs;
- internal bool nextIsSet;
-
- public virtual bool HasNext()
- {
- return nextIsSet || SetNext();
- }
-
- public virtual void Remove()
- {
- throw new System.NotSupportedException();
- }
-
- public virtual Number Next()
- {
- if (!HasNext())
- {
- throw new Exception();
- }
- Debug.Assert(nextIsSet);
- nextIsSet = false;
- // TODO make a mutable number
- return nextValue;
- }
-
- private bool SetNext()
- {
- while (true)
- {
- if (readerUpto == OuterInstance.Readers.Length)
- {
- return false;
- }
-
- if (currentReader == null || docIDUpto == currentReader.MaxDoc)
- {
- readerUpto++;
- if (readerUpto < OuterInstance.Readers.Length)
- {
- currentReader = OuterInstance.Readers[readerUpto];
- currentLiveDocs = currentReader.LiveDocs;
- }
- docIDUpto = 0;
- continue;
- }
-
- if (currentLiveDocs == null || currentLiveDocs.Get(docIDUpto))
- {
- nextIsSet = true;
- SortedSetDocValues dv = OuterInstance.Dvs[readerUpto];
- dv.Document = docIDUpto;
- nextValue = 0;
- while (dv.NextOrd() != SortedSetDocValues.NO_MORE_ORDS)
- {
- nextValue++;
- }
- docIDUpto++;
- return true;
- }
-
- docIDUpto++;
- }
- }
- }
- }
-
- private class IterableAnonymousInnerClassHelper7 : IEnumerable<Number>
- {
- private readonly DocValuesConsumer OuterInstance;
-
- private AtomicReader[] Readers;
- private SortedSetDocValues[] Dvs;
- private OrdinalMap Map;
-
- public IterableAnonymousInnerClassHelper7(DocValuesConsumer outerInstance, AtomicReader[] readers, SortedSetDocValues[] dvs, OrdinalMap map)
- {
- this.OuterInstance = outerInstance;
- this.Readers = readers;
- this.Dvs = dvs;
- this.Map = map;
- }
-
- public virtual IEnumerator<Number> GetEnumerator()
- {
- return new IteratorAnonymousInnerClassHelper7(this);
- }
-
- private class IteratorAnonymousInnerClassHelper7 : IEnumerator<Number>
- {
- private readonly IterableAnonymousInnerClassHelper7 OuterInstance;
-
- public IteratorAnonymousInnerClassHelper7(IterableAnonymousInnerClassHelper7 outerInstance)
- {
- this.OuterInstance = outerInstance;
- readerUpto = -1;
- ords = new long[8];
- }
-
- internal int readerUpto;
- internal int docIDUpto;
- internal long nextValue;
- internal AtomicReader currentReader;
- internal Bits currentLiveDocs;
- internal bool nextIsSet;
- internal long[] ords;
- internal int ordUpto;
- internal int ordLength;
-
- public virtual bool HasNext()
- {
- return nextIsSet || SetNext();
- }
-
- public virtual void Remove()
- {
- throw new System.NotSupportedException();
- }
-
- public virtual Number Next()
- {
- if (!HasNext())
- {
- throw new Exception();
- }
- Debug.Assert(nextIsSet);
- nextIsSet = false;
- // TODO make a mutable number
- return nextValue;
- }
-
- private bool SetNext()
- {
- while (true)
- {
- if (readerUpto == OuterInstance.Readers.Length)
- {
- return false;
- }
-
- if (ordUpto < ordLength)
- {
- nextValue = ords[ordUpto];
- ordUpto++;
- nextIsSet = true;
- return true;
- }
-
- if (currentReader == null || docIDUpto == currentReader.MaxDoc)
- {
- readerUpto++;
- if (readerUpto < OuterInstance.Readers.Length)
- {
- currentReader = OuterInstance.Readers[readerUpto];
- currentLiveDocs = currentReader.LiveDocs;
- }
- docIDUpto = 0;
- continue;
- }
-
- if (currentLiveDocs == null || currentLiveDocs.Get(docIDUpto))
- {
- Debug.Assert(docIDUpto < currentReader.MaxDoc);
- SortedSetDocValues dv = OuterInstance.Dvs[readerUpto];
- dv.Document = docIDUpto;
- ordUpto = ordLength = 0;
- long ord;
- while ((ord = dv.NextOrd()) != SortedSetDocValues.NO_MORE_ORDS)
- {
- if (ordLength == ords.Length)
- {
- ords = ArrayUtil.Grow(ords, ordLength + 1);
- }
- ords[ordLength] = OuterInstance.Map.GetGlobalOrd(readerUpto, ord);
- ordLength++;
- }
- docIDUpto++;
- continue;
- }
-
- docIDUpto++;
- }
- }
- }
- }*/
-
// TODO: seek-by-ord to nextSetBit
internal class BitsFilteredTermsEnum : FilteredTermsEnum
{
@@ -954,12 +523,18 @@ namespace Lucene.Net.Codecs
}
}
+ /// <summary>
+ /// Disposes all resources used by this object.
+ /// </summary>
public void Dispose()
{
Dispose(true);
GC.SuppressFinalize(this);
}
+ /// <summary>
+ /// Implementations must override and should dispose all resources used by this instance.
+ /// </summary>
protected abstract void Dispose(bool disposing);
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/DocValuesFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/DocValuesFormat.cs b/src/Lucene.Net/Codecs/DocValuesFormat.cs
index 9ef0f4d..907aed9 100644
--- a/src/Lucene.Net/Codecs/DocValuesFormat.cs
+++ b/src/Lucene.Net/Codecs/DocValuesFormat.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Codecs
/// Note, when extending this class, the name (<see cref="Name"/>) may
/// written into the index in certain configurations. In order for the segment
/// to be read, the name must resolve to your implementation via <see cref="ForName(string)"/>.
- /// this method uses <see cref="IDocValuesFormatFactory.GetDocValuesFormat(string)"/> to resolve format names.
+ /// This method uses <see cref="IDocValuesFormatFactory.GetDocValuesFormat(string)"/> to resolve format names.
/// <para/>
/// To implement your own format:
/// <list type="number">
@@ -123,8 +123,8 @@ namespace Lucene.Net.Codecs
/// NOTE: by the time this call returns, it must hold open any files it will
/// need to use; else, those files may be deleted. Additionally, required files
/// may be deleted during the execution of this call before there is a chance
- /// to open them. Under these circumstances an IOException should be thrown by
- /// the implementation. IOExceptions are expected and will automatically cause
+ /// to open them. Under these circumstances an <see cref="System.IO.IOException"/> should be thrown by
+ /// the implementation. <see cref="System.IO.IOException"/>s are expected and will automatically cause
/// a retry of the segment opening logic with the newly revised segments.
/// </summary>
public abstract DocValuesProducer FieldsProducer(SegmentReadState state);
@@ -147,14 +147,14 @@ namespace Lucene.Net.Codecs
}
/// <summary>
- /// looks up a format by name </summary>
+ /// Looks up a format by name. </summary>
public static DocValuesFormat ForName(string name)
{
return docValuesFormatFactory.GetDocValuesFormat(name);
}
/// <summary>
- /// returns a list of all available format names </summary>
+ /// Returns a list of all available format names. </summary>
public static ICollection<string> AvailableDocValuesFormats()
{
if (docValuesFormatFactory is IServiceListable)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/DocValuesProducer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/DocValuesProducer.cs b/src/Lucene.Net/Codecs/DocValuesProducer.cs
index c5f2605..900d1b3 100644
--- a/src/Lucene.Net/Codecs/DocValuesProducer.cs
+++ b/src/Lucene.Net/Codecs/DocValuesProducer.cs
@@ -29,74 +29,81 @@ namespace Lucene.Net.Codecs
/// <summary>
/// Abstract API that produces numeric, binary and
/// sorted docvalues.
- ///
+ /// <para/>
/// @lucene.experimental
/// </summary>
public abstract class DocValuesProducer : IDisposable
{
/// <summary>
/// Sole constructor. (For invocation by subclass
- /// constructors, typically implicit.)
+ /// constructors, typically implicit.)
/// </summary>
protected internal DocValuesProducer()
{
}
/// <summary>
- /// Returns <seealso cref="NumericDocValues"/> for this field.
- /// The returned instance need not be thread-safe: it will only be
- /// used by a single thread.
+ /// Returns <see cref="NumericDocValues"/> for this field.
+ /// The returned instance need not be thread-safe: it will only be
+ /// used by a single thread.
/// </summary>
public abstract NumericDocValues GetNumeric(FieldInfo field);
/// <summary>
- /// Returns <seealso cref="BinaryDocValues"/> for this field.
- /// The returned instance need not be thread-safe: it will only be
- /// used by a single thread.
+ /// Returns <see cref="BinaryDocValues"/> for this field.
+ /// The returned instance need not be thread-safe: it will only be
+ /// used by a single thread.
/// </summary>
public abstract BinaryDocValues GetBinary(FieldInfo field);
/// <summary>
- /// Returns <seealso cref="SortedDocValues"/> for this field.
- /// The returned instance need not be thread-safe: it will only be
- /// used by a single thread.
+ /// Returns <see cref="SortedDocValues"/> for this field.
+ /// The returned instance need not be thread-safe: it will only be
+ /// used by a single thread.
/// </summary>
public abstract SortedDocValues GetSorted(FieldInfo field);
/// <summary>
- /// Returns <seealso cref="SortedSetDocValues"/> for this field.
- /// The returned instance need not be thread-safe: it will only be
- /// used by a single thread.
+ /// Returns <see cref="SortedSetDocValues"/> for this field.
+ /// The returned instance need not be thread-safe: it will only be
+ /// used by a single thread.
/// </summary>
public abstract SortedSetDocValues GetSortedSet(FieldInfo field);
/// <summary>
- /// Returns a <seealso cref="IBits"/> at the size of <code>reader.maxDoc()</code>,
- /// with turned on bits for each docid that does have a value for this field.
- /// The returned instance need not be thread-safe: it will only be
- /// used by a single thread.
+ /// Returns a <see cref="IBits"/> at the size of <c>reader.MaxDoc</c>,
+ /// with turned on bits for each docid that does have a value for this field.
+ /// The returned instance need not be thread-safe: it will only be
+ /// used by a single thread.
/// </summary>
public abstract IBits GetDocsWithField(FieldInfo field);
/// <summary>
- /// Returns approximate RAM bytes used </summary>
+ /// Returns approximate RAM bytes used. </summary>
public abstract long RamBytesUsed();
/// <summary>
- /// Checks consistency of this producer
- /// <p>
+ /// Checks consistency of this producer.
+ /// <para/>
/// Note that this may be costly in terms of I/O, e.g.
/// may involve computing a checksum value against large data files.
+ /// <para/>
/// @lucene.internal
/// </summary>
public abstract void CheckIntegrity();
+ /// <summary>
+ /// Disposes all resources used by this object.
+ /// </summary>
public virtual void Dispose()
{
Dispose(true);
GC.SuppressFinalize(this);
}
+ /// <summary>
+ /// Implementations must override and should dispose all resources used by this instance.
+ /// </summary>
protected abstract void Dispose(bool disposing);
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/FieldInfosFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/FieldInfosFormat.cs b/src/Lucene.Net/Codecs/FieldInfosFormat.cs
index a9f932c..30215f2 100644
--- a/src/Lucene.Net/Codecs/FieldInfosFormat.cs
+++ b/src/Lucene.Net/Codecs/FieldInfosFormat.cs
@@ -20,28 +20,29 @@ namespace Lucene.Net.Codecs
using FieldInfos = Lucene.Net.Index.FieldInfos; // javadocs
/// <summary>
- /// Encodes/decodes <seealso cref="FieldInfos"/>
+ /// Encodes/decodes <see cref="FieldInfos"/>.
+ /// <para/>
/// @lucene.experimental
/// </summary>
public abstract class FieldInfosFormat
{
/// <summary>
/// Sole constructor. (For invocation by subclass
- /// constructors, typically implicit.)
+ /// constructors, typically implicit.)
/// </summary>
protected internal FieldInfosFormat()
{
}
/// <summary>
- /// Returns a <seealso cref="FieldInfosReader"/> to read field infos
- /// from the index
+ /// Returns a <see cref="Codecs.FieldInfosReader"/> to read field infos
+ /// from the index.
/// </summary>
public abstract FieldInfosReader FieldInfosReader { get; }
/// <summary>
- /// Returns a <seealso cref="FieldInfosWriter"/> to write field infos
- /// to the index
+ /// Returns a <see cref="Codecs.FieldInfosWriter"/> to write field infos
+ /// to the index.
/// </summary>
public abstract FieldInfosWriter FieldInfosWriter { get; }
}
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/FieldInfosReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/FieldInfosReader.cs b/src/Lucene.Net/Codecs/FieldInfosReader.cs
index 3e86d1b..8014e33 100644
--- a/src/Lucene.Net/Codecs/FieldInfosReader.cs
+++ b/src/Lucene.Net/Codecs/FieldInfosReader.cs
@@ -22,22 +22,23 @@ namespace Lucene.Net.Codecs
using IOContext = Lucene.Net.Store.IOContext;
/// <summary>
- /// Codec API for reading <seealso cref="FieldInfos"/>.
+ /// Codec API for reading <see cref="FieldInfos"/>.
+ /// <para/>
/// @lucene.experimental
/// </summary>
public abstract class FieldInfosReader
{
/// <summary>
/// Sole constructor. (For invocation by subclass
- /// constructors, typically implicit.)
+ /// constructors, typically implicit.)
/// </summary>
protected internal FieldInfosReader()
{
}
/// <summary>
- /// Read the <seealso cref="FieldInfos"/> previously written with {@link
- /// FieldInfosWriter}.
+ /// Read the <see cref="FieldInfos"/> previously written with
+ /// <see cref="FieldInfosWriter"/>.
/// </summary>
public abstract FieldInfos Read(Directory directory, string segmentName, string segmentSuffix, IOContext iocontext);
}
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/FieldInfosWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/FieldInfosWriter.cs b/src/Lucene.Net/Codecs/FieldInfosWriter.cs
index cd06e29..92bfe07 100644
--- a/src/Lucene.Net/Codecs/FieldInfosWriter.cs
+++ b/src/Lucene.Net/Codecs/FieldInfosWriter.cs
@@ -22,22 +22,23 @@ namespace Lucene.Net.Codecs
using IOContext = Lucene.Net.Store.IOContext;
/// <summary>
- /// Codec API for writing <seealso cref="FieldInfos"/>.
+ /// Codec API for writing <see cref="FieldInfos"/>.
+ /// <para/>
/// @lucene.experimental
/// </summary>
public abstract class FieldInfosWriter
{
/// <summary>
/// Sole constructor. (For invocation by subclass
- /// constructors, typically implicit.)
+ /// constructors, typically implicit.)
/// </summary>
protected internal FieldInfosWriter()
{
}
/// <summary>
- /// Writes the provided <seealso cref="FieldInfos"/> to the
- /// directory.
+ /// Writes the provided <see cref="FieldInfos"/> to the
+ /// directory.
/// </summary>
public abstract void Write(Directory directory, string segmentName, string segmentSuffix, FieldInfos infos, IOContext context);
}
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/FieldsConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/FieldsConsumer.cs b/src/Lucene.Net/Codecs/FieldsConsumer.cs
index ee6d7cd..8c29e2e 100644
--- a/src/Lucene.Net/Codecs/FieldsConsumer.cs
+++ b/src/Lucene.Net/Codecs/FieldsConsumer.cs
@@ -32,30 +32,30 @@ namespace Lucene.Net.Codecs
/// payloads postings. Concrete implementations of this
/// actually do "something" with the postings (write it into
/// the index in a specific format).
- /// <p>
+ /// <para/>
/// The lifecycle is:
- /// <ol>
- /// <li>FieldsConsumer is created by
- /// <seealso cref="PostingsFormat#fieldsConsumer(SegmentWriteState)"/>.
- /// <li>For each field, <seealso cref="#addField(FieldInfo)"/> is called,
- /// returning a <seealso cref="TermsConsumer"/> for the field.
- /// <li>After all fields are added, the consumer is <seealso cref="#close"/>d.
- /// </ol>
- ///
+ /// <list type="number">
+ /// <item><description>FieldsConsumer is created by
+ /// <see cref="PostingsFormat.FieldsConsumer(Index.SegmentWriteState)"/>.</description></item>
+ /// <item><description>For each field, <see cref="AddField(FieldInfo)"/> is called,
+ /// returning a <see cref="TermsConsumer"/> for the field.</description></item>
+ /// <item><description>After all fields are added, the consumer is <see cref="Dispose()"/>d.</description></item>
+ /// </list>
+ /// <para/>
/// @lucene.experimental
/// </summary>
public abstract class FieldsConsumer : IDisposable
{
/// <summary>
/// Sole constructor. (For invocation by subclass
- /// constructors, typically implicit.)
+ /// constructors, typically implicit.)
/// </summary>
protected internal FieldsConsumer()
{
}
/// <summary>
- /// Add a new field </summary>
+ /// Add a new field. </summary>
public abstract TermsConsumer AddField(FieldInfo field);
/// <summary>
@@ -68,15 +68,16 @@ namespace Lucene.Net.Codecs
}
/// <summary>
- /// Called when we are done adding everything. </summary>
+ /// Implementations must override and should dispose all resources used by this instance.
+ /// </summary>
protected abstract void Dispose(bool disposing);
/// <summary>
- /// Called during merging to merge all <seealso cref="Fields"/> from
- /// sub-readers. this must recurse to merge all postings
- /// (terms, docs, positions, etc.). A {@link
- /// PostingsFormat} can override this default
- /// implementation to do its own merging.
+ /// Called during merging to merge all <see cref="Fields"/> from
+ /// sub-readers. This must recurse to merge all postings
+ /// (terms, docs, positions, etc.). A
+ /// <see cref="PostingsFormat"/> can override this default
+ /// implementation to do its own merging.
/// </summary>
public virtual void Merge(MergeState mergeState, Fields fields)
{