Posted to commits@lucenenet.apache.org by ni...@apache.org on 2017/06/01 22:48:57 UTC

[1/9] lucenenet git commit: SWEEP: Changed <item> to <item><description></description></item> in documentation comments

Repository: lucenenet
Updated Branches:
  refs/heads/master cfeaf2841 -> f43d23261


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Suggest/Suggest/Fst/FSTCompletionBuilder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Suggest/Suggest/Fst/FSTCompletionBuilder.cs b/src/Lucene.Net.Suggest/Suggest/Fst/FSTCompletionBuilder.cs
index c29c2f3..1146020 100644
--- a/src/Lucene.Net.Suggest/Suggest/Fst/FSTCompletionBuilder.cs
+++ b/src/Lucene.Net.Suggest/Suggest/Fst/FSTCompletionBuilder.cs
@@ -30,16 +30,16 @@ namespace Lucene.Net.Search.Suggest.Fst
     /// <para>
     /// The construction step in the object finalizer works as follows:
     /// <list type="bullet">
-    /// <item>A set of input terms and their buckets is given.</item>
-    /// <item>All terms in the input are prefixed with a synthetic pseudo-character
+    /// <item><description>A set of input terms and their buckets is given.</description></item>
+    /// <item><description>All terms in the input are prefixed with a synthetic pseudo-character
    /// (code) of the weight bucket the term fell into. For example, a term
    /// <c>abc</c> with a discretized weight equal to '1' would become
-    /// <c>1abc</c>.</item>
-    /// <item>The terms are then sorted by the raw values of their UTF-8
-    /// representations (including the synthetic bucket code in front).</item>
-    /// <item>A finite state automaton (<see cref="FST"/>) is constructed from the input. The
+    /// <c>1abc</c>.</description></item>
+    /// <item><description>The terms are then sorted by the raw values of their UTF-8
+    /// representations (including the synthetic bucket code in front).</description></item>
+    /// <item><description>A finite state automaton (<see cref="FST"/>) is constructed from the input. The
     /// root node has arcs labeled with all possible weights. We cache all these
-    /// arcs, highest-weight first.</item>
+    /// arcs, highest-weight first.</description></item>
     /// </list>
     /// 
     /// </para>
@@ -47,21 +47,21 @@ namespace Lucene.Net.Search.Suggest.Fst
     /// At runtime, in <see cref="FSTCompletion.DoLookup(string, int)"/>, 
     /// the automaton is utilized as follows:
     /// <list type="bullet">
-    /// <item>For each possible term weight encoded in the automaton (cached arcs from
+    /// <item><description>For each possible term weight encoded in the automaton (cached arcs from
     /// the root above), starting with the highest one, we descend along the path of
     /// the input key. If the key is not a prefix of a sequence in the automaton
-    /// (path ends prematurely), we exit immediately -- no completions.</item>
-    /// <item>Otherwise, we have found an internal automaton node that ends the key.
+    /// (path ends prematurely), we exit immediately -- no completions.</description></item>
+    /// <item><description>Otherwise, we have found an internal automaton node that ends the key.
    /// <b>The entire subautomaton (all paths) starting from this node forms the key's
     /// completions.</b> We start the traversal of this subautomaton. Every time we
     /// reach a final state (arc), we add a single suggestion to the list of results
     /// (the weight of this suggestion is constant and equal to the root path we
     /// started from). The tricky part is that because automaton edges are sorted and
     /// we scan depth-first, we can terminate the entire procedure as soon as we
-    /// collect as many suggestions as the user requested.</item>
-    /// <item>In case the number of suggestions collected in the step above is still
+    /// collect as many suggestions as the user requested.</description></item>
+    /// <item><description>In case the number of suggestions collected in the step above is still
     /// insufficient, we proceed to the next (smaller) weight leaving the root node
-    /// and repeat the same algorithm again.</item>
+    /// and repeat the same algorithm again.</description></item>
     /// </list>
     /// 
     /// <h2>Runtime behavior and performance characteristic</h2>

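For a concrete sense of how the two phases above fit together, here is a
minimal sketch of driving FSTCompletionBuilder, assuming the Lucene.NET 4.8
suggest API (Add/Build as in upstream Lucene; DoLookup is the method the
comment itself references):

    using System;
    using Lucene.Net.Search.Suggest.Fst;
    using Lucene.Net.Util;

    // Construction phase: feed terms with discretized weights (buckets).
    var builder = new FSTCompletionBuilder();
    builder.Add(new BytesRef("abc"), 1);  // term in a low-weight bucket
    builder.Add(new BytesRef("abd"), 3);  // higher bucket ranks first
    FSTCompletion completion = builder.Build();

    // Lookup phase: cached root arcs are tried highest-weight first.
    foreach (var c in completion.DoLookup("ab", 2))
    {
        Console.WriteLine(c);  // completions for the prefix "ab"
    }
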
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Analysis/Analyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Analysis/Analyzer.cs b/src/Lucene.Net/Analysis/Analyzer.cs
index ba27c90..80e5ffb 100644
--- a/src/Lucene.Net/Analysis/Analyzer.cs
+++ b/src/Lucene.Net/Analysis/Analyzer.cs
@@ -45,22 +45,22 @@ namespace Lucene.Net.Analysis
     /// <para/>
     /// For some concrete implementations bundled with Lucene, look in the analysis modules:
     /// <list type="bullet">
-    ///   <item>Common:
-    ///       Analyzers for indexing content in different languages and domains.</item>
-    ///   <item>ICU:
-    ///       Exposes functionality from ICU to Apache Lucene.</item>
-    ///   <item>Kuromoji:
-    ///       Morphological analyzer for Japanese text.</item>
-    ///   <item>Morfologik:
-    ///       Dictionary-driven lemmatization for the Polish language.</item>
-    ///   <item>Phonetic:
-    ///       Analysis for indexing phonetic signatures (for sounds-alike search).</item>
-    ///   <item>Smart Chinese:
-    ///       Analyzer for Simplified Chinese, which indexes words.</item>
-    ///   <item>Stempel:
-    ///       Algorithmic Stemmer for the Polish Language.</item>
-    ///   <item>UIMA:
-    ///       Analysis integration with Apache UIMA.</item>
+    ///   <item><description>Common:
+    ///       Analyzers for indexing content in different languages and domains.</description></item>
+    ///   <item><description>ICU:
+    ///       Exposes functionality from ICU to Apache Lucene.</description></item>
+    ///   <item><description>Kuromoji:
+    ///       Morphological analyzer for Japanese text.</description></item>
+    ///   <item><description>Morfologik:
+    ///       Dictionary-driven lemmatization for the Polish language.</description></item>
+    ///   <item><description>Phonetic:
+    ///       Analysis for indexing phonetic signatures (for sounds-alike search).</description></item>
+    ///   <item><description>Smart Chinese:
+    ///       Analyzer for Simplified Chinese, which indexes words.</description></item>
+    ///   <item><description>Stempel:
+    ///       Algorithmic Stemmer for the Polish Language.</description></item>
+    ///   <item><description>UIMA:
+    ///       Analysis integration with Apache UIMA.</description></item>
     /// </list>
     /// </summary>
     public abstract class Analyzer : IDisposable

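As a complement to the bundled analyzers listed above, a minimal custom
Analyzer looks roughly like this -- a sketch assuming the Lucene.NET 4.8
shape of CreateComponents (a Tokenizer wrapped by TokenFilters):

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Util;

    public sealed class LowercaseWhitespaceAnalyzer : Analyzer
    {
        protected override TokenStreamComponents CreateComponents(
            string fieldName, TextReader reader)
        {
            // Split on whitespace, then lowercase each token.
            Tokenizer source = new WhitespaceTokenizer(LuceneVersion.LUCENE_48, reader);
            TokenStream result = new LowerCaseFilter(LuceneVersion.LUCENE_48, source);
            return new TokenStreamComponents(source, result);
        }
    }
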
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Analysis/Token.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Analysis/Token.cs b/src/Lucene.Net/Analysis/Token.cs
index 8e8cf07..be1938e 100644
--- a/src/Lucene.Net/Analysis/Token.cs
+++ b/src/Lucene.Net/Analysis/Token.cs
@@ -77,38 +77,38 @@ namespace Lucene.Net.Analysis
     /// for details.</para>
     /// <para>Typical Token reuse patterns:
     /// <list type="bullet">
-    ///     <item> Copying text from a string (type is reset to <see cref="TypeAttribute.DEFAULT_TYPE"/> if not specified):
+    ///     <item><description> Copying text from a string (type is reset to <see cref="TypeAttribute.DEFAULT_TYPE"/> if not specified):
     ///     <code>
     ///         return reusableToken.Reinit(string, startOffset, endOffset[, type]);
     ///     </code>
-    ///     </item>
-    ///     <item> Copying some text from a string (type is reset to <see cref="TypeAttribute.DEFAULT_TYPE"/> if not specified):
+    ///     </description></item>
+    ///     <item><description> Copying some text from a string (type is reset to <see cref="TypeAttribute.DEFAULT_TYPE"/> if not specified):
     ///     <code>
     ///         return reusableToken.Reinit(string, 0, string.Length, startOffset, endOffset[, type]);
     ///     </code>
-    ///     </item>
-    ///     <item> Copying text from char[] buffer (type is reset to <see cref="TypeAttribute.DEFAULT_TYPE"/> if not specified):
+    ///     </description></item>
+    ///     <item><description> Copying text from char[] buffer (type is reset to <see cref="TypeAttribute.DEFAULT_TYPE"/> if not specified):
     ///     <code>
     ///         return reusableToken.Reinit(buffer, 0, buffer.Length, startOffset, endOffset[, type]);
     ///     </code>
-    ///     </item>
-    ///     <item> Copying some text from a char[] buffer (type is reset to <see cref="TypeAttribute.DEFAULT_TYPE"/> if not specified):
+    ///     </description></item>
+    ///     <item><description> Copying some text from a char[] buffer (type is reset to <see cref="TypeAttribute.DEFAULT_TYPE"/> if not specified):
     ///     <code>
     ///         return reusableToken.Reinit(buffer, start, end - start, startOffset, endOffset[, type]);
     ///     </code>
-    ///     </item>
-    ///     <item> Copying from one <see cref="Token"/> to another (type is reset to <see cref="TypeAttribute.DEFAULT_TYPE"/> if not specified):
+    ///     </description></item>
+    ///     <item><description> Copying from one <see cref="Token"/> to another (type is reset to <see cref="TypeAttribute.DEFAULT_TYPE"/> if not specified):
     ///     <code>
     ///         return reusableToken.Reinit(source.Buffer, 0, source.Length, source.StartOffset, source.EndOffset[, source.Type]);
     ///     </code>
-    ///     </item>
+    ///     </description></item>
     /// </list>
     /// A few things to note:
     /// <list type="bullet">
-    ///     <item><see cref="Clear()"/> initializes all of the fields to default values. this was changed in contrast to Lucene 2.4, but should affect no one.</item>
-    ///     <item>Because <see cref="TokenStream"/>s can be chained, one cannot assume that the <see cref="Token"/>'s current type is correct.</item>
-    ///     <item>The startOffset and endOffset represent the start and end offsets in the source text, so be careful in adjusting them.</item>
-    ///     <item>When caching a reusable token, clone it. When injecting a cached token into a stream that can be reset, clone it again.</item>
+    ///     <item><description><see cref="Clear()"/> initializes all of the fields to default values. this was changed in contrast to Lucene 2.4, but should affect no one.</description></item>
+    ///     <item><description>Because <see cref="TokenStream"/>s can be chained, one cannot assume that the <see cref="Token"/>'s current type is correct.</description></item>
+    ///     <item><description>The startOffset and endOffset represent the start and end offsets in the source text, so be careful in adjusting them.</description></item>
+    ///     <item><description>When caching a reusable token, clone it. When injecting a cached token into a stream that can be reset, clone it again.</description></item>
     /// </list>
     /// </para>
     /// <para>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Analysis/TokenAttributes/IPositionIncrementAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Analysis/TokenAttributes/IPositionIncrementAttribute.cs b/src/Lucene.Net/Analysis/TokenAttributes/IPositionIncrementAttribute.cs
index 3d47b7d..f3adee1 100644
--- a/src/Lucene.Net/Analysis/TokenAttributes/IPositionIncrementAttribute.cs
+++ b/src/Lucene.Net/Analysis/TokenAttributes/IPositionIncrementAttribute.cs
@@ -29,19 +29,19 @@ namespace Lucene.Net.Analysis.TokenAttributes
     /// <para/>Some common uses for this are:
     /// 
     /// <list type="bullet">
-    /// <item>Set it to zero to put multiple terms in the same position.  This is
+    /// <item><description>Set it to zero to put multiple terms in the same position.  This is
     /// useful if, e.g., a word has multiple stems.  Searches for phrases
     /// including either stem will match.  In this case, all but the first stem's
     /// increment should be set to zero: the increment of the first instance
     /// should be one.  Repeating a token with an increment of zero can also be
-    /// used to boost the scores of matches on that token.</item>
+    /// used to boost the scores of matches on that token.</description></item>
     ///
-    /// <item>Set it to values greater than one to inhibit exact phrase matches.
+    /// <item><description>Set it to values greater than one to inhibit exact phrase matches.
     /// If, for example, one does not want phrases to match across removed stop
     /// words, then one could build a stop word filter that removes stop words and
     /// also sets the increment to the number of stop words removed before each
     /// non-stop word.  Then exact phrase queries will only match when the terms
-    /// occur with no intervening stop words.</item>
+    /// occur with no intervening stop words.</description></item>
     /// </list>
     /// </summary>
     /// <seealso cref="Lucene.Net.Index.DocsAndPositionsEnum"/>

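The stop-word case above translates into a filter along these lines -- a
sketch, not the shipped StopFilter, assuming the Lucene.NET 4.8 attribute
API (AddAttribute<T>(), the PositionIncrement property, and the m_input
field on TokenFilter):

    using System;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.TokenAttributes;

    // Drops tokens matching a predicate and adds the width of the hole to
    // the next surviving token, so phrases cannot match across the gap.
    public sealed class GapAwareStopFilter : TokenFilter
    {
        private readonly Func<string, bool> isStopWord;
        private readonly ICharTermAttribute termAtt;
        private readonly IPositionIncrementAttribute posIncrAtt;

        public GapAwareStopFilter(TokenStream input, Func<string, bool> isStopWord)
            : base(input)
        {
            this.isStopWord = isStopWord;
            termAtt = AddAttribute<ICharTermAttribute>();
            posIncrAtt = AddAttribute<IPositionIncrementAttribute>();
        }

        public override bool IncrementToken()
        {
            int skipped = 0;
            while (m_input.IncrementToken())
            {
                if (isStopWord(termAtt.ToString()))
                {
                    skipped += posIncrAtt.PositionIncrement;  // remember the hole
                    continue;
                }
                posIncrAtt.PositionIncrement += skipped;      // widen the gap
                return true;
            }
            return false;
        }
    }
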
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Analysis/TokenStream.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Analysis/TokenStream.cs b/src/Lucene.Net/Analysis/TokenStream.cs
index 7cec955..f9ec60f 100644
--- a/src/Lucene.Net/Analysis/TokenStream.cs
+++ b/src/Lucene.Net/Analysis/TokenStream.cs
@@ -31,9 +31,9 @@ namespace Lucene.Net.Analysis
     /// <para/>
    /// This is an abstract class; concrete subclasses are:
     /// <list type="bullet">
-    ///     <item><see cref="Tokenizer"/>, a <see cref="TokenStream"/> whose input is a <see cref="System.IO.TextReader"/>; and</item>
-    ///     <item><see cref="TokenFilter"/>, a <see cref="TokenStream"/> whose input is another
-    ///         <see cref="TokenStream"/>.</item>
+    ///     <item><description><see cref="Tokenizer"/>, a <see cref="TokenStream"/> whose input is a <see cref="System.IO.TextReader"/>; and</description></item>
+    ///     <item><description><see cref="TokenFilter"/>, a <see cref="TokenStream"/> whose input is another
+    ///         <see cref="TokenStream"/>.</description></item>
     /// </list>
     /// A new <see cref="TokenStream"/> API has been introduced with Lucene 2.9. this API
     /// has moved from being <see cref="Token"/>-based to <see cref="Util.IAttribute"/>-based. While
@@ -49,17 +49,17 @@ namespace Lucene.Net.Analysis
     /// <para/>
     /// <b>The workflow of the new <see cref="TokenStream"/> API is as follows:</b>
     /// <list type="number">
-    ///     <item>Instantiation of <see cref="TokenStream"/>/<see cref="TokenFilter"/>s which add/get
-    ///         attributes to/from the <see cref="AttributeSource"/>.</item>
-    ///     <item>The consumer calls <see cref="TokenStream.Reset()"/>.</item>
-    ///     <item>The consumer retrieves attributes from the stream and stores local
-    ///         references to all attributes it wants to access.</item>
-    ///     <item>The consumer calls <see cref="IncrementToken()"/> until it returns false
-    ///         consuming the attributes after each call.</item>
-    ///     <item>The consumer calls <see cref="End()"/> so that any end-of-stream operations
-    ///         can be performed.</item>
-    ///     <item>The consumer calls <see cref="Dispose()"/> to release any resource when finished
-    ///         using the <see cref="TokenStream"/>.</item>
+    ///     <item><description>Instantiation of <see cref="TokenStream"/>/<see cref="TokenFilter"/>s which add/get
+    ///         attributes to/from the <see cref="AttributeSource"/>.</description></item>
+    ///     <item><description>The consumer calls <see cref="TokenStream.Reset()"/>.</description></item>
+    ///     <item><description>The consumer retrieves attributes from the stream and stores local
+    ///         references to all attributes it wants to access.</description></item>
+    ///     <item><description>The consumer calls <see cref="IncrementToken()"/> until it returns false
+    ///         consuming the attributes after each call.</description></item>
+    ///     <item><description>The consumer calls <see cref="End()"/> so that any end-of-stream operations
+    ///         can be performed.</description></item>
+    ///     <item><description>The consumer calls <see cref="Dispose()"/> to release any resource when finished
+    ///         using the <see cref="TokenStream"/>.</description></item>
     /// </list>
     /// To make sure that filters and consumers know which attributes are available,
     /// the attributes must be added during instantiation. Filters and consumers are

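The numbered workflow above maps onto consumer code almost one-to-one -- a
minimal sketch, assuming the Lucene.NET 4.8 API (GetTokenStream,
AddAttribute<T>(); WhitespaceAnalyzer and the field name are illustrative):

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;

    Analyzer analyzer = new WhitespaceAnalyzer(LuceneVersion.LUCENE_48);

    // Step 1: the analyzer instantiates the Tokenizer/TokenFilter chain.
    using (TokenStream ts = analyzer.GetTokenStream("body", new StringReader("some text here")))
    {
        // Step 3: keep local references to the attributes we will read.
        ICharTermAttribute termAtt = ts.AddAttribute<ICharTermAttribute>();

        ts.Reset();                       // step 2
        while (ts.IncrementToken())       // step 4
        {
            Console.WriteLine(termAtt.ToString());
        }
        ts.End();                         // step 5
    }                                     // step 6: Dispose() via 'using'
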
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Codecs/Codec.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Codec.cs b/src/Lucene.Net/Codecs/Codec.cs
index e70cd5e..a9f2448 100644
--- a/src/Lucene.Net/Codecs/Codec.cs
+++ b/src/Lucene.Net/Codecs/Codec.cs
@@ -31,13 +31,13 @@ namespace Lucene.Net.Codecs
     /// <para/>
     /// To implement your own codec:
     /// <list type="number">
-    ///     <item>Subclass this class.</item>
-    ///     <item>Subclass <see cref="DefaultCodecFactory"/>, override the <see cref="DefaultCodecFactory.Initialize()"/> method,
+    ///     <item><description>Subclass this class.</description></item>
+    ///     <item><description>Subclass <see cref="DefaultCodecFactory"/>, override the <see cref="DefaultCodecFactory.Initialize()"/> method,
     ///         and add the line <c>base.ScanForCodecs(typeof(YourCodec).GetTypeInfo().Assembly)</c>. 
     ///         If you have any codec classes in your assembly 
     ///         that are not meant for reading, you can add the <see cref="ExcludeCodecFromScanAttribute"/> 
-    ///         to them so they are ignored by the scan.</item>
-    ///     <item>set the new <see cref="ICodecFactory"/> by calling <see cref="SetCodecFactory"/> at application startup.</item>
+    ///         to them so they are ignored by the scan.</description></item>
+    ///     <item><description>set the new <see cref="ICodecFactory"/> by calling <see cref="SetCodecFactory"/> at application startup.</description></item>
     /// </list>
     /// If your codec has dependencies, you may also override <see cref="DefaultCodecFactory.GetCodec(Type)"/> to inject 
     /// them via pure DI or a DI container. See <a href="http://blog.ploeh.dk/2014/05/19/di-friendly-framework/">DI-Friendly Framework</a>

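Spelled out, steps 1-3 above come to very little code -- a sketch reusing
the names from the remarks (MyCodec stands for your hypothetical Codec
subclass from step 1):

    using System.Reflection;
    using Lucene.Net.Codecs;

    public class MyCodecFactory : DefaultCodecFactory  // step 2
    {
        protected override void Initialize()
        {
            base.Initialize();
            // Register MyCodec (and any siblings) from this assembly.
            base.ScanForCodecs(typeof(MyCodec).GetTypeInfo().Assembly);
        }
    }

    // Step 3, once at application startup:
    //     Codec.SetCodecFactory(new MyCodecFactory());
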
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Codecs/DocValuesFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/DocValuesFormat.cs b/src/Lucene.Net/Codecs/DocValuesFormat.cs
index 813d9a1..9ef0f4d 100644
--- a/src/Lucene.Net/Codecs/DocValuesFormat.cs
+++ b/src/Lucene.Net/Codecs/DocValuesFormat.cs
@@ -34,14 +34,14 @@ namespace Lucene.Net.Codecs
     /// <para/>
     /// To implement your own format:
     /// <list type="number">
-    ///     <item>Subclass this class.</item>
-    ///     <item>Subclass <see cref="DefaultDocValuesFormatFactory"/>, override the <see cref="DefaultDocValuesFormatFactory.Initialize()"/> method,
+    ///     <item><description>Subclass this class.</description></item>
+    ///     <item><description>Subclass <see cref="DefaultDocValuesFormatFactory"/>, override the <see cref="DefaultDocValuesFormatFactory.Initialize()"/> method,
     ///         and add the line <c>base.ScanForDocValuesFormats(typeof(YourDocValuesFormat).GetTypeInfo().Assembly)</c>. 
     ///         If you have any format classes in your assembly 
     ///         that are not meant for reading, you can add the <see cref="ExcludeDocValuesFormatFromScanAttribute"/> 
-    ///         to them so they are ignored by the scan.</item>
-    ///     <item>Set the new <see cref="IDocValuesFormatFactory"/> by calling <see cref="SetDocValuesFormatFactory(IDocValuesFormatFactory)"/>
-    ///         at application startup.</item>
+    ///         to them so they are ignored by the scan.</description></item>
+    ///     <item><description>Set the new <see cref="IDocValuesFormatFactory"/> by calling <see cref="SetDocValuesFormatFactory(IDocValuesFormatFactory)"/>
+    ///         at application startup.</description></item>
     /// </list>
     /// If your format has dependencies, you may also override <see cref="DefaultDocValuesFormatFactory.GetDocValuesFormat(Type)"/>
     /// to inject them via pure DI or a DI container. See <a href="http://blog.ploeh.dk/2014/05/19/di-friendly-framework/">DI-Friendly Framework</a>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Codecs/PostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/PostingsFormat.cs b/src/Lucene.Net/Codecs/PostingsFormat.cs
index 2a74ea6..bc34e65 100644
--- a/src/Lucene.Net/Codecs/PostingsFormat.cs
+++ b/src/Lucene.Net/Codecs/PostingsFormat.cs
@@ -32,14 +32,14 @@ namespace Lucene.Net.Codecs
     /// <para/>
     /// If you implement your own format:
     /// <list type="number">
-    ///     <item>Subclass this class.</item>
-    ///     <item>Subclass <see cref="DefaultPostingsFormatFactory"/>, override <see cref="DefaultPostingsFormatFactory.Initialize()"/>,
+    ///     <item><description>Subclass this class.</description></item>
+    ///     <item><description>Subclass <see cref="DefaultPostingsFormatFactory"/>, override <see cref="DefaultPostingsFormatFactory.Initialize()"/>,
     ///         and add the line <c>base.ScanForPostingsFormats(typeof(YourPostingsFormat).GetTypeInfo().Assembly)</c>. 
     ///         If you have any format classes in your assembly 
     ///         that are not meant for reading, you can add the <see cref="ExcludePostingsFormatFromScanAttribute"/> 
-    ///         to them so they are ignored by the scan.</item>
-    ///     <item>Set the new <see cref="IPostingsFormatFactory"/> by calling <see cref="SetPostingsFormatFactory(IPostingsFormatFactory)"/> 
-    ///         at application startup.</item>
+    ///         to them so they are ignored by the scan.</description></item>
+    ///     <item><description>Set the new <see cref="IPostingsFormatFactory"/> by calling <see cref="SetPostingsFormatFactory(IPostingsFormatFactory)"/> 
+    ///         at application startup.</description></item>
     /// </list>
     /// If your format has dependencies, you may also override <see cref="DefaultPostingsFormatFactory.GetPostingsFormat(Type)"/> to inject 
     /// them via pure DI or a DI container. See <a href="http://blog.ploeh.dk/2014/05/19/di-friendly-framework/">DI-Friendly Framework</a>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Document/Field.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Document/Field.cs b/src/Lucene.Net/Document/Field.cs
index fbd7d13..54fe113 100644
--- a/src/Lucene.Net/Document/Field.cs
+++ b/src/Lucene.Net/Document/Field.cs
@@ -920,8 +920,8 @@ namespace Lucene.Net.Documents
         /// <exception cref="ArgumentNullException">if <paramref name="name"/> or <paramref name="value"/> is <c>null</c></exception>
         /// <exception cref="ArgumentException">in any of the following situations:
         /// <list type="bullet">
-        ///     <item>the field is neither stored nor indexed</item>
-        ///     <item>the field is not indexed but termVector is <see cref="TermVector.YES"/></item>
+        ///     <item><description>the field is neither stored nor indexed</description></item>
+        ///     <item><description>the field is not indexed but termVector is <see cref="TermVector.YES"/></description></item>
         /// </list>
         /// </exception>
         [Obsolete("Use StringField, TextField instead.")]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Index/AutomatonTermsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/AutomatonTermsEnum.cs b/src/Lucene.Net/Index/AutomatonTermsEnum.cs
index e969235..e60a975 100644
--- a/src/Lucene.Net/Index/AutomatonTermsEnum.cs
+++ b/src/Lucene.Net/Index/AutomatonTermsEnum.cs
@@ -34,9 +34,9 @@ namespace Lucene.Net.Index
     /// <para/>
     /// The algorithm is such:
     /// <list type="number">
-    ///     <item>As long as matches are successful, keep reading sequentially.</item>
-    ///     <item>When a match fails, skip to the next string in lexicographic order that
-    ///         does not enter a reject state.</item>
+    ///     <item><description>As long as matches are successful, keep reading sequentially.</description></item>
+    ///     <item><description>When a match fails, skip to the next string in lexicographic order that
+    ///         does not enter a reject state.</description></item>
     /// </list>
     /// <para>
     /// The algorithm does not attempt to actually skip to the next string that is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Index/DocTermOrds.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/DocTermOrds.cs b/src/Lucene.Net/Index/DocTermOrds.cs
index 1805499..237e3fc 100644
--- a/src/Lucene.Net/Index/DocTermOrds.cs
+++ b/src/Lucene.Net/Index/DocTermOrds.cs
@@ -74,36 +74,36 @@ namespace Lucene.Net.Index
     /// <remarks>
     /// Final form of the un-inverted field:
     /// <list type="bullet">
-    ///     <item>Each document points to a list of term numbers that are contained in that document.</item>
-    ///     <item>
+    ///     <item><description>Each document points to a list of term numbers that are contained in that document.</description></item>
+    ///     <item><description>
     ///         Term numbers are in sorted order, and are encoded as variable-length deltas from the
     ///         previous term number.  Real term numbers start at 2 since 0 and 1 are reserved.  A
     ///         term number of 0 signals the end of the termNumber list.
-    ///     </item>
-    ///     <item>
+    ///     </description></item>
+    ///     <item><description>
     ///         There is a single int[maxDoc()] which either contains a pointer into a byte[] for
     ///         the termNumber lists, or directly contains the termNumber list if it fits in the 4
     ///         bytes of an integer.  If the first byte in the integer is 1, the next 3 bytes
     ///         are a pointer into a byte[] where the termNumber list starts.
-    ///     </item>
-    ///     <item>
+    ///     </description></item>
+    ///     <item><description>
     ///         There are actually 256 byte arrays, to compensate for the fact that the pointers
     ///         into the byte arrays are only 3 bytes long.  The correct byte array for a document
    ///         is a function of its id.
-    ///     </item>
-    ///     <item>
+    ///     </description></item>
+    ///     <item><description>
     ///         To save space and speed up faceting, any term that matches enough documents will
     ///         not be un-inverted... it will be skipped while building the un-inverted field structure,
     ///         and will use a set intersection method during faceting.
-    ///     </item>
-    ///     <item>
+    ///     </description></item>
+    ///     <item><description>
     ///         To further save memory, the terms (the actual string values) are not all stored in
     ///         memory, but a TermIndex is used to convert term numbers to term values only
     ///         for the terms needed after faceting has completed.  Only every 128th term value
    ///         is stored, along with its corresponding term number, and this is used as an
     ///         index to find the closest term and iterate until the desired number is hit (very
     ///         much like Lucene's own internal term index).
-    ///     </item>
+    ///     </description></item>
     /// </list>
     /// </remarks>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Index/DocumentsWriterDeleteQueue.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/DocumentsWriterDeleteQueue.cs b/src/Lucene.Net/Index/DocumentsWriterDeleteQueue.cs
index 36052ef..f035d65 100644
--- a/src/Lucene.Net/Index/DocumentsWriterDeleteQueue.cs
+++ b/src/Lucene.Net/Index/DocumentsWriterDeleteQueue.cs
@@ -52,13 +52,13 @@ namespace Lucene.Net.Index
     /// DWPT updates a document it:
     ///
     /// <list type="number">
-    ///     <item>consumes a document and finishes its processing</item>
-    ///     <item>updates its private <see cref="DeleteSlice"/> either by calling
+    ///     <item><description>consumes a document and finishes its processing</description></item>
+    ///     <item><description>updates its private <see cref="DeleteSlice"/> either by calling
     ///     <see cref="UpdateSlice(DeleteSlice)"/> or <see cref="Add(Term, DeleteSlice)"/> (if the
-    ///         document has a delTerm)</item>
-    ///     <item>applies all deletes in the slice to its private <see cref="BufferedUpdates"/>
-    ///         and resets it</item>
-    ///     <item>increments its internal document id</item>
+    ///         document has a delTerm)</description></item>
+    ///     <item><description>applies all deletes in the slice to its private <see cref="BufferedUpdates"/>
+    ///         and resets it</description></item>
+    ///     <item><description>increments its internal document id</description></item>
     /// </list>
     ///
    /// The DWPT also doesn't apply its current document's delete term until it has

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Index/FlushByRamOrCountsPolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/FlushByRamOrCountsPolicy.cs b/src/Lucene.Net/Index/FlushByRamOrCountsPolicy.cs
index 5ed2c7b..8c4da3c 100644
--- a/src/Lucene.Net/Index/FlushByRamOrCountsPolicy.cs
+++ b/src/Lucene.Net/Index/FlushByRamOrCountsPolicy.cs
@@ -28,13 +28,13 @@ namespace Lucene.Net.Index
     /// number of buffered delete terms.
     ///
     /// <list type="bullet">
-    ///     <item>
+    ///     <item><description>
     ///         <see cref="OnDelete(DocumentsWriterFlushControl, DocumentsWriterPerThreadPool.ThreadState)"/>
     ///         - applies pending delete operations based on the global number of buffered
     ///         delete terms iff <see cref="LiveIndexWriterConfig.MaxBufferedDeleteTerms"/> is
     ///         enabled
-    ///     </item>
-    ///     <item>
+    ///     </description></item>
+    ///     <item><description>
     ///         <see cref="OnInsert(DocumentsWriterFlushControl, DocumentsWriterPerThreadPool.ThreadState)"/>
     ///         - flushes either on the number of documents per
     ///         <see cref="DocumentsWriterPerThread"/> (
@@ -42,15 +42,15 @@ namespace Lucene.Net.Index
     ///         memory consumption in the current indexing session iff
     ///         <see cref="LiveIndexWriterConfig.MaxBufferedDocs"/> or
     ///         <see cref="LiveIndexWriterConfig.RAMBufferSizeMB"/> is enabled respectively
-    ///     </item>
-    ///     <item>
+    ///     </description></item>
+    ///     <item><description>
     ///         <see cref="FlushPolicy.OnUpdate(DocumentsWriterFlushControl, DocumentsWriterPerThreadPool.ThreadState)"/>
     ///         - calls
     ///         <see cref="OnInsert(DocumentsWriterFlushControl, DocumentsWriterPerThreadPool.ThreadState)"/>
     ///         and
     ///         <see cref="OnDelete(DocumentsWriterFlushControl, DocumentsWriterPerThreadPool.ThreadState)"/>
     ///         in order
-    ///     </item>
+    ///     </description></item>
     /// </list>
     /// All <see cref="IndexWriterConfig"/> settings are used to mark
     /// <see cref="DocumentsWriterPerThread"/> as flush pending during indexing with

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Index/FlushPolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/FlushPolicy.cs b/src/Lucene.Net/Index/FlushPolicy.cs
index d342b0b..8eca198 100644
--- a/src/Lucene.Net/Index/FlushPolicy.cs
+++ b/src/Lucene.Net/Index/FlushPolicy.cs
@@ -30,10 +30,10 @@ namespace Lucene.Net.Index
     /// <para/>
     /// Segments are traditionally flushed by:
     /// <list type="bullet">
-    ///     <item>RAM consumption - configured via
-    ///         <see cref="LiveIndexWriterConfig.RAMBufferSizeMB"/></item>
-    ///     <item>Number of RAM resident documents - configured via
-    ///         <see cref="LiveIndexWriterConfig.MaxBufferedDocs"/></item>
+    ///     <item><description>RAM consumption - configured via
+    ///         <see cref="LiveIndexWriterConfig.RAMBufferSizeMB"/></description></item>
+    ///     <item><description>Number of RAM resident documents - configured via
+    ///         <see cref="LiveIndexWriterConfig.MaxBufferedDocs"/></description></item>
     /// </list>
     /// The policy also applies pending delete operations (by term and/or query),
     /// given the threshold set in

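Both flush triggers described above are configured on IndexWriterConfig -- a
sketch, assuming the Lucene.NET 4.8 property names:

    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Index;
    using Lucene.Net.Store;
    using Lucene.Net.Util;

    var analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48);
    var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)
    {
        RAMBufferSizeMB = 64,  // flush when buffered state reaches 64 MB
        MaxBufferedDocs = IndexWriterConfig.DISABLE_AUTO_FLUSH  // no doc-count trigger
    };
    using var dir = new RAMDirectory();
    using var writer = new IndexWriter(dir, config);
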
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Index/IndexReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/IndexReader.cs b/src/Lucene.Net/Index/IndexReader.cs
index c9ade02..3b72fc7 100644
--- a/src/Lucene.Net/Index/IndexReader.cs
+++ b/src/Lucene.Net/Index/IndexReader.cs
@@ -36,16 +36,16 @@ namespace Lucene.Net.Index
     ///
     /// <para/>There are two different types of <see cref="IndexReader"/>s:
     /// <list type="bullet">
-    ///     <item><see cref="AtomicReader"/>: These indexes do not consist of several sub-readers,
+    ///     <item><description><see cref="AtomicReader"/>: These indexes do not consist of several sub-readers,
     ///         they are atomic. They support retrieval of stored fields, doc values, terms,
-    ///         and postings.</item>
-    ///     <item><see cref="CompositeReader"/>: Instances (like <see cref="DirectoryReader"/>)
+    ///         and postings.</description></item>
+    ///     <item><description><see cref="CompositeReader"/>: Instances (like <see cref="DirectoryReader"/>)
     ///         of this reader can only
     ///         be used to get stored fields from the underlying <see cref="AtomicReader"/>s,
     ///         but it is not possible to directly retrieve postings. To do that, get
     ///         the sub-readers via <see cref="CompositeReader.GetSequentialSubReaders()"/>.
     ///         Alternatively, you can mimic an <see cref="AtomicReader"/> (with a serious slowdown),
-    ///         by wrapping composite readers with <see cref="SlowCompositeReaderWrapper"/>.</item>
+    ///         by wrapping composite readers with <see cref="SlowCompositeReaderWrapper"/>.</description></item>
     /// </list>
     ///
     /// <para/><see cref="IndexReader"/> instances for indexes on disk are usually constructed

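The two reader types meet in a common pattern: open a composite
DirectoryReader, then drop down to its AtomicReader leaves for
postings-level access -- a sketch assuming the Lucene.NET 4.8 API (the
index path and field name are illustrative):

    using Lucene.Net.Index;
    using Lucene.Net.Store;

    using (Directory dir = FSDirectory.Open(new System.IO.DirectoryInfo("/path/to/index")))
    using (DirectoryReader reader = DirectoryReader.Open(dir))
    {
        foreach (AtomicReaderContext leaf in reader.Leaves)
        {
            AtomicReader atomic = leaf.AtomicReader;
            Terms terms = atomic.GetTerms("body");  // may be null if the field is absent
        }
    }
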
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Store/CompoundFileDirectory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Store/CompoundFileDirectory.cs b/src/Lucene.Net/Store/CompoundFileDirectory.cs
index 29ce68e..58cca90 100644
--- a/src/Lucene.Net/Store/CompoundFileDirectory.cs
+++ b/src/Lucene.Net/Store/CompoundFileDirectory.cs
@@ -42,29 +42,29 @@ namespace Lucene.Net.Store
     /// <para/>
     /// Files:
     /// <list type="bullet">
-    ///     <item><c>.cfs</c>: An optional "virtual" file consisting of all the other
-    ///         index files for systems that frequently run out of file handles.</item>
-    ///     <item><c>.cfe</c>: The "virtual" compound file's entry table holding all
-    ///         entries in the corresponding .cfs file.</item>
+    ///     <item><description><c>.cfs</c>: An optional "virtual" file consisting of all the other
+    ///         index files for systems that frequently run out of file handles.</description></item>
+    ///     <item><description><c>.cfe</c>: The "virtual" compound file's entry table holding all
+    ///         entries in the corresponding .cfs file.</description></item>
     /// </list>
     /// <para>Description:</para>
     /// <list type="bullet">
-    ///     <item>Compound (.cfs) --&gt; Header, FileData <sup>FileCount</sup></item>
-    ///     <item>Compound Entry Table (.cfe) --&gt; Header, FileCount, &lt;FileName,
-    ///         DataOffset, DataLength&gt; <sup>FileCount</sup>, Footer</item>
-    ///     <item>Header --&gt; <see cref="CodecUtil.WriteHeader"/></item>
-    ///     <item>FileCount --&gt; <see cref="DataOutput.WriteVInt32"/></item>
-    ///     <item>DataOffset,DataLength --&gt; <see cref="DataOutput.WriteInt64"/></item>
-    ///     <item>FileName --&gt; <see cref="DataOutput.WriteString"/></item>
-    ///     <item>FileData --&gt; raw file data</item>
-    ///     <item>Footer --&gt; <see cref="CodecUtil.WriteFooter"/></item>
+    ///     <item><description>Compound (.cfs) --&gt; Header, FileData <sup>FileCount</sup></description></item>
+    ///     <item><description>Compound Entry Table (.cfe) --&gt; Header, FileCount, &lt;FileName,
+    ///         DataOffset, DataLength&gt; <sup>FileCount</sup>, Footer</description></item>
+    ///     <item><description>Header --&gt; <see cref="CodecUtil.WriteHeader"/></description></item>
+    ///     <item><description>FileCount --&gt; <see cref="DataOutput.WriteVInt32"/></description></item>
+    ///     <item><description>DataOffset,DataLength --&gt; <see cref="DataOutput.WriteInt64"/></description></item>
+    ///     <item><description>FileName --&gt; <see cref="DataOutput.WriteString"/></description></item>
+    ///     <item><description>FileData --&gt; raw file data</description></item>
+    ///     <item><description>Footer --&gt; <see cref="CodecUtil.WriteFooter"/></description></item>
     /// </list>
     /// <para>Notes:</para>
     /// <list type="bullet">
-    ///   <item>FileCount indicates how many files are contained in this compound file.
-    ///         The entry table that follows has that many entries.</item>
-    ///   <item>Each directory entry contains a long pointer to the start of this file's data
-    ///         section, the file's length, and a <see cref="string"/> with that file's name.</item>
+    ///   <item><description>FileCount indicates how many files are contained in this compound file.
+    ///         The entry table that follows has that many entries.</description></item>
+    ///   <item><description>Each directory entry contains a long pointer to the start of this file's data
+    ///         section, the file's length, and a <see cref="string"/> with that file's name.</description></item>
     /// </list>
     /// <para/>
     /// @lucene.experimental

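Reading an existing compound file per the layout above -- a sketch assuming
the Lucene.NET 4.8 constructor shape (Directory, string, IOContext, bool
openForWrite); the paths and segment file name are illustrative:

    using System;
    using Lucene.Net.Store;

    var fsDir = FSDirectory.Open(new System.IO.DirectoryInfo("/path/to/index"));
    using (var cfs = new CompoundFileDirectory(fsDir, "_0.cfs", IOContext.DEFAULT, false))
    {
        // ListAll() is served from the .cfe entry table described above.
        foreach (string file in cfs.ListAll())
        {
            Console.WriteLine($"{file}: {cfs.FileLength(file)} bytes");
        }
    }
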
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Store/Directory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Store/Directory.cs b/src/Lucene.Net/Store/Directory.cs
index 17cb98b..7565b8a 100644
--- a/src/Lucene.Net/Store/Directory.cs
+++ b/src/Lucene.Net/Store/Directory.cs
@@ -31,9 +31,9 @@ namespace Lucene.Net.Store
    /// .NET's I/O APIs are not used directly; rather, all I/O goes
    /// through this API.  This permits things such as:
     /// <list type="bullet">
-    ///     <item> implementation of RAM-based indices;</item>
-    ///     <item> implementation of indices stored in a database;</item>
-    ///     <item> implementation of an index as a single file;</item>
+    ///     <item><description> implementation of RAM-based indices;</description></item>
+    ///     <item><description> implementation of indices stored in a database;</description></item>
+    ///     <item><description> implementation of an index as a single file;</description></item>
     /// </list>
     /// <para/>
     /// Directory locking is implemented by an instance of
@@ -67,9 +67,9 @@ namespace Lucene.Net.Store
        /// Returns the length of a file in the directory. This method adheres to the
        /// following contract:
         /// <list>
-        ///     <item>Throws <see cref="System.IO.FileNotFoundException"/>
-        ///         if the file does not exist.</item>
-        ///     <item>Returns a value &gt;=0 if the file exists, which specifies its length.</item>
+        ///     <item><description>Throws <see cref="System.IO.FileNotFoundException"/>
+        ///         if the file does not exist.</description></item>
+        ///     <item><description>Returns a value &gt;=0 if the file exists, which specifies its length.</description></item>
         /// </list>
         /// </summary>
         /// <param name="name"> the name of the file for which to return the length. </param>

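The contract reads directly in code -- a one-line sketch (dir is any
Directory instance; the file name is illustrative):

    long length = dir.FileLength("segments_1");  // >= 0, or FileNotFoundException
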
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Store/FSDirectory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Store/FSDirectory.cs b/src/Lucene.Net/Store/FSDirectory.cs
index 42c5bfc..d1cbda1 100644
--- a/src/Lucene.Net/Store/FSDirectory.cs
+++ b/src/Lucene.Net/Store/FSDirectory.cs
@@ -36,14 +36,14 @@ namespace Lucene.Net.Store
     ///
     /// <list type="bullet">
     ///
-    ///     <item> <see cref="SimpleFSDirectory"/> is a straightforward
+    ///     <item><description> <see cref="SimpleFSDirectory"/> is a straightforward
     ///         implementation using <see cref="System.IO.FileStream"/>.
     ///         However, it has poor concurrent performance
     ///         (multiple threads will bottleneck) as it
     ///         synchronizes when multiple threads read from the
-    ///         same file.</item>
+    ///         same file.</description></item>
     ///
-    ///     <item> <see cref="NIOFSDirectory"/> uses java.nio's
+    ///     <item><description> <see cref="NIOFSDirectory"/> uses java.nio's
     ///         FileChannel's positional io when reading to avoid
     ///         synchronization when reading from the same file.
     ///         Unfortunately, due to a Windows-only <a
@@ -53,9 +53,9 @@ namespace Lucene.Net.Store
     ///         choice. Applications using <see cref="System.Threading.Thread.Interrupt()"/> or
     ///         <see cref="System.Threading.Tasks.Task{TResult}"/> should use
     ///         <see cref="SimpleFSDirectory"/> instead. See <see cref="NIOFSDirectory"/> java doc
-    ///         for details.</item>
+    ///         for details.</description></item>
     ///
-    ///     <item> <see cref="MMapDirectory"/> uses memory-mapped IO when
+    ///     <item><description> <see cref="MMapDirectory"/> uses memory-mapped IO when
     ///         reading. This is a good choice if you have plenty
    ///         of virtual memory relative to your index size, e.g.,
     ///         if you are running on a 64 bit runtime, or you are
@@ -65,7 +65,7 @@ namespace Lucene.Net.Store
     ///         Applications using <see cref="System.Threading.Thread.Interrupt()"/> or
     ///         <see cref="System.Threading.Tasks.Task"/> should use
     ///         <see cref="SimpleFSDirectory"/> instead. See <see cref="MMapDirectory"/>
-    ///         doc for details.</item>
+    ///         doc for details.</description></item>
     /// </list>
     ///
     /// Unfortunately, because of system peculiarities, there is

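In practice the choice among the three is usually delegated to the static
opener, though each can be constructed directly -- a sketch assuming the
Lucene.NET 4.8 constructors:

    using Lucene.Net.Store;

    var path = new System.IO.DirectoryInfo(@"C:\myindex");

    // Let FSDirectory pick the best implementation for this platform:
    FSDirectory dir = FSDirectory.Open(path);

    // ...or commit to one explicitly:
    FSDirectory mmap = new MMapDirectory(path);        // memory-mapped reads
    FSDirectory simple = new SimpleFSDirectory(path);  // plain FileStream I/O
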
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Support/C5.Support.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/C5.Support.cs b/src/Lucene.Net/Support/C5.Support.cs
index 50baa83..ab6600a 100644
--- a/src/Lucene.Net/Support/C5.Support.cs
+++ b/src/Lucene.Net/Support/C5.Support.cs
@@ -3961,13 +3961,13 @@ namespace Lucene.Net.Support.C5
         /// <summary>
         /// A default generic equality comparer for type T. The procedure is as follows:
         /// <list>
-        /// <item>If the actual generic argument T implements the generic interface
+        /// <item><description>If the actual generic argument T implements the generic interface
         /// <see cref="T:C5.ISequenced`1"/> for some value W of its generic parameter T,
-        /// the equalityComparer will be <see cref="T:C5.SequencedCollectionEqualityComparer`2"/></item>
-        /// <item>If the actual generic argument T implements 
+        /// the equalityComparer will be <see cref="T:C5.SequencedCollectionEqualityComparer`2"/></description></item>
+        /// <item><description>If the actual generic argument T implements 
         /// <see cref="T:C5.ICollection`1"/> for some value W of its generic parameter T,
-        /// the equalityComparer will be <see cref="T:C5.UnsequencedCollectionEqualityComparer`2"/></item>
-        /// <item>Otherwise the SCG.EqualityComparer&lt;T&gt;.Default is returned</item>
+        /// the equalityComparer will be <see cref="T:C5.UnsequencedCollectionEqualityComparer`2"/></description></item>
+        /// <item><description>Otherwise the SCG.EqualityComparer&lt;T&gt;.Default is returned</description></item>
         /// </list>   
         /// </summary>
         /// <value>The comparer</value>
@@ -5311,9 +5311,9 @@ namespace Lucene.Net.Support.C5
         /// whose only sign changes when going through items in increasing order
         /// can be 
         /// <list>
-        /// <item>from positive to zero</item>
-        /// <item>from positive to negative</item>
-        /// <item>from zero to negative</item>
+        /// <item><description>from positive to zero</description></item>
+        /// <item><description>from positive to negative</description></item>
+        /// <item><description>from zero to negative</description></item>
         /// </list>
         /// The "cut" function is supplied as the <code>CompareTo</code> method 
         /// of an object <code>c</code> implementing 
@@ -6030,10 +6030,10 @@ namespace Lucene.Net.Support.C5
     /// 
     /// <para>The methods are grouped according to
     /// <list>
-    /// <item>Extrema: report or report and delete an extremal item. This is reminiscent of simplified priority queues.</item>
-    /// <item>Nearest neighbor: report predecessor or successor in the collection of an item. Cut belongs to this group.</item>
-    /// <item>Range: report a view of a range of elements or remove all elements in a range.</item>
-    /// <item>AddSorted: add a collection of items known to be sorted in the same order (should be faster) (to be removed?)</item>
+    /// <item><description>Extrema: report or report and delete an extremal item. This is reminiscent of simplified priority queues.</description></item>
+    /// <item><description>Nearest neighbor: report predecessor or successor in the collection of an item. Cut belongs to this group.</description></item>
+    /// <item><description>Range: report a view of a range of elements or remove all elements in a range.</description></item>
+    /// <item><description>AddSorted: add a collection of items known to be sorted in the same order (should be faster) (to be removed?)</description></item>
     /// </list>
     /// </para>
     /// 
@@ -6175,9 +6175,9 @@ namespace Lucene.Net.Support.C5
         /// whose only sign changes when going through items in increasing order
         /// can be 
         /// <list>
-        /// <item>from positive to zero</item>
-        /// <item>from positive to negative</item>
-        /// <item>from zero to negative</item>
+        /// <item><description>from positive to zero</description></item>
+        /// <item><description>from positive to negative</description></item>
+        /// <item><description>from zero to negative</description></item>
         /// </list>
         /// The "cut" function is supplied as the <code>CompareTo</code> method 
         /// of an object <code>c</code> implementing 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Support/Codecs/DefaultCodecFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/Codecs/DefaultCodecFactory.cs b/src/Lucene.Net/Support/Codecs/DefaultCodecFactory.cs
index 8c1ecb1..a08acc8 100644
--- a/src/Lucene.Net/Support/Codecs/DefaultCodecFactory.cs
+++ b/src/Lucene.Net/Support/Codecs/DefaultCodecFactory.cs
@@ -28,19 +28,19 @@ namespace Lucene.Net.Codecs
     /// <para/>
     /// The most common use cases are:
     /// <list type="bullet">
-    ///     <item>subclass <see cref="DefaultCodecFactory"/> and override
+    ///     <item><description>subclass <see cref="DefaultCodecFactory"/> and override
     ///         <see cref="DefaultCodecFactory.GetCodec(Type)"/> so an external dependency injection
     ///         container can be used to supply the instances (lifetime should be singleton). Note that you could 
    ///         alternatively use the "named type" feature that many DI containers have to supply the type based on name by
-    ///         overriding <see cref="GetCodec(string)"/>.</item>
-    ///     <item>subclass <see cref="DefaultCodecFactory"/> and override
+    ///         overriding <see cref="GetCodec(string)"/>.</description></item>
+    ///     <item><description>subclass <see cref="DefaultCodecFactory"/> and override
     ///         <see cref="DefaultCodecFactory.GetCodecType(string)"/> so a type new type can be
-    ///         supplied that is not in the <see cref="DefaultCodecFactory.codecNameToTypeMap"/>.</item>
-    ///     <item>subclass <see cref="DefaultCodecFactory"/> to add new or override the default <see cref="Codec"/> 
-    ///         types by overriding <see cref="Initialize()"/> and calling <see cref="PutCodecType(Type)"/>.</item>
-    ///     <item>subclass <see cref="DefaultCodecFactory"/> to scan additional assemblies for <see cref="Codec"/>
+    ///         supplied that is not in the <see cref="DefaultCodecFactory.codecNameToTypeMap"/>.</description></item>
+    ///     <item><description>subclass <see cref="DefaultCodecFactory"/> to add new or override the default <see cref="Codec"/> 
+    ///         types by overriding <see cref="Initialize()"/> and calling <see cref="PutCodecType(Type)"/>.</description></item>
+    ///     <item><description>subclass <see cref="DefaultCodecFactory"/> to scan additional assemblies for <see cref="Codec"/>
    ///         subclasses by overriding <see cref="Initialize()"/> and calling <see cref="ScanForCodecs(Assembly)"/>.
-    ///         For performance reasons, the default behavior only loads Lucene.Net codecs.</item>
+    ///         For performance reasons, the default behavior only loads Lucene.Net codecs.</description></item>
     /// </list>
     /// <para/>
     /// To set the <see cref="ICodecFactory"/>, call <see cref="Codec.SetCodecFactory(ICodecFactory)"/>.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Support/Codecs/DefaultDocValuesFormatFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/Codecs/DefaultDocValuesFormatFactory.cs b/src/Lucene.Net/Support/Codecs/DefaultDocValuesFormatFactory.cs
index a85d6af..f03c3ae 100644
--- a/src/Lucene.Net/Support/Codecs/DefaultDocValuesFormatFactory.cs
+++ b/src/Lucene.Net/Support/Codecs/DefaultDocValuesFormatFactory.cs
@@ -28,19 +28,19 @@ namespace Lucene.Net.Codecs
     /// <para/>
     /// The most common use cases are:
     /// <list type="bullet">
-    ///     <item>subclass <see cref="DefaultDocValuesFormatFactory"/> and override
+    ///     <item><description>subclass <see cref="DefaultDocValuesFormatFactory"/> and override
     ///         <see cref="DefaultDocValuesFormatFactory.GetDocValuesFormat(Type)"/> so an external dependency injection
     ///         container can be used to supply the instances (lifetime should be singleton). Note that you could 
    ///         alternatively use the "named type" feature that many DI containers have to supply the type based on name by
-    ///         overriding <see cref="GetDocValuesFormat(string)"/>.</item>
-    ///     <item>subclass <see cref="DefaultDocValuesFormatFactory"/> and override
+    ///         overriding <see cref="GetDocValuesFormat(string)"/>.</description></item>
+    ///     <item><description>subclass <see cref="DefaultDocValuesFormatFactory"/> and override
     ///         <see cref="DefaultDocValuesFormatFactory.GetDocValuesFormatType(string)"/> so a type new type can be
-    ///         supplied that is not in the <see cref="DefaultDocValuesFormatFactory.docValuesFormatNameToTypeMap"/>.</item>
-    ///     <item>subclass <see cref="DefaultDocValuesFormatFactory"/> to add new or override the default <see cref="DocValuesFormat"/> 
-    ///         types by overriding <see cref="Initialize()"/> and calling <see cref="PutDocValuesFormatType(Type)"/>.</item>
-    ///     <item>subclass <see cref="DefaultDocValuesFormatFactory"/> to scan additional assemblies for <see cref="DocValuesFormat"/>
+    ///         supplied that is not in the <see cref="DefaultDocValuesFormatFactory.docValuesFormatNameToTypeMap"/>.</description></item>
+    ///     <item><description>subclass <see cref="DefaultDocValuesFormatFactory"/> to add new or override the default <see cref="DocValuesFormat"/> 
+    ///         types by overriding <see cref="Initialize()"/> and calling <see cref="PutDocValuesFormatType(Type)"/>.</description></item>
+    ///     <item><description>subclass <see cref="DefaultDocValuesFormatFactory"/> to scan additional assemblies for <see cref="DocValuesFormat"/>
    ///         subclasses by overriding <see cref="Initialize()"/> and calling <see cref="ScanForDocValuesFormats(Assembly)"/>.
-    ///         For performance reasons, the default behavior only loads Lucene.Net codecs.</item>
+    ///         For performance reasons, the default behavior only loads Lucene.Net codecs.</description></item>
     /// </list>
     /// <para/>
     /// To set the <see cref="IDocValuesFormatFactory"/>, call <see cref="DocValuesFormat.SetDocValuesFormatFactory(IDocValuesFormatFactory)"/>.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Support/Codecs/DefaultPostingsFormatFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/Codecs/DefaultPostingsFormatFactory.cs b/src/Lucene.Net/Support/Codecs/DefaultPostingsFormatFactory.cs
index 0cbd907..08fb60e 100644
--- a/src/Lucene.Net/Support/Codecs/DefaultPostingsFormatFactory.cs
+++ b/src/Lucene.Net/Support/Codecs/DefaultPostingsFormatFactory.cs
@@ -28,19 +28,19 @@ namespace Lucene.Net.Codecs
     /// <para/>
     /// The most common use cases are:
     /// <list type="bullet">
-    ///     <item>subclass <see cref="DefaultPostingsFormatFactory"/> and override
+    ///     <item><description>subclass <see cref="DefaultPostingsFormatFactory"/> and override
     ///         <see cref="DefaultPostingsFormatFactory.GetPostingsFormat(Type)"/> so an external dependency injection
     ///         container can be used to supply the instances (lifetime should be singleton). Note that you could 
    ///         alternatively use the "named type" feature that many DI containers have to supply the type based on name by
-    ///         overriding <see cref="GetPostingsFormat(string)"/>.</item>
-    ///     <item>subclass <see cref="DefaultPostingsFormatFactory"/> and override
+    ///         overriding <see cref="GetPostingsFormat(string)"/>.</description></item>
+    ///     <item><description>subclass <see cref="DefaultPostingsFormatFactory"/> and override
     ///         <see cref="DefaultPostingsFormatFactory.GetPostingsFormatType(string)"/> so a type new type can be
-    ///         supplied that is not in the <see cref="DefaultPostingsFormatFactory.postingsFormatNameToTypeMap"/>.</item>
-    ///     <item>subclass <see cref="DefaultPostingsFormatFactory"/> to add new or override the default <see cref="PostingsFormat"/> 
-    ///         types by overriding <see cref="Initialize()"/> and calling <see cref="PutPostingsFormatType(Type)"/>.</item>
-    ///     <item>subclass <see cref="DefaultPostingsFormatFactory"/> to scan additional assemblies for <see cref="PostingsFormat"/>
+    ///         supplied that is not in the <see cref="DefaultPostingsFormatFactory.postingsFormatNameToTypeMap"/>.</description></item>
+    ///     <item><description>subclass <see cref="DefaultPostingsFormatFactory"/> to add new or override the default <see cref="PostingsFormat"/> 
+    ///         types by overriding <see cref="Initialize()"/> and calling <see cref="PutPostingsFormatType(Type)"/>.</description></item>
+    ///     <item><description>subclass <see cref="DefaultPostingsFormatFactory"/> to scan additional assemblies for <see cref="PostingsFormat"/>
     ///         subclasses by overriding <see cref="Initialize()"/> and calling <see cref="ScanForPostingsFormats(Assembly)"/>.
-    ///         For performance reasons, the default behavior only loads Lucene.Net codecs.</item>
+    ///         For performance reasons, the default behavior only loads Lucene.Net codecs.</description></item>
     /// </list>
     /// <para/>
     /// To set the <see cref="IPostingsFormatFactory"/>, call <see cref="PostingsFormat.SetPostingsFormatFactory(IPostingsFormatFactory)"/>.

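The third and fourth use cases above can be combined in one small subclass. A hedged sketch follows: the factory name, Initialize(), ScanForPostingsFormats(Assembly), and PostingsFormat.SetPostingsFormatFactory(...) come straight from the documentation in this diff, while ScanningPostingsFormatFactory itself and the assumption that Initialize() is a protected overridable member are illustrative only.

    using System.Reflection;
    using Lucene.Net.Codecs;

    public class ScanningPostingsFormatFactory : DefaultPostingsFormatFactory
    {
        protected override void Initialize()
        {
            base.Initialize(); // keep the built-in Lucene.Net codecs
            // Also register any PostingsFormat subclasses found in this assembly.
            ScanForPostingsFormats(typeof(ScanningPostingsFormatFactory).Assembly);
        }
    }

    // Installed once at application startup, before the first codec lookup:
    // PostingsFormat.SetPostingsFormatFactory(new ScanningPostingsFormatFactory());
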
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Support/HashMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/HashMap.cs b/src/Lucene.Net/Support/HashMap.cs
index aaf270b..6a293c3 100644
--- a/src/Lucene.Net/Support/HashMap.cs
+++ b/src/Lucene.Net/Support/HashMap.cs
@@ -57,15 +57,15 @@ namespace Lucene.Net.Support
     /// <remarks>
     /// <h2>Unordered Dictionaries</h2>
     /// <list type="bullet">
-    ///     <item><see cref="Dictionary{TKey, TValue}"/> - use when order is not important and all keys are non-null.</item>
-    ///     <item><see cref="HashMap{TKey, TValue}"/> - use when order is not important and support for a null key is required.</item>
+    ///     <item><description><see cref="Dictionary{TKey, TValue}"/> - use when order is not important and all keys are non-null.</description></item>
+    ///     <item><description><see cref="HashMap{TKey, TValue}"/> - use when order is not important and support for a null key is required.</description></item>
     /// </list>
     /// <h2>Ordered Dictionaries</h2>
     /// <list type="bullet">
-    ///     <item><see cref="LinkedHashMap{TKey, TValue}"/> - use when you need to preserve entry insertion order. Keys are nullable.</item>
-    ///     <item><see cref="SortedDictionary{TKey, TValue}"/> - use when you need natural sort order. Keys must be unique.</item>
-    ///     <item><see cref="TreeDictionary{K, V}"/> - use when you need natural sort order. Keys may contain duplicates.</item>
-    ///     <item><see cref="LurchTable{TKey, TValue}"/> - use when you need to sort by most recent access or most recent update. Works well for LRU caching.</item>
+    ///     <item><description><see cref="LinkedHashMap{TKey, TValue}"/> - use when you need to preserve entry insertion order. Keys are nullable.</description></item>
+    ///     <item><description><see cref="SortedDictionary{TKey, TValue}"/> - use when you need natural sort order. Keys must be unique.</description></item>
+    ///     <item><description><see cref="TreeDictionary{K, V}"/> - use when you need natural sort order. Keys may contain duplicates.</description></item>
+    ///     <item><description><see cref="LurchTable{TKey, TValue}"/> - use when you need to sort by most recent access or most recent update. Works well for LRU caching.</description></item>
     /// </list>
     /// </remarks>
 #if FEATURE_SERIALIZABLE

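The null-key distinction in the first list above is the usual reason to prefer HashMap over Dictionary. A minimal sketch of the difference, using the types exactly as documented:

    var dictionary = new Dictionary<string, int>();
    // dictionary[null] = 1;       // throws ArgumentNullException
    var hashMap = new HashMap<string, int>();
    hashMap[null] = 1;             // permitted; null is a valid key, as in java.util.HashMap
    int value = hashMap[null];     // retrieves 1
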
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Support/IO/Buffer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/IO/Buffer.cs b/src/Lucene.Net/Support/IO/Buffer.cs
index 3892365..79bd73e 100644
--- a/src/Lucene.Net/Support/IO/Buffer.cs
+++ b/src/Lucene.Net/Support/IO/Buffer.cs
@@ -27,39 +27,39 @@ namespace Lucene.Net.Support.IO
     /// <para/>
     /// A buffer can be described by the following properties:
     /// <list type="bullet">
-    ///     <item>
+    ///     <item><description>
     ///         Capacity:
     ///         The number of elements a buffer can hold. Capacity may not be
     ///         negative and never changes.
-    ///     </item>
-    ///     <item>
+    ///     </description></item>
+    ///     <item><description>
     ///         Position:
     ///         A cursor of this buffer. Elements are read or written at the
     ///         position if you do not specify an index explicitly. Position may not be
     ///         negative and not greater than the limit.
-    ///     </item>
-    ///     <item>
+    ///     </description></item>
+    ///     <item><description>
     ///         Limit:
     ///         Controls the scope of accessible elements. You can only read or
     ///         write elements from index zero to <c>limit - 1</c>. Accessing
     ///         elements out of the scope will cause an exception. Limit may not be negative
     ///         and not greater than capacity.
-    ///     </item>
-    ///     <item>
+    ///     </description></item>
+    ///     <item><description>
     ///         Mark: 
     ///         Used to remember the current position, so that you can reset the
     ///         position later. Mark may not be negative and not greater than position.
-    ///     </item>
-    ///     <item>
+    ///     </description></item>
+    ///     <item><description>
     ///         A buffer can be read-only or read-write. Trying to modify the elements
     ///         of a read-only buffer will cause a <see cref="ReadOnlyBufferException"/>,
     ///         while changing the position, limit and mark of a read-only buffer is OK.
-    ///     </item>
-    ///     <item>
+    ///     </description></item>
+    ///     <item><description>
     ///         A buffer can be direct or indirect. A direct buffer will try its best to
     ///         take advantage of native memory APIs and it may not stay in the heap,
     ///         thus it is not affected by garbage collection.
-    ///     </item>
+    ///     </description></item>
     /// </list>
     /// <para/>
     /// Buffers are not thread-safe. If concurrent access to a buffer instance is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Support/IO/ByteBuffer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/IO/ByteBuffer.cs b/src/Lucene.Net/Support/IO/ByteBuffer.cs
index 021ce02..709d1aa 100644
--- a/src/Lucene.Net/Support/IO/ByteBuffer.cs
+++ b/src/Lucene.Net/Support/IO/ByteBuffer.cs
@@ -30,11 +30,11 @@ namespace Lucene.Net.Support.IO
     /// <para/>
     /// A byte buffer can be created in either one of the following ways:
     /// <list type="bullet">
-    ///     <item><see cref="Allocate(int)"/> a new byte array and create a
-    ///     buffer based on it</item>
-    ///     <item><see cref="AllocateDirect(int)"/> a memory block and create a direct
-    ///     buffer based on it</item>
-    ///     <item><see cref="Wrap(byte[])"/> an existing byte array to create a new buffer</item>
+    ///     <item><description><see cref="Allocate(int)"/> a new byte array and create a
+    ///     buffer based on it</description></item>
+    ///     <item><description><see cref="AllocateDirect(int)"/> a memory block and create a direct
+    ///     buffer based on it</description></item>
+    ///     <item><description><see cref="Wrap(byte[])"/> an existing byte array to create a new buffer</description></item>
     /// </list>
     /// </summary>
 #if FEATURE_SERIALIZABLE

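A hedged usage sketch of the creation paths listed above, which also exercises the position/limit semantics from the Buffer.cs documentation in the previous hunk. These Support.IO types are internal ports of java.nio, so the Put/Flip/Get member names are assumed from the Java originals rather than quoted from this diff:

    ByteBuffer buffer = ByteBuffer.Allocate(16);  // heap buffer backed by a new byte[16]
    buffer.Put((byte)0x2A);                       // writes at position 0; position advances to 1
    buffer.Flip();                                // limit = old position, position = 0: ready to read
    byte b = buffer.Get();                        // reads 0x2A back

    ByteBuffer wrapped = ByteBuffer.Wrap(new byte[] { 1, 2, 3 }); // shares the caller's array
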
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Support/IO/FileSupport.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/IO/FileSupport.cs b/src/Lucene.Net/Support/IO/FileSupport.cs
index 82e8b1c..90f470a 100644
--- a/src/Lucene.Net/Support/IO/FileSupport.cs
+++ b/src/Lucene.Net/Support/IO/FileSupport.cs
@@ -134,8 +134,8 @@ namespace Lucene.Net.Support.IO
         /// Creates a new empty file in the specified directory, using the given prefix and suffix strings to generate its name. 
         /// If this method returns successfully then it is guaranteed that:
         /// <list type="number">
-        /// <item>The file denoted by the returned abstract pathname did not exist before this method was invoked, and</item>
-        /// <item>Neither this method nor any of its variants will return the same abstract pathname again in the current invocation of the virtual machine.</item>
+        /// <item><description>The file denoted by the returned abstract pathname did not exist before this method was invoked, and</description></item>
+        /// <item><description>Neither this method nor any of its variants will return the same abstract pathname again in the current invocation of the virtual machine.</description></item>
         /// </list>
         /// This method provides only part of a temporary-file facility. To arrange for a file created by this method to be deleted automatically, use the deleteOnExit() method.
         /// The prefix argument must be at least three characters long. It is recommended that the prefix be a short, meaningful string such as "hjb" or "mail". The suffix argument may be null, in which case the suffix ".tmp" will be used.

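A hedged usage sketch of the contract above; the method name CreateTempFile and its (prefix, suffix, directory) parameter order are assumptions inferred from this summary, not quoted from the diff:

    var tempDir = new DirectoryInfo(Path.GetTempPath());
    // A null suffix falls back to ".tmp"; "hjb" satisfies the three-character prefix rule.
    FileInfo tempFile = FileSupport.CreateTempFile("hjb", null, tempDir);
    Console.WriteLine(tempFile.FullName); // a freshly created file that did not previously exist
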
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Support/IO/LongBuffer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/IO/LongBuffer.cs b/src/Lucene.Net/Support/IO/LongBuffer.cs
index 82255c7..b278245 100644
--- a/src/Lucene.Net/Support/IO/LongBuffer.cs
+++ b/src/Lucene.Net/Support/IO/LongBuffer.cs
@@ -30,10 +30,10 @@ namespace Lucene.Net.Support.IO
     /// <para/>
     /// A long buffer can be created in either of the following ways:
     /// <list type="bullet">
-    ///     <item><see cref="Allocate(int)"/> a new long array and create a buffer
-    ///     based on it</item>
-    ///     <item><see cref="Wrap(long[])"/> an existing long array to create a new
-    ///     buffer</item>
+    ///     <item><description><see cref="Allocate(int)"/> a new long array and create a buffer
+    ///     based on it</description></item>
+    ///     <item><description><see cref="Wrap(long[])"/> an existing long array to create a new
+    ///     buffer</description></item>
     /// </list>
     /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Support/IO/LongToByteBufferAdapter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/IO/LongToByteBufferAdapter.cs b/src/Lucene.Net/Support/IO/LongToByteBufferAdapter.cs
index b83974d..ca5f4d7 100644
--- a/src/Lucene.Net/Support/IO/LongToByteBufferAdapter.cs
+++ b/src/Lucene.Net/Support/IO/LongToByteBufferAdapter.cs
@@ -28,10 +28,10 @@ namespace Lucene.Net.Support.IO
     /// <para/>
     /// Implementation notice:
     /// <list type="bullet">
-    ///     <item>After a byte buffer instance is wrapped, it becomes privately owned by
-    ///     the adapter. It must NOT be accessed outside the adapter any more.</item>
-    ///     <item>The byte buffer's position and limit are NOT linked with the adapter.
-    ///     The adapter extends Buffer, thus has its own position and limit.</item>
+    ///     <item><description>After a byte buffer instance is wrapped, it becomes privately owned by
+    ///     the adapter. It must NOT be accessed outside the adapter any more.</description></item>
+    ///     <item><description>The byte buffer's position and limit are NOT linked with the adapter.
+    ///     The adapter extends Buffer, thus has its own position and limit.</description></item>
     /// </list>
     /// </summary>
     internal sealed class Int64ToByteBufferAdapter : Int64Buffer

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Support/LinkedHashMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/LinkedHashMap.cs b/src/Lucene.Net/Support/LinkedHashMap.cs
index d241e68..e3d9c94 100644
--- a/src/Lucene.Net/Support/LinkedHashMap.cs
+++ b/src/Lucene.Net/Support/LinkedHashMap.cs
@@ -47,15 +47,15 @@ namespace Lucene.Net.Support
     /// <remarks>
     /// <h2>Unordered Dictionaries</h2>
     /// <list type="bullet">
-    ///     <item><see cref="Dictionary{TKey, TValue}"/> - use when order is not important and all keys are non-null.</item>
-    ///     <item><see cref="HashMap{TKey, TValue}"/> - use when order is not important and support for a null key is required.</item>
+    ///     <item><description><see cref="Dictionary{TKey, TValue}"/> - use when order is not important and all keys are non-null.</description></item>
+    ///     <item><description><see cref="HashMap{TKey, TValue}"/> - use when order is not important and support for a null key is required.</description></item>
     /// </list>
     /// <h2>Ordered Dictionaries</h2>
     /// <list type="bullet">
-    ///     <item><see cref="LinkedHashMap{TKey, TValue}"/> - use when you need to preserve entry insertion order. Keys are nullable.</item>
-    ///     <item><see cref="SortedDictionary{TKey, TValue}"/> - use when you need natural sort order. Keys must be unique.</item>
-    ///     <item><see cref="TreeDictionary{K, V}"/> - use when you need natural sort order. Keys may contain duplicates.</item>
-    ///     <item><see cref="LurchTable{TKey, TValue}"/> - use when you need to sort by most recent access or most recent update. Works well for LRU caching.</item>
+    ///     <item><description><see cref="LinkedHashMap{TKey, TValue}"/> - use when you need to preserve entry insertion order. Keys are nullable.</description></item>
+    ///     <item><description><see cref="SortedDictionary{TKey, TValue}"/> - use when you need natural sort order. Keys must be unique.</description></item>
+    ///     <item><description><see cref="TreeDictionary{K, V}"/> - use when you need natural sort order. Keys may contain duplicates.</description></item>
+    ///     <item><description><see cref="LurchTable{TKey, TValue}"/> - use when you need to sort by most recent access or most recent update. Works well for LRU caching.</description></item>
     /// </list>
     /// </remarks>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Support/StringExtensions.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/StringExtensions.cs b/src/Lucene.Net/Support/StringExtensions.cs
index fe815d4..e8513f9 100644
--- a/src/Lucene.Net/Support/StringExtensions.cs
+++ b/src/Lucene.Net/Support/StringExtensions.cs
@@ -36,8 +36,8 @@ namespace Lucene.Net.Support
         /// <summary>
         /// This method mimics the Java String.compareTo(String) method in that it
         /// <list type="number">
-        /// <item>Compares the strings using lexographic sorting rules</item>
-        /// <item>Performs a culture-insensitive comparison</item>
+        /// <item><description>Compares the strings using lexicographic sorting rules</description></item>
+        /// <item><description>Performs a culture-insensitive comparison</description></item>
         /// </list>
         /// This method is a convenience to replace the .NET CompareTo method 
         /// on all strings, provided the logic does not expect specific values

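The difference this extension papers over, in plain BCL terms (string.Compare and string.CompareOrdinal are standard .NET methods; the extension documented above is assumed to behave like the ordinal form):

    // Culture-sensitive: most cultures sort lowercase before uppercase, so this is typically > 0.
    int cultural = string.Compare("Apple", "apple", StringComparison.CurrentCulture);
    // Ordinal, Java-compatible: 'A' (65) < 'a' (97), so this is always < 0.
    int ordinal = string.CompareOrdinal("Apple", "apple");
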
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net/Util/ArrayUtil.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/ArrayUtil.cs b/src/Lucene.Net/Util/ArrayUtil.cs
index afe75f8..2e45dfc 100644
--- a/src/Lucene.Net/Util/ArrayUtil.cs
+++ b/src/Lucene.Net/Util/ArrayUtil.cs
@@ -813,14 +813,14 @@ namespace Lucene.Net.Util
         /// <para/>
         /// The comparer returned depends on the <typeparamref name="T"/> argument:
         /// <list type="number">
-        ///     <item>If the type is <see cref="string"/>, the comparer returned uses
+        ///     <item><description>If the type is <see cref="string"/>, the comparer returned uses
         ///         the <see cref="string.CompareOrdinal(string, string)"/> to make the comparison
         ///         to ensure that the current culture doesn't affect the results. This is the
-        ///         default string comparison used in Java, and what Lucene's design depends on.</item>
-        ///     <item>If the type implements <see cref="IComparable{T}"/>, the comparer uses
+        ///         default string comparison used in Java, and what Lucene's design depends on.</description></item>
+        ///     <item><description>If the type implements <see cref="IComparable{T}"/>, the comparer uses
         ///         <see cref="IComparable{T}.CompareTo(T)"/> for the comparison. This allows
-        ///         the use of types with custom comparison schemes.</item>
-        ///     <item>If neither of the above conditions are true, will default to <see cref="Comparer{T}.Default"/>.</item>
+        ///         the use of types with custom comparison schemes.</description></item>
+        ///     <item><description>If neither of the above conditions is true, the comparer defaults to <see cref="Comparer{T}.Default"/>.</description></item>
         /// </list>
         /// <para/>
         /// NOTE: This was naturalComparer() in Lucene

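A simplified reconstruction of the three rules above (the shipped member is the .NET port of Lucene's naturalComparer(); its exact name and body are not shown in this diff, so treat this as a sketch):

    public static IComparer<T> GetNaturalComparer<T>() // assumed name
    {
        if (typeof(T) == typeof(string))
        {
            // Rule 1: ordinal string comparison, insulated from the current culture.
            return (IComparer<T>)(object)StringComparer.Ordinal;
        }
        // Rules 2 and 3: Comparer<T>.Default consults IComparable<T> when the type
        // implements it, and otherwise supplies its default comparison.
        return Comparer<T>.Default;
    }
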

[7/9] lucenenet git commit: SWEEP: Lucene.Net.Index: Fixed up documentation comments for types starting with M-Z

Posted by ni...@apache.org.
SWEEP: Lucene.Net.Index: Fixed up documentation comments for types starting with M-Z


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/646db0ce
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/646db0ce
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/646db0ce

Branch: refs/heads/master
Commit: 646db0ce2d64e564e9bc8da3420d62e34e2bacb2
Parents: 7099a84
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Fri Jun 2 04:26:52 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Fri Jun 2 04:26:52 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |   7 +-
 src/Lucene.Net/Index/MergePolicy.cs             | 199 +++++++-------
 src/Lucene.Net/Index/MergeScheduler.cs          |  20 +-
 src/Lucene.Net/Index/MergeState.cs              |  33 +--
 src/Lucene.Net/Index/MergeTrigger.cs            |   6 +-
 src/Lucene.Net/Index/MultiBits.cs               |  18 +-
 src/Lucene.Net/Index/MultiDocValues.cs          |  97 +++----
 .../Index/MultiDocsAndPositionsEnum.cs          |  20 +-
 src/Lucene.Net/Index/MultiDocsEnum.cs           |  22 +-
 src/Lucene.Net/Index/MultiFields.cs             | 117 ++++----
 src/Lucene.Net/Index/MultiReader.cs             |  24 +-
 src/Lucene.Net/Index/MultiTerms.cs              |   8 +-
 src/Lucene.Net/Index/MultiTermsEnum.cs          |  19 +-
 src/Lucene.Net/Index/NoDeletionPolicy.cs        |   6 +-
 src/Lucene.Net/Index/NoMergePolicy.cs           |  12 +-
 src/Lucene.Net/Index/NoMergeScheduler.cs        |  10 +-
 src/Lucene.Net/Index/NormsConsumer.cs           |   4 +-
 .../Index/NumericDocValuesFieldUpdates.cs       |   6 +-
 src/Lucene.Net/Index/NumericDocValuesWriter.cs  |  90 +-----
 src/Lucene.Net/Index/OrdTermState.cs            |   6 +-
 src/Lucene.Net/Index/ParallelAtomicReader.cs    |  27 +-
 src/Lucene.Net/Index/ParallelCompositeReader.cs |  28 +-
 .../Index/PersistentSnapshotDeletionPolicy.cs   |  60 ++--
 src/Lucene.Net/Index/PrefixCodedTerms.cs        |   7 +-
 src/Lucene.Net/Index/RandomAccessOrds.cs        |  18 +-
 src/Lucene.Net/Index/ReaderManager.cs           |  38 +--
 src/Lucene.Net/Index/ReaderSlice.cs             |   4 +-
 src/Lucene.Net/Index/ReaderUtil.cs              |   8 +-
 src/Lucene.Net/Index/ReadersAndUpdates.cs       | 186 +------------
 src/Lucene.Net/Index/SegmentCommitInfo.cs       |  34 +--
 src/Lucene.Net/Index/SegmentCoreReaders.cs      |   2 +-
 src/Lucene.Net/Index/SegmentDocValues.cs        |   6 +-
 src/Lucene.Net/Index/SegmentInfo.cs             |  81 +++---
 src/Lucene.Net/Index/SegmentInfos.cs            | 271 ++++++++++---------
 src/Lucene.Net/Index/SegmentMerger.cs           |  14 +-
 src/Lucene.Net/Index/SegmentReadState.cs        |  45 +--
 src/Lucene.Net/Index/SegmentReader.cs           |  73 ++---
 src/Lucene.Net/Index/SegmentWriteState.cs       |  41 +--
 src/Lucene.Net/Index/SerialMergeScheduler.cs    |   4 +-
 .../Index/SimpleMergedSegmentWarmer.cs          |   4 +-
 src/Lucene.Net/Index/SingleTermsEnum.cs         |   8 +-
 .../Index/SingletonSortedSetDocValues.cs        |  10 +-
 .../Index/SlowCompositeReaderWrapper.cs         |  18 +-
 src/Lucene.Net/Index/SnapshotDeletionPolicy.cs  |  70 ++---
 src/Lucene.Net/Index/SortedDocValues.cs         |  25 +-
 .../Index/SortedDocValuesTermsEnum.cs           |   6 +-
 src/Lucene.Net/Index/SortedDocValuesWriter.cs   | 142 +---------
 src/Lucene.Net/Index/SortedSetDocValues.cs      |  29 +-
 .../Index/SortedSetDocValuesTermsEnum.cs        |   6 +-
 .../Index/SortedSetDocValuesWriter.cs           | 229 +---------------
 src/Lucene.Net/Index/StandardDirectoryReader.cs |   6 +-
 src/Lucene.Net/Index/StoredFieldVisitor.cs      |  32 +--
 src/Lucene.Net/Index/StoredFieldsProcessor.cs   |   2 +-
 src/Lucene.Net/Index/TaskMergeScheduler.cs      |  70 ++---
 src/Lucene.Net/Index/Term.cs                    |  52 ++--
 src/Lucene.Net/Index/TermContext.cs             |  63 ++---
 src/Lucene.Net/Index/TermState.cs               |  15 +-
 src/Lucene.Net/Index/TermVectorsConsumer.cs     |   2 +-
 .../Index/TermVectorsConsumerPerField.cs        |   7 +-
 src/Lucene.Net/Index/Terms.cs                   |  87 +++---
 src/Lucene.Net/Index/TermsEnum.cs               |  70 ++---
 src/Lucene.Net/Index/TermsHash.cs               |  14 +-
 .../Index/TermsHashConsumerPerField.cs          |   8 +-
 src/Lucene.Net/Index/TermsHashPerField.cs       |  10 +-
 .../ThreadAffinityDocumentsWriterThreadPool.cs  |  16 +-
 src/Lucene.Net/Index/TieredMergePolicy.cs       |   2 +-
 src/Lucene.Net/Index/TrackingIndexWriter.cs     | 104 +++----
 src/Lucene.Net/Index/TwoPhaseCommit.cs          |  14 +-
 src/Lucene.Net/Index/TwoPhaseCommitTool.cs      |  35 +--
 .../Index/TwoStoredFieldsConsumers.cs           |   2 +-
 src/Lucene.Net/Index/UpgradeIndexMergePolicy.cs |  53 ++--
 71 files changed, 1143 insertions(+), 1739 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4c3522b..c42e0bc 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -52,10 +52,9 @@ helpers to help with that; for examples see our [Java style methods to avoid
 
 1. Lucene.Net.Core (project)
    1. Codecs (namespace)
-   2. Index (namespace) (Except for types starting with letter A-L and TieredMergePolicy)
-   3. Search (namespace)
-   4. Support (namespace)
-   5. Util (namespace) (Except for Util.Fst)
+   2. Search (namespace)
+   3. Support (namespace)
+   4. Util (namespace) (Except for Util.Fst)
 2. Lucene.Net.Codecs (project)
 
 See [Documenting Lucene.Net](https://cwiki.apache.org/confluence/display/LUCENENET/Documenting+Lucene.Net) for instructions. 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/MergePolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/MergePolicy.cs b/src/Lucene.Net/Index/MergePolicy.cs
index 72e2d68..20c0949 100644
--- a/src/Lucene.Net/Index/MergePolicy.cs
+++ b/src/Lucene.Net/Index/MergePolicy.cs
@@ -34,30 +34,30 @@ namespace Lucene.Net.Index
     //using AlreadySetException = Lucene.Net.Util.SetOnce.AlreadySetException;
 
     /// <summary>
-    /// <p>Expert: a MergePolicy determines the sequence of
-    /// primitive merge operations.</p>
+    /// <para>Expert: a <see cref="MergePolicy"/> determines the sequence of
+    /// primitive merge operations.</para>
     ///
-    /// <p>Whenever the segments in an index have been altered by
-    /// <seealso cref="IndexWriter"/>, either the addition of a newly
+    /// <para>Whenever the segments in an index have been altered by
+    /// <see cref="IndexWriter"/>, either the addition of a newly
     /// flushed segment, addition of many segments from
-    /// addIndexes* calls, or a previous merge that may now need
-    /// to cascade, <seealso cref="IndexWriter"/> invokes {@link
-    /// #findMerges} to give the MergePolicy a chance to pick
-    /// merges that are now required.  this method returns a
-    /// <seealso cref="MergeSpecification"/> instance describing the set of
+    /// AddIndexes* calls, or a previous merge that may now need
+    /// to cascade, <see cref="IndexWriter"/> invokes <see cref="FindMerges(MergeTrigger, SegmentInfos)"/>
+    /// to give the <see cref="MergePolicy"/> a chance to pick
+    /// merges that are now required.  This method returns a
+    /// <see cref="MergeSpecification"/> instance describing the set of
     /// merges that should be done, or null if no merges are
-    /// necessary.  When IndexWriter.forceMerge is called, it calls
-    /// <seealso cref="#findForcedMerges(SegmentInfos,int,Map)"/> and the MergePolicy should
-    /// then return the necessary merges.</p>
+    /// necessary.  When <see cref="IndexWriter.ForceMerge(int)"/> is called, it calls
+    /// <see cref="FindForcedMerges(SegmentInfos, int, IDictionary{SegmentCommitInfo, bool?})"/> and the <see cref="MergePolicy"/> should
+    /// then return the necessary merges.</para>
     ///
-    /// <p>Note that the policy can return more than one merge at
-    /// a time.  In this case, if the writer is using {@link
-    /// SerialMergeScheduler}, the merges will be run
-    /// sequentially but if it is using {@link
-    /// ConcurrentMergeScheduler} they will be run concurrently.</p>
+    /// <para>Note that the policy can return more than one merge at
+    /// a time.  In this case, if the writer is using 
+    /// <see cref="SerialMergeScheduler"/>, the merges will be run
+    /// sequentially but if it is using
+    /// <see cref="ConcurrentMergeScheduler"/> they will be run concurrently.</para>
     ///
-    /// <p>The default MergePolicy is {@link
-    /// TieredMergePolicy}.</p>
+    /// <para>The default MergePolicy is
+    /// <see cref="TieredMergePolicy"/>.</para>
     ///
     /// @lucene.experimental
     /// </summary>
@@ -131,7 +131,6 @@ namespace Lucene.Net.Index
             /// Estimated size in bytes of the merged segment. </summary>
             public long EstimatedMergeBytes { get; internal set; } // used by IndexWriter // LUCENENET NOTE: original was volatile, but long cannot be volatile in .NET
 
-
             // Sum of sizeInBytes of all SegmentInfos; set by IW.mergeInit
             internal long totalMergeBytes; // LUCENENET NOTE: original was volatile, but long cannot be volatile in .NET
 
@@ -172,8 +171,8 @@ namespace Lucene.Net.Index
             /// <summary>
             /// Expert: Get the list of readers to merge. Note that this list does not
             ///  necessarily match the list of segments to merge and should only be used
-            ///  to feed SegmentMerger to initialize a merge. When a <seealso cref="OneMerge"/>
-            ///  reorders doc IDs, it must override <seealso cref="#getDocMap"/> too so that
+            ///  to feed SegmentMerger to initialize a merge. When a <see cref="OneMerge"/>
+            ///  reorders doc IDs, it must override <see cref="GetDocMap"/> too so that
             ///  deletes that happened during the merge can be applied to the newly
             ///  merged segment.
             /// </summary>
@@ -195,7 +194,7 @@ namespace Lucene.Net.Index
             }
 
             /// <summary>
-            /// Expert: Sets the <seealso cref="SegmentCommitInfo"/> of this <seealso cref="OneMerge"/>.
+            /// Expert: Sets the <see cref="SegmentCommitInfo"/> of this <see cref="OneMerge"/>.
             /// Allows sub-classes to e.g. set diagnostics properties.
             /// </summary>
             public virtual SegmentCommitInfo Info
@@ -211,11 +210,11 @@ namespace Lucene.Net.Index
             }
 
             /// <summary>
-            /// Expert: If <seealso cref="#getMergeReaders()"/> reorders document IDs, this method
-            ///  must be overridden to return a mapping from the <i>natural</i> doc ID
-            ///  (the doc ID that would result from a natural merge) to the actual doc
-            ///  ID. this mapping is used to apply deletions that happened during the
-            ///  merge to the new segment.
+            /// Expert: If <see cref="GetMergeReaders()"/> reorders document IDs, this method
+            /// must be overridden to return a mapping from the <i>natural</i> doc ID
+            /// (the doc ID that would result from a natural merge) to the actual doc
+            /// ID. This mapping is used to apply deletions that happened during the
+            /// merge to the new segment.
             /// </summary>
             public virtual DocMap GetDocMap(MergeState mergeState)
             {
@@ -239,7 +238,7 @@ namespace Lucene.Net.Index
 
             /// <summary>
             /// Record that an exception occurred while executing
-            ///  this merge
+            /// this merge
             /// </summary>
             internal virtual Exception Exception
             {
@@ -261,8 +260,8 @@ namespace Lucene.Net.Index
 
             /// <summary>
             /// Mark this merge as aborted.  If this is called
-            ///  before the merge is committed then the merge will
-            ///  not be committed.
+            /// before the merge is committed then the merge will
+            /// not be committed.
             /// </summary>
             internal virtual void Abort()
             {
@@ -274,7 +273,7 @@ namespace Lucene.Net.Index
             }
 
             /// <summary>
-            /// Returns true if this merge was aborted. </summary>
+            /// Returns <c>true</c> if this merge was aborted. </summary>
             internal virtual bool IsAborted
             {
                 get
@@ -287,8 +286,8 @@ namespace Lucene.Net.Index
             }
 
             /// <summary>
-            /// Called periodically by <seealso cref="IndexWriter"/> while
-            ///  merging to see if the merge is aborted.
+            /// Called periodically by <see cref="IndexWriter"/> while
+            /// merging to see if the merge is aborted.
             /// </summary>
             public virtual void CheckAborted(Directory dir)
             {
@@ -325,8 +324,8 @@ namespace Lucene.Net.Index
 
             /// <summary>
             /// Set or clear whether this merge is paused (for example
-            ///  <seealso cref="ConcurrentMergeScheduler"/> will pause merges
-            ///  if too many are running).
+            /// <see cref="ConcurrentMergeScheduler"/> will pause merges
+            /// if too many are running).
             /// </summary>
             internal virtual void SetPause(bool paused)
             {
@@ -342,7 +341,7 @@ namespace Lucene.Net.Index
             }
 
             /// <summary>
-            /// Returns true if this merge is paused.
+            /// Returns <c>true</c> if this merge is paused.
             /// </summary>
             /// <seealso cref="SetPause(bool)"/>
             internal virtual bool IsPaused
@@ -358,7 +357,7 @@ namespace Lucene.Net.Index
 
             /// <summary>
             /// Returns a readable description of the current merge
-            ///  state.
+            /// state.
             /// </summary>
             public virtual string SegString(Directory dir)
             {
@@ -390,8 +389,8 @@ namespace Lucene.Net.Index
             /// <summary>
             /// Returns the total size in bytes of this merge. Note that this does not
             /// indicate the size of the merged segment, but the
-            /// input total size. this is only set once the merge is
-            /// initialized by IndexWriter.
+            /// input total size. This is only set once the merge is
+            /// initialized by <see cref="IndexWriter"/>.
             /// </summary>
             public virtual long TotalBytesSize
             {
@@ -401,7 +400,6 @@ namespace Lucene.Net.Index
             /// <summary>
             /// Returns the total number of documents that are included with this merge.
             /// Note that this does not indicate the number of documents after the merge.
-            ///
             /// </summary>
             public virtual int TotalNumDocs
             {
@@ -417,7 +415,7 @@ namespace Lucene.Net.Index
             }
 
             /// <summary>
-            /// Return <seealso cref="MergeInfo"/> describing this merge. </summary>
+            /// Return <see cref="Store.MergeInfo"/> describing this merge. </summary>
             public virtual MergeInfo MergeInfo
             {
                 get
@@ -428,9 +426,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// A MergeSpecification instance provides the information
+        /// A <see cref="MergeSpecification"/> instance provides the information
         /// necessary to perform multiple merges.  It simply
-        /// contains a list of <seealso cref="OneMerge"/> instances.
+        /// contains a list of <see cref="OneMerge"/> instances.
         /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
@@ -440,12 +438,11 @@ namespace Lucene.Net.Index
             /// <summary>
             /// The subset of segments to be included in the primitive merge.
             /// </summary>
-
             public IList<OneMerge> Merges { get; private set; }
 
             /// <summary>
-            /// Sole constructor.  Use {@link
-            ///  #add(MergePolicy.OneMerge)} to add merges.
+            /// Sole constructor.  Use 
+            /// <see cref="Add(OneMerge)"/> to add merges.
             /// </summary>
             public MergeSpecification()
             {
@@ -453,8 +450,8 @@ namespace Lucene.Net.Index
             }
 
             /// <summary>
-            /// Adds the provided <seealso cref="OneMerge"/> to this
-            ///  specification.
+            /// Adds the provided <see cref="OneMerge"/> to this
+            /// specification.
             /// </summary>
             public virtual void Add(OneMerge merge)
             {
@@ -463,7 +460,7 @@ namespace Lucene.Net.Index
 
             /// <summary>
             /// Returns a description of the merges in this
-            ///  specification.
+            /// specification.
             /// </summary>
             public virtual string SegString(Directory dir)
             {
@@ -480,7 +477,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Exception thrown if there are any problems while
-        ///  executing a merge.
+        /// executing a merge.
         /// </summary>
         // LUCENENET: All exeption classes should be marked serializable
 #if FEATURE_SERIALIZABLE
@@ -525,8 +522,8 @@ namespace Lucene.Net.Index
 #endif
 
             /// <summary>
-            /// Returns the <seealso cref="Directory"/> of the index that hit
-            ///  the exception.
+            /// Returns the <see cref="Store.Directory"/> of the index that hit
+            /// the exception.
             /// </summary>
             public virtual Directory Directory
             {
@@ -539,9 +536,9 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Thrown when a merge was explicitly aborted because
-        ///  <seealso cref="IndexWriter#close(boolean)"/> was called with
-        ///  <code>false</code>.  Normally this exception is
-        ///  privately caught and suppresed by <seealso cref="IndexWriter"/>.
+        /// <see cref="IndexWriter.Dispose(bool)"/> was called with
+        /// <c>false</c>.  Normally this exception is
+    /// privately caught and suppressed by <see cref="IndexWriter"/>.
         /// </summary>
         // LUCENENET: All exeption classes should be marked serializable
 #if FEATURE_SERIALIZABLE
@@ -550,15 +547,15 @@ namespace Lucene.Net.Index
         public class MergeAbortedException : System.IO.IOException
         {
             /// <summary>
-            /// Create a <seealso cref="MergeAbortedException"/>. </summary>
+            /// Create a <see cref="MergeAbortedException"/>. </summary>
             public MergeAbortedException()
                 : base("merge is aborted")
             {
             }
 
             /// <summary>
-            /// Create a <seealso cref="MergeAbortedException"/> with a
-            ///  specified message.
+            /// Create a <see cref="MergeAbortedException"/> with a
+            /// specified message.
             /// </summary>
             public MergeAbortedException(string message)
                 : base(message)
@@ -579,30 +576,30 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Default ratio for compound file system usage. Set to <tt>1.0</tt>, always use
+        /// Default ratio for compound file system usage. Set to <c>1.0</c> to always use the
         /// compound file system.
         /// </summary>
         protected static readonly double DEFAULT_NO_CFS_RATIO = 1.0;
 
         /// <summary>
-        /// Default max segment size in order to use compound file system. Set to <seealso cref="Long#MAX_VALUE"/>.
+        /// Default max segment size in order to use compound file system. Set to <see cref="long.MaxValue"/>.
         /// </summary>
         protected static readonly long DEFAULT_MAX_CFS_SEGMENT_SIZE = long.MaxValue;
 
         /// <summary>
-        /// <seealso cref="IndexWriter"/> that contains this instance. </summary>
+        /// <see cref="IndexWriter"/> that contains this instance. </summary>
         protected SetOnce<IndexWriter> m_writer;
 
         /// <summary>
         /// If the size of the merged segment exceeds this ratio of
-        ///  the total index size then it will remain in
-        ///  non-compound format
+        /// the total index size then it will remain in
+        /// non-compound format
         /// </summary>
         protected double m_noCFSRatio = DEFAULT_NO_CFS_RATIO;
 
         /// <summary>
         /// If the size of the merged segment exceeds
-        ///  this value then it will not use compound file format.
+        /// this value then it will not use compound file format.
         /// </summary>
         protected long m_maxCFSSegmentSize = DEFAULT_MAX_CFS_SEGMENT_SIZE;
 
@@ -617,8 +614,8 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Creates a new merge policy instance. Note that if you intend to use it
-        /// without passing it to <seealso cref="IndexWriter"/>, you should call
-        /// <seealso cref="#setIndexWriter(IndexWriter)"/>.
+        /// without passing it to <see cref="IndexWriter"/>, you should call
+        /// <see cref="SetIndexWriter(IndexWriter)"/>.
         /// </summary>
         public MergePolicy()
             : this(DEFAULT_NO_CFS_RATIO, DEFAULT_MAX_CFS_SEGMENT_SIZE)
@@ -626,9 +623,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Creates a new merge policy instance with default settings for noCFSRatio
-        /// and maxCFSSegmentSize. this ctor should be used by subclasses using different
-        /// defaults than the <seealso cref="MergePolicy"/>
+        /// Creates a new merge policy instance with default settings for <see cref="m_noCFSRatio"/>
+        /// and <see cref="m_maxCFSSegmentSize"/>. This ctor should be used by subclasses using different
+        /// defaults than the <see cref="MergePolicy"/>.
         /// </summary>
         protected MergePolicy(double defaultNoCFSRatio, long defaultMaxCFSSegmentSize)
         {
@@ -638,11 +635,11 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Sets the <seealso cref="IndexWriter"/> to use by this merge policy. this method is
-        /// allowed to be called only once, and is usually set by IndexWriter. If it is
-        /// called more than once, <seealso cref="AlreadySetException"/> is thrown.
+        /// Sets the <see cref="IndexWriter"/> to use by this merge policy. This method is
+        /// allowed to be called only once, and is usually set by <see cref="IndexWriter"/>. If it is
+        /// called more than once, <see cref="AlreadySetException"/> is thrown.
         /// </summary>
-        /// <seealso cref= SetOnce </seealso>
+        /// <seealso cref="SetOnce{T}"/>
         public virtual void SetIndexWriter(IndexWriter writer)
         {
             this.m_writer.Set(writer);
@@ -651,7 +648,7 @@ namespace Lucene.Net.Index
         /// <summary>
         /// Determine what set of merge operations are now necessary on the index.
         /// <see cref="IndexWriter"/> calls this whenever there is a change to the segments.
-        /// this call is always synchronized on the <see cref="IndexWriter"/> instance so
+        /// This call is always synchronized on the <see cref="IndexWriter"/> instance so
         /// only one thread at a time will call this method. </summary>
         /// <param name="mergeTrigger"> the event that triggered the merge </param>
         /// <param name="segmentInfos">
@@ -661,20 +658,20 @@ namespace Lucene.Net.Index
         /// <summary>
         /// Determine what set of merge operations is necessary in
         /// order to merge to &lt;= the specified segment count. <see cref="IndexWriter"/> calls this when its
-        /// <see cref="IndexWriter.ForceMerge"/> method is called. this call is always
+        /// <see cref="IndexWriter.ForceMerge(int, bool)"/> method is called. This call is always
         /// synchronized on the <see cref="IndexWriter"/> instance so only one thread at a
         /// time will call this method.
         /// </summary>
         /// <param name="segmentInfos">
-        ///          the total set of segments in the index </param>
+        ///          The total set of segments in the index </param>
         /// <param name="maxSegmentCount">
-        ///          requested maximum number of segments in the index (currently this
+        ///          Requested maximum number of segments in the index (currently this
         ///          is always 1) </param>
         /// <param name="segmentsToMerge">
-        ///          contains the specific SegmentInfo instances that must be merged
-        ///          away. this may be a subset of all
-        ///          SegmentInfos.  If the value is True for a
-        ///          given SegmentInfo, that means this segment was
+        ///          Contains the specific <see cref="SegmentInfo"/> instances that must be merged
+        ///          away. This may be a subset of all
+        ///          SegmentInfos.  If the value is <c>true</c> for a
+        ///          given <see cref="SegmentInfo"/>, that means this segment was
         ///          an original segment present in the
         ///          to-be-merged index; else, it was a segment
         ///          produced by a cascaded merge. </param>
@@ -704,11 +701,11 @@ namespace Lucene.Net.Index
         protected abstract void Dispose(bool disposing);
 
         /// <summary>
-        /// Returns true if a new segment (regardless of its origin) should use the
-        /// compound file format. The default implementation returns <code>true</code>
+        /// Returns <c>true</c> if a new segment (regardless of its origin) should use the
+        /// compound file format. The default implementation returns <c>true</c>
         /// iff the size of the given mergedInfo is less than or equal to
-        /// <seealso cref="#getMaxCFSSegmentSizeMB()"/> and the size is less or equal to the
-        /// TotalIndexSize * <seealso cref="#getNoCFSRatio()"/> otherwise <code>false</code>.
+        /// <see cref="MaxCFSSegmentSizeMB"/> and the size is less or equal to the
+        /// TotalIndexSize * <see cref="NoCFSRatio"/> otherwise <code>false</code>.
         /// </summary>
         public virtual bool UseCompoundFile(SegmentInfos infos, SegmentCommitInfo mergedInfo)
         {
@@ -734,9 +731,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Return the byte size of the provided {@link
-        ///  SegmentCommitInfo}, pro-rated by percentage of
-        ///  non-deleted documents is set.
+        /// Return the byte size of the provided 
+        /// <see cref="SegmentCommitInfo"/>, pro-rated by percentage of
+        /// non-deleted documents.
         /// </summary>
         protected virtual long Size(SegmentCommitInfo info)
         {
@@ -748,9 +745,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns true if this single info is already fully merged (has no
-        ///  pending deletes, is in the same dir as the
-        ///  writer, and matches the current compound file setting
+        /// Returns <c>true</c> if this single info is already fully merged (has no
+        /// pending deletes, is in the same dir as the
+        /// writer, and matches the current compound file setting).
         /// </summary>
         protected bool IsMerged(SegmentInfos infos, SegmentCommitInfo info)
         {
@@ -766,9 +763,14 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns current {@code noCFSRatio}.
+        /// Gets or Sets the current <see cref="m_noCFSRatio"/>.
+        /// <para/>
+        /// If a merged segment will be more than this percentage
+        /// of the total size of the index, leave the segment as
+        /// non-compound file even if compound file is enabled.
+        /// Set to 1.0 to always use CFS regardless of merge
+        /// size.
         /// </summary>
-        ///  <seealso cref= #setNoCFSRatio  </seealso>
         public double NoCFSRatio
         {
             get
@@ -786,7 +788,14 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns the largest size allowed for a compound file segment </summary>
+        /// Gets or Sets the largest size allowed for a compound file segment.
+        /// <para/>
+        /// If a merged segment will be more than this value,
+        /// leave the segment as
+        /// non-compound file even if compound file is enabled.
+        /// Set this to <see cref="double.PositiveInfinity"/> (default) and <see cref="NoCFSRatio"/> to 1.0
+        /// to always use CFS regardless of merge size.
+        /// </summary>
         public double MaxCFSSegmentSizeMB
         {
             get

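To make the FindMerges contract above concrete, here is a hedged skeleton of a custom policy. The overridden signatures, Size(SegmentCommitInfo), MergeSpecification, and Add(OneMerge) are quoted from this diff; the OneMerge constructor, the iteration over SegmentInfos, the FindForcedDeletesMerges signature, and the "ten small segments" heuristic with its 1 MB threshold are assumptions for illustration and bear no resemblance to TieredMergePolicy's real logic:

    internal sealed class TinySegmentsMergePolicy : MergePolicy
    {
        public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos)
        {
            var candidates = new List<SegmentCommitInfo>();
            foreach (SegmentCommitInfo info in segmentInfos) // assumes SegmentInfos is enumerable
            {
                if (Size(info) < 1024 * 1024) candidates.Add(info); // "small" = under ~1 MB (invented)
                if (candidates.Count == 10) break;
            }
            if (candidates.Count < 10) return null; // null tells IndexWriter no merges are necessary

            var spec = new MergeSpecification();
            spec.Add(new OneMerge(candidates)); // one primitive merge over the chosen segments
            return spec;
        }

        // Remaining abstract members stubbed only to keep the sketch self-contained.
        public override MergeSpecification FindForcedMerges(SegmentInfos segmentInfos, int maxSegmentCount,
            IDictionary<SegmentCommitInfo, bool?> segmentsToMerge) { return null; }
        public override MergeSpecification FindForcedDeletesMerges(SegmentInfos segmentInfos) { return null; }
        protected override void Dispose(bool disposing) { }
    }
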
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/MergeScheduler.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/MergeScheduler.cs b/src/Lucene.Net/Index/MergeScheduler.cs
index 224c44b..cdd1225 100644
--- a/src/Lucene.Net/Index/MergeScheduler.cs
+++ b/src/Lucene.Net/Index/MergeScheduler.cs
@@ -20,13 +20,13 @@ namespace Lucene.Net.Index
      */
 
     /// <summary>
-    /// <p>Expert: <seealso cref="IndexWriter"/> uses an instance
-    ///  implementing this interface to execute the merges
-    ///  selected by a <seealso cref="MergePolicy"/>.  The default
-    ///  MergeScheduler is <seealso cref="ConcurrentMergeScheduler"/>.</p>
-    ///  <p>Implementers of sub-classes should make sure that <seealso cref="#clone()"/>
-    ///  returns an independent instance able to work with any <seealso cref="IndexWriter"/>
-    ///  instance.</p>
+    /// <para>Expert: <see cref="IndexWriter"/> uses an instance
+    /// implementing this interface to execute the merges
+    /// selected by a <see cref="MergePolicy"/>.  The default
+    /// MergeScheduler is <see cref="ConcurrentMergeScheduler"/>.</para>
+    /// <para>Implementers of sub-classes should make sure that <see cref="Clone()"/>
+    /// returns an independent instance able to work with any <see cref="IndexWriter"/>
+    /// instance.</para>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -36,7 +36,7 @@ namespace Lucene.Net.Index
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected MergeScheduler()
         {
@@ -50,6 +50,8 @@ namespace Lucene.Net.Index
         ///  </param>
         public abstract void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound);
 
+        /// <summary>
+        /// Dispose this MergeScheduler. </summary>
         public void Dispose()
         {
             Dispose(true);
@@ -57,7 +59,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Close this MergeScheduler. </summary>
+        /// Dispose this MergeScheduler. </summary>
         protected abstract void Dispose(bool disposing);
 
         public virtual IMergeScheduler Clone()

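One consequence of the Clone() contract above: a clone must not share mutable per-writer state with its source. A hedged sketch (MyMergeScheduler and its pendingMerges field are hypothetical, and the assumption that the base Clone() performs a memberwise copy is not confirmed by this diff):

    public override IMergeScheduler Clone()
    {
        var clone = (MyMergeScheduler)base.Clone();              // assumed memberwise copy
        clone.pendingMerges = new Queue<MergePolicy.OneMerge>(); // give the clone its own queue
        return clone;
    }
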
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/MergeState.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/MergeState.cs b/src/Lucene.Net/Index/MergeState.cs
index 45d5526..942e2b9 100644
--- a/src/Lucene.Net/Index/MergeState.cs
+++ b/src/Lucene.Net/Index/MergeState.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Index
 
     /// <summary>
     /// Holds common state used during segment merging.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -53,7 +53,7 @@ namespace Lucene.Net.Index
 
             /// <summary>
             /// Returns the total number of documents, ignoring
-            ///  deletions.
+            /// deletions.
             /// </summary>
             public abstract int MaxDoc { get; }
 
@@ -69,15 +69,15 @@ namespace Lucene.Net.Index
             public abstract int NumDeletedDocs { get; }
 
             /// <summary>
-            /// Returns true if there are any deletions. </summary>
+            /// Returns <c>true</c> if there are any deletions. </summary>
             public virtual bool HasDeletions
             {
                 get { return NumDeletedDocs > 0; }
             }
 
             /// <summary>
-            /// Creates a <seealso cref="DocMap"/> instance appropriate for
-            ///  this reader.
+            /// Creates a <see cref="DocMap"/> instance appropriate for
+            /// this reader.
             /// </summary>
             public static DocMap Build(AtomicReader reader)
             {
@@ -171,11 +171,11 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// <seealso cref="SegmentInfo"/> of the newly merged segment. </summary>
+        /// <see cref="Index.SegmentInfo"/> of the newly merged segment. </summary>
         public SegmentInfo SegmentInfo { get; private set; }
 
         /// <summary>
-        /// <seealso cref="FieldInfos"/> of the newly merged segment. </summary>
+        /// <see cref="Index.FieldInfos"/> of the newly merged segment. </summary>
         public FieldInfos FieldInfos { get; set; }
 
         /// <summary>
@@ -195,20 +195,20 @@ namespace Lucene.Net.Index
         public int[] DocBase { get; set; }
 
         /// <summary>
-        /// Holds the CheckAbort instance, which is invoked
-        ///  periodically to see if the merge has been aborted.
+        /// Holds the <see cref="Index.CheckAbort"/> instance, which is invoked
+        /// periodically to see if the merge has been aborted.
         /// </summary>
         public CheckAbort CheckAbort { get; private set; }
 
         /// <summary>
-        /// InfoStream for debugging messages. </summary>
+        /// <see cref="Util.InfoStream"/> for debugging messages. </summary>
         public InfoStream InfoStream { get; private set; }
 
         // TODO: get rid of this? it tells you which segments are 'aligned' (e.g. for bulk merging)
         // but is this really so expensive to compute again in different components, versus once in SM?
 
         /// <summary>
-        /// <seealso cref="SegmentReader"/>s that have identical field
+        /// <see cref="SegmentReader"/>s that have identical field
         /// name/number mapping, so their stored fields and term
         /// vectors may be bulk merged.
         /// </summary>
@@ -217,7 +217,7 @@ namespace Lucene.Net.Index
         public SegmentReader[] MatchingSegmentReaders { get; set; }
 
         /// <summary>
-        /// How many <seealso cref="#matchingSegmentReaders"/> are set. </summary>
+        /// How many <see cref="MatchingSegmentReaders"/> are set. </summary>
         public int MatchedCount { get; set; }
 
         /// <summary>
@@ -241,7 +241,7 @@ namespace Lucene.Net.Index
         private readonly Directory dir;
 
         /// <summary>
-        /// Creates a #CheckAbort instance. </summary>
+        /// Creates a <see cref="CheckAbort"/> instance. </summary>
         public CheckAbort(MergePolicy.OneMerge merge, Directory dir)
         {
             this.merge = merge;
@@ -251,9 +251,9 @@ namespace Lucene.Net.Index
         /// <summary>
         /// Records the fact that roughly <paramref name="units"/> units of work
         /// have been done since this method was last called.
-        /// When adding time-consuming code into SegmentMerger,
+        /// When adding time-consuming code into <see cref="SegmentMerger"/>,
         /// you should test different values for units to ensure
-        /// that the time in between calls to merge.checkAborted
+        /// that the time in between calls to merge.CheckAborted
         /// is up to ~ 1 second.
         /// </summary>
         public virtual void Work(double units)
@@ -267,7 +267,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// If you use this: IW.close(false) cannot abort your merge!
+        /// If you use this: IW.Dispose(false) cannot abort your merge!
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public static readonly CheckAbort NONE = new CheckAbortAnonymousInnerClassHelper();

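A hedged sketch of the Work(double) contract documented above: merging code calls it periodically with a rough cost figure so that an abort surfaces promptly, presumably via MergePolicy.MergeAbortedException. CopyOneDocument and the 300-unit cost are invented for illustration:

    for (int docID = 0; docID < maxDoc; docID++)
    {
        CopyOneDocument(docID);          // hypothetical chunk of merge work
        mergeState.CheckAbort.Work(300); // tune units so abort checks land roughly 1 second apart
    }
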
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/MergeTrigger.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/MergeTrigger.cs b/src/Lucene.Net/Index/MergeTrigger.cs
index 113be31..ea8c80c 100644
--- a/src/Lucene.Net/Index/MergeTrigger.cs
+++ b/src/Lucene.Net/Index/MergeTrigger.cs
@@ -18,7 +18,7 @@ namespace Lucene.Net.Index
      */
 
     /// <summary>
-    /// MergeTrigger is passed to
+    /// <see cref="MergeTrigger"/> is passed to
     /// <see cref="MergePolicy.FindMerges(MergeTrigger, SegmentInfos)"/> to indicate the
     /// event that triggered the merge.
     /// </summary>
@@ -31,7 +31,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Merge was triggered by a full flush. Full flushes
-        /// can be caused by a commit, NRT reader reopen or a close call on the index writer.
+        /// can be caused by a commit, NRT reader reopen or a <see cref="IndexWriter.Dispose()"/> call on the index writer.
         /// </summary>
         FULL_FLUSH,
 
@@ -46,7 +46,7 @@ namespace Lucene.Net.Index
         MERGE_FINISHED,
 
         /// <summary>
-        /// Merge was triggered by a closing IndexWriter.
+        /// Merge was triggered by a disposing <see cref="IndexWriter"/>.
         /// </summary>
         CLOSING
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/MultiBits.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/MultiBits.cs b/src/Lucene.Net/Index/MultiBits.cs
index 3ac7e67..1894d1f 100644
--- a/src/Lucene.Net/Index/MultiBits.cs
+++ b/src/Lucene.Net/Index/MultiBits.cs
@@ -24,11 +24,11 @@ namespace Lucene.Net.Index
     using IBits = Lucene.Net.Util.IBits;
 
     /// <summary>
-    /// Concatenates multiple Bits together, on every lookup.
+    /// Concatenates multiple <see cref="IBits"/> together, on every lookup.
     ///
-    /// <p><b>NOTE</b>: this is very costly, as every lookup must
+    /// <para/><b>NOTE</b>: this is very costly, as every lookup must
     /// do a binary search to locate the right sub-reader.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -99,7 +99,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Represents a sub-Bits from
-        /// <seealso cref="MultiBits#getMatchingSub(Lucene.Net.Index.ReaderSlice) getMatchingSub()"/>.
+        /// <see cref="MultiBits.GetMatchingSub(Lucene.Net.Index.ReaderSlice)"/>.
         /// </summary>
         public sealed class SubResult
         {
@@ -108,11 +108,11 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns a sub-Bits matching the provided <code>slice</code>
-        /// <p>
-        /// Because <code>null</code> usually has a special meaning for
-        /// Bits (e.g. no deleted documents), you must check
-        /// <seealso cref="SubResult#matches"/> instead to ensure the sub was
+        /// Returns a sub-Bits matching the provided <paramref name="slice"/>
+        /// <para/>
+        /// Because <c>null</c> usually has a special meaning for
+        /// <see cref="IBits"/> (e.g. no deleted documents), you must check
+        /// <see cref="SubResult.Matches"/> instead to ensure the sub was
         /// actually found.
         /// </summary>
         public SubResult GetMatchingSub(ReaderSlice slice)
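
As context for the note above, a minimal sketch (not part of this commit) of the Matches check; 'multiBits' and 'slice' are assumed to exist in the caller's scope, and the Result member name is assumed per the port's conventions.

    MultiBits.SubResult sub = multiBits.GetMatchingSub(slice);
    if (sub.Matches)
    {
        IBits bits = sub.Result; // may legitimately be null (e.g. no deleted documents)
        // ... use bits ...
    }
    // when !sub.Matches, the slice did not line up with a single sub-reader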

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/MultiDocValues.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/MultiDocValues.cs b/src/Lucene.Net/Index/MultiDocValues.cs
index f97a672..1a542f0 100644
--- a/src/Lucene.Net/Index/MultiDocValues.cs
+++ b/src/Lucene.Net/Index/MultiDocValues.cs
@@ -32,16 +32,17 @@ namespace Lucene.Net.Index
     using TermsEnumWithSlice = Lucene.Net.Index.MultiTermsEnum.TermsEnumWithSlice;
 
     /// <summary>
-    /// A wrapper for CompositeIndexReader providing access to DocValues.
+    /// A wrapper for <see cref="CompositeReader"/> providing access to <see cref="DocValues"/>.
     ///
-    /// <p><b>NOTE</b>: for multi readers, you'll get better
+    /// <para/><b>NOTE</b>: for multi readers, you'll get better
     /// performance by gathering the sub readers using
-    /// <seealso cref="IndexReader#getContext()"/> to get the
+    /// <see cref="IndexReader.Context"/> to get the
     /// atomic leaves and then operate per-AtomicReader,
     /// instead of using this class.
     ///
-    /// <p><b>NOTE</b>: this is very costly.
+    /// <para/><b>NOTE</b>: this is very costly.
     ///
+    /// <para/>
     /// @lucene.experimental
     /// @lucene.internal
     /// </summary>
@@ -57,11 +58,11 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns a NumericDocValues for a reader's norms (potentially merging on-the-fly).
-        /// <p>
-        /// this is a slow way to access normalization values. Instead, access them per-segment
-        /// with <seealso cref="AtomicReader#getNormValues(String)"/>
-        /// </p>
+        /// Returns a <see cref="NumericDocValues"/> for a reader's norms (potentially merging on-the-fly).
+        /// <para>
+        /// This is a slow way to access normalization values. Instead, access them per-segment
+        /// with <see cref="AtomicReader.GetNormValues(string)"/>
+        /// </para>
         /// </summary>
         public static NumericDocValues GetNormValues(IndexReader r, string field)
         {
@@ -128,12 +129,11 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns a NumericDocValues for a reader's docvalues (potentially merging on-the-fly)
-        /// <p>
-        /// this is a slow way to access numeric values. Instead, access them per-segment
-        /// with <seealso cref="AtomicReader#getNumericDocValues(String)"/>
-        /// </p>
-        ///
+        /// Returns a <see cref="NumericDocValues"/> for a reader's docvalues (potentially merging on-the-fly)
+        /// <para>
+        /// This is a slow way to access numeric values. Instead, access them per-segment
+        /// with <see cref="AtomicReader.GetNumericDocValues(string)"/>
+        /// </para>
         /// </summary>
         public static NumericDocValues GetNumericValues(IndexReader r, string field)
         {
@@ -200,12 +200,11 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns a Bits for a reader's docsWithField (potentially merging on-the-fly)
-        /// <p>
-        /// this is a slow way to access this bitset. Instead, access them per-segment
-        /// with <seealso cref="AtomicReader#getDocsWithField(String)"/>
-        /// </p>
-        ///
+        /// Returns a <see cref="IBits"/> for a reader's docsWithField (potentially merging on-the-fly)
+        /// <para>
+        /// This is a slow way to access this bitset. Instead, access them per-segment
+        /// with <see cref="AtomicReader.GetDocsWithField(string)"/>
+        /// </para>
         /// </summary>
         public static IBits GetDocsWithField(IndexReader r, string field)
         {
@@ -261,11 +260,11 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns a BinaryDocValues for a reader's docvalues (potentially merging on-the-fly)
-        /// <p>
-        /// this is a slow way to access binary values. Instead, access them per-segment
-        /// with <seealso cref="AtomicReader#getBinaryDocValues(String)"/>
-        /// </p>
+        /// Returns a <see cref="BinaryDocValues"/> for a reader's docvalues (potentially merging on-the-fly)
+        /// <para>
+        /// This is a slow way to access binary values. Instead, access them per-segment
+        /// with <see cref="AtomicReader.GetBinaryDocValues(string)"/>
+        /// </para>
         /// </summary>
         public static BinaryDocValues GetBinaryValues(IndexReader r, string field)
         {
@@ -333,11 +332,11 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns a SortedDocValues for a reader's docvalues (potentially doing extremely slow things).
-        /// <p>
+        /// Returns a <see cref="SortedDocValues"/> for a reader's docvalues (potentially doing extremely slow things).
+        /// <para>
         /// This is an extremely slow way to access sorted values. Instead, access them per-segment
-        /// with <seealso cref="AtomicReader#getSortedDocValues(String)"/>
-        /// </p>
+        /// with <see cref="AtomicReader.GetSortedDocValues(string)"/>
+        /// </para>
         /// </summary>
         public static SortedDocValues GetSortedValues(IndexReader r, string field)
         {
@@ -390,11 +389,11 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns a SortedSetDocValues for a reader's docvalues (potentially doing extremely slow things).
-        /// <p>
-        /// this is an extremely slow way to access sorted values. Instead, access them per-segment
-        /// with <seealso cref="AtomicReader#getSortedSetDocValues(String)"/>
-        /// </p>
+        /// Returns a <see cref="SortedSetDocValues"/> for a reader's docvalues (potentially doing extremely slow things).
+        /// <para>
+        /// This is an extremely slow way to access sorted values. Instead, access them per-segment
+        /// with <see cref="AtomicReader.GetSortedSetDocValues(string)"/>
+        /// </para>
         /// </summary>
         public static SortedSetDocValues GetSortedSetValues(IndexReader r, string field)
         {
@@ -469,11 +468,11 @@ namespace Lucene.Net.Index
 
             /// <summary>
             /// Creates an ordinal map that allows mapping ords to/from a merged
-            /// space from <code>subs</code>. </summary>
+            /// space from <paramref name="subs"/>. </summary>
             /// <param name="owner"> a cache key </param>
-            /// <param name="subs"> TermsEnums that support <seealso cref="TermsEnum#ord()"/>. They need
-            ///             not be dense (e.g. can be FilteredTermsEnums}. </param>
-            /// <exception cref="IOException"> if an I/O error occurred. </exception>
+            /// <param name="subs"> <see cref="TermsEnum"/>s that support <see cref="TermsEnum.Ord"/>. They need
+            ///             not be dense (e.g. can be FilteredTermsEnums). </param>
+            /// <exception cref="System.IO.IOException"> if an I/O error occurred. </exception>
             public OrdinalMap(object owner, TermsEnum[] subs)
             {
                 // create the ordinal mappings by pulling a termsenum over each sub's
@@ -539,7 +538,7 @@ namespace Lucene.Net.Index
 
             /// <summary>
             /// Given global ordinal, returns the ordinal of the first segment which contains
-            /// this ordinal (the corresponding to the segment return <seealso cref="#getFirstSegmentNumber"/>).
+            /// this ordinal (corresponding to the segment returned by <see cref="GetFirstSegmentNumber(long)"/>).
             /// </summary>
             public virtual long GetFirstSegmentOrd(long globalOrd)
             {
@@ -581,7 +580,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Implements SortedDocValues over n subs, using an OrdinalMap
+        /// Implements <see cref="SortedDocValues"/> over n subs, using an <see cref="OrdinalMap"/>
+        /// <para/>
         /// @lucene.internal
         /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -590,7 +590,7 @@ namespace Lucene.Net.Index
         public class MultiSortedDocValues : SortedDocValues
         {
             /// <summary>
-            /// docbase for each leaf: parallel with <seealso cref="#values"/> </summary>
+            /// docbase for each leaf: parallel with <see cref="Values"/> </summary>
             [WritableArray]
             [SuppressMessage("Microsoft.Performance", "CA1819", Justification = "Lucene's design requires some writable array properties")]
             public int[] DocStarts
@@ -610,7 +610,7 @@ namespace Lucene.Net.Index
             private readonly SortedDocValues[] values;
 
             /// <summary>
-            /// ordinal map mapping ords from <code>values</code> to global ord space </summary>
+            /// ordinal map mapping ords from <c>values</c> to global ord space </summary>
             public OrdinalMap Mapping
             {
                 get { return mapping; }
@@ -618,7 +618,7 @@ namespace Lucene.Net.Index
             private readonly OrdinalMap mapping;
 
             /// <summary>
-            /// Creates a new MultiSortedDocValues over <code>values</code> </summary>
+            /// Creates a new <see cref="MultiSortedDocValues"/> over <paramref name="values"/> </summary>
             internal MultiSortedDocValues(SortedDocValues[] values, int[] docStarts, OrdinalMap mapping)
             {
                 Debug.Assert(values.Length == mapping.ordDeltas.Length);
@@ -652,7 +652,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Implements MultiSortedSetDocValues over n subs, using an OrdinalMap
+        /// Implements <see cref="MultiSortedSetDocValues"/> over n subs, using an <see cref="OrdinalMap"/>
+        /// <para/>
         /// @lucene.internal
         /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -661,7 +662,7 @@ namespace Lucene.Net.Index
         public class MultiSortedSetDocValues : SortedSetDocValues
         {
             /// <summary>
-            /// docbase for each leaf: parallel with <seealso cref="#values"/> </summary>
+            /// docbase for each leaf: parallel with <see cref="Values"/> </summary>
             [WritableArray]
             [SuppressMessage("Microsoft.Performance", "CA1819", Justification = "Lucene's design requires some writable array properties")]
             public int[] DocStarts
@@ -681,7 +682,7 @@ namespace Lucene.Net.Index
             private readonly SortedSetDocValues[] values;
 
             /// <summary>
-            /// ordinal map mapping ords from <code>values</code> to global ord space </summary>
+            /// ordinal map mapping ords from <c>values</c> to global ord space </summary>
             public OrdinalMap Mapping
             {
                 get { return mapping; } 
@@ -691,7 +692,7 @@ namespace Lucene.Net.Index
             internal int currentSubIndex;
 
             /// <summary>
-            /// Creates a new MultiSortedSetDocValues over <code>values</code> </summary>
+            /// Creates a new <see cref="MultiSortedSetDocValues"/> over <paramref name="values"/> </summary>
             internal MultiSortedSetDocValues(SortedSetDocValues[] values, int[] docStarts, OrdinalMap mapping)
             {
                 Debug.Assert(values.Length == mapping.ordDeltas.Length);
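
As context for the per-segment advice repeated above, a minimal sketch (not part of this commit); the reader and the "price" field name are illustrative.

    // Slow: values merged on the fly across all segments.
    NumericDocValues merged = MultiDocValues.GetNumericValues(reader, "price");

    // Preferred: gather the atomic leaves and operate per segment.
    foreach (AtomicReaderContext leaf in reader.Leaves)
    {
        NumericDocValues perSegment = leaf.AtomicReader.GetNumericDocValues("price"); // null if the segment has no such field
        // ... consume perSegment for docs [0, leaf.AtomicReader.MaxDoc) ...
    }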

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/MultiDocsAndPositionsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/MultiDocsAndPositionsEnum.cs b/src/Lucene.Net/Index/MultiDocsAndPositionsEnum.cs
index bfab6d7..b8092bf 100644
--- a/src/Lucene.Net/Index/MultiDocsAndPositionsEnum.cs
+++ b/src/Lucene.Net/Index/MultiDocsAndPositionsEnum.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Index
 
     /// <summary>
     /// Exposes flex API, merged from flex API of sub-segments.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -52,8 +52,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns {@code true} if this instance can be reused by
-        ///  the provided <seealso cref="MultiTermsEnum"/>.
+        /// Returns <c>true</c> if this instance can be reused by
+        /// the provided <see cref="MultiTermsEnum"/>.
         /// </summary>
         public bool CanReuse(MultiTermsEnum parent)
         {
@@ -61,7 +61,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Rre-use and reset this instance on the provided slices. </summary>
+        /// Re-use and reset this instance on the provided slices. </summary>
         public MultiDocsAndPositionsEnum Reset(EnumWithSlice[] subs, int numSubs)
         {
             this.numSubs = numSubs;
@@ -80,7 +80,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// How many sub-readers we are merging. </summary>
-        ///  <seealso cref= #getSubs  </seealso>
+        /// <see cref="Subs"/>
         public int NumSubs
         {
             get
@@ -203,8 +203,8 @@ namespace Lucene.Net.Index
 
         // TODO: implement bulk read more efficiently than super
         /// <summary>
-        /// Holds a <seealso cref="DocsAndPositionsEnum"/> along with the
-        ///  corresponding <seealso cref="ReaderSlice"/>.
+        /// Holds a <see cref="Index.DocsAndPositionsEnum"/> along with the
+        /// corresponding <see cref="ReaderSlice"/>.
         /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
@@ -216,12 +216,12 @@ namespace Lucene.Net.Index
             }
 
             /// <summary>
-            /// <seealso cref="DocsAndPositionsEnum"/> for this sub-reader. </summary>
+            /// <see cref="Index.DocsAndPositionsEnum"/> for this sub-reader. </summary>
             public DocsAndPositionsEnum DocsAndPositionsEnum { get; internal set; } // LUCENENET NOTE: Made setter internal because ctor is internal
 
             /// <summary>
-            /// <seealso cref="ReaderSlice"/> describing how this sub-reader
-            ///  fits into the composite reader.
+            /// <see cref="ReaderSlice"/> describing how this sub-reader
+            /// fits into the composite reader.
             /// </summary>
             public ReaderSlice Slice { get; internal set; } // LUCENENET NOTE: Made setter internal because ctor is internal
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/MultiDocsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/MultiDocsEnum.cs b/src/Lucene.Net/Index/MultiDocsEnum.cs
index 0e34a7c..d1e2e8a 100644
--- a/src/Lucene.Net/Index/MultiDocsEnum.cs
+++ b/src/Lucene.Net/Index/MultiDocsEnum.cs
@@ -23,9 +23,9 @@ namespace Lucene.Net.Index
      */
 
     /// <summary>
-    /// Exposes <seealso cref="DocsEnum"/>, merged from <seealso cref="DocsEnum"/>
+    /// Exposes <see cref="DocsEnum"/>, merged from <see cref="DocsEnum"/>
     /// API of sub-segments.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -44,7 +44,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Sole constructor </summary>
-        /// <param name="parent"> The <seealso cref="MultiTermsEnum"/> that created us. </param>
+        /// <param name="parent"> The <see cref="MultiTermsEnum"/> that created us. </param>
         /// <param name="subReaderCount"> How many sub-readers are being merged.  </param>
         public MultiDocsEnum(MultiTermsEnum parent, int subReaderCount)
         {
@@ -70,8 +70,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns {@code true} if this instance can be reused by
-        ///  the provided <seealso cref="MultiTermsEnum"/>.
+        /// Returns <c>true</c> if this instance can be reused by
+        /// the provided <see cref="MultiTermsEnum"/>.
         /// </summary>
         public bool CanReuse(MultiTermsEnum parent)
         {
@@ -80,7 +80,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// How many sub-readers we are merging. </summary>
-        ///  <seealso cref= #getSubs  </seealso>
+        /// <seealso cref="Subs"/>
         public int NumSubs
         {
             get
@@ -189,8 +189,8 @@ namespace Lucene.Net.Index
 
         // TODO: implement bulk read more efficiently than super
         /// <summary>
-        /// Holds a <seealso cref="DocsEnum"/> along with the
-        ///  corresponding <seealso cref="ReaderSlice"/>.
+        /// Holds a <see cref="Index.DocsEnum"/> along with the
+        /// corresponding <see cref="ReaderSlice"/>.
         /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
@@ -202,12 +202,12 @@ namespace Lucene.Net.Index
             }
 
             /// <summary>
-            /// <seealso cref="DocsEnum"/> of this sub-reader. </summary>
+            /// <see cref="Index.DocsEnum"/> of this sub-reader. </summary>
             public DocsEnum DocsEnum { get; internal set; } // LUCENENET NOTE: Made setter internal because ctor is internal
 
             /// <summary>
-            /// <seealso cref="ReaderSlice"/> describing how this sub-reader
-            ///  fits into the composite reader.
+            /// <see cref="ReaderSlice"/> describing how this sub-reader
+            /// fits into the composite reader.
             /// </summary>
             public ReaderSlice Slice { get; internal set; } // LUCENENET NOTE: Made setter internal because ctor is internal
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/MultiFields.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/MultiFields.cs b/src/Lucene.Net/Index/MultiFields.cs
index 045200b..b5e750c 100644
--- a/src/Lucene.Net/Index/MultiFields.cs
+++ b/src/Lucene.Net/Index/MultiFields.cs
@@ -29,17 +29,17 @@ namespace Lucene.Net.Index
 
     /// <summary>
     /// Exposes flex API, merged from flex API of sub-segments.
-    /// this is useful when you're interacting with an {@link
-    /// IndexReader} implementation that consists of sequential
-    /// sub-readers (eg <seealso cref="DirectoryReader"/> or {@link
-    /// MultiReader}).
+    /// This is useful when you're interacting with an 
+    /// <see cref="IndexReader"/> implementation that consists of sequential
+    /// sub-readers (e.g. <see cref="DirectoryReader"/> or 
+    /// <see cref="MultiReader"/>).
     ///
-    /// <p><b>NOTE</b>: for composite readers, you'll get better
+    /// <para/><b>NOTE</b>: for composite readers, you'll get better
     /// performance by gathering the sub readers using
-    /// <seealso cref="IndexReader#getContext()"/> to get the
+    /// <see cref="IndexReader.Context"/> to get the
     /// atomic leaves and then operate per-AtomicReader,
     /// instead of using this class.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -52,14 +52,14 @@ namespace Lucene.Net.Index
         private readonly IDictionary<string, Terms> terms = new ConcurrentDictionary<string, Terms>();
 
         /// <summary>
-        /// Returns a single <seealso cref="Fields"/> instance for this
-        ///  reader, merging fields/terms/docs/positions on the
-        ///  fly.  this method will return null if the reader
-        ///  has no postings.
+        /// Returns a single <see cref="Fields"/> instance for this
+        /// reader, merging fields/terms/docs/positions on the
+        /// fly.  This method will return <c>null</c> if the reader
+        /// has no postings.
         ///
-        ///  <p><b>NOTE</b>: this is a slow way to access postings.
-        ///  It's better to get the sub-readers and iterate through them
-        ///  yourself.
+        /// <para/><b>NOTE</b>: this is a slow way to access postings.
+        /// It's better to get the sub-readers and iterate through them
+        /// yourself.
         /// </summary>
         public static Fields GetFields(IndexReader reader)
         {
@@ -103,15 +103,15 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns a single <seealso cref="IBits"/> instance for this
-        ///  reader, merging live Documents on the
-        ///  fly.  this method will return null if the reader
-        ///  has no deletions.
+        /// Returns a single <see cref="IBits"/> instance for this
+        /// reader, merging live Documents on the
+        /// fly.  This method will return <c>null</c> if the reader
+        /// has no deletions.
         ///
-        ///  <p><b>NOTE</b>: this is a very slow way to access live docs.
-        ///  For example, each Bits access will require a binary search.
-        ///  It's better to get the sub-readers and iterate through them
-        ///  yourself.
+        /// <para/><b>NOTE</b>: this is a very slow way to access live docs.
+        /// For example, each <see cref="IBits"/> access will require a binary search.
+        /// It's better to get the sub-readers and iterate through them
+        /// yourself.
         /// </summary>
         public static IBits GetLiveDocs(IndexReader reader)
         {
@@ -143,7 +143,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        ///  this method may return null if the field does not exist. </summary>
+        /// This method may return <c>null</c> if the field does not exist. </summary>
         public static Terms GetTerms(IndexReader r, string field)
         {
             Fields fields = GetFields(r);
@@ -158,9 +158,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns <seealso cref="DocsEnum"/> for the specified field &
-        ///  term.  this will return null if the field or term does
-        ///  not exist.
+        /// Returns <see cref="DocsEnum"/> for the specified field &amp;
+        /// term.  This will return <c>null</c> if the field or term does
+        /// not exist.
         /// </summary>
         public static DocsEnum GetTermDocsEnum(IndexReader r, IBits liveDocs, string field, BytesRef term)
         {
@@ -168,12 +168,12 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns <seealso cref="DocsEnum"/> for the specified field &
-        ///  term, with control over whether freqs are required.
-        ///  Some codecs may be able to optimize their
-        ///  implementation when freqs are not required.  this will
-        ///  return null if the field or term does not exist.  See {@link
-        ///  TermsEnum#docs(Bits,DocsEnum,int)}.
+        /// Returns <see cref="DocsEnum"/> for the specified field &amp;
+        /// term, with control over whether freqs are required.
+        /// Some codecs may be able to optimize their
+        /// implementation when freqs are not required.  This will
+        /// return <c>null</c> if the field or term does not exist.  See
+        /// <see cref="TermsEnum.Docs(IBits, DocsEnum, DocsFlags)"/>.
         /// </summary>
         public static DocsEnum GetTermDocsEnum(IndexReader r, IBits liveDocs, string field, BytesRef term, DocsFlags flags)
         {
@@ -192,23 +192,23 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns <seealso cref="DocsAndPositionsEnum"/> for the specified
-        ///  field & term.  this will return null if the field or
-        ///  term does not exist or positions were not indexed. </summary>
-        ///  <seealso cref= #getTermPositionsEnum(IndexReader, Bits, String, BytesRef, int)  </seealso>
+        /// Returns <see cref="DocsAndPositionsEnum"/> for the specified
+        /// field &amp; term.  This will return <c>null</c> if the field or
+        /// term does not exist or positions were not indexed. </summary>
+        /// <seealso cref="GetTermPositionsEnum(IndexReader, IBits, string, BytesRef, DocsAndPositionsFlags)"/>
         public static DocsAndPositionsEnum GetTermPositionsEnum(IndexReader r, IBits liveDocs, string field, BytesRef term)
         {
             return GetTermPositionsEnum(r, liveDocs, field, term, DocsAndPositionsFlags.OFFSETS | DocsAndPositionsFlags.PAYLOADS);
         }
 
         /// <summary>
-        /// Returns <seealso cref="DocsAndPositionsEnum"/> for the specified
-        ///  field & term, with control over whether offsets and payloads are
-        ///  required.  Some codecs may be able to optimize
-        ///  their implementation when offsets and/or payloads are not
-        ///  required. this will return null if the field or term does not
-        ///  exist or positions were not indexed. See {@link
-        ///  TermsEnum#docsAndPositions(Bits,DocsAndPositionsEnum,int)}.
+        /// Returns <see cref="DocsAndPositionsEnum"/> for the specified
+        /// field &amp; term, with control over whether offsets and payloads are
+        /// required.  Some codecs may be able to optimize
+        /// their implementation when offsets and/or payloads are not
+        /// required. This will return <c>null</c> if the field or term does not
+        /// exist or positions were not indexed. See 
+        /// <see cref="TermsEnum.DocsAndPositions(IBits, DocsAndPositionsEnum, DocsAndPositionsFlags)"/>.
         /// </summary>
         public static DocsAndPositionsEnum GetTermPositionsEnum(IndexReader r, IBits liveDocs, string field, BytesRef term, DocsAndPositionsFlags flags)
         {
@@ -227,7 +227,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Expert: construct a new MultiFields instance directly.
+        /// Expert: construct a new <see cref="MultiFields"/> instance directly.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         // TODO: why is this public?
@@ -291,13 +292,13 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Call this to get the (merged) FieldInfos for a
-        ///  composite reader.
-        ///  <p>
-        ///  NOTE: the returned field numbers will likely not
-        ///  correspond to the actual field numbers in the underlying
-        ///  readers, and codec metadata (<seealso cref="FieldInfo#getAttribute(String)"/>
-        ///  will be unavailable.
+        /// Call this to get the (merged) <see cref="FieldInfos"/> for a
+        /// composite reader.
+        /// <para/>
+        /// NOTE: the returned field numbers will likely not
+        /// correspond to the actual field numbers in the underlying
+        /// readers, and codec metadata (<see cref="FieldInfo.GetAttribute(string)"/>)
+        /// will be unavailable.
         /// </summary>
         public static FieldInfos GetMergedFieldInfos(IndexReader reader)
         {
@@ -310,13 +311,13 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Call this to get the (merged) FieldInfos representing the
-        ///  set of indexed fields <b>only</b> for a composite reader.
-        ///  <p>
-        ///  NOTE: the returned field numbers will likely not
-        ///  correspond to the actual field numbers in the underlying
-        ///  readers, and codec metadata (<seealso cref="FieldInfo#getAttribute(String)"/>
-        ///  will be unavailable.
+        /// Call this to get the (merged) <see cref="FieldInfos"/> representing the
+        /// set of indexed fields <b>only</b> for a composite reader.
+        /// <para/>
+        /// NOTE: the returned field numbers will likely not
+        /// correspond to the actual field numbers in the underlying
+        /// readers, and codec metadata (<see cref="FieldInfo.GetAttribute(string)"/>)
+        /// will be unavailable.
         /// </summary>
         public static ICollection<string> GetIndexedFields(IndexReader reader)
         {
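
As context for the helpers above, a minimal sketch (not part of this commit) of iterating postings through the merged view; the reader, field and term are illustrative.

    IBits liveDocs = MultiFields.GetLiveDocs(reader); // null when the reader has no deletions
    DocsEnum docs = MultiFields.GetTermDocsEnum(reader, liveDocs, "body", new BytesRef("lucene"));
    if (docs != null) // null when the field or term does not exist
    {
        int docID;
        while ((docID = docs.NextDoc()) != DocIdSetIterator.NO_MORE_DOCS)
        {
            // ... process docID ...
        }
    }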

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/MultiReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/MultiReader.cs b/src/Lucene.Net/Index/MultiReader.cs
index 2be292a..9cafc9c 100644
--- a/src/Lucene.Net/Index/MultiReader.cs
+++ b/src/Lucene.Net/Index/MultiReader.cs
@@ -21,22 +21,22 @@ namespace Lucene.Net.Index
      */
 
     /// <summary>
-    /// A <seealso cref="CompositeReader"/> which reads multiple indexes, appending
-    ///  their content. It can be used to create a view on several
-    ///  sub-readers (like <seealso cref="DirectoryReader"/>) and execute searches on it.
+    /// A <see cref="CompositeReader"/> which reads multiple indexes, appending
+    /// their content. It can be used to create a view on several
+    /// sub-readers (like <see cref="DirectoryReader"/>) and execute searches on it.
     ///
-    /// <p> For efficiency, in this API documents are often referred to via
+    /// <para/> For efficiency, in this API documents are often referred to via
     /// <i>document numbers</i>, non-negative integers which each name a unique
     /// document in the index.  These document numbers are ephemeral -- they may change
     /// as documents are added to and deleted from an index.  Clients should thus not
     /// rely on a given document having the same number between sessions.
     ///
-    /// <p><a name="thread-safety"></a><p><b>NOTE</b>: {@link
-    /// IndexReader} instances are completely thread
+    /// <para/><a name="thread-safety"></a><b>NOTE</b>: 
+    /// <see cref="IndexReader"/> instances are completely thread
     /// safe, meaning multiple threads can call any of its methods,
     /// concurrently.  If your application requires external
     /// synchronization, you should <b>not</b> synchronize on the
-    /// <code>IndexReader</code> instance; use your own
+    /// <see cref="IndexReader"/> instance; use your own
     /// (non-Lucene) objects instead.
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -47,8 +47,8 @@ namespace Lucene.Net.Index
         private readonly bool closeSubReaders;
 
         /// <summary>
-        /// <p>Construct a MultiReader aggregating the named set of (sub)readers.
-        /// <p>Note that all subreaders are closed if this Multireader is closed.</p> </summary>
+        /// <para>Construct a <see cref="MultiReader"/> aggregating the named set of (sub)readers.</para>
+        /// <para>Note that all subreaders are disposed if this <see cref="MultiReader"/> is disposed.</para> </summary>
         /// <param name="subReaders"> set of (sub)readers </param>
         public MultiReader(params IndexReader[] subReaders)
             : this(subReaders, true)
@@ -56,10 +56,10 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// <p>Construct a MultiReader aggregating the named set of (sub)readers. </summary>
+        /// Construct a <see cref="MultiReader"/> aggregating the named set of (sub)readers. </summary>
         /// <param name="subReaders"> set of (sub)readers; this array will be cloned. </param>
-        /// <param name="closeSubReaders"> indicates whether the subreaders should be closed
-        /// when this MultiReader is closed </param>
+        /// <param name="closeSubReaders"> indicates whether the subreaders should be disposed
+        /// when this <see cref="MultiReader"/> is disposed </param>
         public MultiReader(IndexReader[] subReaders, bool closeSubReaders)
             : base((IndexReader[])subReaders.Clone())
         {
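
As context, a minimal sketch (not part of this commit) of viewing two existing indexes as one; dir1 and dir2 are assumed to be Directory instances holding indexes.

    using (IndexReader r1 = DirectoryReader.Open(dir1))
    using (IndexReader r2 = DirectoryReader.Open(dir2))
    using (MultiReader multi = new MultiReader(new IndexReader[] { r1, r2 }, false)) // subreaders disposed by the outer usings
    {
        IndexSearcher searcher = new IndexSearcher(multi);
        // ... searches see both indexes; doc numbers are ephemeral ...
    }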

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/MultiTerms.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/MultiTerms.cs b/src/Lucene.Net/Index/MultiTerms.cs
index 6854182..d6370a7 100644
--- a/src/Lucene.Net/Index/MultiTerms.cs
+++ b/src/Lucene.Net/Index/MultiTerms.cs
@@ -28,7 +28,7 @@ namespace Lucene.Net.Index
     /// <summary>
     /// Exposes flex API, merged from flex API of
     /// sub-segments.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -47,9 +47,9 @@ namespace Lucene.Net.Index
         /// <summary>
         /// Sole constructor.
         /// </summary>
-        /// <param name="subs"> The <seealso cref="Terms"/> instances of all sub-readers. </param>
-        /// <param name="subSlices"> A parallel array (matching {@code
-        ///        subs}) describing the sub-reader slices. </param>
+        /// <param name="subs"> The <see cref="Terms"/> instances of all sub-readers. </param>
+        /// <param name="subSlices"> A parallel array (matching 
+        ///        <paramref name="subs"/>) describing the sub-reader slices. </param>
         public MultiTerms(Terms[] subs, ReaderSlice[] subSlices)
         {
             this.subs = subs;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/MultiTermsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/MultiTermsEnum.cs b/src/Lucene.Net/Index/MultiTermsEnum.cs
index 813897a..7fda1de 100644
--- a/src/Lucene.Net/Index/MultiTermsEnum.cs
+++ b/src/Lucene.Net/Index/MultiTermsEnum.cs
@@ -27,9 +27,9 @@ namespace Lucene.Net.Index
     using BytesRef = Lucene.Net.Util.BytesRef;
 
     /// <summary>
-    /// Exposes <seealso cref="TermsEnum"/> API, merged from <seealso cref="TermsEnum"/> API of sub-segments.
-    /// this does a merge sort, by term text, of the sub-readers.
-    ///
+    /// Exposes <see cref="TermsEnum"/> API, merged from <see cref="TermsEnum"/> API of sub-segments.
+    /// This does a merge sort, by term text, of the sub-readers.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -70,8 +70,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns how many sub-reader slices contain the current </summary>
-        ///  term.  <seealso cref= #getMatchArray  </seealso>
+        /// Returns how many sub-reader slices contain the current 
+        /// term.</summary> 
+        /// <seealso cref="MatchArray"/>
         public int MatchCount
         {
             get
@@ -91,8 +92,8 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Sole constructor. </summary>
-        ///  <param name="slices"> Which sub-reader slices we should
-        ///  merge.  </param>
+        /// <param name="slices"> Which sub-reader slices we should
+        /// merge.</param>
         public MultiTermsEnum(ReaderSlice[] slices)
         {
             queue = new TermMergeQueue(slices.Length);
@@ -125,8 +126,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// The terms array must be newly created TermsEnum, ie
-        ///  <seealso cref="TermsEnum#next"/> has not yet been called.
+        /// The terms array must contain newly created <see cref="TermsEnum"/>s, i.e.
+        /// <see cref="TermsEnum.Next()"/> has not yet been called.
         /// </summary>
         public TermsEnum Reset(TermsEnumIndex[] termsEnumsIndex)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/NoDeletionPolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/NoDeletionPolicy.cs b/src/Lucene.Net/Index/NoDeletionPolicy.cs
index e36a523..865abeb 100644
--- a/src/Lucene.Net/Index/NoDeletionPolicy.cs
+++ b/src/Lucene.Net/Index/NoDeletionPolicy.cs
@@ -21,9 +21,9 @@ namespace Lucene.Net.Index
      */
 
     /// <summary>
-    /// An <seealso cref="IndexDeletionPolicy"/> which keeps all index commits around, never
-    /// deleting them. this class is a singleton and can be accessed by referencing
-    /// <seealso cref="#INSTANCE"/>.
+    /// An <see cref="IndexDeletionPolicy"/> which keeps all index commits around, never
+    /// deleting them. This class is a singleton and can be accessed by referencing
+    /// <see cref="INSTANCE"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/NoMergePolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/NoMergePolicy.cs b/src/Lucene.Net/Index/NoMergePolicy.cs
index beb702c..d137a22 100644
--- a/src/Lucene.Net/Index/NoMergePolicy.cs
+++ b/src/Lucene.Net/Index/NoMergePolicy.cs
@@ -21,11 +21,11 @@ namespace Lucene.Net.Index
      */
 
     /// <summary>
-    /// A <seealso cref="MergePolicy"/> which never returns merges to execute (hence it's
+    /// A <see cref="MergePolicy"/> which never returns merges to execute (hence it's
     /// name). It is also a singleton and can be accessed through
-    /// <seealso cref="NoMergePolicy#NO_COMPOUND_FILES"/> if you want to indicate the index
-    /// does not use compound files, or through <seealso cref="NoMergePolicy#COMPOUND_FILES"/>
-    /// otherwise. Use it if you want to prevent an <seealso cref="IndexWriter"/> from ever
+    /// <see cref="NoMergePolicy.NO_COMPOUND_FILES"/> if you want to indicate the index
+    /// does not use compound files, or through <see cref="NoMergePolicy.COMPOUND_FILES"/>
+    /// otherwise. Use it if you want to prevent an <see cref="IndexWriter"/> from ever
     /// executing merges, without going through the hassle of tweaking a merge
     /// policy's settings to achieve that, such as changing its merge factor.
     /// </summary>
@@ -35,13 +35,13 @@ namespace Lucene.Net.Index
     public sealed class NoMergePolicy : MergePolicy
     {
         /// <summary>
-        /// A singleton <seealso cref="NoMergePolicy"/> which indicates the index does not use
+        /// A singleton <see cref="NoMergePolicy"/> which indicates the index does not use
         /// compound files.
         /// </summary>
         public static readonly MergePolicy NO_COMPOUND_FILES = new NoMergePolicy(false);
 
         /// <summary>
-        /// A singleton <seealso cref="NoMergePolicy"/> which indicates the index uses compound
+        /// A singleton <see cref="NoMergePolicy"/> which indicates the index uses compound
         /// files.
         /// </summary>
         public static readonly MergePolicy COMPOUND_FILES = new NoMergePolicy(true);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/NoMergeScheduler.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/NoMergeScheduler.cs b/src/Lucene.Net/Index/NoMergeScheduler.cs
index 020e8df..4283b78 100644
--- a/src/Lucene.Net/Index/NoMergeScheduler.cs
+++ b/src/Lucene.Net/Index/NoMergeScheduler.cs
@@ -23,10 +23,10 @@ namespace Lucene.Net.Index
     /// A <see cref="MergeScheduler"/> which never executes any merges. It is also a
     /// singleton and can be accessed through <see cref="NoMergeScheduler.INSTANCE"/>. Use
     /// it if you want to prevent an <see cref="IndexWriter"/> from ever executing merges,
-    /// regardless of the <seealso cref="MergePolicy"/> used. Note that you can achieve the
-    /// same thing by using <seealso cref="NoMergePolicy"/>, however with
-    /// <seealso cref="NoMergeScheduler"/> you also ensure that no unnecessary code of any
-    /// <seealso cref="MergeScheduler"/> implementation is ever executed. Hence it is
+    /// regardless of the <see cref="MergePolicy"/> used. Note that you can achieve the
+    /// same thing by using <see cref="NoMergePolicy"/>, however with
+    /// <see cref="NoMergeScheduler"/> you also ensure that no unnecessary code of any
+    /// <see cref="MergeScheduler"/> implementation is ever executed. Hence it is
     /// recommended to use both if you want to disable merges from ever happening.
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -35,7 +35,7 @@ namespace Lucene.Net.Index
     public sealed class NoMergeScheduler : MergeScheduler
     {
         /// <summary>
-        /// The single instance of <seealso cref="NoMergeScheduler"/> </summary>
+        /// The single instance of <see cref="NoMergeScheduler"/> </summary>
         public static readonly MergeScheduler INSTANCE = new NoMergeScheduler();
 
         private NoMergeScheduler()
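
As context for the recommendation above to combine both, a minimal sketch (not part of this commit); the version, analyzer and directory are illustrative, and property-style setters on IndexWriterConfig are assumed per the port's conventions.

    IndexWriterConfig config = new IndexWriterConfig(LuceneVersion.LUCENE_48,
        new StandardAnalyzer(LuceneVersion.LUCENE_48))
    {
        MergePolicy = NoMergePolicy.COMPOUND_FILES,  // never returns merges
        MergeScheduler = NoMergeScheduler.INSTANCE   // never executes merges
    };
    using (IndexWriter writer = new IndexWriter(dir, config))
    {
        // ... add/update documents; no merges will ever run ...
    }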

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/NormsConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/NormsConsumer.cs b/src/Lucene.Net/Index/NormsConsumer.cs
index e0437e0..444cd6c 100644
--- a/src/Lucene.Net/Index/NormsConsumer.cs
+++ b/src/Lucene.Net/Index/NormsConsumer.cs
@@ -29,8 +29,8 @@ namespace Lucene.Net.Index
 
     /// <summary>
     /// Writes norms.  Each thread X field accumulates the norms
-    ///  for the doc/fields it saw, then the flush method below
-    ///  merges all of these together into a single _X.nrm file.
+    /// for the doc/fields it saw, then the flush method below
+    /// merges all of these together into a single _X.nrm file.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/NumericDocValuesFieldUpdates.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/NumericDocValuesFieldUpdates.cs b/src/Lucene.Net/Index/NumericDocValuesFieldUpdates.cs
index 3b627d9..7d09541 100644
--- a/src/Lucene.Net/Index/NumericDocValuesFieldUpdates.cs
+++ b/src/Lucene.Net/Index/NumericDocValuesFieldUpdates.cs
@@ -30,9 +30,9 @@ namespace Lucene.Net.Index
     using PagedMutable = Lucene.Net.Util.Packed.PagedMutable;
 
     /// <summary>
-    /// A <seealso cref="DocValuesFieldUpdates"/> which holds updates of documents, of a single
-    /// <seealso cref="NumericDocValuesField"/>.
-    ///
+    /// A <see cref="DocValuesFieldUpdates"/> which holds updates of documents, of a single
+    /// <see cref="NumericDocValuesField"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/NumericDocValuesWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/NumericDocValuesWriter.cs b/src/Lucene.Net/Index/NumericDocValuesWriter.cs
index fdfbccc..ae9e1e2 100644
--- a/src/Lucene.Net/Index/NumericDocValuesWriter.cs
+++ b/src/Lucene.Net/Index/NumericDocValuesWriter.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Index
 
     /// <summary>
     /// Buffers up pending long per doc, then flushes when
-    ///  segment flushes.
+    /// segment flushes.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -134,96 +134,8 @@ namespace Lucene.Net.Index
             }
         }
 
-        /*
-	  private class IterableAnonymousInnerClassHelper : IEnumerable<Number>
-	  {
-		  private readonly NumericDocValuesWriter OuterInstance;
-
-		  private int MaxDoc;
-
-		  public IterableAnonymousInnerClassHelper(NumericDocValuesWriter outerInstance, int maxDoc)
-		  {
-			  this.OuterInstance = outerInstance;
-			  this.MaxDoc = maxDoc;
-		  }
-
-		  public virtual IEnumerator<Number> GetEnumerator()
-		  {
-			return new NumericIterator(OuterInstance, MaxDoc);
-		  }
-	  }*/
-
         public override void Abort()
         {
         }
-
-        /*
-	  // iterates over the values we have in ram
-	  private class NumericIterator : IEnumerator<Number>
-	  {
-		  internal bool InstanceFieldsInitialized = false;
-
-		  internal virtual void InitializeInstanceFields()
-		  {
-			  Iter = OuterInstance.Pending.Iterator();
-			  Size = (int)OuterInstance.Pending.Size();
-		  }
-
-		  private readonly NumericDocValuesWriter OuterInstance;
-
-		internal AppendingDeltaPackedLongBuffer.Iterator Iter;
-		internal int Size;
-		internal readonly int MaxDoc;
-		internal int Upto;
-
-		internal NumericIterator(NumericDocValuesWriter outerInstance, int maxDoc)
-		{
-			this.OuterInstance = outerInstance;
-
-			if (!InstanceFieldsInitialized)
-			{
-				InitializeInstanceFields();
-				InstanceFieldsInitialized = true;
-			}
-		  this.MaxDoc = maxDoc;
-		}
-
-		public override bool HasNext()
-		{
-		  return Upto < MaxDoc;
-		}
-
-		public override Number Next()
-		{
-		  if (!HasNext())
-		  {
-			throw new NoSuchElementException();
-		  }
-		  long? value;
-		  if (Upto < Size)
-		  {
-			long v = Iter.next();
-            if (OuterInstance.DocsWithField == null || OuterInstance.DocsWithField.Get(Upto))
-			{
-			  value = v;
-			}
-			else
-			{
-			  value = null;
-			}
-		  }
-		  else
-		  {
-              value = OuterInstance.DocsWithField != null ? null : MISSING;
-		  }
-		  Upto++;
-		  return value;
-		}
-
-		public override void Remove()
-		{
-		  throw new System.NotSupportedException();
-		}
-	  }*/
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/OrdTermState.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/OrdTermState.cs b/src/Lucene.Net/Index/OrdTermState.cs
index 7a9a2ed..4383d1c 100644
--- a/src/Lucene.Net/Index/OrdTermState.cs
+++ b/src/Lucene.Net/Index/OrdTermState.cs
@@ -21,8 +21,8 @@ namespace Lucene.Net.Index
      */
 
     /// <summary>
-    /// An ordinal based <seealso cref="TermState"/>
-    ///
+    /// An ordinal based <see cref="TermState"/>
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -32,7 +32,7 @@ namespace Lucene.Net.Index
     {
         /// <summary>
         /// Term ordinal, i.e. its position in the full list of
-        ///  sorted terms.
+        /// sorted terms.
         /// </summary>
         public long Ord { get; set; }
 


[8/9] lucenenet git commit: Moved Lucene.Net.Collections > Lucene.Net.Support.Collections

Posted by ni...@apache.org.
Moved Lucene.Net.Collections > Lucene.Net.Support.Collections


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/ad0d5fb4
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/ad0d5fb4
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/ad0d5fb4

Branch: refs/heads/master
Commit: ad0d5fb4fc281008795b244b157a11bf2939ed72
Parents: 646db0c
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Fri Jun 2 04:27:49 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Fri Jun 2 04:38:02 2017 +0700

----------------------------------------------------------------------
 .../Analysis/Miscellaneous/CapitalizationFilterFactory.cs         | 3 ++-
 .../Analysis/Util/AbstractAnalysisFactory.cs                      | 1 +
 .../Analysis/Wikipedia/WikipediaTokenizer.cs                      | 1 +
 .../Analysis/Wikipedia/WikipediaTokenizerFactory.cs               | 1 +
 src/Lucene.Net.Join/ToChildBlockJoinQuery.cs                      | 1 +
 src/Lucene.Net.Join/ToParentBlockJoinQuery.cs                     | 1 +
 src/Lucene.Net.Queries/Function/ValueSources/EnumFieldSource.cs   | 1 +
 .../Analysis/Core/TestTypeTokenFilter.cs                          | 1 +
 src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoReader.cs       | 1 +
 src/Lucene.Net/Index/MergePolicy.cs                               | 1 +
 src/Lucene.Net/Index/SegmentInfo.cs                               | 1 +
 src/Lucene.Net/Search/ConstantScoreQuery.cs                       | 1 +
 src/Lucene.Net/Support/Collections.cs                             | 2 +-
 13 files changed, 14 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ad0d5fb4/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs
index 3b61583..2718858 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilterFactory.cs
@@ -1,4 +1,5 @@
-using Lucene.Net.Analysis.Util;
+using Lucene.Net.Support;
+using Lucene.Net.Analysis.Util;
 using System.Collections.Generic;
 using System.Globalization;
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ad0d5fb4/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
index 6761686..e4b5133 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
@@ -1,4 +1,5 @@
 using Lucene.Net.Analysis.Core;
+using Lucene.Net.Support;
 using Lucene.Net.Util;
 using System;
 using System.Collections.Generic;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ad0d5fb4/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs
index 46679ba..cec77e2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizer.cs
@@ -1,4 +1,5 @@
 using Lucene.Net.Analysis.TokenAttributes;
+using Lucene.Net.Support;
 using Lucene.Net.Util;
 using System.Collections.Generic;
 using System.IO;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ad0d5fb4/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerFactory.cs
index f23fe28..d9bc313 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Wikipedia/WikipediaTokenizerFactory.cs
@@ -1,4 +1,5 @@
 using Lucene.Net.Analysis.Util;
+using Lucene.Net.Support;
 using Lucene.Net.Util;
 using System.Collections.Generic;
 using System.IO;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ad0d5fb4/src/Lucene.Net.Join/ToChildBlockJoinQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Join/ToChildBlockJoinQuery.cs b/src/Lucene.Net.Join/ToChildBlockJoinQuery.cs
index 7d2bdcd..08cadf7 100644
--- a/src/Lucene.Net.Join/ToChildBlockJoinQuery.cs
+++ b/src/Lucene.Net.Join/ToChildBlockJoinQuery.cs
@@ -1,5 +1,6 @@
 using Lucene.Net.Index;
 using Lucene.Net.Search;
+using Lucene.Net.Support;
 using Lucene.Net.Util;
 using System;
 using System.Collections.Generic;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ad0d5fb4/src/Lucene.Net.Join/ToParentBlockJoinQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Join/ToParentBlockJoinQuery.cs b/src/Lucene.Net.Join/ToParentBlockJoinQuery.cs
index 3dea9b6..ec43f33 100644
--- a/src/Lucene.Net.Join/ToParentBlockJoinQuery.cs
+++ b/src/Lucene.Net.Join/ToParentBlockJoinQuery.cs
@@ -1,5 +1,6 @@
 using Lucene.Net.Index;
 using Lucene.Net.Search;
+using Lucene.Net.Support;
 using Lucene.Net.Util;
 using System;
 using System.Collections.Generic;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ad0d5fb4/src/Lucene.Net.Queries/Function/ValueSources/EnumFieldSource.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/Function/ValueSources/EnumFieldSource.cs b/src/Lucene.Net.Queries/Function/ValueSources/EnumFieldSource.cs
index 532155c..bb4f4d8 100644
--- a/src/Lucene.Net.Queries/Function/ValueSources/EnumFieldSource.cs
+++ b/src/Lucene.Net.Queries/Function/ValueSources/EnumFieldSource.cs
@@ -1,6 +1,7 @@
 using Lucene.Net.Index;
 using Lucene.Net.Queries.Function.DocValues;
 using Lucene.Net.Search;
+using Lucene.Net.Support;
 using Lucene.Net.Util;
 using Lucene.Net.Util.Mutable;
 using System;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ad0d5fb4/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestTypeTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestTypeTokenFilter.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestTypeTokenFilter.cs
index 8ba07cf..f51e84f 100644
--- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestTypeTokenFilter.cs
+++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Core/TestTypeTokenFilter.cs
@@ -1,5 +1,6 @@
 using Lucene.Net.Analysis.Standard;
 using Lucene.Net.Analysis.TokenAttributes;
+using Lucene.Net.Support;
 using Lucene.Net.Util;
 using NUnit.Framework;
 using System;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ad0d5fb4/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoReader.cs b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoReader.cs
index 9bf4ddc..da6a314 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoReader.cs
@@ -1,3 +1,4 @@
+using Lucene.Net.Support;
 using System;
 using System.Collections.Generic;
 using System.Diagnostics;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ad0d5fb4/src/Lucene.Net/Index/MergePolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/MergePolicy.cs b/src/Lucene.Net/Index/MergePolicy.cs
index 20c0949..7909639 100644
--- a/src/Lucene.Net/Index/MergePolicy.cs
+++ b/src/Lucene.Net/Index/MergePolicy.cs
@@ -1,3 +1,4 @@
+using Lucene.Net.Support;
 using Lucene.Net.Util;
 using System;
 using System.Collections.Generic;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ad0d5fb4/src/Lucene.Net/Index/SegmentInfo.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SegmentInfo.cs b/src/Lucene.Net/Index/SegmentInfo.cs
index 1109130..b7f25df 100644
--- a/src/Lucene.Net/Index/SegmentInfo.cs
+++ b/src/Lucene.Net/Index/SegmentInfo.cs
@@ -1,3 +1,4 @@
+using Lucene.Net.Support;
 using System;
 using System.Collections.Generic;
 using System.Diagnostics;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ad0d5fb4/src/Lucene.Net/Search/ConstantScoreQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/ConstantScoreQuery.cs b/src/Lucene.Net/Search/ConstantScoreQuery.cs
index 360329c..7ff56c2 100644
--- a/src/Lucene.Net/Search/ConstantScoreQuery.cs
+++ b/src/Lucene.Net/Search/ConstantScoreQuery.cs
@@ -1,3 +1,4 @@
+using Lucene.Net.Support;
 using System;
 using System.Collections.Generic;
 using System.Diagnostics;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ad0d5fb4/src/Lucene.Net/Support/Collections.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/Collections.cs b/src/Lucene.Net/Support/Collections.cs
index 6f03419..dcafc25 100644
--- a/src/Lucene.Net/Support/Collections.cs
+++ b/src/Lucene.Net/Support/Collections.cs
@@ -8,7 +8,7 @@ using System.Reflection;
 using System.Runtime.Serialization;
 using System.Text;
 
-namespace Lucene.Net
+namespace Lucene.Net.Support
 {
     /*
 	 * Licensed to the Apache Software Foundation (ASF) under one or more


[9/9] lucenenet git commit: Moved Lucene.Net.IcuBreakIterator > Lucene.Net.Support.IcuBreakIterator

Posted by ni...@apache.org.
Moved Lucene.Net.IcuBreakIterator > Lucene.Net.Support.IcuBreakIterator


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/f43d2326
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/f43d2326
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/f43d2326

Branch: refs/heads/master
Commit: f43d23261125354345f43f2c82ab03bf228a366b
Parents: ad0d5fb
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Fri Jun 2 04:39:07 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Fri Jun 2 04:39:07 2017 +0700

----------------------------------------------------------------------
 src/Lucene.Net.ICU/Support/IcuBreakIterator.cs | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f43d2326/src/Lucene.Net.ICU/Support/IcuBreakIterator.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.ICU/Support/IcuBreakIterator.cs b/src/Lucene.Net.ICU/Support/IcuBreakIterator.cs
index 79819ed..49b8bd3 100644
--- a/src/Lucene.Net.ICU/Support/IcuBreakIterator.cs
+++ b/src/Lucene.Net.ICU/Support/IcuBreakIterator.cs
@@ -1,12 +1,11 @@
 #if FEATURE_BREAKITERATOR
-using Lucene.Net.Support;
 using System;
 using System.Collections.Generic;
 using System.Globalization;
 using System.Linq;
 using System.Text;
 
-namespace Lucene.Net
+namespace Lucene.Net.Support
 {
     /*
 	 * Licensed to the Apache Software Foundation (ASF) under one or more


[2/9] lucenenet git commit: SWEEP: Changed to in documentation comments

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs
index 04b6c1d..e36aee8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs
@@ -44,14 +44,14 @@ namespace Lucene.Net.Analysis.Pt
     /// <code>{ "suffix", N, "replacement", { "exception1", "exception2", ...}}</code>
     /// where:
     /// <list type="bullet">
-    ///   <item><c>suffix</c> is the suffix to be removed (such as "inho").</item>
-    ///   <item><c>N</c> is the min stem size, where stem is defined as the candidate stem 
-    ///       after removing the suffix (but before appending the replacement!)</item>
-    ///   <item><c>replacement</c> is an optimal string to append after removing the suffix.
-    ///       This can be the empty string.</item>
-    ///   <item><c>exceptions</c> is an optional list of exceptions, patterns that should 
+    ///   <item><description><c>suffix</c> is the suffix to be removed (such as "inho").</description></item>
+    ///   <item><description><c>N</c> is the min stem size, where stem is defined as the candidate stem 
+    ///       after removing the suffix (but before appending the replacement!)</description></item>
+    ///   <item><description><c>replacement</c> is an optional string to append after removing the suffix.
+    ///       This can be the empty string.</description></item>
+    ///   <item><description><c>exceptions</c> is an optional list of exceptions, patterns that should 
     ///       not be stemmed. These patterns can be specified as whole word or suffix (ends-with) 
-    ///       patterns, depending upon the exceptions format flag in the step header.</item>
+    ///       patterns, depending upon the exceptions format flag in the step header.</description></item>
     /// </list>
     /// </para>
     /// <para>
@@ -61,17 +61,17 @@ namespace Lucene.Net.Analysis.Pt
     /// </blockquote>
     /// where:
     /// <list type="bullet">
-    ///   <item><c>name</c> is a name for the step (such as "Plural").</item>
-    ///   <item><c>N</c> is the min word size. Words that are less than this length bypass
+    ///   <item><description><c>name</c> is a name for the step (such as "Plural").</description></item>
+    ///   <item><description><c>N</c> is the min word size. Words that are less than this length bypass
     ///       the step completely, as an optimization. Note: N can be zero, in this case this 
     ///       implementation will automatically calculate the appropriate value from the underlying 
-    ///       rules.</item>
-    ///   <item><c>B</c> is a "boolean" flag specifying how exceptions in the rules are matched.
+    ///       rules.</description></item>
+    ///   <item><description><c>B</c> is a "boolean" flag specifying how exceptions in the rules are matched.
     ///       A value of 1 indicates whole-word pattern matching, a value of 0 indicates that 
-    ///       exceptions are actually suffixes and should be matched with ends-with.</item>
-    ///   <item><c>conds</c> are an optional list of conditions to enter the step at all. If
+    ///       exceptions are actually suffixes and should be matched with ends-with.</description></item>
+    ///   <item><description><c>conds</c> are an optional list of conditions to enter the step at all. If
     ///       the list is non-empty, then a word must end with one of these conditions or it will
-    ///       bypass the step completely as an optimization.</item>
+    ///       bypass the step completely as an optimization.</description></item>
     /// </list>
     /// </para>
     /// <a href="http://www.inf.ufrgs.br/~viviane/rslp/index.htm">RSLP description</a>

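As a quick illustration of the rule format documented above, a single suffix-removal rule (the exception words here are hypothetical) could look like:

    { "inho", 3, "", {"caminho", "carinho"} }

Read per the list above: remove "inho" when the remaining stem has at least 3 characters, append nothing, and leave words matching the exception patterns unstemmed.
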
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs
index 4b48902..4440167 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs
@@ -33,7 +33,7 @@ namespace Lucene.Net.Analysis.Reverse
     /// compatibility when creating <see cref="ReverseStringFilter"/>, or when using any of
     /// its static methods:
     /// <list type="bullet">
-    ///     <item> As of 3.1, supplementary characters are handled correctly</item>
+    ///     <item><description> As of 3.1, supplementary characters are handled correctly</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
index ad8e0ea..bb086a7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
@@ -38,8 +38,8 @@ namespace Lucene.Net.Analysis.Ru
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="RussianAnalyzer"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.1, <see cref="StandardTokenizer"/> is used, Snowball stemming is done with
-    ///        <see cref="SnowballFilter"/>, and Snowball stopwords are used by default.</item>
+    ///     <item><description> As of 3.1, <see cref="StandardTokenizer"/> is used, Snowball stemming is done with
+    ///        <see cref="SnowballFilter"/>, and Snowball stopwords are used by default.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
index bcaa1d6..a6d2be2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
@@ -37,7 +37,7 @@ namespace Lucene.Net.Analysis.Snowball
     /// <para><b>NOTE</b>: This class uses the same <see cref="LuceneVersion"/>
     /// dependent settings as <see cref="StandardAnalyzer"/>, with the following addition:
     /// <list type="bullet">
-    ///   <item> As of 3.1, uses <see cref="TurkishLowerCaseFilter"/> for Turkish language.</item>
+    ///   <item><description> As of 3.1, uses <see cref="TurkishLowerCaseFilter"/> for Turkish language.</description></item>
     /// </list>
     /// </para> </summary>
     /// @deprecated (3.1) Use the language-specific analyzer in modules/analysis instead. 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs
index d9f8672..8ac2021 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs
@@ -28,8 +28,8 @@ namespace Lucene.Net.Analysis.Snowball
     /// Available stemmers are listed in Lucene.Net.Tartarus.Snowball.Ext.
     /// <para><b>NOTE</b>: <see cref="SnowballFilter"/> expects lowercased text.
     /// <list type="bullet">
-    ///     <item>For the Turkish language, see <see cref="Tr.TurkishLowerCaseFilter"/>.</item>
-    ///     <item>For other languages, see <see cref="Core.LowerCaseFilter"/>.</item>
+    ///     <item><description>For the Turkish language, see <see cref="Tr.TurkishLowerCaseFilter"/>.</description></item>
+    ///     <item><description>For other languages, see <see cref="Core.LowerCaseFilter"/>.</description></item>
     /// </list>
     /// </para>
     /// 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs
index 888431b..f5b42e0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs
@@ -30,12 +30,12 @@ namespace Lucene.Net.Analysis.Standard
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="ClassicAnalyzer"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.1, <see cref="StopFilter"/> correctly handles Unicode 4.0
-    ///         supplementary characters in stopwords</item>
-    ///     <item> As of 2.9, <see cref="StopFilter"/> preserves position
-    ///        increments</item>
-    ///     <item> As of 2.4, <see cref="Token"/>s incorrectly identified as acronyms
-    ///        are corrected (see <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>)</item>
+    ///     <item><description> As of 3.1, <see cref="StopFilter"/> correctly handles Unicode 4.0
+    ///         supplementary characters in stopwords</description></item>
+    ///     <item><description> As of 2.9, <see cref="StopFilter"/> preserves position
+    ///        increments</description></item>
+    ///     <item><description> As of 2.4, <see cref="Token"/>s incorrectly identified as acronyms
+    ///        are corrected (see <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>)</description></item>
     /// </list>
     /// 
     /// <see cref="ClassicAnalyzer"/> was named <see cref="StandardAnalyzer"/> in Lucene versions prior to 3.1. 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
index 111e23f..1bd65af 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
@@ -27,11 +27,11 @@ namespace Lucene.Net.Analysis.Standard
     /// <para> This should be a good tokenizer for most European-language documents:
     /// 
     /// <list type="bullet">
-    ///     <item>Splits words at punctuation characters, removing punctuation. However, a 
-    ///         dot that's not followed by whitespace is considered part of a token.</item>
-    ///     <item>Splits words at hyphens, unless there's a number in the token, in which case
-    ///         the whole token is interpreted as a product number and is not split.</item>
-    ///     <item>Recognizes email addresses and internet hostnames as one token.</item>
+    ///     <item><description>Splits words at punctuation characters, removing punctuation. However, a 
+    ///         dot that's not followed by whitespace is considered part of a token.</description></item>
+    ///     <item><description>Splits words at hyphens, unless there's a number in the token, in which case
+    ///         the whole token is interpreted as a product number and is not split.</description></item>
+    ///     <item><description>Recognizes email addresses and internet hostnames as one token.</description></item>
     /// </list>
     /// 
     /// </para>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs
index d7f8515..ca6c60c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs
@@ -30,17 +30,17 @@ namespace Lucene.Net.Analysis.Standard
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="StandardAnalyzer"/>:
     /// <list type="bullet">
-    ///   <item> As of 3.4, Hiragana and Han characters are no longer wrongly split
+    ///   <item><description> As of 3.4, Hiragana and Han characters are no longer wrongly split
     ///        from their combining characters. If you use a previous version number,
-    ///        you get the exact broken behavior for backwards compatibility.</item>
-    ///   <item> As of 3.1, <see cref="StandardTokenizer"/> implements Unicode text segmentation,
+    ///        you get the exact broken behavior for backwards compatibility.</description></item>
+    ///   <item><description> As of 3.1, <see cref="StandardTokenizer"/> implements Unicode text segmentation,
     ///        and <see cref="StopFilter"/> correctly handles Unicode 4.0 supplementary characters
     ///        in stopwords.  <see cref="ClassicTokenizer"/> and <see cref="ClassicAnalyzer"/> 
     ///        are the pre-3.1 implementations of <see cref="StandardTokenizer"/> and
-    ///        <see cref="StandardAnalyzer"/>.</item>
-    ///   <item> As of 2.9, <see cref="StopFilter"/> preserves position increments</item>
-    ///   <item> As of 2.4, <see cref="Token"/>s incorrectly identified as acronyms
-    ///        are corrected (see <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>)</item>
+    ///        <see cref="StandardAnalyzer"/>.</description></item>
+    ///   <item><description> As of 2.9, <see cref="StopFilter"/> preserves position increments</description></item>
+    ///   <item><description> As of 2.4, <see cref="Token"/>s incorrectly identified as acronyms
+    ///        are corrected (see <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>)</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
index 3fa7bb8..5d89a29 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
@@ -41,12 +41,12 @@ namespace Lucene.Net.Analysis.Standard
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="StandardTokenizer"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.4, Hiragana and Han characters are no longer wrongly split
+    ///     <item><description> As of 3.4, Hiragana and Han characters are no longer wrongly split
     ///         from their combining characters. If you use a previous version number,
-    ///         you get the exact broken behavior for backwards compatibility.</item>
-    ///     <item> As of 3.1, StandardTokenizer implements Unicode text segmentation.
+    ///         you get the exact broken behavior for backwards compatibility.</description></item>
+    ///     <item><description> As of 3.1, StandardTokenizer implements Unicode text segmentation.
     ///         If you use a previous version number, you get the exact behavior of
-    ///         <see cref="ClassicTokenizer"/> for backwards compatibility.</item>
+    ///         <see cref="ClassicTokenizer"/> for backwards compatibility.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerImpl.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerImpl.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerImpl.cs
index aa66336..edfcbb8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerImpl.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerImpl.cs
@@ -29,14 +29,14 @@ namespace Lucene.Net.Analysis.Standard
     /// <para/>
     /// Tokens produced are of the following types:
     /// <list type="bullet">
-    ///     <item>&lt;ALPHANUM&gt;: A sequence of alphabetic and numeric characters</item>
-    ///     <item>&lt;NUM&gt;: A number</item>
-    ///     <item>&lt;SOUTHEAST_ASIAN&gt;: A sequence of characters from South and Southeast
-    ///         Asian languages, including Thai, Lao, Myanmar, and Khmer</item>
-    ///     <item>&lt;IDEOGRAPHIC&gt;: A single CJKV ideographic character</item>
-    ///     <item>&lt;HIRAGANA&gt;: A single hiragana character</item>
-    ///     <item>&lt;KATAKANA&gt;: A sequence of katakana characters</item>
-    ///     <item>&lt;HANGUL&gt;: A sequence of Hangul characters</item>
+    ///     <item><description>&lt;ALPHANUM&gt;: A sequence of alphabetic and numeric characters</description></item>
+    ///     <item><description>&lt;NUM&gt;: A number</description></item>
+    ///     <item><description>&lt;SOUTHEAST_ASIAN&gt;: A sequence of characters from South and Southeast
+    ///         Asian languages, including Thai, Lao, Myanmar, and Khmer</description></item>
+    ///     <item><description>&lt;IDEOGRAPHIC&gt;: A single CJKV ideographic character</description></item>
+    ///     <item><description>&lt;HIRAGANA&gt;: A single hiragana character</description></item>
+    ///     <item><description>&lt;KATAKANA&gt;: A sequence of katakana characters</description></item>
+    ///     <item><description>&lt;HANGUL&gt;: A sequence of Hangul characters</description></item>
     /// </list>
     /// </summary>
     public sealed class StandardTokenizerImpl : IStandardTokenizerInterface

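As a rough sketch of how these token types surface through the tokenizer's attribute API (a minimal example assuming Lucene.NET 4.8 signatures; not part of this commit):

    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;
    using System;
    using System.IO;

    // Print each token and its type, e.g. "quick -> <ALPHANUM>", "3 -> <NUM>".
    var tokenizer = new StandardTokenizer(LuceneVersion.LUCENE_48, new StringReader("3 quick brown foxes"));
    var term = tokenizer.AddAttribute<ICharTermAttribute>();
    var type = tokenizer.AddAttribute<ITypeAttribute>();
    tokenizer.Reset();
    while (tokenizer.IncrementToken())
    {
        Console.WriteLine($"{term} -> {type.Type}");
    }
    tokenizer.End();
    tokenizer.Dispose();
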
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
index 292b7bd..31642c6 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
@@ -33,21 +33,21 @@ namespace Lucene.Net.Analysis.Standard
     /// <para/>
     /// Tokens produced are of the following types:
     /// <list type="bullet">
-    ///     <item>&lt;ALPHANUM&gt;: A sequence of alphabetic and numeric characters</item>
-    ///     <item>&lt;NUM&gt;: A number</item>
-    ///     <item>&lt;URL&gt;: A URL</item>
-    ///     <item>&lt;EMAIL&gt;: An email address</item>
-    ///     <item>&lt;SOUTHEAST_ASIAN&gt;: A sequence of characters from South and Southeast
-    ///         Asian languages, including Thai, Lao, Myanmar, and Khmer</item>
-    ///     <item>&lt;IDEOGRAPHIC&gt;: A single CJKV ideographic character</item>
-    ///     <item>&lt;HIRAGANA&gt;: A single hiragana character</item>
+    ///     <item><description>&lt;ALPHANUM&gt;: A sequence of alphabetic and numeric characters</description></item>
+    ///     <item><description>&lt;NUM&gt;: A number</description></item>
+    ///     <item><description>&lt;URL&gt;: A URL</description></item>
+    ///     <item><description>&lt;EMAIL&gt;: An email address</description></item>
+    ///     <item><description>&lt;SOUTHEAST_ASIAN&gt;: A sequence of characters from South and Southeast
+    ///         Asian languages, including Thai, Lao, Myanmar, and Khmer</description></item>
+    ///     <item><description>&lt;IDEOGRAPHIC&gt;: A single CJKV ideographic character</description></item>
+    ///     <item><description>&lt;HIRAGANA&gt;: A single hiragana character</description></item>
     /// </list>
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="UAX29URLEmailTokenizer"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.4, Hiragana and Han characters are no longer wrongly split
+    ///     <item><description> As of 3.4, Hiragana and Han characters are no longer wrongly split
     ///         from their combining characters. If you use a previous version number,
-    ///         you get the exact broken behavior for backwards compatibility.</item>
+    ///         you get the exact broken behavior for backwards compatibility.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerImpl.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerImpl.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerImpl.cs
index 547a62c..c95f064 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerImpl.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerImpl.cs
@@ -29,16 +29,16 @@ namespace Lucene.Net.Analysis.Standard
     /// <para/>
     /// Tokens produced are of the following types:
     /// <list type="bullet">
-    ///     <item>&lt;ALPHANUM&gt;: A sequence of alphabetic and numeric characters</item>
-    ///     <item>&lt;NUM&gt;: A number</item>
-    ///     <item>&lt;URL&gt;: A URL</item>
-    ///     <item>&lt;EMAIL&gt;: An email address</item>
-    ///     <item>&lt;SOUTHEAST_ASIAN&gt;: A sequence of characters from South and Southeast
-    ///         Asian languages, including Thai, Lao, Myanmar, and Khmer</item>
-    ///     <item>&lt;IDEOGRAPHIC&gt;: A single CJKV ideographic character</item>
-    ///     <item>&lt;HIRAGANA&gt;: A single hiragana character</item>
-    ///     <item>&lt;KATAKANA&gt;: A sequence of katakana characters</item>
-    ///     <item>&lt;HANGUL&gt;: A sequence of Hangul characters</item>
+    ///     <item><description>&lt;ALPHANUM&gt;: A sequence of alphabetic and numeric characters</description></item>
+    ///     <item><description>&lt;NUM&gt;: A number</description></item>
+    ///     <item><description>&lt;URL&gt;: A URL</description></item>
+    ///     <item><description>&lt;EMAIL&gt;: An email address</description></item>
+    ///     <item><description>&lt;SOUTHEAST_ASIAN&gt;: A sequence of characters from South and Southeast
+    ///         Asian languages, including Thai, Lao, Myanmar, and Khmer</description></item>
+    ///     <item><description>&lt;IDEOGRAPHIC&gt;: A single CJKV ideographic character</description></item>
+    ///     <item><description>&lt;HIRAGANA&gt;: A single hiragana character</description></item>
+    ///     <item><description>&lt;KATAKANA&gt;: A sequence of katakana characters</description></item>
+    ///     <item><description>&lt;HANGUL&gt;: A sequence of Hangul characters</description></item>
     /// </list>
     /// </summary>
     public sealed class UAX29URLEmailTokenizerImpl : IStandardTokenizerInterface

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SolrSynonymParser.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SolrSynonymParser.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SolrSynonymParser.cs
index 7016143..74b969c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SolrSynonymParser.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SolrSynonymParser.cs
@@ -26,21 +26,21 @@ namespace Lucene.Net.Analysis.Synonym
     /// <summary>
     /// Parser for the Solr synonyms format.
     /// <list type="bullet">
-    ///     <item> Blank lines and lines starting with '#' are comments.</item>
-    ///     <item> Explicit mappings match any token sequence on the LHS of "=>"
+    ///     <item><description> Blank lines and lines starting with '#' are comments.</description></item>
+    ///     <item><description> Explicit mappings match any token sequence on the LHS of "=>"
     ///         and replace with all alternatives on the RHS.  These types of mappings
     ///         ignore the expand parameter in the constructor.
     ///         Example:
     ///         <code>i-pod, i pod => ipod</code>
-    ///     </item>
-    ///     <item> Equivalent synonyms may be separated with commas and give
+    ///     </description></item>
+    ///     <item><description> Equivalent synonyms may be separated with commas and give
     ///         no explicit mapping.  In this case the mapping behavior will
     ///         be taken from the expand parameter in the constructor.  This allows
     ///         the same synonym file to be used in different synonym handling strategies.
     ///         Example:
     ///         <code>ipod, i-pod, i pod</code>
-    ///     </item>
-    ///     <item> Multiple synonym mapping entries are merged.
+    ///     </description></item>
+    ///     <item><description> Multiple synonym mapping entries are merged.
     ///         Example:
     ///         <code>
     ///             foo => foo bar
@@ -48,7 +48,7 @@ namespace Lucene.Net.Analysis.Synonym
     ///             is equivalent to
     ///             foo => foo bar, baz
     ///         </code>
-    ///     </item>
+    ///     </description></item>
     /// </list>
     /// @lucene.experimental
     /// </summary>

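A minimal usage sketch for the parser described above (assuming the Lucene.NET 4.8 port keeps the Java constructor and Parse/Build shape; the whitespace analyzer is only an illustrative choice):

    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Synonym;
    using Lucene.Net.Util;
    using System.IO;

    // Parse two Solr-format rules: an explicit mapping and an equivalence group.
    var analyzer = new WhitespaceAnalyzer(LuceneVersion.LUCENE_48);
    var parser = new SolrSynonymParser(true, true, analyzer); // dedup, expand
    parser.Parse(new StringReader("i-pod, i pod => ipod\nipod, i-pod, i pod"));
    SynonymMap map = parser.Build();
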
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
index 80699e6..d08941c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
@@ -49,9 +49,9 @@ namespace Lucene.Net.Analysis.Synonym
     /// <see cref="SynonymMap.Parser"/> class name. The default is <c>solr</c>.
     /// A custom <see cref="SynonymMap.Parser"/> is expected to have a constructor taking:
     /// <list type="bullet">
-    ///     <item><c><see cref="bool"/> dedup</c> - true if duplicates should be ignored, false otherwise</item>
-    ///     <item><c><see cref="bool"/> expand</c> - true if conflation groups should be expanded, false if they are one-directional</item>
-    ///     <item><c><see cref="Analyzer"/> analyzer</c> - an analyzer used for each raw synonym</item>
+    ///     <item><description><c><see cref="bool"/> dedup</c> - true if duplicates should be ignored, false otherwise</description></item>
+    ///     <item><description><c><see cref="bool"/> expand</c> - true if conflation groups should be expanded, false if they are one-directional</description></item>
+    ///     <item><description><c><see cref="Analyzer"/> analyzer</c> - an analyzer used for each raw synonym</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
index 0885069..5b84fde 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis.Th
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="ThaiAnalyzer"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.6, a set of Thai stopwords is used by default</item>
+    ///     <item><description> As of 3.6, a set of Thai stopwords is used by default</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
index a3ec443..6761686 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
@@ -33,10 +33,10 @@ namespace Lucene.Net.Analysis.Util
     /// <para>
     /// The typical lifecycle for a factory consumer is:
     /// <list type="bullet">
-    ///     <item>Create factory via its constructor (or via XXXFactory.ForName)</item>
-    ///     <item>(Optional) If the factory uses resources such as files, 
-    ///         <see cref="IResourceLoaderAware.Inform(IResourceLoader)"/> is called to initialize those resources.</item>
-    ///     <item>Consumer calls create() to obtain instances.</item>
+    ///     <item><description>Create factory via its constructor (or via XXXFactory.ForName)</description></item>
+    ///     <item><description>(Optional) If the factory uses resources such as files, 
+    ///         <see cref="IResourceLoaderAware.Inform(IResourceLoader)"/> is called to initialize those resources.</description></item>
+    ///     <item><description>Consumer calls create() to obtain instances.</description></item>
     /// </list>
     /// </para>
     /// </summary>

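A hedged sketch of that lifecycle (assuming the name-based factory lookup was ported from Java unchanged; "lowercase" and the argument map are illustrative):

    using Lucene.Net.Analysis.Util;
    using System.Collections.Generic;

    // 1) Create the factory via the name-based lookup, passing its arguments.
    var args = new Dictionary<string, string> { { "luceneMatchVersion", "4.8" } };
    TokenFilterFactory factory = TokenFilterFactory.ForName("lowercase", args);
    // 2) (Optional) if the factory were IResourceLoaderAware, call Inform(loader) here.
    // 3) Obtain instances:
    // TokenStream filtered = factory.Create(input);
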
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
index e006ea5..447fb98 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
@@ -41,8 +41,8 @@ namespace Lucene.Net.Analysis.Util
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="CharArrayMap"/>:
     /// <list type="bullet">
-    ///   <item> As of 3.1, supplementary characters are
-    ///       properly lowercased.</item>
+    ///   <item><description> As of 3.1, supplementary characters are
+    ///       properly lowercased.</description></item>
     /// </list>
     /// Before 3.1 supplementary characters could not be
     /// lowercased correctly due to the lack of Unicode 4

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
index 9ef33c4..e3ba728 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
@@ -29,9 +29,9 @@ namespace Lucene.Net.Analysis.Util
     /// You must specify the required <see cref="LuceneVersion"/> compatibility
     /// when creating <see cref="CharTokenizer"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, <see cref="CharTokenizer"/> uses an int based API to normalize and
+    ///     <item><description>As of 3.1, <see cref="CharTokenizer"/> uses an int based API to normalize and
     ///         detect token codepoints. See <see cref="IsTokenChar(int)"/> and
-    ///         <see cref="Normalize(int)"/> for details.</item>
+    ///         <see cref="Normalize(int)"/> for details.</description></item>
     /// </list>
     /// </para>
     /// <para>

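A minimal subclass sketch showing the int-based codepoint API the note above refers to (assuming Lucene.NET 4.8 signatures; the BMP-only letter check is a deliberate simplification):

    using Lucene.Net.Analysis.Util;
    using Lucene.Net.Util;
    using System.IO;

    // Keeps runs of letters as tokens; everything else is a separator.
    public sealed class LetterOnlyTokenizer : CharTokenizer
    {
        public LetterOnlyTokenizer(LuceneVersion matchVersion, TextReader input)
            : base(matchVersion, input)
        {
        }

        // Called once per codepoint; return true for characters that belong to a token.
        protected override bool IsTokenChar(int c)
        {
            return c <= char.MaxValue && char.IsLetter((char)c); // simplification: BMP only
        }
    }
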
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
index 5687823..631879d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
@@ -134,9 +134,9 @@ namespace Lucene.Net.Analysis.Util
         /// <para>
         /// The snowball format is the following:
         /// <list type="bullet">
-        ///     <item>Lines may contain multiple words separated by whitespace.</item>
-        ///     <item>The comment character is the vertical line (&#124;).</item>
-        ///     <item>Lines may contain trailing comments.</item>
+        ///     <item><description>Lines may contain multiple words separated by whitespace.</description></item>
+        ///     <item><description>The comment character is the vertical line (&#124;).</description></item>
+        ///     <item><description>Lines may contain trailing comments.</description></item>
         /// </list>
         /// </para>
         /// </summary>
@@ -177,9 +177,9 @@ namespace Lucene.Net.Analysis.Util
         /// <para>
         /// The snowball format is the following:
         /// <list type="bullet">
-        ///     <item>Lines may contain multiple words separated by whitespace.</item>
-        ///     <item>The comment character is the vertical line (&#124;).</item>
-        ///     <item>Lines may contain trailing comments.</item>
+        ///     <item><description>Lines may contain multiple words separated by whitespace.</description></item>
+        ///     <item><description>The comment character is the vertical line (&#124;).</description></item>
+        ///     <item><description>Lines may contain trailing comments.</description></item>
         /// </list>
         /// </para>
         /// </summary>

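A short sketch of loading that format (assuming GetSnowballWordSet mirrors Java's getSnowballWordSet(Reader, Version)):

    using Lucene.Net.Analysis.Util;
    using Lucene.Net.Util;
    using System.IO;

    // Two words on one line, a whole-line comment, and a trailing comment.
    const string snowball = "foo bar | a comment\n| whole-line comment\nbaz | trailing comment";
    CharArraySet words = WordlistLoader.GetSnowballWordSet(
        new StringReader(snowball), LuceneVersion.LUCENE_48);
    // words now contains: foo, bar, baz
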
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs
index bc2fa88..b1d289d 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs
@@ -38,16 +38,16 @@ namespace Lucene.Net.Collation
     ///   same at query time):
     /// </para>
     /// <list type="number">
-    ///   <item>JVM vendor</item>
-    ///   <item>JVM version, including patch version</item>
-    ///   <item>
+    ///   <item><description>JVM vendor</description></item>
+    ///   <item><description>JVM version, including patch version</description></item>
+    ///   <item><description>
     ///     The language (and country and variant, if specified) of the Locale
     ///     used when constructing the collator via
     ///     <see cref="Collator.Create(System.Globalization.CultureInfo)"/>.
-    ///   </item>
-    ///   <item>
+    ///   </description></item>
+    ///   <item><description>
     ///     The collation strength used - see <see cref="Collator.Strength"/>
-    ///   </item>
+    ///   </description></item>
     /// </list> 
     /// <para>
     ///   The <c>ICUCollationAttributeFactory</c> in the analysis-icu package 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
index 92ce4a0..4028f0c 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
@@ -45,16 +45,16 @@ namespace Lucene.Net.Collation
     ///   same at query time):
     /// </para>
     /// <list type="number">
-    ///   <item>JVM vendor</item>
-    ///   <item>JVM version, including patch version</item>
-    ///   <item>
+    ///   <item><description>JVM vendor</description></item>
+    ///   <item><description>JVM version, including patch version</description></item>
+    ///   <item><description>
     ///     The language (and country and variant, if specified) of the Locale
     ///     used when constructing the collator via
     ///     <see cref="Collator.Create(System.Globalization.CultureInfo)"/>.
-    ///   </item>
-    ///   <item>
+    ///   </description></item>
+    ///   <item><description>
     ///     The collation strength used - see <see cref="Collator.Strength"/>
-    ///   </item>
+    ///   </description></item>
     /// </list> 
     /// <para>
     ///   The <c>ICUCollationKeyAnalyzer</c> in the analysis-icu package 
@@ -76,8 +76,8 @@ namespace Lucene.Net.Collation
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="CollationKeyAnalyzer"/>:
     /// <list type="bullet">
-    ///   <item> As of 4.0, Collation Keys are directly encoded as bytes. Previous
-    ///   versions will encode the bytes with <see cref="IndexableBinaryStringTools"/>.</item>
+    ///   <item><description> As of 4.0, Collation Keys are directly encoded as bytes. Previous
+    ///   versions will encode the bytes with <see cref="IndexableBinaryStringTools"/>.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
index 4e053d7..d498f84 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
@@ -40,16 +40,16 @@ namespace Lucene.Net.Collation
     ///   same at query time):
     /// </para>
     /// <list type="number">
-    ///   <item>JVM vendor</item>
-    ///   <item>JVM version, including patch version</item>
-    ///   <item>
+    ///   <item><description>JVM vendor</description></item>
+    ///   <item><description>JVM version, including patch version</description></item>
+    ///   <item><description>
     ///     The language (and country and variant, if specified) of the Locale
     ///     used when constructing the collator via
     ///     <see cref="Collator.Create(System.Globalization.CultureInfo)"/>.
-    ///   </item>
-    ///   <item>
+    ///   </description></item>
+    ///   <item><description>
     ///     The collation strength used - see <see cref="Collator.Strength"/>
-    ///   </item>
+    ///   </description></item>
     /// </list> 
     /// <para>
     ///   The <c>ICUCollationKeyFilter</c> in the analysis-icu package 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
index 6599f17..5293cb1 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
@@ -34,26 +34,26 @@ namespace Lucene.Net.Collation
     /// <para>
     /// This factory can be created in two ways: 
     /// <list type="bullet">
-    ///  <item>Based upon a system collator associated with a <see cref="System.Globalization.CultureInfo"/>.</item>
-    ///  <item>Based upon a tailored ruleset.</item>
+    ///  <item><description>Based upon a system collator associated with a <see cref="System.Globalization.CultureInfo"/>.</description></item>
+    ///  <item><description>Based upon a tailored ruleset.</description></item>
     /// </list>
     /// </para>
     /// <para>
     /// Using a System collator:
     /// <list type="bullet">
-    ///  <item>language: ISO-639 language code (mandatory)</item>
-    ///  <item>country: ISO-3166 country code (optional)</item>
-    ///  <item>variant: vendor or browser-specific code (optional)</item>
-    ///  <item>strength: 'primary','secondary','tertiary', or 'identical' (optional)</item>
-    ///  <item>decomposition: 'no','canonical', or 'full' (optional)</item>
+    ///  <item><description>language: ISO-639 language code (mandatory)</description></item>
+    ///  <item><description>country: ISO-3166 country code (optional)</description></item>
+    ///  <item><description>variant: vendor or browser-specific code (optional)</description></item>
+    ///  <item><description>strength: 'primary','secondary','tertiary', or 'identical' (optional)</description></item>
+    ///  <item><description>decomposition: 'no','canonical', or 'full' (optional)</description></item>
     /// </list>
     /// </para>
     /// <para>
     /// Using a Tailored ruleset:
     /// <list type="bullet">
-    ///  <item>custom: UTF-8 text file containing rules supported by RuleBasedCollator (mandatory)</item>
-    ///  <item>strength: 'primary','secondary','tertiary', or 'identical' (optional)</item>
-    ///  <item>decomposition: 'no','canonical', or 'full' (optional)</item>
+    ///  <item><description>custom: UTF-8 text file containing rules supported by RuleBasedCollator (mandatory)</description></item>
+    ///  <item><description>strength: 'primary','secondary','tertiary', or 'identical' (optional)</description></item>
+    ///  <item><description>decomposition: 'no','canonical', or 'full' (optional)</description></item>
     /// </list>
     /// 
     /// <code>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Facet/Taxonomy/ParallelTaxonomyArrays.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Facet/Taxonomy/ParallelTaxonomyArrays.cs b/src/Lucene.Net.Facet/Taxonomy/ParallelTaxonomyArrays.cs
index c5db947..6dc81c9 100644
--- a/src/Lucene.Net.Facet/Taxonomy/ParallelTaxonomyArrays.cs
+++ b/src/Lucene.Net.Facet/Taxonomy/ParallelTaxonomyArrays.cs
@@ -23,12 +23,12 @@ namespace Lucene.Net.Facet.Taxonomy
     /// <summary>
     /// Returns 3 arrays for traversing the taxonomy:
     /// <list type="bullet">
-    /// <item> <see cref="Parents"/>: <c>Parents[i]</c> denotes the parent of category
-    /// ordinal <c>i</c>.</item>
-    /// <item> <see cref="Children"/>: <c>Children[i]</c> denotes a child of category ordinal
-    /// <c>i</c>.</item>
-    /// <item> <see cref="Siblings"/>: <c>Siblings[i]</c> denotes the sibling of category
-    /// ordinal <c>i</c>.</item>
+    /// <item><description> <see cref="Parents"/>: <c>Parents[i]</c> denotes the parent of category
+    /// ordinal <c>i</c>.</description></item>
+    /// <item><description> <see cref="Children"/>: <c>Children[i]</c> denotes a child of category ordinal
+    /// <c>i</c>.</description></item>
+    /// <item><description> <see cref="Siblings"/>: <c>Siblings[i]</c> denotes the sibling of category
+    /// ordinal <c>i</c>.</description></item>
     /// </list>
     /// 
     /// To traverse the taxonomy tree, you typically start with <c>Children[0]</c>

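The child traversal these arrays enable could be sketched as follows (assuming, as in Java Lucene, that an ordinal of -1, i.e. TaxonomyReader.INVALID_ORDINAL, terminates the sibling chain):

    using System;

    // Visit every child of category ordinal `ord` using Children/Siblings.
    int[] children = arrays.Children;  // `arrays` is a ParallelTaxonomyArrays
    int[] siblings = arrays.Siblings;
    for (int child = children[ord]; child != -1; child = siblings[child])
    {
        Console.WriteLine($"child ordinal: {child}");
    }
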
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Facet/Taxonomy/TaxonomyReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Facet/Taxonomy/TaxonomyReader.cs b/src/Lucene.Net.Facet/Taxonomy/TaxonomyReader.cs
index cbe3742..ca4d6db 100644
--- a/src/Lucene.Net.Facet/Taxonomy/TaxonomyReader.cs
+++ b/src/Lucene.Net.Facet/Taxonomy/TaxonomyReader.cs
@@ -31,15 +31,15 @@ namespace Lucene.Net.Facet.Taxonomy
     /// A TaxonomyReader holds a list of categories. Each category has a serial
     /// number which we call an "ordinal", and a hierarchical "path" name:
     /// <list type="bullet">
-    /// <item>
+    /// <item><description>
     /// The ordinal is an integer that starts at 0 for the first category (which is
     /// always the root category), and grows contiguously as more categories are
     /// added; Note that once a category is added, it can never be deleted.
-    /// </item>
-    /// <item>
+    /// </description></item>
+    /// <item><description>
     /// The path is a CategoryPath object specifying the category's position in the
     /// hierarchy.
-    /// </item>
+    /// </description></item>
     /// </list>
     /// </para>
     /// <b>Notes about concurrent access to the taxonomy:</b>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs b/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs
index 05f8e92..c6845e9 100644
--- a/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs
+++ b/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs
@@ -155,35 +155,35 @@ namespace Lucene.Net.Search.Highlight
         /// <para/>
         /// In my tests the speeds to recreate 1000 token streams using this method are:
         /// <list type="bullet">
-        ///     <item>
+        ///     <item><description>
         ///     with TermVector offset only data stored - 420  milliseconds 
-        ///     </item>
-        ///     <item>
+        ///     </description></item>
+        ///     <item><description>
         ///     with TermVector offset AND position data stored - 271 milliseconds
         ///     (nb timings for TermVector with position data are based on a tokenizer with contiguous
         ///     positions - no overlaps or gaps)
-        ///     </item>
-        ///     <item>
+        ///     </description></item>
+        ///     <item><description>
         ///     The cost of not using TermPositionVector to store
         ///     pre-parsed content and using an analyzer to re-parse the original content:
         ///     - reanalyzing the original content - 980 milliseconds
-        ///     </item>
+        ///     </description></item>
         /// </list>
         /// 
         /// The re-analyze timings will typically vary depending on -
         /// <list type="number">
-        ///     <item>
+        ///     <item><description>
         ///     The complexity of the analyzer code (timings above were using a
         ///     stemmer/lowercaser/stopword combo)
-        ///     </item>
-        ///     <item>
+        ///     </description></item>
+        ///     <item><description>
         ///     The  number of other fields (Lucene reads ALL fields off the disk 
         ///     when accessing just one document field - can cost dear!)
-        ///     </item>
-        ///     <item>
+        ///     </description></item>
+        ///     <item><description>
         ///     Use of compression on field storage - could be faster due to compression (less disk IO)
         ///     or slower (more CPU burn) depending on the content.
-        ///     </item>
+        ///     </description></item>
         /// </list>
         /// </summary>
         /// <param name="tpv"></param>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Highlighter/PostingsHighlight/PassageScorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Highlighter/PostingsHighlight/PassageScorer.cs b/src/Lucene.Net.Highlighter/PostingsHighlight/PassageScorer.cs
index de0fd45..fc64a70 100644
--- a/src/Lucene.Net.Highlighter/PostingsHighlight/PassageScorer.cs
+++ b/src/Lucene.Net.Highlighter/PostingsHighlight/PassageScorer.cs
@@ -43,9 +43,9 @@ namespace Lucene.Net.Search.PostingsHighlight
         /// <summary>
         /// Creates <see cref="PassageScorer"/> with these default values:
         /// <list type="bullet">
-        ///     <item><c>k1 = 1.2</c></item>
-        ///     <item><c>b = 0.75</c></item>
-        ///     <item><c>pivot = 87</c></item>
+        ///     <item><description><c>k1 = 1.2</c></description></item>
+        ///     <item><description><c>b = 0.75</c></description></item>
+        ///     <item><description><c>pivot = 87</c></description></item>
         /// </list>
         /// </summary>
         public PassageScorer()

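Those defaults could equivalently be passed explicitly (assuming a three-float constructor exists alongside the parameterless one shown here):

    // Same as `new PassageScorer()` given the documented defaults.
    var scorer = new PassageScorer(1.2f, 0.75f, 87f); // k1, b, pivot
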
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs b/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
index 6211042..7562228 100644
--- a/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
+++ b/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
@@ -46,10 +46,10 @@ namespace Lucene.Net.Search.PostingsHighlight
     /// <para/>
     /// You can customize the behavior by subclassing this highlighter, some important hooks:
     /// <list type="bullet">
-    ///     <item><see cref="GetBreakIterator(string)"/>: Customize how the text is divided into passages.</item>
-    ///     <item><see cref="GetScorer(string)"/>: Customize how passages are ranked.</item>
-    ///     <item><see cref="GetFormatter(string)"/>: Customize how snippets are formatted.</item>
-    ///     <item><see cref="GetIndexAnalyzer(string)"/>: Enable highlighting of MultiTermQuerys such as <see cref="WildcardQuery"/>.</item>
+    ///     <item><see cref="GetBreakIterator(string)"/>: Customize how the text is divided into passages.</description></item>
+    ///     <item><see cref="GetScorer(string)"/>: Customize how passages are ranked.</description></item>
+    ///     <item><see cref="GetFormatter(string)"/>: Customize how snippets are formatted.</description></item>
+    ///     <item><see cref="GetIndexAnalyzer(string)"/>: Enable highlighting of MultiTermQuerys such as <see cref="WildcardQuery"/>.</description></item>
     /// </list>
     /// <para/>
     /// <b>WARNING</b>: The code is very new and probably still has some exciting bugs!

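For instance, the formatter hook above might be overridden like this (a hedged sketch; the DefaultPassageFormatter arguments are illustrative):

    using Lucene.Net.Search.PostingsHighlight;

    // Swap in a custom snippet formatter via the GetFormatter hook.
    public class BracketHighlighter : PostingsHighlighter
    {
        protected override PassageFormatter GetFormatter(string field)
        {
            // preTag, postTag, ellipsis, escape
            return new DefaultPassageFormatter("[", "]", "... ", false);
        }
    }
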
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Misc/Index/Sorter/BlockJoinComparatorSource.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Misc/Index/Sorter/BlockJoinComparatorSource.cs b/src/Lucene.Net.Misc/Index/Sorter/BlockJoinComparatorSource.cs
index 1ee6f1c..db9b8d1 100644
--- a/src/Lucene.Net.Misc/Index/Sorter/BlockJoinComparatorSource.cs
+++ b/src/Lucene.Net.Misc/Index/Sorter/BlockJoinComparatorSource.cs
@@ -28,9 +28,9 @@ namespace Lucene.Net.Index.Sorter
     /// Note that this class is intended to used with <see cref="SortingMergePolicy"/>,
     /// and for other purposes has some limitations:
     /// <list type="bullet">
-    ///    <item>Cannot yet be used with <see cref="IndexSearcher.SearchAfter(ScoreDoc, Query, Filter, int, Sort)">
-    ///    IndexSearcher.SearchAfter</see></item>
-    ///    <item>Filling sort field values is not yet supported.</item>
+    ///    <item><description>Cannot yet be used with <see cref="IndexSearcher.SearchAfter(ScoreDoc, Query, Filter, int, Sort)">
+    ///    IndexSearcher.SearchAfter</see></description></item>
+    ///    <item><description>Filling sort field values is not yet supported.</description></item>
     /// </list>
     /// @lucene.experimental
     /// </para>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Queries/CustomScoreQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/CustomScoreQuery.cs b/src/Lucene.Net.Queries/CustomScoreQuery.cs
index 1ee6639..e997f3c 100644
--- a/src/Lucene.Net.Queries/CustomScoreQuery.cs
+++ b/src/Lucene.Net.Queries/CustomScoreQuery.cs
@@ -29,8 +29,8 @@ namespace Lucene.Net.Queries
     /// <summary>
     /// Query that sets document score as a programmatic function of several (sub) scores:
     /// <list type="bullet">
-    ///    <item>the score of its subQuery (any query)</item>
-    ///    <item>(optional) the score of its <see cref="FunctionQuery"/> (or queries).</item>
+    ///    <item><description>the score of its subQuery (any query)</description></item>
+    ///    <item><description>(optional) the score of its <see cref="FunctionQuery"/> (or queries).</description></item>
     /// </list>
     /// Subclasses can modify the computation by overriding <see cref="GetCustomScoreProvider"/>.
     /// 

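A small composition sketch for that query (the field name is illustrative; SingleFieldSource is assumed to be the .NET port's name for Java's FloatFieldSource):

    using Lucene.Net.Index;
    using Lucene.Net.Queries;
    using Lucene.Net.Queries.Function;
    using Lucene.Net.Queries.Function.ValueSources;
    using Lucene.Net.Search;

    // Score = custom function of the subquery score and a numeric-field function.
    Query sub = new TermQuery(new Term("body", "lucene"));
    var boost = new FunctionQuery(new SingleFieldSource("popularity"));
    var custom = new CustomScoreQuery(sub, boost);
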
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs b/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
index 16b2fe0..f9d0312 100644
--- a/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
+++ b/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
@@ -86,11 +86,11 @@ namespace Lucene.Net.Queries.Mlt
     /// <para/>
     /// Thus you:
     /// <list type="bullet">
-    ///     <item>do your normal, Lucene setup for searching,</item>
-    ///     <item>create a MoreLikeThis,</item>
-    ///     <item>get the text of the doc you want to find similarities to</item>
-    ///     <item>then call one of the <see cref="Like"/> calls to generate a similarity query</item>
-    ///     <item>call the searcher to find the similar docs</item>
+    ///     <item><description>do your normal, Lucene setup for searching,</description></item>
+    ///     <item><description>create a MoreLikeThis,</description></item>
+    ///     <item><description>get the text of the doc you want to find similarities to</description></item>
+    ///     <item><description>then call one of the <see cref="Like"/> calls to generate a similarity query</description></item>
+    ///     <item><description>call the searcher to find the similar docs</description></item>
     /// </list>
     /// <para/>
     /// <b>More Advanced Usage</b>
@@ -103,15 +103,15 @@ namespace Lucene.Net.Queries.Mlt
     /// may want to call the other set methods to control how the similarity queries are
     /// generated:
     /// <list type="bullet">
-    ///     <item><see cref="MinTermFreq"/></item>
-    ///     <item><see cref="MinDocFreq"/></item>
-    ///     <item><see cref="MaxDocFreq"/></item>
-    ///     <item><see cref="SetMaxDocFreqPct(int)"/></item>
-    ///     <item><see cref="MinWordLen"/></item>
-    ///     <item><see cref="MaxWordLen"/></item>
-    ///     <item><see cref="MaxQueryTerms"/></item>
-    ///     <item><see cref="MaxNumTokensParsed"/></item>
-    ///     <item><see cref="StopWords"/></item>
+    ///     <item><description><see cref="MinTermFreq"/></description></item>
+    ///     <item><description><see cref="MinDocFreq"/></description></item>
+    ///     <item><description><see cref="MaxDocFreq"/></description></item>
+    ///     <item><description><see cref="SetMaxDocFreqPct(int)"/></description></item>
+    ///     <item><description><see cref="MinWordLen"/></description></item>
+    ///     <item><description><see cref="MaxWordLen"/></description></item>
+    ///     <item><description><see cref="MaxQueryTerms"/></description></item>
+    ///     <item><description><see cref="MaxNumTokensParsed"/></description></item>
+    ///     <item><description><see cref="StopWords"/></description></item>
     /// </list>
     /// </summary>
     /// <remarks>
@@ -650,12 +650,12 @@ namespace Lucene.Net.Queries.Mlt
         /// Each array has 6 elements.
         /// The elements are:
         /// <list type="bullet">
-        ///     <item>The word (<see cref="string"/>)</item>
-        ///     <item>The top field that this word comes from (<see cref="string"/>)</item>
-        ///     <item>The score for this word (<see cref="float"/>)</item>
-        ///     <item>The IDF value (<see cref="float"/>)</item>
-        ///     <item>The frequency of this word in the index (<see cref="int"/>)</item>
-        ///     <item>The frequency of this word in the source document (<see cref="int"/>)</item>
+        ///     <item><description>The word (<see cref="string"/>)</description></item>
+        ///     <item><description>The top field that this word comes from (<see cref="string"/>)</description></item>
+        ///     <item><description>The score for this word (<see cref="float"/>)</description></item>
+        ///     <item><description>The IDF value (<see cref="float"/>)</description></item>
+        ///     <item><description>The frequency of this word in the index (<see cref="int"/>)</description></item>
+        ///     <item><description>The frequency of this word in the source document (<see cref="int"/>)</description></item>
         /// </list>
         /// This is a somewhat "advanced" routine, and in general only the 1st entry in the array is of interest.
         /// This method is exposed so that you can identify the "interesting words" in a document.
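
A minimal sketch of the five steps listed above (illustrative only; reader, analyzer, searcher, and docId stand in for your own setup):

    // 1. normal Lucene setup: IndexReader, IndexSearcher, Analyzer.
    // 2. create a MoreLikeThis over the reader.
    MoreLikeThis mlt = new MoreLikeThis(reader);
    mlt.Analyzer = analyzer;
    mlt.FieldNames = new[] { "body" };           // fields to mine for terms
    // 3-4. point it at the source document and build the similarity query.
    Query like = mlt.Like(docId);
    // 5. ask the searcher for the similar documents.
    TopDocs similar = searcher.Search(like, 10);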

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.QueryParser/Classic/QueryParser.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.QueryParser/Classic/QueryParser.cs b/src/Lucene.Net.QueryParser/Classic/QueryParser.cs
index 070802a..3a35db8 100644
--- a/src/Lucene.Net.QueryParser/Classic/QueryParser.cs
+++ b/src/Lucene.Net.QueryParser/Classic/QueryParser.cs
@@ -34,19 +34,19 @@ namespace Lucene.Net.QueryParsers.Classic
     /// A Query is a series of clauses.
     /// A clause may be prefixed by:
     /// <list type="bullet">
-    /// <item> a plus (<c>+</c>) or a minus (<c>-</c>) sign, indicating
-    /// that the clause is required or prohibited respectively; or</item>
-    /// <item> a term followed by a colon, indicating the field to be searched.
-    /// This enables one to construct queries which search multiple fields.</item>
+    /// <item><description> a plus (<c>+</c>) or a minus (<c>-</c>) sign, indicating
+    /// that the clause is required or prohibited respectively; or</description></item>
+    /// <item><description> a term followed by a colon, indicating the field to be searched.
+    /// This enables one to construct queries which search multiple fields.</description></item>
     /// </list>
     /// 
     /// <para/>
     /// A clause may be either:
     /// <list type="bullet">
-    /// <item> a term, indicating all the documents that contain this term; or</item>
-    /// <item> a nested query, enclosed in parentheses.  Note that this may be used
+    /// <item><description> a term, indicating all the documents that contain this term; or</description></item>
+    /// <item><description> a nested query, enclosed in parentheses.  Note that this may be used
     /// with a <c>+</c>/<c>-</c> prefix to require any of a set of
-    /// terms.</item>
+    /// terms.</description></item>
     /// </list>
     /// 
     /// <para/>
@@ -95,7 +95,7 @@ namespace Lucene.Net.QueryParsers.Classic
     /// <b>NOTE</b>: You must specify the required <see cref="LuceneVersion" /> compatibility when
     /// creating QueryParser:
     /// <list type="bullet">
-    /// <item>As of 3.1, <see cref="QueryParserBase.AutoGeneratePhraseQueries"/> is false by default.</item>
+    /// <item><description>As of 3.1, <see cref="QueryParserBase.AutoGeneratePhraseQueries"/> is false by default.</description></item>
     /// </list>
     /// </summary>
     public class QueryParser : QueryParserBase
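
A short usage sketch of the clause syntax described above (illustrative; the analyzer is a placeholder):

    using Lucene.Net.QueryParsers.Classic;
    using Lucene.Net.Util;

    var parser = new QueryParser(LuceneVersion.LUCENE_48, "body", analyzer);
    // '+' requires a clause, '-' prohibits one, "field:" retargets it,
    // and parentheses nest a sub-query under the prefix.
    Query q = parser.Parse("+lucene -jakarta title:(apache search)");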

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.QueryParser/Flexible/Core/QueryParserHelper.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.QueryParser/Flexible/Core/QueryParserHelper.cs b/src/Lucene.Net.QueryParser/Flexible/Core/QueryParserHelper.cs
index 8bc7be4..f113c99 100644
--- a/src/Lucene.Net.QueryParser/Flexible/Core/QueryParserHelper.cs
+++ b/src/Lucene.Net.QueryParser/Flexible/Core/QueryParserHelper.cs
@@ -195,19 +195,19 @@ namespace Lucene.Net.QueryParsers.Flexible.Core
         /// In this method the three phases are executed:
         /// <para/>
         /// <list type="number">
-        ///     <item>
+        ///     <item><description>
         ///     the query string is parsed using the
         ///     text parser returned by <see cref="SyntaxParser"/>, the result is a query
         ///     node tree.
-        ///     </item>
-        ///     <item>
+        ///     </description></item>
+        ///     <item><description>
         ///     the query node tree is processed by the
         ///     processor returned by <see cref="QueryNodeProcessor"/>.
-        ///     </item>
-        ///     <item>
+        ///     </description></item>
+        ///     <item><description>
         ///     a object is built from the query node
         ///     tree using the builder returned by <see cref="QueryBuilder"/>.
-        ///     </item>
+        ///     </description></item>
         /// </list>
         /// </summary>
         /// <param name="query">the query string</param>
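
Conceptually, the three phases reduce to the following sketch (a guess at the ported shapes, assuming ISyntaxParser.Parse, IQueryNodeProcessor.Process, and IQueryBuilder.Build mirror the Java interfaces; helper is any QueryParserHelper subclass):

    IQueryNode tree = helper.SyntaxParser.Parse(query, defaultField); // phase 1: parse
    tree = helper.QueryNodeProcessor.Process(tree);                   // phase 2: process
    object result = helper.QueryBuilder.Build(tree);                  // phase 3: build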

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.QueryParser/Flexible/Standard/StandardQueryParser.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.QueryParser/Flexible/Standard/StandardQueryParser.cs b/src/Lucene.Net.QueryParser/Flexible/Standard/StandardQueryParser.cs
index 1940263..dedcd9f 100644
--- a/src/Lucene.Net.QueryParser/Flexible/Standard/StandardQueryParser.cs
+++ b/src/Lucene.Net.QueryParser/Flexible/Standard/StandardQueryParser.cs
@@ -54,25 +54,25 @@ namespace Lucene.Net.QueryParsers.Flexible.Standard
     /// <para/>
     /// A Query is a series of clauses. A clause may be prefixed by:
     /// <list type="bullet">
-    ///     <item>
+    ///     <item><description>
     ///     a plus (<c>+</c>) or a minus (<c>-</c>) sign, indicating that
     ///     the clause is required or prohibited respectively; or
-    ///     </item>
-    ///     <item>
+    ///     </description></item>
+    ///     <item><description>
     ///     a term followed by a colon, indicating the field to be searched. This
     ///     enables one to construct queries which search multiple fields.
-    ///     </item>
+    ///     </description></item>
     /// </list>
     /// 
     /// A clause may be either:
     /// <list type="bullet">
-    ///     <item>
+    ///     <item><description>
     ///     a term, indicating all the documents that contain this term; or
-    ///     </item>
-    ///     <item>
+    ///     </description></item>
+    ///     <item><description>
     ///     a nested query, enclosed in parentheses. Note that this may be used with
     ///     a <c>+</c>/<c>-</c> prefix to require any of a set of terms.
-    ///     </item>
+    ///     </description></item>
     /// </list>
     /// 
     /// Thus, in BNF, the query grammar is:

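A hedged usage sketch of the parser described above (not part of the commit; the field name is a placeholder):

    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.QueryParsers.Flexible.Standard;
    using Lucene.Net.Util;

    var qp = new StandardQueryParser(new StandardAnalyzer(LuceneVersion.LUCENE_48));
    Query q = qp.Parse("+apache +(lucene OR solr)", "body");
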
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.QueryParser/Simple/SimpleQueryParser.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.QueryParser/Simple/SimpleQueryParser.cs b/src/Lucene.Net.QueryParser/Simple/SimpleQueryParser.cs
index 01af234..7438bdd 100644
--- a/src/Lucene.Net.QueryParser/Simple/SimpleQueryParser.cs
+++ b/src/Lucene.Net.QueryParser/Simple/SimpleQueryParser.cs
@@ -69,14 +69,14 @@ namespace Lucene.Net.QueryParsers.Simple
     /// to decipher what it can; however, this may mean odd or unexpected results.
     /// <h4>Query Operators</h4>
     /// <list type="bullet">
-    ///  <item>'<c>+</c>' specifies <c>AND</c> operation: <c>token1+token2</c></item>
-    ///  <item>'<c>|</c>' specifies <c>OR</c> operation: <c>token1|token2</c></item>
-    ///  <item>'<c>-</c>' negates a single token: <c>-token0</c></item>
-    ///  <item>'<c>"</c>' creates phrases of terms: <c>"term1 term2 ..."</c></item>
-    ///  <item>'<c>*</c>' at the end of terms specifies prefix query: <c>term*</c></item>
-    ///  <item>'<c>~</c>N' at the end of terms specifies fuzzy query: <c>term~1</c></item>
-    ///  <item>'<c>~</c>N' at the end of phrases specifies near query: <c>"term1 term2"~5</c></item>
-    ///  <item>'<c>(</c>' and '<c>)</c>' specifies precedence: <c>token1 + (token2 | token3)</c></item>
+    ///  <item><description>'<c>+</c>' specifies <c>AND</c> operation: <c>token1+token2</c></description></item>
+    ///  <item><description>'<c>|</c>' specifies <c>OR</c> operation: <c>token1|token2</c></description></item>
+    ///  <item><description>'<c>-</c>' negates a single token: <c>-token0</c></description></item>
+    ///  <item><description>'<c>"</c>' creates phrases of terms: <c>"term1 term2 ..."</c></description></item>
+    ///  <item><description>'<c>*</c>' at the end of terms specifies prefix query: <c>term*</c></description></item>
+    ///  <item><description>'<c>~</c>N' at the end of terms specifies fuzzy query: <c>term~1</c></description></item>
+    ///  <item><description>'<c>~</c>N' at the end of phrases specifies near query: <c>"term1 term2"~5</c></description></item>
+    ///  <item><description>'<c>(</c>' and '<c>)</c>' specifies precedence: <c>token1 + (token2 | token3)</c></description></item>
     /// </list>
     /// <para/>
     /// The default operator is <c>OR</c> if no other operator is specified.
@@ -99,20 +99,20 @@ namespace Lucene.Net.QueryParsers.Simple
     /// beyond the first character do not need to be escaped.
     /// For example:
     /// <list type="bullet">
-    ///   <item><c>-term1</c>   -- Specifies <c>NOT</c> operation against <c>term1</c></item>
-    ///   <item><c>\-term1</c>  -- Searches for the term <c>-term1</c>.</item>
-    ///   <item><c>term-1</c>   -- Searches for the term <c>term-1</c>.</item>
-    ///   <item><c>term\-1</c>  -- Searches for the term <c>term-1</c>.</item>
+    ///   <item><description><c>-term1</c>   -- Specifies <c>NOT</c> operation against <c>term1</c></description></item>
+    ///   <item><description><c>\-term1</c>  -- Searches for the term <c>-term1</c>.</description></item>
+    ///   <item><description><c>term-1</c>   -- Searches for the term <c>term-1</c>.</description></item>
+    ///   <item><description><c>term\-1</c>  -- Searches for the term <c>term-1</c>.</description></item>
     /// </list>
     /// <para/>
     /// The '<c>*</c>' operator is a special case. On individual terms (not phrases) the last
     /// character of a term that is '<c>*</c>' must be escaped; however, any '<c>*</c>' characters
     /// before the last character do not need to be escaped:
     /// <list type="bullet">
-    ///   <item><c>term1*</c>  --  Searches for the prefix <c>term1</c></item>
-    ///   <item><c>term1\*</c> --  Searches for the term <c>term1*</c></item>
-    ///   <item><c>term*1</c>  --  Searches for the term <c>term*1</c></item>
-    ///   <item><c>term\*1</c> --  Searches for the term <c>term*1</c></item>
+    ///   <item><description><c>term1*</c>  --  Searches for the prefix <c>term1</c></description></item>
+    ///   <item><description><c>term1\*</c> --  Searches for the term <c>term1*</c></description></item>
+    ///   <item><description><c>term*1</c>  --  Searches for the term <c>term*1</c></description></item>
+    ///   <item><description><c>term\*1</c> --  Searches for the term <c>term*1</c></description></item>
     /// </list>
     /// <para/>
     /// Note that above examples consider the terms before text processing.
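
Putting the operator and escaping rules above together, a small sketch (illustrative; the analyzer is a placeholder):

    var parser = new SimpleQueryParser(analyzer, "body");
    // phrase with slop 5, OR a prefix query, minus a negated token:
    Query q = parser.Parse("\"term1 term2\"~5 | term3* -term4");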

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.QueryParser/Surround/Query/SpanNearClauseFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.QueryParser/Surround/Query/SpanNearClauseFactory.cs b/src/Lucene.Net.QueryParser/Surround/Query/SpanNearClauseFactory.cs
index 7fba250..8fbc219 100644
--- a/src/Lucene.Net.QueryParser/Surround/Query/SpanNearClauseFactory.cs
+++ b/src/Lucene.Net.QueryParser/Surround/Query/SpanNearClauseFactory.cs
@@ -29,13 +29,13 @@ namespace Lucene.Net.QueryParsers.Surround.Query
     /// Operations:
     /// 
     /// <list type="bullet">
-    ///     <item>create for a field name and an indexreader.</item>
+    ///     <item><description>create for a field name and an indexreader.</description></item>
     /// 
-    ///     <item>add a weighted Term - this should add a corresponding SpanTermQuery, or increase the weight of an existing one.</item>
+    ///     <item><description>add a weighted Term - this should add a corresponding SpanTermQuery, or increase the weight of an existing one.</description></item>
     /// 
-    ///     <item>add a weighted subquery SpanNearQuery</item>
+    ///     <item><description>add a weighted subquery SpanNearQuery</description></item>
     /// 
-    ///     <item>create a clause for SpanNearQuery from the things added above.</item>
+    ///     <item><description>create a clause for SpanNearQuery from the things added above.</description></item>
     /// </list>
     /// <para/>
     /// For this, create an array of SpanQuery's from the added ones.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Sandbox/Queries/SortedSetSortField.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Sandbox/Queries/SortedSetSortField.cs b/src/Lucene.Net.Sandbox/Queries/SortedSetSortField.cs
index 78a27db..2069099 100644
--- a/src/Lucene.Net.Sandbox/Queries/SortedSetSortField.cs
+++ b/src/Lucene.Net.Sandbox/Queries/SortedSetSortField.cs
@@ -39,15 +39,15 @@ namespace Lucene.Net.Sandbox.Queries
     /// <para/>
     /// Limitations:
     /// <list type="bullet">
-    ///     <item>
+    ///     <item><description>
     ///     Fields containing <see cref="int.MaxValue"/> or more unique values
     ///     are unsupported.
-    ///     </item>
-    ///     <item>
+    ///     </description></item>
+    ///     <item><description>
     ///     Selectors other than the default <see cref="Selector.MIN"/> require 
     ///     optional codec support. However several codecs provided by Lucene,
     ///     including the current default codec, support this.
-    ///     </item>
+    ///     </description></item>
     /// </list>
     /// </summary>
     public class SortedSetSortField : SortField
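
A short sketch of sorting by the (default MIN) value of a multi-valued field, per the summary above (field name is illustrative):

    var sort = new Sort(new SortedSetSortField("category", false)); // false = ascending
    TopDocs hits = searcher.Search(query, 10, sort);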

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Spatial/Prefix/PrefixTreeStrategy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Spatial/Prefix/PrefixTreeStrategy.cs b/src/Lucene.Net.Spatial/Prefix/PrefixTreeStrategy.cs
index 61ed934..ac3d79e 100644
--- a/src/Lucene.Net.Spatial/Prefix/PrefixTreeStrategy.cs
+++ b/src/Lucene.Net.Spatial/Prefix/PrefixTreeStrategy.cs
@@ -41,27 +41,27 @@ namespace Lucene.Net.Spatial.Prefix
     /// 
     /// <h4>Characteristics:</h4>
     /// <list type="bullet">
-    /// <item>Can index any shape; however only
+    /// <item><description>Can index any shape; however only
     /// <see cref="RecursivePrefixTreeStrategy">RecursivePrefixTreeStrategy</see>
-    /// can effectively search non-point shapes.</item>
-    /// <item>Can index a variable number of shapes per field value. This strategy
+    /// can effectively search non-point shapes.</description></item>
+    /// <item><description>Can index a variable number of shapes per field value. This strategy
     /// can do it via multiple calls to <see cref="CreateIndexableFields(IShape)"/>
     /// for a document or by giving it some sort of Shape aggregate (e.g. NTS
     /// WKT MultiPoint).  The shape's boundary is approximated to a grid precision.
-    /// </item>
-    /// <item>Can query with any shape.  The shape's boundary is approximated to a grid
-    /// precision.</item>
-    /// <item>Only <see cref="SpatialOperation.Intersects"/>
+    /// </description></item>
+    /// <item><description>Can query with any shape.  The shape's boundary is approximated to a grid
+    /// precision.</description></item>
+    /// <item><description>Only <see cref="SpatialOperation.Intersects"/>
     /// is supported.  If only points are indexed then this is effectively equivalent
-    /// to IsWithin.</item>
-    /// <item>The strategy supports <see cref="MakeDistanceValueSource(IPoint, double)"/>
+    /// to IsWithin.</description></item>
+    /// <item><description>The strategy supports <see cref="MakeDistanceValueSource(IPoint, double)"/>
     /// even for multi-valued data, so long as the indexed data is all points; the
     /// behavior is undefined otherwise.  However, <c>it will likely be removed in
     /// the future</c> in lieu of using another strategy with a more scalable
     /// implementation.  Use of this call is the only
     /// circumstance in which a cache is used.  The cache is simple but as such
     /// it doesn't scale to large numbers of points nor is it real-time-search
-    /// friendly.</item>
+    /// friendly.</description></item>
     /// </list>
     /// 
     /// <h4>Implementation:</h4>

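For orientation, a hedged indexing sketch for a concrete subclass (assuming the Spatial4n context API and the GeohashPrefixTree/RecursivePrefixTreeStrategy ports follow the Java originals; field name and coordinates are illustrative):

    SpatialContext ctx = SpatialContext.GEO;
    var grid = new GeohashPrefixTree(ctx, 11);                   // grid precision
    var strategy = new RecursivePrefixTreeStrategy(grid, "geo");
    foreach (Field f in strategy.CreateIndexableFields(ctx.MakePoint(-80.93, 33.77)))
        doc.Add(f);                                              // doc is a Lucene Document
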
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Spatial/SpatialStrategy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Spatial/SpatialStrategy.cs b/src/Lucene.Net.Spatial/SpatialStrategy.cs
index 348ca7e..bb39500 100644
--- a/src/Lucene.Net.Spatial/SpatialStrategy.cs
+++ b/src/Lucene.Net.Spatial/SpatialStrategy.cs
@@ -32,11 +32,11 @@ namespace Lucene.Net.Spatial
     /// Different implementations will support different features. A strategy should
     /// document these common elements:
     /// <list type="bullet">
-    ///     <item>Can it index more than one shape per field?</item>
-    ///     <item>What types of shapes can be indexed?</item>
-    ///     <item>What types of query shapes can be used?</item>
-    ///     <item>What types of query operations are supported? This might vary per shape.</item>
-    ///     <item>Does it use the <see cref="FieldCache"/>, or some other type of cache?  When?</item>
+    ///     <item><description>Can it index more than one shape per field?</description></item>
+    ///     <item><description>What types of shapes can be indexed?</description></item>
+    ///     <item><description>What types of query shapes can be used?</description></item>
+    ///     <item><description>What types of query operations are supported? This might vary per shape.</description></item>
+    ///     <item><description>Does it use the <see cref="FieldCache"/>, or some other type of cache?  When?</description></item>
     /// </list>
     /// If a strategy only supports certain shapes at index or query time, then in
     /// general it will throw an exception if given an incompatible one.  It will not

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Spatial/Vector/PointVectorStrategy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Spatial/Vector/PointVectorStrategy.cs b/src/Lucene.Net.Spatial/Vector/PointVectorStrategy.cs
index e8a0e6a..28a0050 100644
--- a/src/Lucene.Net.Spatial/Vector/PointVectorStrategy.cs
+++ b/src/Lucene.Net.Spatial/Vector/PointVectorStrategy.cs
@@ -32,11 +32,11 @@ namespace Lucene.Net.Spatial.Vector
     /// 
     /// <h4>Characteristics:</h4>
     /// <list type="bullet">
-    ///     <item>Only indexes points; just one per field value.</item>
-    ///     <item>Can query by a rectangle or circle.</item>
-    ///     <item><see cref="SpatialOperation.Intersects"/> and <see cref="SpatialOperation.IsWithin"/> is supported.</item>
-    ///     <item>Uses the FieldCache for <see cref="SpatialStrategy.MakeDistanceValueSource(IPoint)"/> and for
-    ///     searching with a Circle.</item>
+    ///     <item><description>Only indexes points; just one per field value.</description></item>
+    ///     <item><description>Can query by a rectangle or circle.</description></item>
+    ///     <item><description><see cref="SpatialOperation.Intersects"/> and <see cref="SpatialOperation.IsWithin"/> is supported.</description></item>
+    ///     <item><description>Uses the FieldCache for <see cref="SpatialStrategy.MakeDistanceValueSource(IPoint)"/> and for
+    ///     searching with a Circle.</description></item>
     /// </list>
     /// 
     /// <h4>Implementation:</h4>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Suggest/Spell/LuceneLevenshteinDistance.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Suggest/Spell/LuceneLevenshteinDistance.cs b/src/Lucene.Net.Suggest/Spell/LuceneLevenshteinDistance.cs
index 5f8988b..4010f80 100644
--- a/src/Lucene.Net.Suggest/Spell/LuceneLevenshteinDistance.cs
+++ b/src/Lucene.Net.Suggest/Spell/LuceneLevenshteinDistance.cs
@@ -27,13 +27,13 @@ namespace Lucene.Net.Search.Spell
     ///  
     ///  Notes:
     ///  <list type="bullet">
-    ///    <item> This metric treats full unicode codepoints as characters</item>
-    ///    <item> This metric scales raw edit distances into a floating point score
-    ///         based upon the shortest of the two terms</item>
-    ///    <item> Transpositions of two adjacent codepoints are treated as primitive 
-    ///         edits.</item>
-    ///    <item> Edits are applied in parallel: for example, "ab" and "bca" have 
-    ///         distance 3.</item>
+    ///    <item><description> This metric treats full unicode codepoints as characters</description></item>
+    ///    <item><description> This metric scales raw edit distances into a floating point score
+    ///         based upon the shortest of the two terms</description></item>
+    ///    <item><description> Transpositions of two adjacent codepoints are treated as primitive 
+    ///         edits.</description></item>
+    ///    <item><description> Edits are applied in parallel: for example, "ab" and "bca" have 
+    ///         distance 3.</description></item>
     ///  </list>
     ///  
     ///  NOTE: this class is not particularly efficient. It is only intended

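A tiny sketch of the scaled scoring noted above ("ab" vs "bca" reflects a raw edit distance of 3, scaled against the shorter of the two terms into a float):

    var metric = new LuceneLevenshteinDistance();
    float similarity = metric.GetDistance("ab", "bca"); // higher = more similar
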
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs b/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs
index 8c58e5a..0f85629 100644
--- a/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs
+++ b/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs
@@ -65,21 +65,21 @@ namespace Lucene.Net.Search.Suggest.Analyzing
     /// There are some limitations:
     /// <list type="number">
     /// 
-    ///   <item> A lookup from a query like "net" in English won't
+    ///   <item><description> A lookup from a query like "net" in English won't
     ///        be any different than "net " (ie, user added a
     ///        trailing space) because analyzers don't reflect
     ///        when they've seen a token separator and when they
-    ///        haven't.</item>
+    ///        haven't.</description></item>
     /// 
-    ///   <item> If you're using <see cref="Analysis.Core.StopFilter"/>, and the user will
+    ///   <item><description> If you're using <see cref="Analysis.Core.StopFilter"/>, and the user will
     ///        type "fast apple", but so far all they've typed is
     ///        "fast a", again because the analyzer doesn't convey whether
     ///        it's seen a token separator after the "a",
     ///        <see cref="Analysis.Core.StopFilter"/> will remove that "a" causing
-    ///        far more matches than you'd expect.</item>
+    ///        far more matches than you'd expect.</description></item>
     /// 
-    ///   <item> Lookups with the empty string return no results
-    ///        instead of all results.</item>
+    ///   <item><description> Lookups with the empty string return no results
+    ///        instead of all results.</description></item>
     /// </list>
     /// 
     /// @lucene.experimental

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Suggest/Suggest/DocumentDictionary.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Suggest/Suggest/DocumentDictionary.cs b/src/Lucene.Net.Suggest/Suggest/DocumentDictionary.cs
index 4026e36..af9fabe 100644
--- a/src/Lucene.Net.Suggest/Suggest/DocumentDictionary.cs
+++ b/src/Lucene.Net.Suggest/Suggest/DocumentDictionary.cs
@@ -31,19 +31,19 @@ namespace Lucene.Net.Search.Suggest
     /// </para>
     /// <b>NOTE:</b> 
     ///  <list type="bullet">
-    ///    <item>
+    ///    <item><description>
     ///      The term and (optionally) payload fields have to be
     ///      stored
-    ///    </item>
-    ///    <item>
+    ///    </description></item>
+    ///    <item><description>
     ///      The weight field can be stored or can be a <see cref="NumericDocValues"/>.
     ///      If the weight field is not defined, the value of the weight is <c>0</c>
-    ///    </item>
-    ///    <item>
+    ///    </description></item>
+    ///    <item><description>
     ///      if any of the term or (optionally) payload fields supplied
     ///      do not have a value for a document, then the document is 
     ///      skipped by the dictionary
-    ///    </item>
+    ///    </description></item>
     ///  </list>
     /// </summary>
     public class DocumentDictionary : IDictionary
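
A hedged construction sketch matching the notes above (field names are placeholders; the constructor is assumed to mirror Java's (reader, field, weightField) form):

    // "suggest" must be a stored field; "weight" may be stored or a NumericDocValues.
    IDictionary dict = new DocumentDictionary(reader, "suggest", "weight");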

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Suggest/Suggest/DocumentValueSourceDictionary.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Suggest/Suggest/DocumentValueSourceDictionary.cs b/src/Lucene.Net.Suggest/Suggest/DocumentValueSourceDictionary.cs
index 401e46a..baf0e9f 100644
--- a/src/Lucene.Net.Suggest/Suggest/DocumentValueSourceDictionary.cs
+++ b/src/Lucene.Net.Suggest/Suggest/DocumentValueSourceDictionary.cs
@@ -33,15 +33,15 @@ namespace Lucene.Net.Search.Suggest
     /// </para>
     /// <b>NOTE:</b> 
     ///  <list type="bullet">
-    ///    <item>
+    ///    <item><description>
     ///      The term and (optionally) payload fields have to be
     ///      stored
-    ///    </item>
-    ///    <item>
+    ///    </description></item>
+    ///    <item><description>
     ///      if the term or (optionally) payload fields supplied
     ///      do not have a value for a document, then the document is 
     ///      rejected by the dictionary
-    ///    </item>
+    ///    </description></item>
     ///  </list>
     ///  <para>
     ///  In practice the <see cref="ValueSource"/> will likely be obtained

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Suggest/Suggest/FileDictionary.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Suggest/Suggest/FileDictionary.cs b/src/Lucene.Net.Suggest/Suggest/FileDictionary.cs
index e6f48da..35d6ab0 100644
--- a/src/Lucene.Net.Suggest/Suggest/FileDictionary.cs
+++ b/src/Lucene.Net.Suggest/Suggest/FileDictionary.cs
@@ -30,20 +30,20 @@ namespace Lucene.Net.Search.Suggest
     /// <para>Format allowed: 1 entry per line:</para>
     /// <para>An entry can be: </para>
     /// <list type="number">
-    /// <item>suggestion</item>
-    /// <item>suggestion <see cref="fieldDelimiter"/> weight</item>
-    /// <item>suggestion <see cref="fieldDelimiter"/> weight <see cref="fieldDelimiter"/> payload</item>
+    /// <item><description>suggestion</description></item>
+    /// <item><description>suggestion <see cref="fieldDelimiter"/> weight</description></item>
+    /// <item><description>suggestion <see cref="fieldDelimiter"/> weight <see cref="fieldDelimiter"/> payload</description></item>
     /// </list>
     /// where the default <see cref="fieldDelimiter"/> is <see cref="DEFAULT_FIELD_DELIMITER"/> (a tab)
     /// <para>
     /// <b>NOTE:</b> 
     /// <list type="number">
-    /// <item>In order to have payload enabled, the first entry has to have a payload</item>
-    /// <item>If the weight for an entry is not specified then a value of 1 is used</item>
-    /// <item>A payload cannot be specified without having the weight specified for an entry</item>
-    /// <item>If the payload for an entry is not specified (assuming payload is enabled) 
-    ///  then an empty payload is returned</item>
-    /// <item>An entry cannot have more than two <see cref="fieldDelimiter"/>s</item>
+    /// <item><description>In order to have payload enabled, the first entry has to have a payload</description></item>
+    /// <item><description>If the weight for an entry is not specified then a value of 1 is used</description></item>
+    /// <item><description>A payload cannot be specified without having the weight specified for an entry</description></item>
+    /// <item><description>If the payload for an entry is not specified (assuming payload is enabled) 
+    ///  then an empty payload is returned</description></item>
+    /// <item><description>An entry cannot have more than two <see cref="fieldDelimiter"/>s</description></item>
     /// </list>
     /// </para>
     /// <c>Example:</c><para/>

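A loading sketch to go with the format above (illustrative; the file name is a placeholder and the constructor is assumed to take a Stream where the Java original takes an InputStream):

    // suggestions.txt, one entry per line, tab-delimited, e.g.:
    //   apple ipad<TAB>5.0<TAB>tablet
    //   apple iphone<TAB>10.0
    using (var stream = new FileStream("suggestions.txt", FileMode.Open))
    {
        var dict = new FileDictionary(stream); // default field delimiter is a tab
        // hand dict to a suggester's Build(...) to consume the entries
    }
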

[3/9] lucenenet git commit: SWEEP: Changed <item></item> to <item><description></description></item> in documentation comments

Posted by ni...@apache.org.
SWEEP: Changed <item></item> to <item><description></description></item> in documentation comments


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/7099a846
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/7099a846
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/7099a846

Branch: refs/heads/master
Commit: 7099a8465f2dcd9f76c5ee8c2eff11d0a36774d3
Parents: cfeaf28
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Thu Jun 1 21:32:26 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Thu Jun 1 21:32:26 2017 +0700

----------------------------------------------------------------------
 .../Analysis/Ar/ArabicAnalyzer.cs               |  6 +--
 .../Analysis/Ar/ArabicLetterTokenizer.cs        |  4 +-
 .../Analysis/Ar/ArabicNormalizer.cs             | 10 ++---
 .../Analysis/Ar/ArabicStemmer.cs                |  4 +-
 .../Analysis/Ca/CatalanAnalyzer.cs              |  4 +-
 .../Analysis/Cjk/CJKTokenizer.cs                |  6 +--
 .../Analysis/Cjk/CJKWidthFilter.cs              |  4 +-
 .../Analysis/Ckb/SoraniNormalizer.cs            | 12 +++---
 .../Analysis/Cn/ChineseFilter.cs                | 12 +++---
 .../Analysis/Cn/ChineseTokenizer.cs             |  4 +-
 .../Analysis/CommonGrams/CommonGramsFilter.cs   |  8 ++--
 .../CommonGrams/CommonGramsQueryFilter.cs       | 14 +++----
 .../Compound/CompoundWordTokenFilterBase.cs     |  6 +--
 .../DictionaryCompoundWordTokenFilter.cs        |  4 +-
 .../Compound/Hyphenation/TernaryTree.cs         |  4 +-
 .../HyphenationCompoundWordTokenFilter.cs       |  4 +-
 ...HyphenationCompoundWordTokenFilterFactory.cs | 18 ++++----
 .../Analysis/Core/LetterTokenizer.cs            |  4 +-
 .../Analysis/Core/LowerCaseFilter.cs            |  2 +-
 .../Analysis/Core/LowerCaseTokenizer.cs         |  4 +-
 .../Analysis/Core/SimpleAnalyzer.cs             |  4 +-
 .../Analysis/Core/StopAnalyzer.cs               |  6 +--
 .../Analysis/Core/StopFilter.cs                 |  4 +-
 .../Analysis/Core/StopFilterFactory.cs          | 18 ++++----
 .../Analysis/Core/WhitespaceAnalyzer.cs         |  4 +-
 .../Analysis/Core/WhitespaceTokenizer.cs        |  4 +-
 .../Analysis/Cz/CzechAnalyzer.cs                |  8 ++--
 .../Analysis/De/GermanAnalyzer.cs               | 10 ++---
 .../Analysis/De/GermanNormalizationFilter.cs    |  8 ++--
 .../Analysis/De/GermanStemmer.cs                | 12 +++---
 .../Analysis/El/GreekAnalyzer.cs                |  6 +--
 .../Analysis/El/GreekLowerCaseFilter.cs         |  2 +-
 .../Analysis/En/EnglishPossessiveFilter.cs      |  4 +-
 .../Analysis/Es/SpanishAnalyzer.cs              |  2 +-
 .../Analysis/Fa/PersianNormalizer.cs            |  6 +--
 .../Analysis/Fr/FrenchAnalyzer.cs               | 10 ++---
 .../Analysis/Hi/HindiAnalyzer.cs                |  2 +-
 .../Analysis/Hi/HindiNormalizer.cs              |  4 +-
 .../Analysis/It/ItalianAnalyzer.cs              |  6 +--
 .../Analysis/Lv/LatvianStemmer.cs               | 18 ++++----
 .../Miscellaneous/ASCIIFoldingFilter.cs         | 32 +++++++-------
 .../Miscellaneous/WordDelimiterFilter.cs        | 44 ++++++++++----------
 .../Analysis/NGram/EdgeNGramTokenizer.cs        | 12 +++---
 .../Analysis/NGram/NGramTokenFilter.cs          | 10 ++---
 .../Analysis/NGram/NGramTokenizer.cs            |  6 +--
 .../Analysis/Nl/DutchAnalyzer.cs                | 12 +++---
 .../Analysis/Pattern/PatternTokenizer.cs        |  4 +-
 .../Analysis/Pattern/PatternTokenizerFactory.cs |  4 +-
 .../Analysis/Pt/PortugueseAnalyzer.cs           |  2 +-
 .../Analysis/Pt/RSLPStemmerBase.cs              | 28 ++++++-------
 .../Analysis/Reverse/ReverseStringFilter.cs     |  2 +-
 .../Analysis/Ru/RussianAnalyzer.cs              |  4 +-
 .../Analysis/Snowball/SnowballAnalyzer.cs       |  2 +-
 .../Analysis/Snowball/SnowballFilter.cs         |  4 +-
 .../Analysis/Standard/ClassicAnalyzer.cs        | 12 +++---
 .../Analysis/Standard/ClassicTokenizer.cs       | 10 ++---
 .../Analysis/Standard/StandardAnalyzer.cs       | 14 +++----
 .../Analysis/Standard/StandardTokenizer.cs      |  8 ++--
 .../Analysis/Standard/StandardTokenizerImpl.cs  | 16 +++----
 .../Analysis/Standard/UAX29URLEmailTokenizer.cs | 20 ++++-----
 .../Standard/UAX29URLEmailTokenizerImpl.cs      | 20 ++++-----
 .../Analysis/Synonym/SolrSynonymParser.cs       | 14 +++----
 .../Analysis/Synonym/SynonymFilterFactory.cs    |  6 +--
 .../Analysis/Th/ThaiAnalyzer.cs                 |  2 +-
 .../Analysis/Util/AbstractAnalysisFactory.cs    |  8 ++--
 .../Analysis/Util/CharArrayMap.cs               |  4 +-
 .../Analysis/Util/CharTokenizer.cs              |  4 +-
 .../Analysis/Util/WordlistLoader.cs             | 12 +++---
 .../Collation/CollationAttributeFactory.cs      | 12 +++---
 .../Collation/CollationKeyAnalyzer.cs           | 16 +++----
 .../Collation/CollationKeyFilter.cs             | 12 +++---
 .../Collation/CollationKeyFilterFactory.cs      | 20 ++++-----
 .../Taxonomy/ParallelTaxonomyArrays.cs          | 12 +++---
 src/Lucene.Net.Facet/Taxonomy/TaxonomyReader.cs |  8 ++--
 .../Highlight/TokenSources.cs                   | 24 +++++------
 .../PostingsHighlight/PassageScorer.cs          |  6 +--
 .../PostingsHighlight/PostingsHighlighter.cs    |  8 ++--
 .../Index/Sorter/BlockJoinComparatorSource.cs   |  6 +--
 src/Lucene.Net.Queries/CustomScoreQuery.cs      |  4 +-
 src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs      | 40 +++++++++---------
 .../Classic/QueryParser.cs                      | 16 +++----
 .../Flexible/Core/QueryParserHelper.cs          | 12 +++---
 .../Flexible/Standard/StandardQueryParser.cs    | 16 +++----
 .../Simple/SimpleQueryParser.cs                 | 32 +++++++-------
 .../Surround/Query/SpanNearClauseFactory.cs     |  8 ++--
 .../Queries/SortedSetSortField.cs               |  8 ++--
 .../Prefix/PrefixTreeStrategy.cs                | 20 ++++-----
 src/Lucene.Net.Spatial/SpatialStrategy.cs       | 10 ++---
 .../Vector/PointVectorStrategy.cs               | 10 ++---
 .../Spell/LuceneLevenshteinDistance.cs          | 14 +++----
 .../Suggest/Analyzing/AnalyzingSuggester.cs     | 12 +++---
 .../Suggest/DocumentDictionary.cs               | 12 +++---
 .../Suggest/DocumentValueSourceDictionary.cs    |  8 ++--
 .../Suggest/FileDictionary.cs                   | 18 ++++----
 .../Suggest/Fst/FSTCompletionBuilder.cs         | 26 ++++++------
 src/Lucene.Net/Analysis/Analyzer.cs             | 32 +++++++-------
 src/Lucene.Net/Analysis/Token.cs                | 28 ++++++-------
 .../IPositionIncrementAttribute.cs              |  8 ++--
 src/Lucene.Net/Analysis/TokenStream.cs          | 28 ++++++-------
 src/Lucene.Net/Codecs/Codec.cs                  |  8 ++--
 src/Lucene.Net/Codecs/DocValuesFormat.cs        | 10 ++---
 src/Lucene.Net/Codecs/PostingsFormat.cs         | 10 ++---
 src/Lucene.Net/Document/Field.cs                |  4 +-
 src/Lucene.Net/Index/AutomatonTermsEnum.cs      |  6 +--
 src/Lucene.Net/Index/DocTermOrds.cs             | 22 +++++-----
 .../Index/DocumentsWriterDeleteQueue.cs         | 12 +++---
 .../Index/FlushByRamOrCountsPolicy.cs           | 12 +++---
 src/Lucene.Net/Index/FlushPolicy.cs             |  8 ++--
 src/Lucene.Net/Index/IndexReader.cs             |  8 ++--
 src/Lucene.Net/Store/CompoundFileDirectory.cs   | 34 +++++++--------
 src/Lucene.Net/Store/Directory.cs               | 12 +++---
 src/Lucene.Net/Store/FSDirectory.cs             | 12 +++---
 src/Lucene.Net/Support/C5.Support.cs            | 30 ++++++-------
 .../Support/Codecs/DefaultCodecFactory.cs       | 16 +++----
 .../Codecs/DefaultDocValuesFormatFactory.cs     | 16 +++----
 .../Codecs/DefaultPostingsFormatFactory.cs      | 16 +++----
 src/Lucene.Net/Support/HashMap.cs               | 12 +++---
 src/Lucene.Net/Support/IO/Buffer.cs             | 24 +++++------
 src/Lucene.Net/Support/IO/ByteBuffer.cs         | 10 ++---
 src/Lucene.Net/Support/IO/FileSupport.cs        |  4 +-
 src/Lucene.Net/Support/IO/LongBuffer.cs         |  8 ++--
 .../Support/IO/LongToByteBufferAdapter.cs       |  8 ++--
 src/Lucene.Net/Support/LinkedHashMap.cs         | 12 +++---
 src/Lucene.Net/Support/StringExtensions.cs      |  4 +-
 src/Lucene.Net/Util/ArrayUtil.cs                | 10 ++---
 125 files changed, 701 insertions(+), 701 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs
index c9a3495..095d92f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs
@@ -37,9 +37,9 @@ namespace Lucene.Net.Analysis.Ar
     /// <para/>
     /// The analysis package contains three primary components:
     /// <list type="bullet">
-    ///     <item><see cref="ArabicNormalizationFilter"/>: Arabic orthographic normalization.</item>
-    ///     <item><see cref="ArabicStemFilter"/>: Arabic light stemming</item>
-    ///     <item>Arabic stop words file: a set of default Arabic stop words.</item>
+    ///     <item><description><see cref="ArabicNormalizationFilter"/>: Arabic orthographic normalization.</description></item>
+    ///     <item><description><see cref="ArabicStemFilter"/>: Arabic light stemming</description></item>
+    ///     <item><description>Arabic stop words file: a set of default Arabic stop words.</description></item>
     /// </list>
     /// </summary>
     public sealed class ArabicAnalyzer : StopwordAnalyzerBase
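
A generic consumption sketch for the analyzer described above (illustrative; the Reset/IncrementToken/End protocol is the same for any Analyzer):

    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Ar;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;

    var analyzer = new ArabicAnalyzer(LuceneVersion.LUCENE_48);
    using (TokenStream ts = analyzer.GetTokenStream("body", "...Arabic text..."))
    {
        var term = ts.AddAttribute<ICharTermAttribute>();
        ts.Reset();
        while (ts.IncrementToken())
            Console.WriteLine(term.ToString()); // normalized, stemmed, stop-filtered
        ts.End();
    }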

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
index ae875e4..84ccc30 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicLetterTokenizer.cs
@@ -34,9 +34,9 @@ namespace Lucene.Net.Analysis.Ar
     /// You must specify the required <see cref="LuceneVersion"/> compatibility when creating
     /// <see cref="ArabicLetterTokenizer"/>:
     /// <list type="bullet">
-    /// <item>As of 3.1, <see cref="Util.CharTokenizer"/> uses an int based API to normalize and
+    /// <item><description>As of 3.1, <see cref="Util.CharTokenizer"/> uses an int based API to normalize and
     /// detect token characters. See <see cref="IsTokenChar(int)"/> and
-    /// <see cref="Util.CharTokenizer.Normalize(int)"/> for details.</item>
+    /// <see cref="Util.CharTokenizer.Normalize(int)"/> for details.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicNormalizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicNormalizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicNormalizer.cs
index 9733198..7556a43 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicNormalizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicNormalizer.cs
@@ -26,11 +26,11 @@ namespace Lucene.Net.Analysis.Ar
     /// <para/>
     /// Normalization is defined as:
     /// <list type="bullet">
-    ///     <item> Normalization of hamza with alef seat to a bare alef.</item>
-    ///     <item> Normalization of teh marbuta to heh</item>
-    ///     <item> Normalization of dotless yeh (alef maksura) to yeh.</item>
-    ///     <item> Removal of Arabic diacritics (the harakat)</item>
-    ///     <item> Removal of tatweel (stretching character).</item>
+    ///     <item><description> Normalization of hamza with alef seat to a bare alef.</description></item>
+    ///     <item><description> Normalization of teh marbuta to heh</description></item>
+    ///     <item><description> Normalization of dotless yeh (alef maksura) to yeh.</description></item>
+    ///     <item><description> Removal of Arabic diacritics (the harakat)</description></item>
+    ///     <item><description> Removal of tatweel (stretching character).</description></item>
     /// </list>
     /// </summary>
     public class ArabicNormalizer

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicStemmer.cs
index 444b5d3..a7de3af 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicStemmer.cs
@@ -26,8 +26,8 @@ namespace Lucene.Net.Analysis.Ar
     /// <para/>
     /// Stemming is defined as:
     /// <list type="bullet">
-    ///     <item> Removal of attached definite article, conjunction, and prepositions.</item>
-    ///     <item> Stemming of common suffixes.</item>
+    ///     <item><description> Removal of attached definite article, conjunction, and prepositions.</description></item>
+    ///     <item><description> Stemming of common suffixes.</description></item>
     /// </list>
     /// </summary>
     public class ArabicStemmer

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
index ab39999..ba84523 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
@@ -33,8 +33,8 @@ namespace Lucene.Net.Analysis.Ca
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating CatalanAnalyzer:
     /// <list>
-    ///   <item> As of 3.6, <see cref="ElisionFilter"/> with a set of Catalan 
-    ///        contractions is used by default.</item>
+    ///   <item><description> As of 3.6, <see cref="ElisionFilter"/> with a set of Catalan 
+    ///        contractions is used by default.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKTokenizer.cs
index 901320b..babbee1 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKTokenizer.cs
@@ -33,9 +33,9 @@ namespace Lucene.Net.Analysis.Cjk
     /// </para>
     /// Additionally, the following is applied to Latin text (such as English):
     /// <list type="bullet">
-    ///     <item>Text is converted to lowercase.</item>
-    ///     <item>Numeric digits, '+', '#', and '_' are tokenized as letters.</item>
-    ///     <item>Full-width forms are converted to half-width forms.</item>
+    ///     <item><description>Text is converted to lowercase.</description></item>
+    ///     <item><description>Numeric digits, '+', '#', and '_' are tokenized as letters.</description></item>
+    ///     <item><description>Full-width forms are converted to half-width forms.</description></item>
     /// </list>
     /// For more info on Asian language (Chinese, Japanese, and Korean) text segmentation:
     /// please search  <a

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKWidthFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKWidthFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKWidthFilter.cs
index 64018e2..b109aac 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKWidthFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKWidthFilter.cs
@@ -24,8 +24,8 @@ namespace Lucene.Net.Analysis.Cjk
     /// <summary>
     /// A <see cref="TokenFilter"/> that normalizes CJK width differences:
     /// <list type="bullet">
-    ///   <item>Folds fullwidth ASCII variants into the equivalent basic latin</item>
-    ///   <item>Folds halfwidth Katakana variants into the equivalent kana</item>
+    ///   <item><description>Folds fullwidth ASCII variants into the equivalent basic latin</description></item>
+    ///   <item><description>Folds halfwidth Katakana variants into the equivalent kana</description></item>
     /// </list>
     /// <para>
     /// NOTE: this filter can be viewed as a (practical) subset of NFKC/NFKD

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniNormalizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniNormalizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniNormalizer.cs
index 19135d9..78e6750 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniNormalizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniNormalizer.cs
@@ -26,12 +26,12 @@ namespace Lucene.Net.Analysis.Ckb
     /// <para/>
     /// Normalization consists of:
     /// <list type="bullet">
-    ///   <item>Alternate forms of 'y' (0064, 0649) are converted to 06CC (FARSI YEH)</item>
-    ///   <item>Alternate form of 'k' (0643) is converted to 06A9 (KEHEH)</item>
-    ///   <item>Alternate forms of vowel 'e' (0647+200C, word-final 0647, 0629) are converted to 06D5 (AE)</item>
-    ///   <item>Alternate (joining) form of 'h' (06BE) is converted to 0647</item>
-    ///   <item>Alternate forms of 'rr' (0692, word-initial 0631) are converted to 0695 (REH WITH SMALL V BELOW)</item>
-    ///   <item>Harakat, tatweel, and formatting characters such as directional controls are removed.</item>
+    ///   <item><description>Alternate forms of 'y' (064A, 0649) are converted to 06CC (FARSI YEH)</description></item>
+    ///   <item><description>Alternate form of 'k' (0643) is converted to 06A9 (KEHEH)</description></item>
+    ///   <item><description>Alternate forms of vowel 'e' (0647+200C, word-final 0647, 0629) are converted to 06D5 (AE)</description></item>
+    ///   <item><description>Alternate (joining) form of 'h' (06BE) is converted to 0647</description></item>
+    ///   <item><description>Alternate forms of 'rr' (0692, word-initial 0631) are converted to 0695 (REH WITH SMALL V BELOW)</description></item>
+    ///   <item><description>Harakat, tatweel, and formatting characters such as directional controls are removed.</description></item>
     /// </list>
     /// </summary>
     public class SoraniNormalizer

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseFilter.cs
index 61e6576..47ff4a5 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseFilter.cs
@@ -27,15 +27,15 @@ namespace Lucene.Net.Analysis.Cn
     /// <summary>
     /// A <see cref="TokenFilter"/> with a stop word table.  
     /// <list type="bullet">
-    ///     <item>Numeric tokens are removed.</item>
-    ///     <item>English tokens must be larger than 1 character.</item>
-    ///     <item>One Chinese character as one Chinese word.</item>
+    ///     <item><description>Numeric tokens are removed.</description></item>
+    ///     <item><description>English tokens must be larger than 1 character.</description></item>
+    ///     <item><description>One Chinese character as one Chinese word.</description></item>
     /// </list>
     /// TO DO:
     /// <list type="number">
-    ///     <item>Add Chinese stop words, such as \ue400</item>
-    ///     <item>Dictionary based Chinese word extraction</item>
-    ///     <item>Intelligent Chinese word extraction</item>
+    ///     <item><description>Add Chinese stop words, such as \ue400</description></item>
+    ///     <item><description>Dictionary based Chinese word extraction</description></item>
+    ///     <item><description>Intelligent Chinese word extraction</description></item>
     /// </list>
     /// </summary>
     /// @deprecated (3.1) Use <see cref="Core.StopFilter"/> instead, which has the same functionality.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseTokenizer.cs
index cd98aca..9b127df 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseTokenizer.cs
@@ -34,8 +34,8 @@ namespace Lucene.Net.Analysis.Cn
     /// For example, if the Chinese text
     /// "C1C2C3C4" is to be indexed:
     /// <list type="bullet">
-    ///     <item>The tokens returned from ChineseTokenizer are C1, C2, C3, C4.</item>
-    ///     <item>The tokens returned from the CJKTokenizer are C1C2, C2C3, C3C4.</item>
+    ///     <item><description>The tokens returned from ChineseTokenizer are C1, C2, C3, C4.</description></item>
+    ///     <item><description>The tokens returned from the CJKTokenizer are C1C2, C2C3, C3C4.</description></item>
     /// </list>
     /// </para>
     /// <para>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsFilter.cs
index fcd9b7a..b4bd0fd 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsFilter.cs
@@ -34,10 +34,10 @@ namespace Lucene.Net.Analysis.CommonGrams
     /// use of <see cref="PositionIncrementAttribute.PositionIncrement"/>. Bigrams have a type
     /// of <see cref="GRAM_TYPE"/> Example:
     /// <list type="bullet">
-    ///     <item>input:"the quick brown fox"</item>
-    ///     <item>output:|"the","the-quick"|"brown"|"fox"|</item>
-    ///     <item>"the-quick" has a position increment of 0 so it is in the same position
-    /// as "the" "the-quick" has a term.type() of "gram"</item>
+    ///     <item><description>input:"the quick brown fox"</description></item>
+    ///     <item><description>output:|"the","the-quick"|"brown"|"fox"|</description></item>
+    ///     <item><description>"the-quick" has a position increment of 0 so it is in the same position
+    /// as "the" "the-quick" has a term.type() of "gram"</description></item>
     /// </list>
     /// </summary>
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsQueryFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsQueryFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsQueryFilter.cs
index 07e7b53..2b59887 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsQueryFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/CommonGrams/CommonGramsQueryFilter.cs
@@ -25,11 +25,11 @@ namespace Lucene.Net.Analysis.CommonGrams
     /// <para/>
     /// Example:
     /// <list type="bullet">
-    ///     <item>query input to CommonGramsFilter: "the rain in spain falls mainly"</item>
-    ///     <item>output of CommomGramsFilter/input to CommonGramsQueryFilter:
-    ///     |"the, "the-rain"|"rain" "rain-in"|"in, "in-spain"|"spain"|"falls"|"mainly"</item>
-    ///     <item>output of CommonGramsQueryFilter:"the-rain", "rain-in" ,"in-spain",
-    ///     "falls", "mainly"</item>
+    ///     <item><description>query input to CommonGramsFilter: "the rain in spain falls mainly"</description></item>
+    ///     <item><description>output of CommonGramsFilter/input to CommonGramsQueryFilter:
+    ///     |"the", "the-rain"|"rain", "rain-in"|"in", "in-spain"|"spain"|"falls"|"mainly"</description></item>
+    ///     <item><description>output of CommonGramsQueryFilter:"the-rain", "rain-in" ,"in-spain",
+    ///     "falls", "mainly"</description></item>
     /// </list>
     /// </summary>
     /// <remarks>
@@ -84,8 +84,8 @@ namespace Lucene.Net.Analysis.CommonGrams
         /// Output bigrams whenever possible to optimize queries. Only output unigrams
         /// when they are not a member of a bigram. Example:
         /// <list type="bullet">
-        ///     <item>input: "the rain in spain falls mainly"</item>
-        ///     <item>output:"the-rain", "rain-in" ,"in-spain", "falls", "mainly"</item>
+        ///     <item><description>input: "the rain in spain falls mainly"</description></item>
+        ///     <item><description>output:"the-rain", "rain-in" ,"in-spain", "falls", "mainly"</description></item>
         /// </list>
         /// </summary>
         public override bool IncrementToken()

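A sketch of the query-side pairing described above, assuming the same Lucene.Net 4.8 names; CommonGramsQueryFilter wraps a CommonGramsFilter directly:

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.CommonGrams;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Analysis.Util;
    using Lucene.Net.Util;

    class CommonGramsQuerySketch
    {
        static void Main()
        {
            const LuceneVersion V = LuceneVersion.LUCENE_48;
            var common = new CharArraySet(V, new[] { "the", "in" }, true);
            var source = new WhitespaceTokenizer(V, new StringReader("the rain in spain falls mainly"));
            TokenStream ts = new CommonGramsQueryFilter(new CommonGramsFilter(V, source, common));
            var term = ts.AddAttribute<ICharTermAttribute>();
            ts.Reset();
            // Per the doc example: bigrams for the common-word pairs, plus only
            // those unigrams not covered by a bigram ("falls", "mainly").
            while (ts.IncrementToken())
                Console.WriteLine(term.ToString());
            ts.End();
            ts.Dispose();
        }
    }

Keeping only the bigrams on the query side is what makes very common words cheap to search while still matching the index-side output of CommonGramsFilter.
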
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Compound/CompoundWordTokenFilterBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Compound/CompoundWordTokenFilterBase.cs b/src/Lucene.Net.Analysis.Common/Analysis/Compound/CompoundWordTokenFilterBase.cs
index f479951..5e176af 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Compound/CompoundWordTokenFilterBase.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Compound/CompoundWordTokenFilterBase.cs
@@ -30,10 +30,10 @@ namespace Lucene.Net.Analysis.Compound
     /// You must specify the required <see cref="LuceneVersion"/> compatibility when creating
     /// <see cref="CompoundWordTokenFilterBase"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, CompoundWordTokenFilterBase correctly handles Unicode 4.0
+    ///     <item><description>As of 3.1, CompoundWordTokenFilterBase correctly handles Unicode 4.0
     ///     supplementary characters in strings and char arrays provided as compound word
-    ///     dictionaries.</item>
-    ///     <item>As of 4.4, <see cref="CompoundWordTokenFilterBase"/> doesn't update offsets.</item>
+    ///     dictionaries.</description></item>
+    ///     <item><description>As of 4.4, <see cref="CompoundWordTokenFilterBase"/> doesn't update offsets.</description></item>
     /// </list>
     /// </summary>
     public abstract class CompoundWordTokenFilterBase : TokenFilter

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Compound/DictionaryCompoundWordTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Compound/DictionaryCompoundWordTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Compound/DictionaryCompoundWordTokenFilter.cs
index 12ce070..063c731 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Compound/DictionaryCompoundWordTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Compound/DictionaryCompoundWordTokenFilter.cs
@@ -31,9 +31,9 @@ namespace Lucene.Net.Analysis.Compound
     /// You must specify the required <see cref="LuceneVersion"/> compatibility when creating
     /// <see cref="CompoundWordTokenFilterBase"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, CompoundWordTokenFilterBase correctly handles Unicode 4.0
+    ///     <item><description>As of 3.1, CompoundWordTokenFilterBase correctly handles Unicode 4.0
     ///     supplementary characters in strings and char arrays provided as compound word
-    ///     dictionaries.</item>
+    ///     dictionaries.</description></item>
     /// </list>
     /// </para>
     /// </summary>

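A usage sketch, assuming the (LuceneVersion, TokenStream, CharArraySet) constructor from the port; the toy dictionary and the input compound are invented for illustration:

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Compound;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Analysis.Util;
    using Lucene.Net.Util;

    class CompoundSketch
    {
        static void Main()
        {
            const LuceneVersion V = LuceneVersion.LUCENE_48;
            // Toy dictionary; real use would load a full word list.
            var dict = new CharArraySet(V, new[] { "soft", "ball", "team" }, true);
            TokenStream ts = new WhitespaceTokenizer(V, new StringReader("softballteam"));
            ts = new DictionaryCompoundWordTokenFilter(V, ts, dict);
            var term = ts.AddAttribute<ICharTermAttribute>();
            ts.Reset();
            // Emits the original compound plus each dictionary subword found in it.
            while (ts.IncrementToken())
                Console.WriteLine(term.ToString());
            ts.End();
            ts.Dispose();
        }
    }
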
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/TernaryTree.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/TernaryTree.cs b/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/TernaryTree.cs
index 82feaec..d3758df 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/TernaryTree.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Compound/Hyphenation/TernaryTree.cs
@@ -98,8 +98,8 @@ namespace Lucene.Net.Analysis.Compound.Hyphenation
         /// reserved:
         /// </para>
         /// <list type="bullet">
-        ///     <item>0x0000 as string terminator</item>
-        ///     <item>0xFFFF to indicate that the branch starting at this node is compressed</item>
+        ///     <item><description>0x0000 as string terminator</description></item>
+        ///     <item><description>0xFFFF to indicate that the branch starting at this node is compressed</description></item>
         /// </list>
         /// <para>
         /// This shouldn't be a problem if we give the usual semantics to strings since

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs
index 533b76e..0e263ed 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilter.cs
@@ -34,9 +34,9 @@ namespace Lucene.Net.Analysis.Compound
     /// You must specify the required <see cref="LuceneVersion"/> compatibility when creating
     /// <see cref="CompoundWordTokenFilterBase"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, CompoundWordTokenFilterBase correctly handles Unicode 4.0
+    ///     <item><description>As of 3.1, CompoundWordTokenFilterBase correctly handles Unicode 4.0
     ///     supplementary characters in strings and char arrays provided as compound word
-    ///     dictionaries.</item>
+    ///     dictionaries.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilterFactory.cs
index c2f69c4..8c53368 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Compound/HyphenationCompoundWordTokenFilterFactory.cs
@@ -29,15 +29,15 @@ namespace Lucene.Net.Analysis.Compound
     /// <para/>
     /// This factory accepts the following parameters:
     /// <list type="bullet">
-    ///     <item><code>hyphenator</code> (mandatory): path to the FOP xml hyphenation pattern. 
-    ///     See <a href="http://offo.sourceforge.net/hyphenation/">http://offo.sourceforge.net/hyphenation/</a>.</item>
-    ///     <item><code>encoding</code> (optional): encoding of the xml hyphenation file. defaults to UTF-8.</item>
-    ///     <item><code>dictionary</code> (optional): dictionary of words. defaults to no dictionary.</item>
-    ///     <item><code>minWordSize</code> (optional): minimal word length that gets decomposed. defaults to 5.</item>
-    ///     <item><code>minSubwordSize</code> (optional): minimum length of subwords. defaults to 2.</item>
-    ///     <item><code>maxSubwordSize</code> (optional): maximum length of subwords. defaults to 15.</item>
-    ///     <item><code>onlyLongestMatch</code> (optional): if true, adds only the longest matching subword 
-    ///     to the stream. defaults to false.</item>
+    ///     <item><description><code>hyphenator</code> (mandatory): path to the FOP xml hyphenation pattern. 
+    ///     See <a href="http://offo.sourceforge.net/hyphenation/">http://offo.sourceforge.net/hyphenation/</a>.</description></item>
+    ///     <item><description><code>encoding</code> (optional): encoding of the xml hyphenation file. defaults to UTF-8.</description></item>
+    ///     <item><description><code>dictionary</code> (optional): dictionary of words. defaults to no dictionary.</description></item>
+    ///     <item><description><code>minWordSize</code> (optional): minimal word length that gets decomposed. defaults to 5.</description></item>
+    ///     <item><description><code>minSubwordSize</code> (optional): minimum length of subwords. defaults to 2.</description></item>
+    ///     <item><description><code>maxSubwordSize</code> (optional): maximum length of subwords. defaults to 15.</description></item>
+    ///     <item><description><code>onlyLongestMatch</code> (optional): if true, adds only the longest matching subword 
+    ///     to the stream. defaults to false.</description></item>
     /// </list>
     /// <para>
     /// <code>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Core/LetterTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/LetterTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/LetterTokenizer.cs
index 1be2e65..4b45693 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/LetterTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/LetterTokenizer.cs
@@ -34,9 +34,9 @@ namespace Lucene.Net.Analysis.Core
     /// You must specify the required <see cref="LuceneVersion"/> compatibility when creating
     /// <see cref="LetterTokenizer"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, <see cref="CharTokenizer"/> uses an <see cref="int"/> based API to normalize and
+    ///     <item><description>As of 3.1, <see cref="CharTokenizer"/> uses an <see cref="int"/> based API to normalize and
     ///     detect token characters. See <see cref="CharTokenizer.IsTokenChar(int)"/> and
-    ///     <see cref="CharTokenizer.Normalize(int)"/> for details.</item>
+    ///     <see cref="CharTokenizer.Normalize(int)"/> for details.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseFilter.cs
index 36bde21..5f9ee42 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseFilter.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Analysis.Core
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating LowerCaseFilter:
     /// <list type="bullet">
-    ///     <item> As of 3.1, supplementary characters are properly lowercased.</item>
+    ///     <item><description> As of 3.1, supplementary characters are properly lowercased.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseTokenizer.cs
index 6db79e8..a3408b2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/LowerCaseTokenizer.cs
@@ -35,9 +35,9 @@ namespace Lucene.Net.Analysis.Core
     /// You must specify the required <see cref="LuceneVersion"/> compatibility when creating
     /// <see cref="LowerCaseTokenizer"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, <see cref="Util.CharTokenizer"/> uses an int based API to normalize and
+    ///     <item><description>As of 3.1, <see cref="Util.CharTokenizer"/> uses an int based API to normalize and
     ///     detect token characters. See <see cref="Util.CharTokenizer.IsTokenChar(int)"/> and
-    ///     <see cref="Util.CharTokenizer.Normalize(int)"/> for details.</item>
+    ///     <see cref="Util.CharTokenizer.Normalize(int)"/> for details.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Core/SimpleAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/SimpleAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/SimpleAnalyzer.cs
index 80586d0..0d49f35 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/SimpleAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/SimpleAnalyzer.cs
@@ -27,9 +27,9 @@ namespace Lucene.Net.Analysis.Core
     /// You must specify the required <see cref="LuceneVersion"/> compatibility
     /// when creating <see cref="Util.CharTokenizer"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, <see cref="LowerCaseTokenizer"/> uses an int based API to normalize and
+    ///     <item><description>As of 3.1, <see cref="LowerCaseTokenizer"/> uses an int based API to normalize and
     ///     detect token codepoints. See <see cref="Util.CharTokenizer.IsTokenChar(int)"/> and
-    ///     <see cref="Util.CharTokenizer.Normalize(int)"/> for details.</item>
+    ///     <see cref="Util.CharTokenizer.Normalize(int)"/> for details.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs
index e91072e..0a4d34c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs
@@ -29,9 +29,9 @@ namespace Lucene.Net.Analysis.Core
     /// You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="StopAnalyzer"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.1, StopFilter correctly handles Unicode 4.0
-    ///         supplementary characters in stopwords</item>
-    ///     <item> As of 2.9, position increments are preserved</item>
+    ///     <item><description> As of 3.1, StopFilter correctly handles Unicode 4.0
+    ///         supplementary characters in stopwords</description></item>
+    ///     <item><description> As of 2.9, position increments are preserved</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilter.cs
index 2515426..e8ae3b7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilter.cs
@@ -28,9 +28,9 @@ namespace Lucene.Net.Analysis.Core
     /// You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="StopFilter"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, StopFilter correctly handles Unicode 4.0
+    ///     <item><description>As of 3.1, StopFilter correctly handles Unicode 4.0
     ///         supplementary characters in stopwords and position
-    ///         increments are preserved</item>
+    ///         increments are preserved</description></item>
     /// </list>
     /// </para>
     /// </summary>

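A sketch of the preserved position increments mentioned above, assuming LuceneVersion.LUCENE_48 and the StopAnalyzer.ENGLISH_STOP_WORDS_SET default that this patch also references in StopFilterFactory:

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;

    class StopFilterSketch
    {
        static void Main()
        {
            const LuceneVersion V = LuceneVersion.LUCENE_48;
            TokenStream ts = new LowerCaseTokenizer(V, new StringReader("The Quick Brown Fox"));
            ts = new StopFilter(V, ts, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
            var term = ts.AddAttribute<ICharTermAttribute>();
            var posInc = ts.AddAttribute<IPositionIncrementAttribute>();
            ts.Reset();
            // "the" is removed, but the hole it leaves survives as
            // PositionIncrement == 2 on "quick".
            while (ts.IncrementToken())
                Console.WriteLine($"{term} (posInc={posInc.PositionIncrement})");
            ts.End();
            ts.Dispose();
        }
    }
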
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilterFactory.cs
index 0b5feb8..9ff7e7b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopFilterFactory.cs
@@ -39,30 +39,30 @@ namespace Lucene.Net.Analysis.Core
     /// All attributes are optional:
     /// </para>
     /// <list type="bullet">
-    ///     <item><c>ignoreCase</c> defaults to <c>false</c></item>
-    ///     <item><c>words</c> should be the name of a stopwords file to parse, if not 
+    ///     <item><description><c>ignoreCase</c> defaults to <c>false</c></description></item>
+    ///     <item><description><c>words</c> should be the name of a stopwords file to parse, if not 
     ///      specified the factory will use <see cref="StopAnalyzer.ENGLISH_STOP_WORDS_SET"/>
-    ///     </item>
-    ///     <item><c>format</c> defines how the <c>words</c> file will be parsed, 
+    ///     </description></item>
+    ///     <item><description><c>format</c> defines how the <c>words</c> file will be parsed, 
     ///      and defaults to <c>wordset</c>.  If <c>words</c> is not specified, 
     ///      then <c>format</c> must not be specified.
-    ///     </item>
+    ///     </description></item>
     /// </list>
     /// <para>
     /// The valid values for the <c>format</c> option are:
     /// </para>
     /// <list type="bullet">
-    ///  <item><c>wordset</c> - This is the default format, which supports one word per 
+    ///  <item><description><c>wordset</c> - This is the default format, which supports one word per 
     ///      line (including any intra-word whitespace) and allows whole line comments 
     ///      beginning with the "#" character.  Blank lines are ignored.  See 
     ///      <see cref="WordlistLoader.GetLines"/> for details.
-    ///  </item>
-    ///  <item><c>snowball</c> - This format allows for multiple words specified on each 
+    ///  </description></item>
+    ///  <item><description><c>snowball</c> - This format allows for multiple words specified on each 
     ///      line, and trailing comments may be specified using the vertical line ("&#124;"). 
     ///      Blank lines are ignored.  See 
     ///      <see cref="WordlistLoader.GetSnowballWordSet(System.IO.TextReader, Net.Util.LuceneVersion)"/> 
     ///      for details.
-    ///  </item>
+    ///  </description></item>
     /// </list>
     /// </summary>
     public class StopFilterFactory : TokenFilterFactory, IResourceLoaderAware

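To make the wordset/snowball distinction above concrete, here are two hypothetical stopword files (file names invented) that declare the same three words:

    stopwords-wordset.txt, parsed with format="wordset":

        # English articles, one entry per line
        the
        a
        an

    stopwords-snowball.txt, parsed with format="snowball":

        the a an | several words per line, trailing comment after the bar
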
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceAnalyzer.cs
index 6becd82..09e8028 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceAnalyzer.cs
@@ -28,9 +28,9 @@ namespace Lucene.Net.Analysis.Core
     /// You must specify the required <see cref="LuceneVersion"/> compatibility
     /// when creating <see cref="CharTokenizer"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, <see cref="WhitespaceTokenizer"/> uses an int based API to normalize and
+    ///     <item><description>As of 3.1, <see cref="WhitespaceTokenizer"/> uses an int based API to normalize and
     ///     detect token codepoints. See <see cref="Util.CharTokenizer.IsTokenChar(int)"/> and
-    ///     <see cref="Util.CharTokenizer.Normalize(int)"/> for details.</item>
+    ///     <see cref="Util.CharTokenizer.Normalize(int)"/> for details.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizer.cs
index 98db5e7..cee9568 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizer.cs
@@ -29,9 +29,9 @@ namespace Lucene.Net.Analysis.Core
     /// You must specify the required <see cref="LuceneVersion"/> compatibility when creating
     /// <see cref="WhitespaceTokenizer"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, <see cref="CharTokenizer"/> uses an int based API to normalize and
+    ///     <item><description>As of 3.1, <see cref="CharTokenizer"/> uses an int based API to normalize and
     ///     detect token characters. See <see cref="CharTokenizer.IsTokenChar(int)"/> and
-    ///     <see cref="CharTokenizer.Normalize(int)"/> for details.</item>
+    ///     <see cref="CharTokenizer.Normalize(int)"/> for details.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs
index 5efdf4b..cffbe49 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs
@@ -38,10 +38,10 @@ namespace Lucene.Net.Analysis.Cz
     /// You must specify the required <see cref="LuceneVersion"/> compatibility when creating
     /// <see cref="CzechAnalyzer"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, words are stemmed with <see cref="CzechStemFilter"/></item>
-    ///     <item>As of 2.9, StopFilter preserves position increments</item>
-    ///     <item>As of 2.4, Tokens incorrectly identified as acronyms are corrected (see
-    ///     <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>)</item>
+    ///     <item><description>As of 3.1, words are stemmed with <see cref="CzechStemFilter"/></description></item>
+    ///     <item><description>As of 2.9, StopFilter preserves position increments</description></item>
+    ///     <item><description>As of 2.4, Tokens incorrectly identified as acronyms are corrected (see
+    ///     <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>)</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
index 19a46d4..1a6a350 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
@@ -45,11 +45,11 @@ namespace Lucene.Net.Analysis.De
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating GermanAnalyzer:
     /// <list>
-    ///   <item> As of 3.6, GermanLightStemFilter is used for less aggressive stemming.</item>
-    ///   <item> As of 3.1, Snowball stemming is done with SnowballFilter, and 
-    ///        Snowball stopwords are used by default.</item>
-    ///   <item> As of 2.9, StopFilter preserves position
-    ///        increments</item>
+    ///   <item><description> As of 3.6, GermanLightStemFilter is used for less aggressive stemming.</description></item>
+    ///   <item><description> As of 3.1, Snowball stemming is done with SnowballFilter, and 
+    ///        Snowball stopwords are used by default.</description></item>
+    ///   <item><description> As of 2.9, StopFilter preserves position
+    ///        increments</description></item>
     /// </list>
     /// 
     /// </para>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilter.cs
index 7160e1c..fc4073a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilter.cs
@@ -28,10 +28,10 @@ namespace Lucene.Net.Analysis.De
     /// It allows for the fact that ä, ö and ü are sometimes written as ae, oe and ue.
     /// <para>
     /// <list>
-    ///     <item> 'ß' is replaced by 'ss'</item>
-    ///     <item> 'ä', 'ö', 'ü' are replaced by 'a', 'o', 'u', respectively.</item>
-    ///     <item> 'ae' and 'oe' are replaced by 'a', and 'o', respectively.</item>
-    ///     <item> 'ue' is replaced by 'u', when not following a vowel or q.</item>
+    ///     <item><description> 'ß' is replaced by 'ss'</description></item>
+    ///     <item><description> 'ä', 'ö', 'ü' are replaced by 'a', 'o', 'u', respectively.</description></item>
+    ///     <item><description> 'ae' and 'oe' are replaced by 'a', and 'o', respectively.</description></item>
+    ///     <item><description> 'ue' is replaced by 'u', when not following a vowel or q.</description></item>
     /// </list>
     /// </para>
     /// <para>

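A sketch applying the rules listed above, assuming the single-argument constructor; the expected outputs follow from the rules themselves, not from recorded test output:

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.De;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;

    class GermanNormalizationSketch
    {
        static void Main()
        {
            TokenStream ts = new WhitespaceTokenizer(LuceneVersion.LUCENE_48,
                new StringReader("Fußball schön"));
            ts = new GermanNormalizationFilter(ts);
            var term = ts.AddAttribute<ICharTermAttribute>();
            ts.Reset();
            // Per the rules above: "Fußball" -> "Fussball", "schön" -> "schon".
            while (ts.IncrementToken())
                Console.WriteLine(term.ToString());
            ts.End();
            ts.Dispose();
        }
    }
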
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemmer.cs
index 99f2455..47e9074 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemmer.cs
@@ -176,12 +176,12 @@ namespace Lucene.Net.Analysis.De
         /// Do some substitutions for the term to reduce overstemming:
         /// 
         /// <list type="bullet">
-        /// <item>Substitute Umlauts with their corresponding vowel: äöü -> aou,
-        ///   "ß" is substituted by "ss"</item>
-        /// <item>Substitute a second char of a pair of equal characters with
-        ///   an asterisk: ?? -> ?*</item>
-        /// <item>Substitute some common character combinations with a token:
-        ///   sch/ch/ei/ie/ig/st -> $/§/%/&amp;/#/!</item>
+        /// <item><description>Substitute Umlauts with their corresponding vowel: äöü -> aou,
+        ///   "ß" is substituted by "ss"</description></item>
+        /// <item><description>Substitute a second char of a pair of equal characters with
+        ///   an asterisk: ?? -> ?*</description></item>
+        /// <item><description>Substitute some common character combinations with a token:
+        ///   sch/ch/ei/ie/ig/st -> $/§/%/&amp;/#/!</description></item>
         /// </list>
         /// </summary>
         private void Substitute(StringBuilder buffer)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
index db0e978..061ed9e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
@@ -34,9 +34,9 @@ namespace Lucene.Net.Analysis.El
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="GreekAnalyzer"/>:
     /// <list type="bullet">
-    ///   <item> As of 3.1, StandardFilter and GreekStemmer are used by default.</item>
-    ///   <item> As of 2.9, StopFilter preserves position
-    ///        increments</item>
+    ///   <item><description> As of 3.1, StandardFilter and GreekStemmer are used by default.</description></item>
+    ///   <item><description> As of 2.9, StopFilter preserves position
+    ///        increments</description></item>
     /// </list>
     /// </para>
     /// <para><c>NOTE</c>: This class uses the same <see cref="LuceneVersion"/>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
index e501475..85f4bd3 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
@@ -28,7 +28,7 @@ namespace Lucene.Net.Analysis.El
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="GreekLowerCaseFilter"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.1, supplementary characters are properly lowercased.</item>
+    ///     <item><description> As of 3.1, supplementary characters are properly lowercased.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
index 888e7a8..5e8ac98 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
@@ -26,9 +26,9 @@ namespace Lucene.Net.Analysis.En
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="EnglishPossessiveFilter"/>:
     /// <list type="bullet">
-    ///    <item> As of 3.6, U+2019 RIGHT SINGLE QUOTATION MARK and 
+    ///    <item><description> As of 3.6, U+2019 RIGHT SINGLE QUOTATION MARK and 
     ///         U+FF07 FULLWIDTH APOSTROPHE are also treated as
-    ///         quotation marks.</item>
+    ///         quotation marks.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
index de05df7..b537856 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
@@ -35,7 +35,7 @@ namespace Lucene.Net.Analysis.Es
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="SpanishAnalyzer"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.6, <see cref="SpanishLightStemFilter"/> is used for less aggressive stemming.</item>
+    ///     <item><description> As of 3.6, <see cref="SpanishLightStemFilter"/> is used for less aggressive stemming.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizer.cs
index 81a2cb2..7840ab1 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizer.cs
@@ -28,9 +28,9 @@ namespace Lucene.Net.Analysis.Fa
     /// <para>
     /// Normalization is defined as:
     /// <list type="bullet">
-    ///     <item>Normalization of various heh + hamza forms and heh goal to heh.</item>
-    ///     <item>Normalization of farsi yeh and yeh barree to arabic yeh</item>
-    ///     <item>Normalization of persian keheh to arabic kaf</item>
+    ///     <item><description>Normalization of various heh + hamza forms and heh goal to heh.</description></item>
+    ///     <item><description>Normalization of farsi yeh and yeh barree to arabic yeh</description></item>
+    ///     <item><description>Normalization of persian keheh to arabic kaf</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
index a2a99ac..1d117a8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
@@ -40,12 +40,12 @@ namespace Lucene.Net.Analysis.Fr
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating FrenchAnalyzer:
     /// <list type="bullet">
-    ///   <item> As of 3.6, <see cref="FrenchLightStemFilter"/> is used for less aggressive stemming.</item>
-    ///   <item> As of 3.1, Snowball stemming is done with <see cref="SnowballFilter"/>, 
+    ///   <item><description> As of 3.6, <see cref="FrenchLightStemFilter"/> is used for less aggressive stemming.</description></item>
+    ///   <item><description> As of 3.1, Snowball stemming is done with <see cref="SnowballFilter"/>, 
     ///        <see cref="LowerCaseFilter"/> is used prior to <see cref="StopFilter"/>, and <see cref="ElisionFilter"/> and 
-    ///        Snowball stopwords are used by default.</item>
-    ///   <item> As of 2.9, <see cref="StopFilter"/> preserves position
-    ///        increments</item>
+    ///        Snowball stopwords are used by default.</description></item>
+    ///   <item><description> As of 2.9, <see cref="StopFilter"/> preserves position
+    ///        increments</description></item>
     /// </list>
     /// 
     /// </para>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
index 9ee40ac..28198f2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
@@ -31,7 +31,7 @@ namespace Lucene.Net.Analysis.Hi
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating HindiAnalyzer:
     /// <list type="bullet">
-    ///     <item> As of 3.6, StandardTokenizer is used for tokenization</item>
+    ///     <item><description> As of 3.6, StandardTokenizer is used for tokenization</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizer.cs
index 45144a6..96bb1f9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiNormalizer.cs
@@ -36,8 +36,8 @@ namespace Lucene.Net.Analysis.Hi
     /// Leah S. Larkey, Margaret E. Connell, and Nasreen AbdulJaleel.
     /// http://maroo.cs.umass.edu/pub/web/getpdf.php?id=454:
     /// <list type="bullet">
-    ///     <item>Internal Zero-width joiner and Zero-width non-joiners are removed</item>
-    ///     <item>In addition to chandrabindu, NA+halant is normalized to anusvara</item>
+    ///     <item><description>Internal Zero-width joiner and Zero-width non-joiners are removed</description></item>
+    ///     <item><description>In addition to chandrabindu, NA+halant is normalized to anusvara</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
index 6ef83dc..d428e63 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
@@ -34,9 +34,9 @@ namespace Lucene.Net.Analysis.It
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="ItalianAnalyzer"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.6, <see cref="ItalianLightStemFilter"/> is used for less aggressive stemming.</item>
-    ///     <item> As of 3.2, <see cref="ElisionFilter"/> with a set of Italian 
-    ///        contractions is used by default.</item>
+    ///     <item><description> As of 3.6, <see cref="ItalianLightStemFilter"/> is used for less aggressive stemming.</description></item>
+    ///     <item><description> As of 3.2, <see cref="ElisionFilter"/> with a set of Italian 
+    ///        contractions is used by default.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemmer.cs
index cb75bef..6b37b79 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianStemmer.cs
@@ -25,11 +25,11 @@ namespace Lucene.Net.Analysis.Lv
     /// This is a light version of the algorithm in Karlis Kreslin's PhD thesis
     /// <c>A stemming algorithm for Latvian</c> with the following modifications:
     /// <list type="bullet">
-    ///   <item>Only explicitly stems noun and adjective morphology</item>
-    ///   <item>Stricter length/vowel checks for the resulting stems (verb etc suffix stripping is removed)</item>
-    ///   <item>Removes only the primary inflectional suffixes: case and number for nouns ; 
-    ///       case, number, gender, and definitiveness for adjectives.</item>
-    ///   <item>Palatalization is only handled when a declension II,V,VI noun suffix is removed.</item>
+    ///   <item><description>Only explicitly stems noun and adjective morphology</description></item>
+    ///   <item><description>Stricter length/vowel checks for the resulting stems (verb etc. suffix stripping is removed)</description></item>
+    ///   <item><description>Removes only the primary inflectional suffixes: case and number for nouns;
+    ///       case, number, gender, and definiteness for adjectives.</description></item>
+    ///   <item><description>Palatalization is only handled when a declension II,V,VI noun suffix is removed.</description></item>
     /// </list>
     /// </para>
     /// </summary>
@@ -94,10 +94,10 @@ namespace Lucene.Net.Analysis.Lv
         /// <summary>
         /// Most cases are handled except for the ambiguous ones:
         /// <list type="bullet">
-        ///     <item> s -> š</item>
-        ///     <item> t -> š</item>
-        ///     <item> d -> ž</item>
-        ///     <item> z -> ž</item>
+        ///     <item><description> s -> š</description></item>
+        ///     <item><description> t -> š</description></item>
+        ///     <item><description> d -> ž</description></item>
+        ///     <item><description> z -> ž</description></item>
         /// </list>
         /// </summary>
         private int Unpalatalize(char[] s, int len)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs
index 582a461..f735ef7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ASCIIFoldingFilter.cs
@@ -30,22 +30,22 @@ namespace Lucene.Net.Analysis.Miscellaneous
     /// those characters with reasonable ASCII alternatives are converted:
     /// 
     /// <ul>
-    ///   <item>C1 Controls and Latin-1 Supplement: <a href="http://www.unicode.org/charts/PDF/U0080.pdf">http://www.unicode.org/charts/PDF/U0080.pdf</a></item>
-    ///   <item>Latin Extended-A: <a href="http://www.unicode.org/charts/PDF/U0100.pdf">http://www.unicode.org/charts/PDF/U0100.pdf</a></item>
-    ///   <item>Latin Extended-B: <a href="http://www.unicode.org/charts/PDF/U0180.pdf">http://www.unicode.org/charts/PDF/U0180.pdf</a></item>
-    ///   <item>Latin Extended Additional: <a href="http://www.unicode.org/charts/PDF/U1E00.pdf">http://www.unicode.org/charts/PDF/U1E00.pdf</a></item>
-    ///   <item>Latin Extended-C: <a href="http://www.unicode.org/charts/PDF/U2C60.pdf">http://www.unicode.org/charts/PDF/U2C60.pdf</a></item>
-    ///   <item>Latin Extended-D: <a href="http://www.unicode.org/charts/PDF/UA720.pdf">http://www.unicode.org/charts/PDF/UA720.pdf</a></item>
-    ///   <item>IPA Extensions: <a href="http://www.unicode.org/charts/PDF/U0250.pdf">http://www.unicode.org/charts/PDF/U0250.pdf</a></item>
-    ///   <item>Phonetic Extensions: <a href="http://www.unicode.org/charts/PDF/U1D00.pdf">http://www.unicode.org/charts/PDF/U1D00.pdf</a></item>
-    ///   <item>Phonetic Extensions Supplement: <a href="http://www.unicode.org/charts/PDF/U1D80.pdf">http://www.unicode.org/charts/PDF/U1D80.pdf</a></item>
-    ///   <item>General Punctuation: <a href="http://www.unicode.org/charts/PDF/U2000.pdf">http://www.unicode.org/charts/PDF/U2000.pdf</a></item>
-    ///   <item>Superscripts and Subscripts: <a href="http://www.unicode.org/charts/PDF/U2070.pdf">http://www.unicode.org/charts/PDF/U2070.pdf</a></item>
-    ///   <item>Enclosed Alphanumerics: <a href="http://www.unicode.org/charts/PDF/U2460.pdf">http://www.unicode.org/charts/PDF/U2460.pdf</a></item>
-    ///   <item>Dingbats: <a href="http://www.unicode.org/charts/PDF/U2700.pdf">http://www.unicode.org/charts/PDF/U2700.pdf</a></item>
-    ///   <item>Supplemental Punctuation: <a href="http://www.unicode.org/charts/PDF/U2E00.pdf">http://www.unicode.org/charts/PDF/U2E00.pdf</a></item>
-    ///   <item>Alphabetic Presentation Forms: <a href="http://www.unicode.org/charts/PDF/UFB00.pdf">http://www.unicode.org/charts/PDF/UFB00.pdf</a></item>
-    ///   <item>Halfwidth and Fullwidth Forms: <a href="http://www.unicode.org/charts/PDF/UFF00.pdf">http://www.unicode.org/charts/PDF/UFF00.pdf</a></item>
+    ///   <item><description>C1 Controls and Latin-1 Supplement: <a href="http://www.unicode.org/charts/PDF/U0080.pdf">http://www.unicode.org/charts/PDF/U0080.pdf</a></description></item>
+    ///   <item><description>Latin Extended-A: <a href="http://www.unicode.org/charts/PDF/U0100.pdf">http://www.unicode.org/charts/PDF/U0100.pdf</a></description></item>
+    ///   <item><description>Latin Extended-B: <a href="http://www.unicode.org/charts/PDF/U0180.pdf">http://www.unicode.org/charts/PDF/U0180.pdf</a></description></item>
+    ///   <item><description>Latin Extended Additional: <a href="http://www.unicode.org/charts/PDF/U1E00.pdf">http://www.unicode.org/charts/PDF/U1E00.pdf</a></description></item>
+    ///   <item><description>Latin Extended-C: <a href="http://www.unicode.org/charts/PDF/U2C60.pdf">http://www.unicode.org/charts/PDF/U2C60.pdf</a></description></item>
+    ///   <item><description>Latin Extended-D: <a href="http://www.unicode.org/charts/PDF/UA720.pdf">http://www.unicode.org/charts/PDF/UA720.pdf</a></description></item>
+    ///   <item><description>IPA Extensions: <a href="http://www.unicode.org/charts/PDF/U0250.pdf">http://www.unicode.org/charts/PDF/U0250.pdf</a></description></item>
+    ///   <item><description>Phonetic Extensions: <a href="http://www.unicode.org/charts/PDF/U1D00.pdf">http://www.unicode.org/charts/PDF/U1D00.pdf</a></description></item>
+    ///   <item><description>Phonetic Extensions Supplement: <a href="http://www.unicode.org/charts/PDF/U1D80.pdf">http://www.unicode.org/charts/PDF/U1D80.pdf</a></description></item>
+    ///   <item><description>General Punctuation: <a href="http://www.unicode.org/charts/PDF/U2000.pdf">http://www.unicode.org/charts/PDF/U2000.pdf</a></description></item>
+    ///   <item><description>Superscripts and Subscripts: <a href="http://www.unicode.org/charts/PDF/U2070.pdf">http://www.unicode.org/charts/PDF/U2070.pdf</a></description></item>
+    ///   <item><description>Enclosed Alphanumerics: <a href="http://www.unicode.org/charts/PDF/U2460.pdf">http://www.unicode.org/charts/PDF/U2460.pdf</a></description></item>
+    ///   <item><description>Dingbats: <a href="http://www.unicode.org/charts/PDF/U2700.pdf">http://www.unicode.org/charts/PDF/U2700.pdf</a></description></item>
+    ///   <item><description>Supplemental Punctuation: <a href="http://www.unicode.org/charts/PDF/U2E00.pdf">http://www.unicode.org/charts/PDF/U2E00.pdf</a></description></item>
+    ///   <item><description>Alphabetic Presentation Forms: <a href="http://www.unicode.org/charts/PDF/UFB00.pdf">http://www.unicode.org/charts/PDF/UFB00.pdf</a></description></item>
+    ///   <item><description>Halfwidth and Fullwidth Forms: <a href="http://www.unicode.org/charts/PDF/UFF00.pdf">http://www.unicode.org/charts/PDF/UFF00.pdf</a></description></item>
     /// </ul>
     /// <para/>
     /// See: <a href="http://en.wikipedia.org/wiki/Latin_characters_in_Unicode">http://en.wikipedia.org/wiki/Latin_characters_in_Unicode</a>

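A minimal folding sketch, assuming the single-argument constructor; the expected foldings ('é' to 'e', 'Œ' to "OE") follow from the Unicode blocks listed above:

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Miscellaneous;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;

    class AsciiFoldingSketch
    {
        static void Main()
        {
            TokenStream ts = new WhitespaceTokenizer(LuceneVersion.LUCENE_48,
                new StringReader("résumé Œuvre"));
            ts = new ASCIIFoldingFilter(ts); // replace with reasonable ASCII equivalents
            var term = ts.AddAttribute<ICharTermAttribute>();
            ts.Reset();
            // Expected: "resume", "OEuvre".
            while (ts.IncrementToken())
                Console.WriteLine(term.ToString());
            ts.End();
            ts.Dispose();
        }
    }
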
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
index f79ef5e..1e8ac7d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
@@ -98,41 +98,41 @@ namespace Lucene.Net.Analysis.Miscellaneous
     /// Splits words into subwords and performs optional transformations on subword
     /// groups. Words are split into subwords with the following rules:
     /// <list type="bullet">
-    ///     <item>split on intra-word delimiters (by default, all non alpha-numeric
-    ///         characters): <c>"Wi-Fi"</c> &#8594; <c>"Wi", "Fi"</c></item>
-    ///     <item>split on case transitions: <c>"PowerShot"</c> &#8594;
-    ///         <c>"Power", "Shot"</c></item>
-    ///     <item>split on letter-number transitions: <c>"SD500"</c> &#8594;
-    ///         <c>"SD", "500"</c></item>
-    ///     <item>leading and trailing intra-word delimiters on each subword are ignored:
+    ///     <item><description>split on intra-word delimiters (by default, all non alpha-numeric
+    ///         characters): <c>"Wi-Fi"</c> &#8594; <c>"Wi", "Fi"</c></description></item>
+    ///     <item><description>split on case transitions: <c>"PowerShot"</c> &#8594;
+    ///         <c>"Power", "Shot"</c></description></item>
+    ///     <item><description>split on letter-number transitions: <c>"SD500"</c> &#8594;
+    ///         <c>"SD", "500"</c></description></item>
+    ///     <item><description>leading and trailing intra-word delimiters on each subword are ignored:
     ///         <c>"//hello---there, 'dude'"</c> &#8594;
-    ///         <c>"hello", "there", "dude"</c></item>
-    ///     <item>trailing "'s" are removed for each subword: <c>"O'Neil's"</c>
+    ///         <c>"hello", "there", "dude"</c></description></item>
+    ///     <item><description>trailing "'s" are removed for each subword: <c>"O'Neil's"</c>
     ///         &#8594; <c>"O", "Neil"</c>
     ///         <ul>
-    ///             <item>Note: this step isn't performed in a separate filter because of possible
-    ///                 subword combinations.</item>
+    ///             <item><description>Note: this step isn't performed in a separate filter because of possible
+    ///                 subword combinations.</description></item>
     ///         </ul>
-    ///     </item>
+    ///     </description></item>
     /// </list>
     /// <para/>
     /// The <b>combinations</b> parameter affects how subwords are combined:
     /// <list type="bullet">
-    ///     <item>combinations="0" causes no subword combinations: <code>"PowerShot"</code>
-    ///         &#8594; <c>0:"Power", 1:"Shot"</c> (0 and 1 are the token positions)</item>
-    ///     <item>combinations="1" means that in addition to the subwords, maximum runs of
+    ///     <item><description>combinations="0" causes no subword combinations: <code>"PowerShot"</code>
+    ///         &#8594; <c>0:"Power", 1:"Shot"</c> (0 and 1 are the token positions)</description></item>
+    ///     <item><description>combinations="1" means that in addition to the subwords, maximum runs of
     ///         non-numeric subwords are catenated and produced at the same position of the
     ///         last subword in the run:
     ///         <ul>
-    ///             <item><c>"PowerShot"</c> &#8594;
-    ///                 <c>0:"Power", 1:"Shot" 1:"PowerShot"</c></item>
-    ///             <item><c>"A's+B's&amp;C's"</c> -gt; <c>0:"A", 1:"B", 2:"C", 2:"ABC"</c>
-    ///             </item>
-    ///             <item><c>"Super-Duper-XL500-42-AutoCoder!"</c> &#8594;
+    ///             <item><description><c>"PowerShot"</c> &#8594;
+    ///                 <c>0:"Power", 1:"Shot" 1:"PowerShot"</c></description></item>
+    ///             <item><description><c>"A's+B's&amp;C's"</c> -gt; <c>0:"A", 1:"B", 2:"C", 2:"ABC"</c>
+    ///             </description></item>
+    ///             <item><description><c>"Super-Duper-XL500-42-AutoCoder!"</c> &#8594;
     ///                 <c>0:"Super", 1:"Duper", 2:"XL", 2:"SuperDuperXL", 3:"500" 4:"42", 5:"Auto", 6:"Coder", 6:"AutoCoder"</c>
-    ///             </item>
+    ///             </description></item>
     ///         </ul>
-    ///     </item>
+    ///     </description></item>
     /// </list>
     /// <para/>
     /// One use for <see cref="WordDelimiterFilter"/> is to help match words with different

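A configuration sketch for the rules above. It assumes the port exposes the Java flag constants as a WordDelimiterFlags enum and a (LuceneVersion, TokenStream, WordDelimiterFlags, CharArraySet) constructor, so treat those exact names as assumptions rather than confirmed API:

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Miscellaneous;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;

    class WordDelimiterSketch
    {
        static void Main()
        {
            const LuceneVersion V = LuceneVersion.LUCENE_48;
            var flags = WordDelimiterFlags.GENERATE_WORD_PARTS
                      | WordDelimiterFlags.GENERATE_NUMBER_PARTS
                      | WordDelimiterFlags.SPLIT_ON_CASE_CHANGE;
            TokenStream ts = new WhitespaceTokenizer(V, new StringReader("PowerShot SD500"));
            ts = new WordDelimiterFilter(V, ts, flags, null); // null: no protected words
            var term = ts.AddAttribute<ICharTermAttribute>();
            ts.Reset();
            // Expected subwords per the rules above: Power, Shot, SD, 500.
            while (ts.IncrementToken())
                Console.WriteLine(term.ToString());
            ts.End();
            ts.Dispose();
        }
    }
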
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/NGram/EdgeNGramTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/NGram/EdgeNGramTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/NGram/EdgeNGramTokenizer.cs
index ed2cb3d..7ecb1e5 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/NGram/EdgeNGramTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/NGram/EdgeNGramTokenizer.cs
@@ -27,12 +27,12 @@ namespace Lucene.Net.Analysis.NGram
     /// </para>
     /// <para>As of Lucene 4.4, this tokenizer
     /// <list type="bullet">
-    ///     <item>can handle <code>maxGram</code> larger than 1024 chars, but beware that this will result in increased memory usage</item>
-    ///     <item>doesn't trim the input,</item>
-    ///     <item>sets position increments equal to 1 instead of 1 for the first token and 0 for all other ones</item>
-    ///     <item>doesn't support backward n-grams anymore.</item>
-    ///     <item>supports <see cref="Util.CharTokenizer.IsTokenChar(int)"/> pre-tokenization,</item>
-    ///     <item>correctly handles supplementary characters.</item>
+    ///     <item><description>can handle <code>maxGram</code> larger than 1024 chars, but beware that this will result in increased memory usage</description></item>
+    ///     <item><description>doesn't trim the input,</description></item>
+    ///     <item><description>sets all position increments to 1, instead of 1 for the first token and 0 for all other ones</description></item>
+    ///     <item><description>doesn't support backward n-grams anymore.</description></item>
+    ///     <item><description>supports <see cref="Util.CharTokenizer.IsTokenChar(int)"/> pre-tokenization,</description></item>
+    ///     <item><description>correctly handles supplementary characters.</description></item>
     /// </list>
     /// </para>
     /// <para>Although <b style="color:red">highly</b> discouraged, it is still possible

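A sketch of the 4.4-style front-edge behavior, assuming the (LuceneVersion, TextReader, int, int) constructor; the sample word and grams are illustrative:

    using System;
    using System.IO;
    using Lucene.Net.Analysis.NGram;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;

    class EdgeNGramSketch
    {
        static void Main()
        {
            // minGram = 1, maxGram = 3: front-edge grams only as of 4.4.
            using (var ts = new EdgeNGramTokenizer(LuceneVersion.LUCENE_48,
                       new StringReader("quick"), 1, 3))
            {
                var term = ts.AddAttribute<ICharTermAttribute>();
                ts.Reset();
                // Expected: "q", "qu", "qui".
                while (ts.IncrementToken())
                    Console.WriteLine(term.ToString());
                ts.End();
            }
        }
    }
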
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenFilter.cs
index 2b0af35..416c96f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenFilter.cs
@@ -27,12 +27,12 @@ namespace Lucene.Net.Analysis.NGram
     /// <para>You must specify the required <see cref="LuceneVersion"/> compatibility when
     /// creating a <see cref="NGramTokenFilter"/>. As of Lucene 4.4, this token filter:
     /// <list type="bullet">
-    ///     <item>handles supplementary characters correctly,</item>
-    ///     <item>emits all n-grams for the same token at the same position,</item>
-    ///     <item>does not modify offsets,</item>
-    ///     <item>sorts n-grams by their offset in the original token first, then
+    ///     <item><description>handles supplementary characters correctly,</description></item>
+    ///     <item><description>emits all n-grams for the same token at the same position,</description></item>
+    ///     <item><description>does not modify offsets,</description></item>
+    ///     <item><description>sorts n-grams by their offset in the original token first, then
     ///         increasing length (meaning that "abc" will give "a", "ab", "abc", "b", "bc",
-    ///         "c").</item>
+    ///         "c").</description></item>
     /// </list>
     /// </para>
     /// <para>You can make this filter use the old behavior by providing a version &lt;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs
index 0fe3792..bd62835 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs
@@ -85,11 +85,11 @@ namespace Lucene.Net.Analysis.NGram
     /// </para>
     /// <para>This tokenizer changed a lot in Lucene 4.4 in order to:
     /// <list type="bullet">
-    ///     <item>tokenize in a streaming fashion to support streams which are larger
-    ///         than 1024 chars (limit of the previous version),</item>
-    ///     <item>count grams based on unicode code points instead of java chars (and
-    ///         never split in the middle of surrogate pairs),</item>
-    ///     <item>give the ability to pre-tokenize the stream (<see cref="IsTokenChar(int)"/>)
-    ///         before computing n-grams.</item>
+    ///     <item><description>tokenize in a streaming fashion to support streams which are larger
+    ///         than 1024 chars (limit of the previous version),</description></item>
+    ///     <item><description>count grams based on unicode code points instead of java chars (and
+    ///         never split in the middle of surrogate pairs),</description></item>
+    ///     <item><description>give the ability to pre-tokenize the stream (<see cref="IsTokenChar(int)"/>)
+    ///         before computing n-grams.</description></item>
     /// </list>
     /// </para>
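
The pre-tokenization hook in the last bullet can be used by subclassing; a sketch
(assuming IsTokenChar(int) is overridable in the port, as it is in the Java
original, and using a deliberately simple ASCII-only letter check):

    using System.IO;
    using Lucene.Net.Analysis.NGram;
    using Lucene.Net.Util;

    public sealed class LetterNGramTokenizer : NGramTokenizer
    {
        public LetterNGramTokenizer(LuceneVersion version, TextReader input, int minGram, int maxGram)
            : base(version, input, minGram, maxGram)
        {
        }

        protected override bool IsTokenChar(int chr)
        {
            // Only form n-grams inside runs of ASCII letters; a real implementation
            // would use a Unicode-aware check on the code point.
            return (chr >= 'a' && chr <= 'z') || (chr >= 'A' && chr <= 'Z');
        }
    }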

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
index 0718e8d..07ce34a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
@@ -41,14 +41,14 @@ namespace Lucene.Net.Analysis.Nl
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="DutchAnalyzer"/>:
     /// <list type="bullet">
-    ///   <item> As of 3.6, <see cref="DutchAnalyzer(LuceneVersion, CharArraySet)"/> and
+    ///   <item><description> As of 3.6, <see cref="DutchAnalyzer(LuceneVersion, CharArraySet)"/> and
     ///        <see cref="DutchAnalyzer(LuceneVersion, CharArraySet, CharArraySet)"/> also populate
-    ///        the default entries for the stem override dictionary</item>
-    ///   <item> As of 3.1, Snowball stemming is done with SnowballFilter, 
+    ///        the default entries for the stem override dictionary</description></item>
+    ///   <item><description> As of 3.1, Snowball stemming is done with SnowballFilter, 
     ///        LowerCaseFilter is used prior to StopFilter, and Snowball 
-    ///        stopwords are used by default.</item>
-    ///   <item> As of 2.9, StopFilter preserves position
-    ///        increments</item>
+    ///        stopwords are used by default.</description></item>
+    ///   <item><description> As of 2.9, StopFilter preserves position
+    ///        increments</description></item>
     /// </list>
     /// 
     /// </para>
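
The LuceneVersion argument selects among the behaviors listed above; a short
sketch (assuming the Lucene.Net 4.8 API):

    using Lucene.Net.Analysis.Nl;
    using Lucene.Net.Util;

    // Current behavior: default stem-override entries, Snowball stemming and stopwords.
    var analyzer = new DutchAnalyzer(LuceneVersion.LUCENE_48);

    // Match an index built with older semantics by passing the older version.
    var legacyAnalyzer = new DutchAnalyzer(LuceneVersion.LUCENE_30);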

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs
index 53f58bd..8717692 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizer.cs
@@ -27,8 +27,8 @@ namespace Lucene.Net.Analysis.Pattern
     /// for the input stream.  It takes two arguments:  "pattern" and "group".
     /// <para/>
     /// <list type="bullet">
-    ///     <item>"pattern" is the regular expression.</item>
-    ///     <item>"group" says which group to extract into tokens.</item>
+    ///     <item><description>"pattern" is the regular expression.</description></item>
+    ///     <item><description>"group" says which group to extract into tokens.</description></item>
     /// </list>
     /// <para>
     /// group=-1 (the default) is equivalent to "split".  In this case, the tokens will
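
A short sketch of the "split" mode (assuming the Lucene.Net 4.8 port, which takes
a .NET Regex rather than a pattern string):

    using System;
    using System.IO;
    using System.Text.RegularExpressions;
    using Lucene.Net.Analysis.Pattern;
    using Lucene.Net.Analysis.TokenAttributes;

    // group = -1 splits on the pattern, yielding "aaa", "bbb", "ccc".
    using (TextReader reader = new StringReader("aaa,bbb,ccc"))
    using (var tokenizer = new PatternTokenizer(reader, new Regex(","), -1))
    {
        var termAtt = tokenizer.AddAttribute<ICharTermAttribute>();
        tokenizer.Reset();
        while (tokenizer.IncrementToken())
        {
            Console.WriteLine(termAtt.ToString());
        }
        tokenizer.End();
    }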

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs
index 20e7897..c62521a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pattern/PatternTokenizerFactory.cs
@@ -29,8 +29,8 @@ namespace Lucene.Net.Analysis.Pattern
     /// for the input stream.  It takes two arguments:  "pattern" and "group".
     /// <para/>
     /// <list type="bullet">
-    ///     <item>"pattern" is the regular expression.</item>
-    ///     <item>"group" says which group to extract into tokens.</item>
+    ///     <item><description>"pattern" is the regular expression.</description></item>
+    ///     <item><description>"group" says which group to extract into tokens.</description></item>
     /// </list>
     /// <para>
     /// group=-1 (the default) is equivalent to "split".  In this case, the tokens will

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
index c31fccd..5f09576 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
@@ -32,7 +32,7 @@ namespace Lucene.Net.Analysis.Pt
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="PortugueseAnalyzer"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.6, PortugueseLightStemFilter is used for less aggressive stemming.</item>
+    ///     <item><description> As of 3.6, PortugueseLightStemFilter is used for less aggressive stemming.</description></item>
     /// </list>
     /// </para>
     /// </summary>


[4/9] lucenenet git commit: SWEEP: Lucene.Net.Index: Fixed up documentation comments for types starting with M-Z

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/Terms.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/Terms.cs b/src/Lucene.Net/Index/Terms.cs
index 215a505..69e2ca0 100644
--- a/src/Lucene.Net/Index/Terms.cs
+++ b/src/Lucene.Net/Index/Terms.cs
@@ -24,7 +24,8 @@ namespace Lucene.Net.Index
     using CompiledAutomaton = Lucene.Net.Util.Automaton.CompiledAutomaton;
 
     /// <summary>
-    /// Access to the terms in a specific field.  See <seealso cref="Fields"/>.
+    /// Access to the terms in a specific field.  See <see cref="Fields"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -34,7 +35,7 @@ namespace Lucene.Net.Index
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected Terms()
         {
@@ -42,25 +43,25 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Returns an iterator that will step through all
-        ///  terms. this method will not return null.  If you have
-        ///  a previous TermsEnum, for example from a different
-        ///  field, you can pass it for possible reuse if the
-        ///  implementation can do so.
+        /// terms. This method will not return <c>null</c>.  If you have
+        /// a previous <see cref="TermsEnum"/>, for example from a different
+        /// field, you can pass it for possible reuse if the
+        /// implementation can do so.
         /// </summary>
         public abstract TermsEnum GetIterator(TermsEnum reuse);
 
         /// <summary>
-        /// Returns a TermsEnum that iterates over all terms that
-        ///  are accepted by the provided {@link
-        ///  CompiledAutomaton}.  If the <code>startTerm</code> is
-        ///  provided then the returned enum will only accept terms
-        ///  > <code>startTerm</code>, but you still must call
-        ///  next() first to get to the first term.  Note that the
-        ///  provided <code>startTerm</code> must be accepted by
-        ///  the automaton.
+        /// Returns a <see cref="TermsEnum"/> that iterates over all terms that
+        /// are accepted by the provided 
+        /// <see cref="CompiledAutomaton"/>.  If the <paramref name="startTerm"/> is
+        /// provided then the returned enum will only accept terms
+        /// &gt; <paramref name="startTerm"/>, but you still must call
+        /// <see cref="TermsEnum.Next()"/> first to get to the first term.  Note that the
+        /// provided <paramref name="startTerm"/> must be accepted by
+        /// the automaton.
         ///
-        /// <p><b>NOTE</b>: the returned TermsEnum cannot
-        /// seek</p>.
+        /// <para><b>NOTE</b>: the returned <see cref="TermsEnum"/> cannot
+        /// seek.</para>
         /// </summary>
         public virtual TermsEnum Intersect(CompiledAutomaton compiled, BytesRef startTerm)
         {
@@ -105,11 +106,11 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Return the BytesRef Comparer used to sort terms
-        ///  provided by the iterator.  this method may return null
-        ///  if there are no terms.  this method may be invoked
-        ///  many times; it's best to cache a single instance &
-        ///  reuse it.
+        /// Return the <see cref="T:IComparer{BytesRef}"/> used to sort terms
+        /// provided by the iterator.  This method may return <c>null</c>
+        /// if there are no terms.  This method may be invoked
+        /// many times; it's best to cache a single instance &amp;
+        /// reuse it.
         /// </summary>
         public abstract IComparer<BytesRef> Comparer { get; }
 
@@ -123,53 +124,53 @@ namespace Lucene.Net.Index
         public abstract long Count { get; }
 
         /// <summary>
-        /// Returns the sum of <seealso cref="TermsEnum#totalTermFreq"/> for
-        ///  all terms in this field, or -1 if this measure isn't
-        ///  stored by the codec (or if this fields omits term freq
-        ///  and positions).  Note that, just like other term
-        ///  measures, this measure does not take deleted documents
-        ///  into account.
+        /// Returns the sum of <see cref="TermsEnum.TotalTermFreq"/> for
+        /// all terms in this field, or -1 if this measure isn't
+        /// stored by the codec (or if this field omits term freq
+        /// and positions).  Note that, just like other term
+        /// measures, this measure does not take deleted documents
+        /// into account.
         /// </summary>
-        public abstract long SumTotalTermFreq { get; }
+        public abstract long SumTotalTermFreq { get; } // LUCENENET TODO: API Make GetSumTotalTermFreq() (conversion)
 
         /// <summary>
-        /// Returns the sum of <seealso cref="TermsEnum#docFreq()"/> for
-        ///  all terms in this field, or -1 if this measure isn't
-        ///  stored by the codec.  Note that, just like other term
-        ///  measures, this measure does not take deleted documents
-        ///  into account.
+        /// Returns the sum of <see cref="TermsEnum.DocFreq"/> for
+        /// all terms in this field, or -1 if this measure isn't
+        /// stored by the codec.  Note that, just like other term
+        /// measures, this measure does not take deleted documents
+        /// into account.
         /// </summary>
-        public abstract long SumDocFreq { get; }
+        public abstract long SumDocFreq { get; } // LUCENENET TODO: API Make GetSumDocFreq() (conversion)
 
         /// <summary>
         /// Returns the number of documents that have at least one
-        ///  term for this field, or -1 if this measure isn't
-        ///  stored by the codec.  Note that, just like other term
-        ///  measures, this measure does not take deleted documents
-        ///  into account.
+        /// term for this field, or -1 if this measure isn't
+        /// stored by the codec.  Note that, just like other term
+        /// measures, this measure does not take deleted documents
+        /// into account.
         /// </summary>
         public abstract int DocCount { get; }
 
         /// <summary>
         /// Returns <c>true</c> if documents in this field store
-        ///  per-document term frequency (<seealso cref="DocsEnum#freq"/>).
+        /// per-document term frequency (<see cref="DocsEnum.Freq"/>).
         /// </summary>
         public abstract bool HasFreqs { get; }
 
         /// <summary>
-        /// Returns true if documents in this field store offsets. </summary>
+        /// Returns <c>true</c> if documents in this field store offsets. </summary>
         public abstract bool HasOffsets { get; }
 
         /// <summary>
-        /// Returns true if documents in this field store positions. </summary>
+        /// Returns <c>true</c> if documents in this field store positions. </summary>
         public abstract bool HasPositions { get; }
 
         /// <summary>
-        /// Returns true if documents in this field store payloads. </summary>
+        /// Returns <c>true</c> if documents in this field store payloads. </summary>
         public abstract bool HasPayloads { get; }
 
         /// <summary>
-        /// Zero-length array of <seealso cref="Terms"/>. </summary>
+        /// Zero-length array of <see cref="Terms"/>. </summary>
         public static readonly Terms[] EMPTY_ARRAY = new Terms[0];
     }
 }
\ No newline at end of file
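
A sketch of stepping through every term of a field with the API above (assuming
the Lucene.Net 4.8 API; "reader" and "myField" are placeholders):

    using System;
    using Lucene.Net.Index;
    using Lucene.Net.Util;

    Terms terms = MultiFields.GetTerms(reader, "myField");
    if (terms != null) // the field may not exist
    {
        TermsEnum termsEnum = terms.GetIterator(null); // no previous enum to reuse
        BytesRef term;
        while ((term = termsEnum.Next()) != null)
        {
            Console.WriteLine(term.Utf8ToString() + " docFreq=" + termsEnum.DocFreq);
        }
    }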

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/TermsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/TermsEnum.cs b/src/Lucene.Net/Index/TermsEnum.cs
index 41733c5..61b6fc1 100644
--- a/src/Lucene.Net/Index/TermsEnum.cs
+++ b/src/Lucene.Net/Index/TermsEnum.cs
@@ -28,10 +28,10 @@ namespace Lucene.Net.Index
     /// <summary>
     /// Iterator to seek (<see cref="SeekCeil(BytesRef)"/>, 
     /// <see cref="SeekExact(BytesRef)"/>) or step through 
-    /// (<see cref="Next"/> terms to obtain frequency information 
+    /// (<see cref="Next()"/> terms to obtain frequency information 
     /// (<see cref="DocFreq"/>), <see cref="DocsEnum"/> or 
     /// <see cref="DocsAndPositionsEnum"/> for the current term 
-    /// (<see cref="Docs"/>).
+    /// (<see cref="Docs(IBits, DocsEnum)"/>).
     ///
     /// <para/>Term enumerations are always ordered by
     /// <see cref="Comparer"/>.  Each term in the enumeration is
@@ -40,7 +40,7 @@ namespace Lucene.Net.Index
     /// <para/>The <see cref="TermsEnum"/> is unpositioned when you first obtain it
     /// and you must first successfully call <see cref="Next"/> or one
     /// of the <c>Seek</c> methods.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -95,9 +95,9 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Attempts to seek to the exact term, returning
-        /// true if the term is found.  If this returns false, the
-        /// enum is unpositioned.  For some codecs, <see cref="SeekExact"/> may
-        /// be substantially faster than <see cref="SeekCeil"/>.
+        /// <c>true</c> if the term is found.  If this returns <c>false</c>, the
+        /// enum is unpositioned.  For some codecs, <see cref="SeekExact(BytesRef)"/> may
+        /// be substantially faster than <see cref="SeekCeil(BytesRef)"/>.
         /// </summary>
         public virtual bool SeekExact(BytesRef text)
         {
@@ -157,8 +157,8 @@ namespace Lucene.Net.Index
         public abstract BytesRef Term { get; }
 
         /// <summary>
-        /// Returns ordinal position for current term.  this is an
-        /// optional method (the codec may throw <see cref="NotSupportedException"/>.
+        /// Returns ordinal position for current term.  This is an
+        /// optional property (the codec may throw <see cref="NotSupportedException"/>).
         /// Do not call this when the enum is unpositioned.
         /// </summary>
         public abstract long Ord { get; } // LUCENENET NOTE: Although this isn't a great candidate for a property, did so to make API consistent
@@ -172,22 +172,22 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Returns the total number of occurrences of this term
-        ///  across all documents (the sum of the freq() for each
-        ///  doc that has this term).  this will be -1 if the
-        ///  codec doesn't support this measure.  Note that, like
-        ///  other term measures, this measure does not take
-        ///  deleted documents into account.
+        /// across all documents (the sum of <see cref="DocsEnum.Freq"/> for each
+        /// doc that has this term).  This will be -1 if the
+        /// codec doesn't support this measure.  Note that, like
+        /// other term measures, this measure does not take
+        /// deleted documents into account.
         /// </summary>
         public abstract long TotalTermFreq { get; } // LUCENENET NOTE: Although this isn't a great candidate for a property, did so to make API consistent
 
         /// <summary>
         /// Get <see cref="DocsEnum"/> for the current term. Do not
         /// call this when the enum is unpositioned. This method
-        /// will not return null.
+        /// will not return <c>null</c>.
         /// </summary>
-        /// <param name="liveDocs"> unset bits are documents that should not
+        /// <param name="liveDocs"> Unset bits are documents that should not
         /// be returned </param>
-        /// <param name="reuse"> pass a prior <see cref="DocsEnum"/> for possible reuse  </param>
+        /// <param name="reuse"> Pass a prior <see cref="DocsEnum"/> for possible reuse </param>
         public DocsEnum Docs(IBits liveDocs, DocsEnum reuse)
         {
             return Docs(liveDocs, reuse, DocsFlags.FREQS);
@@ -197,26 +197,26 @@ namespace Lucene.Net.Index
         /// Get <see cref="DocsEnum"/> for the current term, with
         /// control over whether freqs are required. Do not
         /// call this when the enum is unpositioned. This method
-        /// will not return null.
+        /// will not return <c>null</c>.
         /// </summary>
-        /// <param name="liveDocs"> unset bits are documents that should not
+        /// <param name="liveDocs"> Unset bits are documents that should not
         /// be returned </param>
-        /// <param name="reuse"> pass a prior DocsEnum for possible reuse </param>
-        /// <param name="flags"> specifies which optional per-document values
+        /// <param name="reuse"> Pass a prior DocsEnum for possible reuse </param>
+        /// <param name="flags"> Specifies which optional per-document values
         ///        you require; <see cref="DocsFlags"/></param>
         /// <seealso cref="Docs(IBits, DocsEnum)"/>
         public abstract DocsEnum Docs(IBits liveDocs, DocsEnum reuse, DocsFlags flags);
 
         /// <summary>
-        /// Get <seealso cref="DocsAndPositionsEnum"/> for the current term.
-        ///  Do not call this when the enum is unpositioned.  this
-        ///  method will return null if positions were not
-        ///  indexed.
+        /// Get <see cref="DocsAndPositionsEnum"/> for the current term.
+        /// Do not call this when the enum is unpositioned.  This
+        /// method will return <c>null</c> if positions were not
+        /// indexed.
         /// </summary>
-        ///  <param name="liveDocs"> unset bits are documents that should not
-        ///  be returned </param>
-        ///  <param name="reuse"> pass a prior DocsAndPositionsEnum for possible reuse </param>
-        ///  <seealso cref= #docsAndPositions(Bits, DocsAndPositionsEnum, int)  </seealso>
+        /// <param name="liveDocs"> Unset bits are documents that should not
+        /// be returned </param>
+        /// <param name="reuse"> Pass a prior DocsAndPositionsEnum for possible reuse </param>
+        /// <seealso cref="DocsAndPositions(IBits, DocsAndPositionsEnum, DocsAndPositionsFlags)"/>
         public DocsAndPositionsEnum DocsAndPositions(IBits liveDocs, DocsAndPositionsEnum reuse)
         {
             return DocsAndPositions(liveDocs, reuse, DocsAndPositionsFlags.OFFSETS | DocsAndPositionsFlags.PAYLOADS);
@@ -228,20 +228,20 @@ namespace Lucene.Net.Index
         /// required.  Some codecs may be able to optimize their
         /// implementation when offsets and/or payloads are not required.
         /// Do not call this when the enum is unpositioned. This
-        /// will return null if positions were not indexed.
+        /// will return <c>null</c> if positions were not indexed.
         /// </summary>
-        /// <param name="liveDocs"> unset bits are documents that should not
+        /// <param name="liveDocs"> Unset bits are documents that should not
         /// be returned </param>
-        /// <param name="reuse"> pass a prior DocsAndPositionsEnum for possible reuse </param>
-        /// <param name="flags"> specifies which optional per-position values you
+        /// <param name="reuse"> Pass a prior DocsAndPositionsEnum for possible reuse </param>
+        /// <param name="flags"> Specifies which optional per-position values you
         ///         require; see <see cref="DocsAndPositionsFlags"/>. </param>
         public abstract DocsAndPositionsEnum DocsAndPositions(IBits liveDocs, DocsAndPositionsEnum reuse, DocsAndPositionsFlags flags);
 
         /// <summary>
-        /// Expert: Returns the TermsEnums internal state to position the <see cref="TermsEnum"/>
+        /// Expert: Returns the <see cref="TermsEnum"/>s internal state to position the <see cref="TermsEnum"/>
         /// without re-seeking the term dictionary.
         /// <para/>
-        /// NOTE: A seek by <see cref="GetTermState"/> might not capture the
+        /// NOTE: A seek by <see cref="GetTermState()"/> might not capture the
         /// <see cref="AttributeSource"/>'s state. Callers must maintain the
         /// <see cref="AttributeSource"/> states separately
         /// </summary>
@@ -272,7 +272,7 @@ namespace Lucene.Net.Index
         /// in <see cref="Lucene.Net.Search.MultiTermQuery"/>
         /// <para/><em>Please note:</em> this enum should be unmodifiable,
         /// but it is currently possible to add Attributes to it.
-        /// this should not be a problem, as the enum is always empty and
+        /// This should not be a problem, as the enum is always empty and
         /// the existence of unused Attributes does not matter.
         /// </summary>
         public static readonly TermsEnum EMPTY = new TermsEnumAnonymousInnerClassHelper();
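
A sketch of the seek-then-iterate pattern described above (assuming the
Lucene.Net 4.8 API; "termsEnum" is an enum obtained from Terms.GetIterator(null)):

    using System;
    using Lucene.Net.Index;
    using Lucene.Net.Search;
    using Lucene.Net.Util;

    if (termsEnum.SeekExact(new BytesRef("lucene")))
    {
        // No live-docs filter and no enum to reuse; freqs are requested by default.
        DocsEnum docs = termsEnum.Docs(null, null);
        int docId;
        while ((docId = docs.NextDoc()) != DocIdSetIterator.NO_MORE_DOCS)
        {
            Console.WriteLine("doc=" + docId + " freq=" + docs.Freq);
        }
    }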

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/TermsHash.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/TermsHash.cs b/src/Lucene.Net/Index/TermsHash.cs
index 77c2d6e..d0a856c 100644
--- a/src/Lucene.Net/Index/TermsHash.cs
+++ b/src/Lucene.Net/Index/TermsHash.cs
@@ -26,13 +26,13 @@ namespace Lucene.Net.Index
     using Int32BlockPool = Lucene.Net.Util.Int32BlockPool;
 
     /// <summary>
-    /// this class implements <seealso cref="InvertedDocConsumer"/>, which
-    ///  is passed each token produced by the analyzer on each
-    ///  field.  It stores these tokens in a hash table, and
-    ///  allocates separate byte streams per token.  Consumers of
-    ///  this class, eg <seealso cref="FreqProxTermsWriter"/> and {@link
-    ///  TermVectorsConsumer}, write their own byte streams
-    ///  under each term.
+    /// This class implements <see cref="InvertedDocConsumer"/>, which
+    /// is passed each token produced by the analyzer on each
+    /// field.  It stores these tokens in a hash table, and
+    /// allocates separate byte streams per token.  Consumers of
+    /// this class, eg <see cref="FreqProxTermsWriter"/> and 
+    /// <see cref="TermVectorsConsumer"/>, write their own byte streams
+    /// under each term.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/TermsHashConsumerPerField.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/TermsHashConsumerPerField.cs b/src/Lucene.Net/Index/TermsHashConsumerPerField.cs
index 3888c01..f5972df 100644
--- a/src/Lucene.Net/Index/TermsHashConsumerPerField.cs
+++ b/src/Lucene.Net/Index/TermsHashConsumerPerField.cs
@@ -21,10 +21,10 @@ namespace Lucene.Net.Index
      */
 
     /// <summary>
-    /// Implement this class to plug into the TermsHash
-    ///  processor, which inverts & stores Tokens into a hash
-    ///  table and provides an API for writing bytes into
-    ///  multiple streams for each unique Token.
+    /// Implement this class to plug into the <see cref="TermsHash"/>
+    /// processor, which inverts &amp; stores <see cref="Analysis.Token"/>s into a hash
+    /// table and provides an API for writing bytes into
+    /// multiple streams for each unique <see cref="Analysis.Token"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/TermsHashPerField.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/TermsHashPerField.cs b/src/Lucene.Net/Index/TermsHashPerField.cs
index 77aa688..1769e38 100644
--- a/src/Lucene.Net/Index/TermsHashPerField.cs
+++ b/src/Lucene.Net/Index/TermsHashPerField.cs
@@ -121,7 +121,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Collapse the hash table & sort in-place. </summary>
+        /// Collapse the hash table &amp; sort in-place. </summary>
         public int[] SortPostings(IComparer<BytesRef> termComp)
         {
             return bytesHash.Sort(termComp);
@@ -152,9 +152,11 @@ namespace Lucene.Net.Index
             return doCall || doNextCall;
         }
 
-        // Secondary entry point (for 2nd & subsequent TermsHash),
-        // because token text has already been "interned" into
-        // textStart, so we hash by textStart
+        /// <summary>
+        /// Secondary entry point (for 2nd &amp; subsequent <see cref="TermsHash"/>),
+        /// because token text has already been "interned" into
+        /// <paramref name="textStart"/>, so we hash by <paramref name="textStart"/>
+        /// </summary>
         public void Add(int textStart)
         {
             int termID = bytesHash.AddByPoolOffset(textStart);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/ThreadAffinityDocumentsWriterThreadPool.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/ThreadAffinityDocumentsWriterThreadPool.cs b/src/Lucene.Net/Index/ThreadAffinityDocumentsWriterThreadPool.cs
index 9d0d5e3..34b49ba 100644
--- a/src/Lucene.Net/Index/ThreadAffinityDocumentsWriterThreadPool.cs
+++ b/src/Lucene.Net/Index/ThreadAffinityDocumentsWriterThreadPool.cs
@@ -26,14 +26,14 @@ namespace Lucene.Net.Index
     using ThreadState = Lucene.Net.Index.DocumentsWriterPerThreadPool.ThreadState; //javadoc
 
     /// <summary>
-    /// A <seealso cref="DocumentsWriterPerThreadPool"/> implementation that tries to assign an
-    /// indexing thread to the same <seealso cref="ThreadState"/> each time the thread tries to
-    /// obtain a <seealso cref="ThreadState"/>. Once a new <seealso cref="ThreadState"/> is created it is
+    /// A <see cref="DocumentsWriterPerThreadPool"/> implementation that tries to assign an
+    /// indexing thread to the same <see cref="ThreadState"/> each time the thread tries to
+    /// obtain a <see cref="ThreadState"/>. Once a new <see cref="ThreadState"/> is created it is
     /// associated with the creating thread. Subsequently, if the thread's associated
-    /// <seealso cref="ThreadState"/> is not in use it will be associated with the requesting
-    /// thread. Otherwise, if the <seealso cref="ThreadState"/> is used by another thread
-    /// <seealso cref="ThreadAffinityDocumentsWriterThreadPool"/> tries to find the currently
-    /// minimal contended <seealso cref="ThreadState"/>.
+    /// <see cref="ThreadState"/> is not in use it will be associated with the requesting
+    /// thread. Otherwise, if the <see cref="ThreadState"/> is used by another thread
+    /// <see cref="ThreadAffinityDocumentsWriterThreadPool"/> tries to find the currently
+    /// minimal contended <see cref="ThreadState"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -43,7 +43,7 @@ namespace Lucene.Net.Index
         private IDictionary<Thread, ThreadState> threadBindings = new ConcurrentDictionary<Thread, ThreadState>();
 
         /// <summary>
-        /// Creates a new <seealso cref="ThreadAffinityDocumentsWriterThreadPool"/> with a given maximum of <seealso cref="ThreadState"/>s.
+        /// Creates a new <see cref="ThreadAffinityDocumentsWriterThreadPool"/> with a given maximum of <see cref="ThreadState"/>s.
         /// </summary>
         public ThreadAffinityDocumentsWriterThreadPool(int maxNumPerThreads)
             : base(maxNumPerThreads)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/TieredMergePolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/TieredMergePolicy.cs b/src/Lucene.Net/Index/TieredMergePolicy.cs
index f41f6c6..1fa5a72 100644
--- a/src/Lucene.Net/Index/TieredMergePolicy.cs
+++ b/src/Lucene.Net/Index/TieredMergePolicy.cs
@@ -57,7 +57,7 @@ namespace Lucene.Net.Index
     /// of the segments, always pro-rates by percent deletes,
     /// and does not apply any maximum segment size during
     /// forceMerge (unlike <see cref="LogByteSizeMergePolicy"/>).
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/TrackingIndexWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/TrackingIndexWriter.cs b/src/Lucene.Net/Index/TrackingIndexWriter.cs
index 5789685..8d02cf7 100644
--- a/src/Lucene.Net/Index/TrackingIndexWriter.cs
+++ b/src/Lucene.Net/Index/TrackingIndexWriter.cs
@@ -27,15 +27,15 @@ namespace Lucene.Net.Index
 
     /// <summary>
     /// Class that tracks changes to a delegated
-    ///  IndexWriter, used by {@link
-    ///  ControlledRealTimeReopenThread} to ensure specific
-    ///  changes are visible.   Create this class (passing your
-    ///  IndexWriter), and then pass this class to {@link
-    ///  ControlledRealTimeReopenThread}.
-    ///  Be sure to make all changes via the
-    ///  TrackingIndexWriter, otherwise {@link
-    ///  ControlledRealTimeReopenThread} won't know about the changes.
-    ///
+    /// <see cref="Index.IndexWriter"/>, used by 
+    /// <see cref="Search.ControlledRealTimeReopenThread{T}"/> to ensure specific
+    /// changes are visible.   Create this class (passing your
+    /// <see cref="Index.IndexWriter"/>), and then pass this class to
+    /// <see cref="Search.ControlledRealTimeReopenThread{T}"/>.
+    /// Be sure to make all changes via the
+    /// <see cref="TrackingIndexWriter"/>, otherwise
+    /// <see cref="Search.ControlledRealTimeReopenThread{T}"/> won't know about the changes.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -47,8 +47,8 @@ namespace Lucene.Net.Index
         private readonly AtomicInt64 indexingGen = new AtomicInt64(1);
 
         /// <summary>
-        /// Create a {@code TrackingIndexWriter} wrapping the
-        ///  provided <seealso cref="IndexWriter"/>.
+        /// Create a <see cref="TrackingIndexWriter"/> wrapping the
+        ///  provided <see cref="Index.IndexWriter"/>.
         /// </summary>
         public TrackingIndexWriter(IndexWriter writer)
         {
@@ -56,9 +56,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Calls {@link
-        ///  IndexWriter#updateDocument(Term,Iterable,Analyzer)}
-        ///  and returns the generation that reflects this change.
+        /// Calls 
+        /// <see cref="IndexWriter.UpdateDocument(Term, IEnumerable{IIndexableField}, Analyzer)"/>
+        /// and returns the generation that reflects this change.
         /// </summary>
         public virtual long UpdateDocument(Term t, IEnumerable<IIndexableField> d, Analyzer a)
         {
@@ -68,9 +68,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Calls {@link
-        ///  IndexWriter#updateDocument(Term,Iterable)} and
-        ///  returns the generation that reflects this change.
+        /// Calls 
+        /// <see cref="IndexWriter.UpdateDocument(Term, IEnumerable{IIndexableField})"/> and
+        /// returns the generation that reflects this change.
         /// </summary>
         public virtual long UpdateDocument(Term t, IEnumerable<IIndexableField> d)
         {
@@ -80,9 +80,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Calls {@link
-        ///  IndexWriter#updateDocuments(Term,Iterable,Analyzer)}
-        ///  and returns the generation that reflects this change.
+        /// Calls 
+        /// <see cref="IndexWriter.UpdateDocuments(Term, IEnumerable{IEnumerable{IIndexableField}}, Analyzer)"/>
+        /// and returns the generation that reflects this change.
         /// </summary>
         public virtual long UpdateDocuments(Term t, IEnumerable<IEnumerable<IIndexableField>> docs, Analyzer a)
         {
@@ -92,9 +92,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Calls {@link
-        ///  IndexWriter#updateDocuments(Term,Iterable)} and returns
-        ///  the generation that reflects this change.
+        /// Calls
+        /// <see cref="IndexWriter.UpdateDocuments(Term, IEnumerable{IEnumerable{IIndexableField}})"/> and returns
+        /// the generation that reflects this change.
         /// </summary>
         public virtual long UpdateDocuments(Term t, IEnumerable<IEnumerable<IIndexableField>> docs)
         {
@@ -104,8 +104,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Calls <seealso cref="IndexWriter#deleteDocuments(Term)"/> and
-        ///  returns the generation that reflects this change.
+        /// Calls <see cref="IndexWriter.DeleteDocuments(Term)"/> and
+        /// returns the generation that reflects this change.
         /// </summary>
         public virtual long DeleteDocuments(Term t)
         {
@@ -115,8 +115,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Calls <seealso cref="IndexWriter#deleteDocuments(Term...)"/> and
-        ///  returns the generation that reflects this change.
+        /// Calls <see cref="IndexWriter.DeleteDocuments(Term[])"/> and
+        /// returns the generation that reflects this change.
         /// </summary>
         public virtual long DeleteDocuments(params Term[] terms)
         {
@@ -126,8 +126,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Calls <seealso cref="IndexWriter#deleteDocuments(Query)"/> and
-        ///  returns the generation that reflects this change.
+        /// Calls <see cref="IndexWriter.DeleteDocuments(Query)"/> and
+        /// returns the generation that reflects this change.
         /// </summary>
         public virtual long DeleteDocuments(Query q)
         {
@@ -137,8 +137,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Calls <seealso cref="IndexWriter#deleteDocuments(Query...)"/>
-        ///  and returns the generation that reflects this change.
+        /// Calls <see cref="IndexWriter.DeleteDocuments(Query[])"/>
+        /// and returns the generation that reflects this change.
         /// </summary>
         public virtual long DeleteDocuments(params Query[] queries)
         {
@@ -148,8 +148,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Calls <seealso cref="IndexWriter#deleteAll"/> and returns the
-        ///  generation that reflects this change.
+        /// Calls <see cref="IndexWriter.DeleteAll()"/> and returns the
+        /// generation that reflects this change.
         /// </summary>
         public virtual long DeleteAll()
         {
@@ -159,9 +159,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Calls {@link
-        ///  IndexWriter#addDocument(Iterable,Analyzer)} and
-        ///  returns the generation that reflects this change.
+        /// Calls 
+        /// <see cref="IndexWriter.AddDocument(IEnumerable{IIndexableField}, Analyzer)"/> and
+        /// returns the generation that reflects this change.
         /// </summary>
         public virtual long AddDocument(IEnumerable<IIndexableField> d, Analyzer a)
         {
@@ -171,9 +171,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Calls {@link
-        ///  IndexWriter#addDocuments(Iterable,Analyzer)} and
-        ///  returns the generation that reflects this change.
+        /// Calls 
+        /// <see cref="IndexWriter.AddDocuments(IEnumerable{IEnumerable{IIndexableField}}, Analyzer)"/> and
+        /// returns the generation that reflects this change.
         /// </summary>
         public virtual long AddDocuments(IEnumerable<IEnumerable<IIndexableField>> docs, Analyzer a)
         {
@@ -183,8 +183,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Calls <seealso cref="IndexWriter#addDocument(Iterable)"/>
-        ///  and returns the generation that reflects this change.
+        /// Calls <see cref="IndexWriter.AddDocument(IEnumerable{IIndexableField})"/>
+        /// and returns the generation that reflects this change.
         /// </summary>
         public virtual long AddDocument(IEnumerable<IIndexableField> d)
         {
@@ -194,8 +194,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Calls <seealso cref="IndexWriter#addDocuments(Iterable)"/> and
-        ///  returns the generation that reflects this change.
+        /// Calls <see cref="IndexWriter.AddDocuments(IEnumerable{IEnumerable{IIndexableField}})"/> and
+        /// returns the generation that reflects this change.
         /// </summary>
         public virtual long AddDocuments(IEnumerable<IEnumerable<IIndexableField>> docs)
         {
@@ -205,8 +205,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Calls <seealso cref="IndexWriter#addIndexes(Directory...)"/> and
-        ///  returns the generation that reflects this change.
+        /// Calls <see cref="IndexWriter.AddIndexes(Directory[])"/> and
+        /// returns the generation that reflects this change.
         /// </summary>
         public virtual long AddIndexes(params Directory[] dirs)
         {
@@ -216,8 +216,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Calls <seealso cref="IndexWriter#addIndexes(IndexReader...)"/>
-        ///  and returns the generation that reflects this change.
+        /// Calls <see cref="IndexWriter.AddIndexes(IndexReader[])"/>
+        /// and returns the generation that reflects this change.
         /// </summary>
         public virtual long AddIndexes(params IndexReader[] readers)
         {
@@ -237,7 +237,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Return the wrapped <seealso cref="IndexWriter"/>. </summary>
+        /// Return the wrapped <see cref="Index.IndexWriter"/>. </summary>
         public virtual IndexWriter IndexWriter
         {
             get
@@ -248,7 +248,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Return and increment current gen.
-        ///
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public virtual long GetAndIncrementGeneration()
@@ -257,9 +257,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Cals {@link
-        ///  IndexWriter#tryDeleteDocument(IndexReader,int)} and
-        ///  returns the generation that reflects this change.
+        /// Calls
+        /// <see cref="IndexWriter.TryDeleteDocument(IndexReader, int)"/> and
+        /// returns the generation that reflects this change.
         /// </summary>
         public virtual long TryDeleteDocument(IndexReader reader, int docID)
         {
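
A sketch of the intended usage (assuming the Lucene.Net 4.8 API; "writer" and
"doc" are placeholders for an existing IndexWriter and document):

    using Lucene.Net.Index;
    using Lucene.Net.Search;

    var trackingWriter = new TrackingIndexWriter(writer);

    // Route every change through the tracking writer so it is tagged with a
    // generation that a ControlledRealTimeReopenThread<T> can later wait on
    // (e.g. reopenThread.WaitForGeneration(gen)) before searching.
    long gen = trackingWriter.AddDocument(doc);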

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/TwoPhaseCommit.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/TwoPhaseCommit.cs b/src/Lucene.Net/Index/TwoPhaseCommit.cs
index 0fef4a6..af896b5 100644
--- a/src/Lucene.Net/Index/TwoPhaseCommit.cs
+++ b/src/Lucene.Net/Index/TwoPhaseCommit.cs
@@ -19,9 +19,9 @@ namespace Lucene.Net.Index
 
     /// <summary>
     /// An interface for implementations that support 2-phase commit. You can use
-    /// <seealso cref="TwoPhaseCommitTool"/> to execute a 2-phase commit algorithm over several
-    /// <seealso cref="ITwoPhaseCommit"/>s.
-    ///
+    /// <see cref="TwoPhaseCommitTool"/> to execute a 2-phase commit algorithm over several
+    /// <see cref="ITwoPhaseCommit"/>s.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public interface ITwoPhaseCommit
@@ -29,14 +29,14 @@ namespace Lucene.Net.Index
         /// <summary>
         /// The first stage of a 2-phase commit. Implementations should do as much work
         /// as possible in this method, but avoid actual committing changes. If the
-        /// 2-phase commit fails, <seealso cref="#rollback()"/> is called to discard all changes
+        /// 2-phase commit fails, <see cref="Rollback()"/> is called to discard all changes
         /// since last successful commit.
         /// </summary>
         void PrepareCommit();
 
         /// <summary>
         /// The second phase of a 2-phase commit. Implementations should ideally do
-        /// very little work in this method (following <seealso cref="#prepareCommit()"/>, and
+        /// very little work in this method (following <see cref="PrepareCommit()"/>, and
         /// after it returns, the caller can assume that the changes were successfully
         /// committed to the underlying storage.
         /// </summary>
@@ -44,8 +44,8 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Discards any changes that have occurred since the last commit. In a 2-phase
-        /// commit algorithm, where one of the objects failed to <seealso cref="#commit()"/> or
-        /// <seealso cref="#prepareCommit()"/>, this method is used to roll all other objects
+        /// commit algorithm, where one of the objects failed to <see cref="Commit()"/> or
+        /// <see cref="PrepareCommit()"/>, this method is used to roll all other objects
         /// back to their previous state.
         /// </summary>
         void Rollback();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/TwoPhaseCommitTool.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/TwoPhaseCommitTool.cs b/src/Lucene.Net/Index/TwoPhaseCommitTool.cs
index 2b60ce6..06f0e76 100644
--- a/src/Lucene.Net/Index/TwoPhaseCommitTool.cs
+++ b/src/Lucene.Net/Index/TwoPhaseCommitTool.cs
@@ -24,9 +24,10 @@ namespace Lucene.Net.Index
 
     /// <summary>
     /// A utility for executing 2-phase commit on several objects.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// <seealso cref= ITwoPhaseCommit
-    /// @lucene.experimental </seealso>
+    /// <seealso cref="ITwoPhaseCommit"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -39,8 +40,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Thrown by <seealso cref="TwoPhaseCommitTool#execute(TwoPhaseCommit...)"/> when an
-        /// object fails to prepareCommit().
+        /// Thrown by <see cref="TwoPhaseCommitTool.Execute(ITwoPhaseCommit[])"/> when an
+        /// object fails to <see cref="ITwoPhaseCommit.PrepareCommit()"/>.
         /// </summary>
         // LUCENENET: All exception classes should be marked serializable
 #if FEATURE_SERIALIZABLE
@@ -76,8 +77,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Thrown by <seealso cref="TwoPhaseCommitTool#execute(TwoPhaseCommit...)"/> when an
-        /// object fails to commit().
+        /// Thrown by <see cref="TwoPhaseCommitTool.Execute(ITwoPhaseCommit[])"/> when an
+        /// object fails to <see cref="ITwoPhaseCommit.Commit()"/>.
         /// </summary>
         // LUCENENET: All exception classes should be marked serializable
 #if FEATURE_SERIALIZABLE
@@ -112,7 +113,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// rollback all objects, discarding any exceptions that occur. </summary>
+        /// Rollback all objects, discarding any exceptions that occur. </summary>
         private static void Rollback(params ITwoPhaseCommit[] objects)
         {
             foreach (ITwoPhaseCommit tpc in objects)
@@ -136,24 +137,24 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Executes a 2-phase commit algorithm by first
-        /// <seealso cref="ITwoPhaseCommit#prepareCommit()"/> all objects and only if all succeed,
-        /// it proceeds with <seealso cref="ITwoPhaseCommit#commit()"/>. If any of the objects
+        /// <see cref="ITwoPhaseCommit.PrepareCommit()"/> all objects and only if all succeed,
+        /// it proceeds with <see cref="ITwoPhaseCommit.Commit()"/>. If any of the objects
         /// fail on either the preparation or actual commit, it terminates and
-        /// <seealso cref="ITwoPhaseCommit#rollback()"/> all of them.
-        /// <p>
-        /// <b>NOTE:</b> it may happen that an object fails to commit, after few have
-        /// already successfully committed. this tool will still issue a rollback
+        /// <see cref="ITwoPhaseCommit.Rollback()"/> all of them.
+        /// <para/>
+        /// <b>NOTE:</b> It may happen that an object fails to commit, after few have
+        /// already successfully committed. This tool will still issue a rollback
         /// instruction on them as well, but depending on the implementation, it may
         /// not have any effect.
-        /// <p>
-        /// <b>NOTE:</b> if any of the objects are {@code null}, this method simply
+        /// <para/>
+        /// <b>NOTE:</b> If any of the objects are <c>null</c>, this method simply
         /// skips over them.
         /// </summary>
         /// <exception cref="PrepareCommitFailException">
         ///           if any of the objects fail to
-        ///           <seealso cref="ITwoPhaseCommit#prepareCommit()"/> </exception>
+        ///           <see cref="ITwoPhaseCommit.PrepareCommit()"/> </exception>
         /// <exception cref="CommitFailException">
-        ///           if any of the objects fail to <seealso cref="ITwoPhaseCommit#commit()"/> </exception>
+        ///           if any of the objects fail to <see cref="ITwoPhaseCommit.Commit()"/> </exception>
         public static void Execute(params ITwoPhaseCommit[] objects)
         {
             ITwoPhaseCommit tpc = null;
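
A sketch of committing two writers atomically with the tool (assuming the
Lucene.Net 4.8 API, where IndexWriter implements ITwoPhaseCommit):

    using Lucene.Net.Index;

    try
    {
        // Prepares both writers first; commits only if both preparations succeed.
        TwoPhaseCommitTool.Execute(writer1, writer2);
    }
    catch (TwoPhaseCommitTool.PrepareCommitFailException)
    {
        // A writer failed in PrepareCommit(); all objects were rolled back.
    }
    catch (TwoPhaseCommitTool.CommitFailException)
    {
        // A writer failed in Commit(); a rollback was still attempted on the rest.
    }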

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/TwoStoredFieldsConsumers.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/TwoStoredFieldsConsumers.cs b/src/Lucene.Net/Index/TwoStoredFieldsConsumers.cs
index 5f4e59c..7be6a56 100644
--- a/src/Lucene.Net/Index/TwoStoredFieldsConsumers.cs
+++ b/src/Lucene.Net/Index/TwoStoredFieldsConsumers.cs
@@ -20,7 +20,7 @@ namespace Lucene.Net.Index
      */
 
     /// <summary>
-    /// Just switches between two <seealso cref="DocFieldConsumer"/>s. </summary>
+    /// Just switches between two <see cref="DocFieldConsumer"/>s. </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/UpgradeIndexMergePolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/UpgradeIndexMergePolicy.cs b/src/Lucene.Net/Index/UpgradeIndexMergePolicy.cs
index d536b5e..7ec2912 100644
--- a/src/Lucene.Net/Index/UpgradeIndexMergePolicy.cs
+++ b/src/Lucene.Net/Index/UpgradeIndexMergePolicy.cs
@@ -23,39 +23,42 @@ namespace Lucene.Net.Index
     using Constants = Lucene.Net.Util.Constants;
 
     /// <summary>
-    /// this <seealso cref="MergePolicy"/> is used for upgrading all existing segments of
-    /// an index when calling <seealso cref="IndexWriter#forceMerge(int)"/>.
-    /// All other methods delegate to the base {@code MergePolicy} given to the constructor.
-    /// this allows for an as-cheap-as possible upgrade of an older index by only upgrading segments that
-    /// are created by previous Lucene versions. forceMerge does no longer really merge;
-    /// it is just used to &quot;forceMerge&quot; older segment versions away.
-    /// <p>In general one would use <seealso cref="IndexUpgrader"/>, but for a fully customizeable upgrade,
-    /// you can use this like any other {@code MergePolicy} and call <seealso cref="IndexWriter#forceMerge(int)"/>:
-    /// <pre class="prettyprint lang-java">
-    ///  IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_XX, new KeywordAnalyzer());
-    ///  iwc.setMergePolicy(new UpgradeIndexMergePolicy(iwc.getMergePolicy()));
-    ///  IndexWriter w = new IndexWriter(dir, iwc);
-    ///  w.forceMerge(1);
-    ///  w.Dispose();
-    /// </pre>
-    /// <p><b>Warning:</b> this merge policy may reorder documents if the index was partially
-    /// upgraded before calling forceMerge (e.g., documents were added). If your application relies
+    /// This <see cref="MergePolicy"/> is used for upgrading all existing segments of
+    /// an index when calling <see cref="IndexWriter.ForceMerge(int)"/>.
+    /// All other methods delegate to the base <see cref="MergePolicy"/> given to the constructor.
+    /// This allows for an as-cheap-as-possible upgrade of an older index by only upgrading segments that
+    /// are created by previous Lucene versions. ForceMerge no longer really merges;
+    /// it is just used to &quot;ForceMerge&quot; older segment versions away.
+    /// <para/>In general one would use <see cref="IndexUpgrader"/>, but for a fully customizable upgrade,
+    /// you can use this like any other <see cref="MergePolicy"/> and call <see cref="IndexWriter.ForceMerge(int)"/>:
+    /// <code>
+    ///     IndexWriterConfig iwc = new IndexWriterConfig(LuceneVersion.LUCENE_XX, new KeywordAnalyzer());
+    ///     iwc.MergePolicy = new UpgradeIndexMergePolicy(iwc.MergePolicy);
+    ///     using (IndexWriter w = new IndexWriter(dir, iwc))
+    ///     {
+    ///         w.ForceMerge(1);
+    ///     }
+    /// </code>
+    /// <para/><b>Warning:</b> this merge policy may reorder documents if the index was partially
+    /// upgraded before calling <see cref="IndexWriter.ForceMerge(int)"/> (e.g., documents were added). If your application relies
     /// on &quot;monotonicity&quot; of doc IDs (which means that the order in which the documents
-    /// were added to the index is preserved), do a forceMerge(1) instead. Please note, the
-    /// delegate {@code MergePolicy} may also reorder documents.
-    /// @lucene.experimental </summary>
-    /// <seealso cref= IndexUpgrader </seealso>
+    /// were added to the index is preserved), do a <c>ForceMerge(1)</c> instead. Please note, the
+    /// delegate <see cref="MergePolicy"/> may also reorder documents.
+    /// <para/>
+    /// @lucene.experimental
+    /// </summary>
+    /// <seealso cref="IndexUpgrader"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
     public class UpgradeIndexMergePolicy : MergePolicy
     {
         /// <summary>
-        /// Wrapped <seealso cref="MergePolicy"/>. </summary>
+        /// Wrapped <see cref="MergePolicy"/>. </summary>
         protected readonly MergePolicy m_base;
 
         /// <summary>
-        /// Wrap the given <seealso cref="MergePolicy"/> and intercept forceMerge requests to
+        /// Wrap the given <see cref="MergePolicy"/> and intercept <see cref="IndexWriter.ForceMerge(int)"/> requests to
         /// only upgrade segments written with previous Lucene versions.
         /// </summary>
         public UpgradeIndexMergePolicy(MergePolicy @base)
@@ -64,8 +67,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns if the given segment should be upgraded. The default implementation
-        /// will return {@code !Constants.LUCENE_MAIN_VERSION.equals(si.getVersion())},
+        /// Returns <c>true</c> if the given segment should be upgraded. The default implementation
+        /// will return <c>!Constants.LUCENE_MAIN_VERSION.Equals(si.Version)</c>,
         /// so all segments created with a different version number than this Lucene version will
         /// get upgraded.
         /// </summary>


[6/9] lucenenet git commit: SWEEP: Lucene.Net.Index: Fixed up documentation comments for types starting with M-Z

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/ParallelAtomicReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/ParallelAtomicReader.cs b/src/Lucene.Net/Index/ParallelAtomicReader.cs
index 95ee25e..8de62a6 100644
--- a/src/Lucene.Net/Index/ParallelAtomicReader.cs
+++ b/src/Lucene.Net/Index/ParallelAtomicReader.cs
@@ -26,19 +26,19 @@ namespace Lucene.Net.Index
     using IBits = Lucene.Net.Util.IBits;
 
     /// <summary>
-    /// An <seealso cref="AtomicReader"/> which reads multiple, parallel indexes.  Each index
+    /// An <see cref="AtomicReader"/> which reads multiple, parallel indexes.  Each index
     /// added must have the same number of documents, but typically each contains
     /// different fields. Deletions are taken from the first reader.
     /// Each document contains the union of the fields of all documents
     /// with the same document number.  When searching, matches for a
     /// query term are from the first index added that has the field.
     ///
-    /// <p>this is useful, e.g., with collections that have large fields which
+    /// <para/>This is useful, e.g., with collections that have large fields which
     /// change rarely and small fields that change more frequently.  The smaller
     /// fields may be re-indexed in a new index and both indexes may be searched
     /// together.
     ///
-    /// <p><strong>Warning:</strong> It is up to you to make sure all indexes
+    /// <para/><strong>Warning:</strong> It is up to you to make sure all indexes
     /// are created and modified the same way. For example, if you add
     /// documents to one index, you need to add the same documents in the
     /// same order to the other indexes. <em>Failure to do so will result in
@@ -67,8 +67,8 @@ namespace Lucene.Net.Index
         private readonly IDictionary<string, AtomicReader> tvFieldToReader = new SortedDictionary<string, AtomicReader>(StringComparer.Ordinal);
 
         /// <summary>
-        /// Create a ParallelAtomicReader based on the provided
-        ///  readers; auto-closes the given readers on <seealso cref="#close()"/>.
+        /// Create a <see cref="ParallelAtomicReader"/> based on the provided
+        /// readers; auto-disposes the given <paramref name="readers"/> on <see cref="IndexReader.Dispose()"/>.
         /// </summary>
         public ParallelAtomicReader(params AtomicReader[] readers)
             : this(true, readers)
@@ -76,8 +76,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Create a ParallelAtomicReader based on the provided
-        ///  readers.
+        /// Create a <see cref="ParallelAtomicReader"/> based on the provided
+        /// <paramref name="readers"/>.
         /// </summary>
         public ParallelAtomicReader(bool closeSubReaders, params AtomicReader[] readers)
             : this(closeSubReaders, readers, readers)
@@ -85,9 +85,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Expert: create a ParallelAtomicReader based on the provided
-        ///  readers and storedFieldReaders; when a document is
-        ///  loaded, only storedFieldsReaders will be used.
+        /// Expert: create a <see cref="ParallelAtomicReader"/> based on the provided
+        /// <paramref name="readers"/> and <paramref name="storedFieldsReaders"/>; when a document is
+        /// loaded, only <paramref name="storedFieldsReaders"/> will be used.
         /// </summary>
         public ParallelAtomicReader(bool closeSubReaders, AtomicReader[] readers, AtomicReader[] storedFieldsReaders)
         {
@@ -229,11 +229,12 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// {@inheritDoc}
-        /// <p>
+        /// Get the <see cref="Index.FieldInfos"/> describing all fields in
+        /// this reader.
+        /// <para/>
         /// NOTE: the returned field numbers will likely not
         /// correspond to the actual field numbers in the underlying
-        /// readers, and codec metadata (<seealso cref="FieldInfo#getAttribute(String)"/>
+        /// readers, and codec metadata (<see cref="FieldInfo.GetAttribute(string)"/>
         /// will be unavailable.
         /// </summary>
         public override FieldInfos FieldInfos

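As a rough sketch of the pattern described in the summary above (bigDir and
smallDir are hypothetical directories holding two parallel indexes built with
identical document order):

    using Lucene.Net.Index;
    using Lucene.Net.Search;

    AtomicReader big = SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(bigDir));
    AtomicReader small = SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(smallDir));

    // auto-disposes both subreaders when the parallel reader is disposed
    using (var parallel = new ParallelAtomicReader(big, small))
    {
        var searcher = new IndexSearcher(parallel);
        // each document now exposes the union of the fields from both indexes
        var doc = searcher.Doc(0);
    }
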
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/ParallelCompositeReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/ParallelCompositeReader.cs b/src/Lucene.Net/Index/ParallelCompositeReader.cs
index c35642a..7d552e1 100644
--- a/src/Lucene.Net/Index/ParallelCompositeReader.cs
+++ b/src/Lucene.Net/Index/ParallelCompositeReader.cs
@@ -24,27 +24,27 @@ namespace Lucene.Net.Index
      */
 
     /// <summary>
-    /// An <seealso cref="CompositeReader"/> which reads multiple, parallel indexes.  Each index added
+    /// A <see cref="CompositeReader"/> which reads multiple, parallel indexes.  Each index added
     /// must have the same number of documents, and exactly the same hierarchical subreader structure,
     /// but typically each contains different fields. Deletions are taken from the first reader.
     /// Each document contains the union of the fields of all
     /// documents with the same document number.  When searching, matches for a
     /// query term are from the first index added that has the field.
     ///
-    /// <p>this is useful, e.g., with collections that have large fields which
+    /// <para/>This is useful, e.g., with collections that have large fields which
     /// change rarely and small fields that change more frequently.  The smaller
     /// fields may be re-indexed in a new index and both indexes may be searched
     /// together.
     ///
-    /// <p><strong>Warning:</strong> It is up to you to make sure all indexes
+    /// <para/><strong>Warning:</strong> It is up to you to make sure all indexes
     /// are created and modified the same way. For example, if you add
     /// documents to one index, you need to add the same documents in the
     /// same order to the other indexes. <em>Failure to do so will result in
     /// undefined behavior</em>.
-    /// A good strategy to create suitable indexes with <seealso cref="IndexWriter"/> is to use
-    /// <seealso cref="LogDocMergePolicy"/>, as this one does not reorder documents
-    /// during merging (like {@code TieredMergePolicy}) and triggers merges
-    /// by number of documents per segment. If you use different <seealso cref="MergePolicy"/>s
+    /// A good strategy to create suitable indexes with <see cref="IndexWriter"/> is to use
+    /// <see cref="LogDocMergePolicy"/>, as this one does not reorder documents
+    /// during merging (like <see cref="TieredMergePolicy"/>) and triggers merges
+    /// by number of documents per segment. If you use different <see cref="MergePolicy"/>s
     /// it might happen that the segment structure of your index is no longer predictable.
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -56,8 +56,8 @@ namespace Lucene.Net.Index
         private readonly ISet<IndexReader> completeReaderSet = new IdentityHashSet<IndexReader>();
 
         /// <summary>
-        /// Create a ParallelCompositeReader based on the provided
-        ///  readers; auto-closes the given readers on <seealso cref="#close()"/>.
+        /// Create a <see cref="ParallelCompositeReader"/> based on the provided
+        /// readers; auto-disposes the given <paramref name="readers"/> on <see cref="IndexReader.Dispose()"/>.
         /// </summary>
         public ParallelCompositeReader(params CompositeReader[] readers)
             : this(true, readers)
@@ -65,8 +65,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Create a ParallelCompositeReader based on the provided
-        ///  readers.
+        /// Create a <see cref="ParallelCompositeReader"/> based on the provided
+        /// <paramref name="readers"/>.
         /// </summary>
         public ParallelCompositeReader(bool closeSubReaders, params CompositeReader[] readers)
             : this(closeSubReaders, readers, readers)
@@ -74,9 +74,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Expert: create a ParallelCompositeReader based on the provided
-        ///  readers and storedFieldReaders; when a document is
-        ///  loaded, only storedFieldsReaders will be used.
+        /// Expert: create a <see cref="ParallelCompositeReader"/> based on the provided
+        /// <paramref name="readers"/> and <paramref name="storedFieldReaders"/>; when a document is
+        /// loaded, only <paramref name="storedFieldReaders"/> will be used.
         /// </summary>
         public ParallelCompositeReader(bool closeSubReaders, CompositeReader[] readers, CompositeReader[] storedFieldReaders)
             : base(PrepareSubReaders(readers, storedFieldReaders))

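A sketch of the indexing strategy the summary recommends (analyzer, dir1, and
dir2 are assumed names; each parallel index would be written with a config
like this so their segment structures stay identical):

    using Lucene.Net.Index;
    using Lucene.Net.Search;
    using Lucene.Net.Util;

    // LogDocMergePolicy does not reorder documents during merging and
    // triggers merges by document count, keeping segments predictable
    var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)
    {
        MergePolicy = new LogDocMergePolicy()
    };

    DirectoryReader r1 = DirectoryReader.Open(dir1);
    DirectoryReader r2 = DirectoryReader.Open(dir2);
    using (var parallel = new ParallelCompositeReader(r1, r2))
    {
        var searcher = new IndexSearcher(parallel);
        // matches for a query term come from the first index that has the field
    }
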
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/PersistentSnapshotDeletionPolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/PersistentSnapshotDeletionPolicy.cs b/src/Lucene.Net/Index/PersistentSnapshotDeletionPolicy.cs
index b90405a..5d14564 100644
--- a/src/Lucene.Net/Index/PersistentSnapshotDeletionPolicy.cs
+++ b/src/Lucene.Net/Index/PersistentSnapshotDeletionPolicy.cs
@@ -30,21 +30,21 @@ namespace Lucene.Net.Index
     using IOUtils = Lucene.Net.Util.IOUtils;
 
     /// <summary>
-    /// A <seealso cref="SnapshotDeletionPolicy"/> which adds a persistence layer so that
+    /// A <see cref="SnapshotDeletionPolicy"/> which adds a persistence layer so that
     /// snapshots can be maintained across the life of an application. The snapshots
-    /// are persisted in a <seealso cref="Directory"/> and are committed as soon as
-    /// <seealso cref="#snapshot()"/> or <seealso cref="#release(IndexCommit)"/> is called.
-    /// <p>
-    /// <b>NOTE:</b> Sharing <seealso cref="PersistentSnapshotDeletionPolicy"/>s that write to
-    /// the same directory across <seealso cref="IndexWriter"/>s will corrupt snapshots. You
-    /// should make sure every <seealso cref="IndexWriter"/> has its own
-    /// <seealso cref="PersistentSnapshotDeletionPolicy"/> and that they all write to a
-    /// different <seealso cref="Directory"/>.  It is OK to use the same
-    /// Directory that holds the index.
-    ///
-    /// <p> this class adds a <seealso cref="#release(long)"/> method to
-    /// release commits from a previous snapshot's <seealso cref="IndexCommit#getGeneration"/>.
+    /// are persisted in a <see cref="Directory"/> and are committed as soon as
+    /// <see cref="Snapshot()"/> or <see cref="Release(IndexCommit)"/> is called.
+    /// <para/>
+    /// <b>NOTE:</b> Sharing <see cref="PersistentSnapshotDeletionPolicy"/>s that write to
+    /// the same directory across <see cref="IndexWriter"/>s will corrupt snapshots. You
+    /// should make sure every <see cref="IndexWriter"/> has its own
+    /// <see cref="PersistentSnapshotDeletionPolicy"/> and that they all write to a
+    /// different <see cref="Directory"/>.  It is OK to use the same
+    /// <see cref="Directory"/> that holds the index.
     ///
+    /// <para/> This class adds a <see cref="Release(long)"/> method to
+    /// release commits from a previous snapshot's <see cref="IndexCommit.Generation"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -66,17 +66,17 @@ namespace Lucene.Net.Index
         private readonly Directory dir;
 
         /// <summary>
-        /// <seealso cref="PersistentSnapshotDeletionPolicy"/> wraps another
-        /// <seealso cref="IndexDeletionPolicy"/> to enable flexible
-        /// snapshotting, passing <seealso cref="OpenMode#CREATE_OR_APPEND"/>
+        /// <see cref="PersistentSnapshotDeletionPolicy"/> wraps another
+        /// <see cref="IndexDeletionPolicy"/> to enable flexible
+        /// snapshotting, passing <see cref="OpenMode.CREATE_OR_APPEND"/>
         /// by default.
         /// </summary>
         /// <param name="primary">
-        ///          the <seealso cref="IndexDeletionPolicy"/> that is used on non-snapshotted
+        ///          the <see cref="IndexDeletionPolicy"/> that is used on non-snapshotted
         ///          commits. Snapshotted commits, by definition, are not deleted until
-        ///          explicitly released via <seealso cref="#release"/>. </param>
+        ///          explicitly released via <see cref="Release(IndexCommit)"/>. </param>
         /// <param name="dir">
-        ///          the <seealso cref="Directory"/> which will be used to persist the snapshots
+        ///          the <see cref="Directory"/> which will be used to persist the snapshots
         ///          information. </param>
         public PersistentSnapshotDeletionPolicy(IndexDeletionPolicy primary, Directory dir)
             : this(primary, dir, OpenMode.CREATE_OR_APPEND)
@@ -84,15 +84,15 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// <seealso cref="PersistentSnapshotDeletionPolicy"/> wraps another
-        /// <seealso cref="IndexDeletionPolicy"/> to enable flexible snapshotting.
+        /// <see cref="PersistentSnapshotDeletionPolicy"/> wraps another
+        /// <see cref="IndexDeletionPolicy"/> to enable flexible snapshotting.
         /// </summary>
         /// <param name="primary">
-        ///          the <seealso cref="IndexDeletionPolicy"/> that is used on non-snapshotted
+        ///          the <see cref="IndexDeletionPolicy"/> that is used on non-snapshotted
         ///          commits. Snapshotted commits, by definition, are not deleted until
-        ///          explicitly released via <seealso cref="#release"/>. </param>
+        ///          explicitly released via <see cref="Release(IndexCommit)"/>. </param>
         /// <param name="dir">
-        ///          the <seealso cref="Directory"/> which will be used to persist the snapshots
+        ///          the <see cref="Directory"/> which will be used to persist the snapshots
         ///          information. </param>
         /// <param name="mode">
         ///          specifies whether a new index should be created, deleting all
@@ -120,7 +120,7 @@ namespace Lucene.Net.Index
         /// Snapshots the last commit. Once this method returns, the
         /// snapshot information is persisted in the directory.
         /// </summary>
-        /// <seealso cref= SnapshotDeletionPolicy#snapshot </seealso>
+        /// <seealso cref="SnapshotDeletionPolicy.Snapshot()"/>
         public override IndexCommit Snapshot()
         {
             lock (this)
@@ -156,7 +156,7 @@ namespace Lucene.Net.Index
         /// Deletes a snapshotted commit. Once this method returns, the snapshot
         /// information is persisted in the directory.
         /// </summary>
-        /// <seealso cref= SnapshotDeletionPolicy#release </seealso>
+        /// <seealso cref="SnapshotDeletionPolicy.Release(IndexCommit)"/>
         public override void Release(IndexCommit commit)
         {
             lock (this)
@@ -191,8 +191,8 @@ namespace Lucene.Net.Index
         /// Deletes a snapshotted commit by generation. Once this method returns, the snapshot
         /// information is persisted in the directory.
         /// </summary>
-        /// <seealso cref= IndexCommit#getGeneration </seealso>
-        /// <seealso cref= SnapshotDeletionPolicy#release </seealso>
+        /// <seealso cref="IndexCommit.Generation"/>
+        /// <seealso cref="SnapshotDeletionPolicy.Release(IndexCommit)"/>
         public virtual void Release(long gen)
         {
             lock (this)
@@ -279,7 +279,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Returns the file name the snapshots are currently
-        ///  saved to, or null if no snapshots have been saved.
+        /// saved to, or <c>null</c> if no snapshots have been saved.
         /// </summary>
         public virtual string LastSaveFile
         {
@@ -297,7 +297,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Reads the snapshots information from the given <seealso cref="Directory"/>. this
+        /// Reads the snapshots information from the given <see cref="Directory"/>. This
         /// method can be used if the snapshots information is needed, however you
         /// cannot instantiate the deletion policy (because e.g., some other process
         /// keeps a lock on the snapshots directory).

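A minimal sketch of the snapshot workflow described above (indexDir,
snapshotDir, and analyzer are assumed; note the warning that every
IndexWriter needs its own policy and snapshot directory):

    using Lucene.Net.Index;
    using Lucene.Net.Util;

    var policy = new PersistentSnapshotDeletionPolicy(
        new KeepOnlyLastCommitDeletionPolicy(), snapshotDir, OpenMode.CREATE_OR_APPEND);

    var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)
    {
        IndexDeletionPolicy = policy
    };
    using (var writer = new IndexWriter(indexDir, config))
    {
        writer.Commit();
        IndexCommit snapshot = policy.Snapshot(); // persisted to snapshotDir immediately
        // ... back up the files listed by snapshot.FileNames ...
        policy.Release(snapshot);                 // also persisted immediately
        writer.DeleteUnusedFiles();
    }
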
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/PrefixCodedTerms.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/PrefixCodedTerms.cs b/src/Lucene.Net/Index/PrefixCodedTerms.cs
index 7403384..b45d573 100644
--- a/src/Lucene.Net/Index/PrefixCodedTerms.cs
+++ b/src/Lucene.Net/Index/PrefixCodedTerms.cs
@@ -32,6 +32,7 @@ namespace Lucene.Net.Index
 
     /// <summary>
     /// Prefix codes term instances (prefixes are shared)
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -136,7 +137,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Builds a PrefixCodedTerms: call add repeatedly, then finish. </summary>
+        /// Builds a <see cref="PrefixCodedTerms"/>: call <c>Add()</c> repeatedly, then <c>Finish()</c>. </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -186,8 +187,8 @@ namespace Lucene.Net.Index
                 }
             }
 
-            /// <summary>
-            /// return finalized form </summary>
+            /// <returns>
+            /// finalized form </returns>
             public virtual PrefixCodedTerms Finish()
             {
                 try

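A sketch of the add-then-finish protocol (assuming the nested Builder type is
accessible to the caller; terms must be added in sorted order):

    using Lucene.Net.Index;

    var builder = new PrefixCodedTerms.Builder();
    builder.Add(new Term("field", "apple"));
    builder.Add(new Term("field", "apply"));   // shares the "appl" prefix
    PrefixCodedTerms terms = builder.Finish(); // compact, prefix-shared form
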
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/RandomAccessOrds.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/RandomAccessOrds.cs b/src/Lucene.Net/Index/RandomAccessOrds.cs
index 5d777d4..f14a52d 100644
--- a/src/Lucene.Net/Index/RandomAccessOrds.cs
+++ b/src/Lucene.Net/Index/RandomAccessOrds.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Index
      */
 
     /// <summary>
-    /// Extension of <seealso cref="SortedSetDocValues"/> that supports random access
+    /// Extension of <see cref="SortedSetDocValues"/> that supports random access
     /// to the ordinals of a document.
-    /// <p>
-    /// Operations via this API are independent of the iterator api (<seealso cref="#nextOrd()"/>)
+    /// <para/>
+    /// Operations via this API are independent of the iterator api (<see cref="SortedSetDocValues.NextOrd()"/>)
     /// and do not impact its state.
-    /// <p>
+    /// <para/>
     /// Codecs can optionally extend this API if they support constant-time access
     /// to ordinals for the document.
     /// </summary>
@@ -44,10 +44,10 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Retrieve the ordinal for the current document (previously
-        /// set by <seealso cref="#setDocument(int)"/> at the specified index.
-        /// <p>
-        /// An index ranges from {@code 0} to {@code cardinality()-1}.
-        /// The first ordinal value is at index {@code 0}, the next at index {@code 1},
+        /// set by <see cref="SortedSetDocValues.SetDocument(int)"/>) at the specified index.
+        /// <para/>
+        /// An index ranges from <c>0</c> to <c>Cardinality()-1</c>.
+        /// The first ordinal value is at index <c>0</c>, the next at index <c>1</c>,
         /// and so on, as for array indexing. </summary>
         /// <param name="index"> index of the ordinal for the document. </param>
         /// <returns> ordinal for the document at the specified index. </returns>
@@ -55,7 +55,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Returns the cardinality for the current document (previously
-        /// set by <seealso cref="#setDocument(int)"/>.
+        /// set by <see cref="SortedSetDocValues.SetDocument(int)"/>).
         /// </summary>
         public abstract int Cardinality();
     }

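For illustration, how the random-access API complements the iterator API
(dv and docId are assumed, with dv a RandomAccessOrds obtained from a codec
that supports constant-time ordinal access):

    using Lucene.Net.Util;

    dv.SetDocument(docId);        // position on a document first
    int count = dv.Cardinality(); // number of ordinals for this document
    for (int i = 0; i < count; i++)
    {
        long ord = dv.OrdAt(i);   // random access; does not disturb NextOrd()
        var term = new BytesRef();
        dv.LookupOrd(ord, term);
        System.Console.WriteLine(term.Utf8ToString());
    }
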
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/ReaderManager.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/ReaderManager.cs b/src/Lucene.Net/Index/ReaderManager.cs
index 30ba2d5..186344e 100644
--- a/src/Lucene.Net/Index/ReaderManager.cs
+++ b/src/Lucene.Net/Index/ReaderManager.cs
@@ -23,42 +23,42 @@ namespace Lucene.Net.Index
     using IndexSearcher = Lucene.Net.Search.IndexSearcher;
 
     /// <summary>
-    /// Utility class to safely share <seealso cref="DirectoryReader"/> instances across
-    /// multiple threads, while periodically reopening. this class ensures each
-    /// reader is closed only once all threads have finished using it.
+    /// Utility class to safely share <see cref="DirectoryReader"/> instances across
+    /// multiple threads, while periodically reopening. This class ensures each
+    /// reader is disposed only once all threads have finished using it.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// <seealso cref= SearcherManager
-    ///
-    /// @lucene.experimental </seealso>
+    /// <seealso cref="SearcherManager"/>
     public sealed class ReaderManager : ReferenceManager<DirectoryReader>
     {
         /// <summary>
-        /// Creates and returns a new ReaderManager from the given
-        /// <seealso cref="IndexWriter"/>.
+        /// Creates and returns a new <see cref="ReaderManager"/> from the given
+        /// <see cref="IndexWriter"/>.
         /// </summary>
         /// <param name="writer">
-        ///          the IndexWriter to open the IndexReader from. </param>
+        ///          the <see cref="IndexWriter"/> to open the <see cref="IndexReader"/> from. </param>
         /// <param name="applyAllDeletes">
-        ///          If <code>true</code>, all buffered deletes will be applied (made
-        ///          visible) in the <seealso cref="IndexSearcher"/> / <seealso cref="DirectoryReader"/>.
-        ///          If <code>false</code>, the deletes may or may not be applied, but
-        ///          remain buffered (in IndexWriter) so that they will be applied in
+        ///          If <c>true</c>, all buffered deletes will be applied (made
+        ///          visible) in the <see cref="IndexSearcher"/> / <see cref="DirectoryReader"/>.
+        ///          If <c>false</c>, the deletes may or may not be applied, but
+        ///          remain buffered (in <see cref="IndexWriter"/>) so that they will be applied in
         ///          the future. Applying deletes can be costly, so if your app can
         ///          tolerate deleted documents being returned you might gain some
-        ///          performance by passing <code>false</code>. See
-        ///          <seealso cref="DirectoryReader#openIfChanged(DirectoryReader, IndexWriter, boolean)"/>.
+        ///          performance by passing <c>false</c>. See
+        ///          <see cref="DirectoryReader.OpenIfChanged(DirectoryReader, IndexWriter, bool)"/>.
         /// </param>
-        /// <exception cref="IOException"> If there is a low-level I/O error </exception>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error </exception>
         public ReaderManager(IndexWriter writer, bool applyAllDeletes)
         {
             Current = DirectoryReader.Open(writer, applyAllDeletes);
         }
 
         /// <summary>
-        /// Creates and returns a new ReaderManager from the given <seealso cref="Directory"/>. </summary>
-        /// <param name="dir"> the directory to open the DirectoryReader on.
+        /// Creates and returns a new <see cref="ReaderManager"/> from the given <see cref="Directory"/>. </summary>
+        /// <param name="dir"> the directory to open the <see cref="DirectoryReader"/> on.
         /// </param>
-        /// <exception cref="IOException"> If there is a low-level I/O error </exception>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error </exception>
         public ReaderManager(Directory dir)
         {
             Current = DirectoryReader.Open(dir);

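A sketch of the acquire/release discipline the manager enforces (writer is
assumed to be an open IndexWriter):

    using Lucene.Net.Index;
    using Lucene.Net.Search;

    var manager = new ReaderManager(writer, applyAllDeletes: true);

    // on any search thread:
    DirectoryReader reader = manager.Acquire();
    try
    {
        var searcher = new IndexSearcher(reader);
        // ... run queries against searcher ...
    }
    finally
    {
        manager.Release(reader); // disposed only after all threads release it
    }

    // periodically (e.g. after commits), swap in a fresh reader:
    manager.MaybeRefresh();
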
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/ReaderSlice.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/ReaderSlice.cs b/src/Lucene.Net/Index/ReaderSlice.cs
index e12cb92..494fbaf 100644
--- a/src/Lucene.Net/Index/ReaderSlice.cs
+++ b/src/Lucene.Net/Index/ReaderSlice.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Index
 
     /// <summary>
     /// Subreader slice from a parent composite reader.
-    ///
+    /// <para/>
     /// @lucene.internal
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -30,7 +30,7 @@ namespace Lucene.Net.Index
     public sealed class ReaderSlice
     {
         /// <summary>
-        /// Zero-length {@code ReaderSlice} array. </summary>
+        /// Zero-length <see cref="ReaderSlice"/> array. </summary>
         public static readonly ReaderSlice[] EMPTY_ARRAY = new ReaderSlice[0];
 
         /// <summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/ReaderUtil.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/ReaderUtil.cs b/src/Lucene.Net/Index/ReaderUtil.cs
index 065f60d..1ac8360 100644
--- a/src/Lucene.Net/Index/ReaderUtil.cs
+++ b/src/Lucene.Net/Index/ReaderUtil.cs
@@ -21,8 +21,8 @@ namespace Lucene.Net.Index
      */
 
     /// <summary>
-    /// Common util methods for dealing with <seealso cref="IndexReader"/>s and <seealso cref="IndexReaderContext"/>s.
-    ///
+    /// Common util methods for dealing with <see cref="IndexReader"/>s and <see cref="IndexReaderContext"/>s.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -48,7 +48,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns index of the searcher/reader for document <code>n</code> in the
+        /// Returns index of the searcher/reader for document <c>n</c> in the
         /// array used to construct this searcher/reader.
         /// </summary>
         public static int SubIndex(int n, int[] docStarts) // find
@@ -82,7 +82,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns index of the searcher/reader for document <code>n</code> in the
+        /// Returns index of the searcher/reader for document <c>n</c> in the
         /// array used to construct this searcher/reader.
         /// </summary>
         public static int SubIndex(int n, IList<AtomicReaderContext> leaves) // find

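For illustration, mapping a top-level doc id back to the leaf that holds it
(topReader and docId are assumed):

    using Lucene.Net.Index;

    var leaves = topReader.Leaves;                 // IList<AtomicReaderContext>
    int leaf = ReaderUtil.SubIndex(docId, leaves); // which leaf contains docId
    AtomicReaderContext context = leaves[leaf];
    int localDocId = docId - context.DocBase;      // doc id relative to that leaf
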
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/ReadersAndUpdates.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/ReadersAndUpdates.cs b/src/Lucene.Net/Index/ReadersAndUpdates.cs
index b3d520c..c17a4c0 100644
--- a/src/Lucene.Net/Index/ReadersAndUpdates.cs
+++ b/src/Lucene.Net/Index/ReadersAndUpdates.cs
@@ -39,9 +39,11 @@ namespace Lucene.Net.Index
     using NumericDocValuesField = NumericDocValuesField;
     using TrackingDirectoryWrapper = Lucene.Net.Store.TrackingDirectoryWrapper;
 
-    // Used by IndexWriter to hold open SegmentReaders (for
-    // searching or merging), plus pending deletes and updates,
-    // for a given segment
+    /// <summary>
+    /// Used by <see cref="IndexWriter"/> to hold open <see cref="SegmentReader"/>s (for
+    /// searching or merging), plus pending deletes and updates,
+    /// for a given segment
+    /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -159,7 +161,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns a <seealso cref="SegmentReader"/>. </summary>
+        /// Returns a <see cref="SegmentReader"/>. </summary>
         public virtual SegmentReader GetReader(IOContext context)
         {
             if (reader == null)
@@ -287,8 +289,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns a ref to a clone. NOTE: you should decRef() the reader when you're
-        /// dont (ie do not call close()).
+        /// Returns a ref to a clone. NOTE: you should <see cref="DecRef()"/> the reader when you're
+        /// done (i.e., do not call <see cref="IndexReader.Dispose()"/>).
         /// </summary>
         public virtual SegmentReader GetReadOnlyClone(IOContext context)
         {
@@ -735,178 +737,6 @@ namespace Lucene.Net.Index
             }
         }
 
-        /*
-	  private class IterableAnonymousInnerClassHelper : IEnumerable<Number>
-	  {
-		  private readonly ReadersAndUpdates OuterInstance;
-
-		  private Lucene.Net.Index.SegmentReader Reader;
-		  private string Field;
-		  private Lucene.Net.Index.NumericDocValuesFieldUpdates FieldUpdates;
-
-		  public IterableAnonymousInnerClassHelper(ReadersAndUpdates outerInstance, Lucene.Net.Index.SegmentReader reader, string field, Lucene.Net.Index.NumericDocValuesFieldUpdates fieldUpdates)
-		  {
-			  this.OuterInstance = outerInstance;
-			  this.Reader = reader;
-			  this.Field = field;
-			  this.FieldUpdates = fieldUpdates;
-			  currentValues = reader.GetNumericDocValues(field);
-			  docsWithField = reader.GetDocsWithField(field);
-			  maxDoc = reader.MaxDoc;
-			  updatesIter = fieldUpdates.Iterator();
-		  }
-
-		  internal readonly NumericDocValues currentValues;
-		  internal readonly Bits docsWithField;
-		  internal readonly int maxDoc;
-		  internal readonly NumericDocValuesFieldUpdates.Iterator updatesIter;
-		  public virtual IEnumerator<Number> GetEnumerator()
-		  {
-			updatesIter.Reset();
-			return new IteratorAnonymousInnerClassHelper(this);
-		  }
-
-		  private class IteratorAnonymousInnerClassHelper : IEnumerator<Number>
-		  {
-			  private readonly IterableAnonymousInnerClassHelper OuterInstance;
-
-			  public IteratorAnonymousInnerClassHelper(IterableAnonymousInnerClassHelper outerInstance)
-			  {
-                  this.OuterInstance = outerInstance;
-				  curDoc = -1;
-				  updateDoc = updatesIter.NextDoc();
-			  }
-
-			  internal int curDoc;
-			  internal int updateDoc;
-
-			  public virtual bool HasNext()
-			  {
-				return curDoc < maxDoc - 1;
-			  }
-
-			  public virtual Number Next()
-			  {
-				if (++curDoc >= maxDoc)
-				{
-				  throw new NoSuchElementException("no more documents to return values for");
-				}
-				if (curDoc == updateDoc) // this document has an updated value
-				{
-				  long? value = updatesIter.value(); // either null (unset value) or updated value
-				  updateDoc = updatesIter.nextDoc(); // prepare for next round
-				  return value;
-				}
-				else
-				{
-				  // no update for this document
-				  Debug.Assert(curDoc < updateDoc);
-				  if (currentValues != null && docsWithField.Get(curDoc))
-				  {
-					// only read the current value if the document had a value before
-					return currentValues.Get(curDoc);
-				  }
-				  else
-				  {
-					return null;
-				  }
-				}
-			  }
-
-			  public virtual void Remove()
-			  {
-				throw new System.NotSupportedException("this iterator does not support removing elements");
-			  }
-		  }
-	  }*/
-        /*
-	  private class IterableAnonymousInnerClassHelper2 : IEnumerable<BytesRef>
-	  {
-		  private readonly ReadersAndUpdates OuterInstance;
-
-		  private Lucene.Net.Index.SegmentReader Reader;
-		  private string Field;
-		  private Lucene.Net.Index.BinaryDocValuesFieldUpdates DvFieldUpdates;
-
-		  public IterableAnonymousInnerClassHelper2(ReadersAndUpdates outerInstance, Lucene.Net.Index.SegmentReader reader, string field, Lucene.Net.Index.BinaryDocValuesFieldUpdates dvFieldUpdates)
-		  {
-			  this.OuterInstance = outerInstance;
-			  this.Reader = reader;
-			  this.Field = field;
-			  this.DvFieldUpdates = dvFieldUpdates;
-			  currentValues = reader.GetBinaryDocValues(field);
-			  docsWithField = reader.GetDocsWithField(field);
-			  maxDoc = reader.MaxDoc;
-			  updatesIter = dvFieldUpdates.Iterator();
-		  }
-
-		  internal readonly BinaryDocValues currentValues;
-		  internal readonly Bits docsWithField;
-		  internal readonly int maxDoc;
-		  internal readonly BinaryDocValuesFieldUpdates.Iterator updatesIter;
-		  public virtual IEnumerator<BytesRef> GetEnumerator()
-		  {
-			updatesIter.Reset();
-			return new IteratorAnonymousInnerClassHelper2(this);
-		  }
-
-		  private class IteratorAnonymousInnerClassHelper2 : IEnumerator<BytesRef>
-		  {
-			  private readonly IterableAnonymousInnerClassHelper2 OuterInstance;
-
-			  public IteratorAnonymousInnerClassHelper2(IterableAnonymousInnerClassHelper2 outerInstance)
-			  {
-                  this.OuterInstance = outerInstance;
-				  curDoc = -1;
-				  updateDoc = updatesIter.nextDoc();
-				  scratch = new BytesRef();
-			  }
-
-			  internal int curDoc;
-			  internal int updateDoc;
-			  internal BytesRef scratch;
-
-			  public virtual bool HasNext()
-			  {
-				return curDoc < maxDoc - 1;
-			  }
-
-			  public virtual BytesRef Next()
-			  {
-				if (++curDoc >= maxDoc)
-				{
-				  throw new NoSuchElementException("no more documents to return values for");
-				}
-				if (curDoc == updateDoc) // this document has an updated value
-				{
-				  BytesRef value = updatesIter.value(); // either null (unset value) or updated value
-				  updateDoc = updatesIter.nextDoc(); // prepare for next round
-				  return value;
-				}
-				else
-				{
-				  // no update for this document
-				  Debug.Assert(curDoc < updateDoc);
-				  if (currentValues != null && docsWithField.get(curDoc))
-				  {
-					// only read the current value if the document had a value before
-					currentValues.get(curDoc, scratch);
-					return scratch;
-				  }
-				  else
-				  {
-					return null;
-				  }
-				}
-			  }
-
-			  public virtual void Remove()
-			  {
-				throw new System.NotSupportedException("this iterator does not support removing elements");
-			  }
-		  }
-	  }*/
-
         /// <summary>
         /// Returns a reader for merge. This method applies field updates if there are
         /// any and marks that this segment is currently merging.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SegmentCommitInfo.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SegmentCommitInfo.cs b/src/Lucene.Net/Index/SegmentCommitInfo.cs
index b74e52e..e788d38 100644
--- a/src/Lucene.Net/Index/SegmentCommitInfo.cs
+++ b/src/Lucene.Net/Index/SegmentCommitInfo.cs
@@ -24,10 +24,10 @@ namespace Lucene.Net.Index
     using Directory = Lucene.Net.Store.Directory;
 
     /// <summary>
-    /// Embeds a [read-only] SegmentInfo and adds per-commit
-    ///  fields.
-    ///
-    ///  @lucene.experimental
+    /// Embeds a [read-only] <see cref="SegmentInfo"/> and adds per-commit
+    /// fields.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -35,7 +35,7 @@ namespace Lucene.Net.Index
     public class SegmentCommitInfo
     {
         /// <summary>
-        /// The <seealso cref="SegmentInfo"/> that we wrap. </summary>
+        /// The <see cref="SegmentInfo"/> that we wrap. </summary>
         public SegmentInfo Info { get; private set; }
 
         // How many deleted docs in the segment:
@@ -65,13 +65,13 @@ namespace Lucene.Net.Index
         /// Sole constructor.
         /// </summary>
         /// <param name="info">
-        ///          <seealso cref="SegmentInfo"/> that we wrap </param>
+        ///          <see cref="SegmentInfo"/> that we wrap </param>
         /// <param name="delCount">
         ///          number of deleted documents in this segment </param>
         /// <param name="delGen">
         ///          deletion generation number (used to name deletion files) </param>
         /// <param name="fieldInfosGen">
-        ///          FieldInfos generation number (used to name field-infos files)
+        ///          <see cref="FieldInfos"/> generation number (used to name field-infos files)
         ///  </param>
         public SegmentCommitInfo(SegmentInfo info, int delCount, long delGen, long fieldInfosGen)
         {
@@ -127,8 +127,8 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Called if there was an exception while writing
-        ///  deletes, so that we don't try to write to the same
-        ///  file more than once.
+        /// deletes, so that we don't try to write to the same
+        /// file more than once.
         /// </summary>
         internal virtual void AdvanceNextWriteDelGen()
         {
@@ -136,7 +136,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Called when we succeed in writing a new FieldInfos generation. </summary>
+        /// Called when we succeed in writing a new <see cref="FieldInfos"/> generation. </summary>
         internal virtual void AdvanceFieldInfosGen()
         {
             fieldInfosGen = nextWriteFieldInfosGen;
@@ -146,7 +146,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Called if there was an exception while writing a new generation of
-        /// FieldInfos, so that we don't try to write to the same file more than once.
+        /// <see cref="FieldInfos"/>, so that we don't try to write to the same file more than once.
         /// </summary>
         internal virtual void AdvanceNextWriteFieldInfosGen()
         {
@@ -155,8 +155,8 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Returns total size in bytes of all files for this
-        ///  segment.
-        /// <p><b>NOTE:</b> this value is not correct for 3.0 segments
+        /// segment.
+        /// <para/><b>NOTE:</b> this value is not correct for 3.0 segments
         /// that have shared docstores. To get the correct value, upgrade!
         /// </summary>
         public virtual long GetSizeInBytes()
@@ -176,7 +176,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Returns all files in use by this segment. </summary>
-        public virtual ICollection<string> Files()
+        public virtual ICollection<string> Files() // LUCENENET TODO: API Rename GetFiles()
         {
             // Start from the wrapped info's files:
             ISet<string> files = new HashSet<string>(Info.GetFiles());
@@ -215,7 +215,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns true if there are any deletions for the
+        /// Returns <c>true</c> if there are any deletions for the
         /// segment at this commit.
         /// </summary>
         public virtual bool HasDeletions
@@ -224,14 +224,14 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns true if there are any field updates for the segment in this commit. </summary>
+        /// Returns <c>true</c> if there are any field updates for the segment in this commit. </summary>
         public virtual bool HasFieldUpdates
         {
             get { return fieldInfosGen != -1; }
         }
 
         /// <summary>
-        /// Returns the next available generation number of the FieldInfos files. </summary>
+        /// Returns the next available generation number of the <see cref="FieldInfos"/> files. </summary>
         public virtual long NextFieldInfosGen
         {
             get

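A sketch that surfaces the per-commit fields documented above (dir is assumed
to hold an existing index):

    using Lucene.Net.Index;

    var infos = new SegmentInfos();
    infos.Read(dir); // load the most recent segments_N
    for (int i = 0; i < infos.Count; i++)
    {
        SegmentCommitInfo info = infos.Info(i);
        System.Console.WriteLine("{0}: {1} docs, hasDeletions={2}, {3} bytes",
            info.Info.Name, info.Info.DocCount, info.HasDeletions, info.GetSizeInBytes());
    }
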
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SegmentCoreReaders.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SegmentCoreReaders.cs b/src/Lucene.Net/Index/SegmentCoreReaders.cs
index ef9e870..e9b03fb 100644
--- a/src/Lucene.Net/Index/SegmentCoreReaders.cs
+++ b/src/Lucene.Net/Index/SegmentCoreReaders.cs
@@ -39,7 +39,7 @@ namespace Lucene.Net.Index
 
     /// <summary>
     /// Holds core readers that are shared (unchanged) when
-    /// SegmentReader is cloned or reopened
+    /// <see cref="SegmentReader"/> is cloned or reopened
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SegmentDocValues.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SegmentDocValues.cs b/src/Lucene.Net/Index/SegmentDocValues.cs
index d7992a5..8d65b07 100644
--- a/src/Lucene.Net/Index/SegmentDocValues.cs
+++ b/src/Lucene.Net/Index/SegmentDocValues.cs
@@ -31,7 +31,7 @@ namespace Lucene.Net.Index
     using IOUtils = Lucene.Net.Util.IOUtils;
 
     /// <summary>
-    /// Manages the <seealso cref="DocValuesProducer"/> held by <seealso cref="SegmentReader"/> and
+    /// Manages the <see cref="DocValuesProducer"/> held by <see cref="SegmentReader"/> and
     /// keeps track of their reference counting.
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -82,7 +82,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns the <seealso cref="DocValuesProducer"/> for the given generation. </summary>
+        /// Returns the <see cref="DocValuesProducer"/> for the given generation. </summary>
         internal DocValuesProducer GetDocValuesProducer(long? gen, SegmentCommitInfo si, IOContext context, Directory dir, DocValuesFormat dvFormat, IList<FieldInfo> infos, int termsIndexDivisor)
         {
             lock (this)
@@ -103,7 +103,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Decrement the reference count of the given <seealso cref="DocValuesProducer"/>
+        /// Decrement the reference count of the given <see cref="DocValuesProducer"/>
         /// generations.
         /// </summary>
         internal void DecRef(IList<long?> dvProducersGens)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SegmentInfo.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SegmentInfo.cs b/src/Lucene.Net/Index/SegmentInfo.cs
index 2c342b6..1109130 100644
--- a/src/Lucene.Net/Index/SegmentInfo.cs
+++ b/src/Lucene.Net/Index/SegmentInfo.cs
@@ -31,7 +31,7 @@ namespace Lucene.Net.Index
     /// <summary>
     /// Information about a segment such as its name, directory, and files related
     /// to the segment.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -42,13 +42,13 @@ namespace Lucene.Net.Index
         // TODO: remove these from this class, for now this is the representation
         /// <summary>
         /// Used by some member fields to mean not present (e.g.,
-        ///  norms, deletions).
+        /// norms, deletions).
         /// </summary>
         public static readonly int NO = -1; // e.g. no norms; no deletes;
 
         /// <summary>
         /// Used by some member fields to mean present (e.g.,
-        ///  norms, deletions).
+        /// norms, deletions).
         /// </summary>
         public static readonly int YES = 1; // e.g. have norms; have deletes;
 
@@ -68,7 +68,6 @@ namespace Lucene.Net.Index
 
         private IDictionary<string, string> diagnostics;
 
-        /// @deprecated not used anymore
         [Obsolete("not used anymore")]
         private IDictionary<string, string> attributes;
 
@@ -79,6 +78,9 @@ namespace Lucene.Net.Index
         // see Constants.LUCENE_MAIN_VERSION.
         private string version;
 
+        /// <summary>
+        /// Gets or Sets diagnostics saved into the segment when it was written.
+        /// </summary>
         public IDictionary<string, string> Diagnostics
         {
             set
@@ -92,9 +94,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Construct a new complete SegmentInfo instance from input.
-        /// <p>Note: this is public only to allow access from
-        /// the codecs package.</p>
+        /// Construct a new complete <see cref="SegmentInfo"/> instance from input.
+        /// <para>Note: this is public only to allow access from
+        /// the codecs package.</para>
         /// </summary>
         public SegmentInfo(Directory dir, string version, string name, int docCount, bool isCompoundFile, Codec codec, IDictionary<string, string> diagnostics)
             : this(dir, version, name, docCount, isCompoundFile, codec, diagnostics, null)
@@ -102,9 +104,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Construct a new complete SegmentInfo instance from input.
-        /// <p>Note: this is public only to allow access from
-        /// the codecs package.</p>
+        /// Construct a new complete <see cref="SegmentInfo"/> instance from input.
+        /// <para>Note: this is public only to allow access from
+        /// the codecs package.</para>
         /// </summary>
         public SegmentInfo(Directory dir, string version, string name, int docCount, bool isCompoundFile, Codec codec, IDictionary<string, string> diagnostics, IDictionary<string, string> attributes)
         {
@@ -121,7 +123,6 @@ namespace Lucene.Net.Index
 #pragma warning restore 612, 618
         }
 
-        /// @deprecated separate norms are not supported in >= 4.0
         [Obsolete("separate norms are not supported in >= 4.0")]
         internal bool HasSeparateNorms
         {
@@ -129,10 +130,10 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Mark whether this segment is stored as a compound file.
+        /// Gets or Sets whether this segment is stored as a compound file.
+        /// <c>true</c> if this is a compound file;
+        /// else, <c>false</c>
         /// </summary>
-        /// <param name="isCompoundFile"> true if this is a compound file;
-        /// else, false </param>
         public bool UseCompoundFile
         {
             set
@@ -146,7 +147,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Can only be called once. </summary>
+        /// Gets or Sets the <see cref="Codecs.Codec"/> that wrote this segment.
+        /// Setter can only be called once. </summary>
         public Codec Codec
         {
             set
@@ -166,7 +168,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Returns number of documents in this segment (deletions
-        ///  are not taken into account).
+        /// are not taken into account).
         /// </summary>
         public int DocCount
         {
@@ -189,7 +191,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Return all files referenced by this SegmentInfo. </summary>
+        /// Return all files referenced by this <see cref="SegmentInfo"/>. </summary>
         public ISet<string> GetFiles()
         {
             if (setFiles == null)
@@ -207,13 +209,13 @@ namespace Lucene.Net.Index
         /// <summary>
         /// Used for debugging.  Format may suddenly change.
         ///
-        ///  <p>Current format looks like
-        ///  <code>_a(3.1):c45/4</code>, which means the segment's
-        ///  name is <code>_a</code>; it was created with Lucene 3.1 (or
-        ///  '?' if it's unknown); it's using compound file
-        ///  format (would be <code>C</code> if not compound); it
-        ///  has 45 documents; it has 4 deletions (this part is
-        ///  left off when there are no deletions).</p>
+        /// <para>Current format looks like
+        /// <c>_a(3.1):c45/4</c>, which means the segment's
+        /// name is <c>_a</c>; it was created with Lucene 3.1 (or
+        /// '?' if it's unknown); it's using compound file
+        /// format (would be <c>C</c> if not compound); it
+        /// has 45 documents; it has 4 deletions (this part is
+        /// left off when there are no deletions).</para>
         /// </summary>
         public string ToString(Directory dir, int delCount)
         {
@@ -239,8 +241,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// We consider another SegmentInfo instance equal if it
-        ///  has the same dir and same name.
+        /// We consider another <see cref="SegmentInfo"/> instance equal if it
+        /// has the same dir and same name.
         /// </summary>
         public override bool Equals(object obj)
         {
@@ -268,11 +270,11 @@ namespace Lucene.Net.Index
         /// Used by DefaultSegmentInfosReader to upgrade a 3.0 segment to record that its
         /// version is "3.0". This method can be removed when we're not required to
         /// support 3x indexes anymore, e.g. in 5.0.
-        /// <p>
+        /// <para/>
         /// <b>NOTE:</b> this method is used for internal purposes only - you should
-        /// not modify the version of a SegmentInfo, or it may result in unexpected
+        /// not modify the version of a <see cref="SegmentInfo"/>, or it may result in unexpected
         /// exceptions thrown when you attempt to open the index.
-        ///
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public string Version
@@ -299,22 +301,20 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Add these files to the set of files written for this
-        ///  segment.
+        /// segment.
         /// </summary>
         public void AddFiles(ICollection<string> files)
         {
             CheckFileNames(files);
-            //SetFiles.AddAll(files);
             setFiles.UnionWith(files);
         }
 
         /// <summary>
         /// Add this file to the set of files written for this
-        ///  segment.
+        /// segment.
         /// </summary>
         public void AddFile(string file)
         {
-            //CheckFileNames(Collections.Singleton(file));
             CheckFileNames(new[] { file });
             setFiles.Add(file);
         }
@@ -334,7 +334,6 @@ namespace Lucene.Net.Index
         /// <summary>
         /// Get a codec attribute value, or null if it does not exist
         /// </summary>
-        /// @deprecated no longer supported
         [Obsolete("no longer supported")]
         public string GetAttribute(string key)
         {
@@ -352,15 +351,14 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Puts a codec attribute value.
-        /// <p>
-        /// this is a key-value mapping for the field that the codec can use to store
+        /// <para/>
+        /// This is a key-value mapping for the field that the codec can use to store
         /// additional metadata, and will be available to the codec when reading the
-        /// segment via <seealso cref="#getAttribute(String)"/>
-        /// <p>
+        /// segment via <see cref="GetAttribute(string)"/>
+        /// <para/>
         /// If a value already exists for the field, it will be replaced with the new
         /// value.
         /// </summary>
-        /// @deprecated no longer supported
         [Obsolete("no longer supported")]
         public string PutAttribute(string key, string value)
         {
@@ -372,11 +370,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns the internal codec attributes map.
+        /// Returns the internal codec attributes map. May be <c>null</c> if no mappings exist.
         /// </summary>
-        /// <returns> internal codec attributes map. May be null if no mappings exist.
-        /// </returns>
-        /// @deprecated no longer supported
         [Obsolete("no longer supported")]
         public IDictionary<string, string> Attributes
         {

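To make the debug format above concrete: "_a(3.1):c45/4" decodes as segment
name "_a", written by Lucene 3.1, compound file format, 45 documents, 4
deletions. A sketch reading the same data through the public members (info is
an assumed SegmentInfo instance):

    using Lucene.Net.Index;

    System.Console.WriteLine("name={0} version={1} docs={2} compound={3}",
        info.Name, info.Version, info.DocCount, info.UseCompoundFile);
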
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SegmentInfos.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SegmentInfos.cs b/src/Lucene.Net/Index/SegmentInfos.cs
index 5ad2b31..4eb63bf 100644
--- a/src/Lucene.Net/Index/SegmentInfos.cs
+++ b/src/Lucene.Net/Index/SegmentInfos.cs
@@ -44,68 +44,68 @@ namespace Lucene.Net.Index
     /// <summary>
     /// A collection of segmentInfo objects with methods for operating on
     /// those segments in relation to the file system.
-    /// <p>
+    /// <para>
     /// The active segments in the index are stored in the segment info file,
-    /// <tt>segments_N</tt>. There may be one or more <tt>segments_N</tt> files in the
+    /// <c>segments_N</c>. There may be one or more <c>segments_N</c> files in the
     /// index; however, the one with the largest generation is the active one (when
     /// older segments_N files are present it's because they temporarily cannot be
     /// deleted, or, a writer is in the process of committing, or a custom
-    /// <seealso cref="Lucene.Net.Index.IndexDeletionPolicy IndexDeletionPolicy"/>
-    /// is in use). this file lists each segment by name and has details about the
+    /// <see cref="Lucene.Net.Index.IndexDeletionPolicy"/>
+    /// is in use). This file lists each segment by name and has details about the
     /// codec and generation of deletes.
-    /// </p>
-    /// <p>There is also a file <tt>segments.gen</tt>. this file contains
-    /// the current generation (the <tt>_N</tt> in <tt>segments_N</tt>) of the index.
-    /// this is used only as a fallback in case the current generation cannot be
+    /// </para>
+    /// <para>There is also a file <c>segments.gen</c>. This file contains
+    /// the current generation (the <c>_N</c> in <c>segments_N</c>) of the index.
+    /// This is used only as a fallback in case the current generation cannot be
     /// accurately determined by directory listing alone (as is the case for some NFS
-    /// clients with time-based directory cache expiration). this file simply contains
-    /// an <seealso cref="DataOutput#writeInt Int32"/> version header
-    /// (<seealso cref="#FORMAT_SEGMENTS_GEN_CURRENT"/>), followed by the
-    /// generation recorded as <seealso cref="DataOutput#writeLong Int64"/>, written twice.</p>
-    /// <p>
+    /// clients with time-based directory cache expiration). This file simply contains
+    /// an <see cref="Store.DataOutput.WriteInt32(int)"/> version header
+    /// (<see cref="FORMAT_SEGMENTS_GEN_CURRENT"/>), followed by the
+    /// generation recorded as <see cref="Store.DataOutput.WriteInt64(long)"/>, written twice.</para>
+    /// <para>
     /// Files:
-    /// <ul>
-    ///   <li><tt>segments.gen</tt>: GenHeader, Generation, Generation, Footer
-    ///   <li><tt>segments_N</tt>: Header, Version, NameCounter, SegCount,
+    /// <list type="bullet">
+    ///   <item><description><c>segments.gen</c>: GenHeader, Generation, Generation, Footer</description></item>
+    ///   <item><description><c>segments_N</c>: Header, Version, NameCounter, SegCount,
     ///    &lt;SegName, SegCodec, DelGen, DeletionCount, FieldInfosGen, UpdatesFiles&gt;<sup>SegCount</sup>,
-    ///    CommitUserData, Footer
-    /// </ul>
-    /// </p>
+    ///    CommitUserData, Footer</description></item>
+    /// </list>
+    /// </para>
     /// Data types:
-    /// <p>
-    /// <ul>
-    ///   <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///   <li>GenHeader, NameCounter, SegCount, DeletionCount --&gt; <seealso cref="DataOutput#writeInt Int32"/></li>
-    ///   <li>Generation, Version, DelGen, Checksum, FieldInfosGen --&gt; <seealso cref="DataOutput#writeLong Int64"/></li>
-    ///   <li>SegName, SegCodec --&gt; <seealso cref="DataOutput#writeString String"/></li>
-    ///   <li>CommitUserData --&gt; <seealso cref="DataOutput#writeStringStringMap Map&lt;String,String&gt;"/></li>
-    ///   <li>UpdatesFiles --&gt; <seealso cref="DataOutput#writeStringSet(Set) Set&lt;String&gt;"/></li>
-    ///   <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    /// </ul>
-    /// </p>
+    /// <para>
+    /// <list type="bullet">
+    ///   <item><description>Header --&gt; <see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/></description></item>
+    ///   <item><description>GenHeader, NameCounter, SegCount, DeletionCount --&gt; <see cref="Store.DataOutput.WriteInt32(int)"/></description></item>
+    ///   <item><description>Generation, Version, DelGen, Checksum, FieldInfosGen --&gt; <see cref="Store.DataOutput.WriteInt64(long)"/></description></item>
+    ///   <item><description>SegName, SegCodec --&gt; <see cref="Store.DataOutput.WriteString(string)"/></description></item>
+    ///   <item><description>CommitUserData --&gt; <see cref="Store.DataOutput.WriteStringStringMap(IDictionary{string, string})"/></description></item>
+    ///   <item><description>UpdatesFiles --&gt; <see cref="Store.DataOutput.WriteStringSet(ISet{string})"/></description></item>
+    ///   <item><description>Footer --&gt; <see cref="CodecUtil.WriteFooter(IndexOutput)"/></description></item>
+    /// </list>
+    /// </para>
     /// Field Descriptions:
-    /// <p>
-    /// <ul>
-    ///   <li>Version counts how often the index has been changed by adding or deleting
-    ///       documents.</li>
-    ///   <li>NameCounter is used to generate names for new segment files.</li>
-    ///   <li>SegName is the name of the segment, and is used as the file name prefix for
-    ///       all of the files that compose the segment's index.</li>
-    ///   <li>DelGen is the generation count of the deletes file. If this is -1,
+    /// <para>
+    /// <list type="bullet">
+    ///   <item><description>Version counts how often the index has been changed by adding or deleting
+    ///       documents.</description></item>
+    ///   <item><description>NameCounter is used to generate names for new segment files.</description></item>
+    ///   <item><description>SegName is the name of the segment, and is used as the file name prefix for
+    ///       all of the files that compose the segment's index.</description></item>
+    ///   <item><description>DelGen is the generation count of the deletes file. If this is -1,
     ///       there are no deletes. Anything above zero means there are deletes
-    ///       stored by <seealso cref="LiveDocsFormat"/>.</li>
-    ///   <li>DeletionCount records the number of deleted documents in this segment.</li>
-    ///   <li>SegCodec is the <seealso cref="Codec#getName() name"/> of the Codec that encoded
-    ///       this segment.</li>
-    ///   <li>CommitUserData stores an optional user-supplied opaque
-    ///       Map&lt;String,String&gt; that was passed to
-    ///       <seealso cref="IndexWriter#setCommitData(java.util.Map)"/>.</li>
-    ///   <li>FieldInfosGen is the generation count of the fieldInfos file. If this is -1,
+    ///       stored by <see cref="Codecs.LiveDocsFormat"/>.</description></item>
+    ///   <item><description>DeletionCount records the number of deleted documents in this segment.</description></item>
+    ///   <item><description>SegCodec is the <see cref="Codec.Name"/> of the <see cref="Codec"/> that encoded
+    ///       this segment.</description></item>
+    ///   <item><description>CommitUserData stores an optional user-supplied opaque
+    ///       <see cref="T:IDictionary{string, string}"/> that was passed to
+    ///       <see cref="IndexWriter.SetCommitData(IDictionary{string, string})"/>.</description></item>
+    ///   <item><description>FieldInfosGen is the generation count of the fieldInfos file. If this is -1,
     ///       there are no updates to the fieldInfos in that segment. Anything above zero
-    ///       means there are updates to fieldInfos stored by <seealso cref="FieldInfosFormat"/>.</li>
-    ///   <li>UpdatesFiles stores the list of files that were updated in that segment.</li>
-    /// </ul>
-    /// </p>
+    ///       means there are updates to fieldInfos stored by <see cref="Codecs.FieldInfosFormat"/>.</description></item>
+    ///   <item><description>UpdatesFiles stores the list of files that were updated in that segment.</description></item>
+    /// </list>
+    /// </para>
     ///
     /// @lucene.experimental
     /// </summary>
@@ -149,30 +149,31 @@ namespace Lucene.Net.Index
         // there was an IOException that had interrupted a commit
 
         /// <summary>
-        /// Opaque Map&lt;String, String&gt; that user can specify during IndexWriter.commit </summary>
+        /// Opaque <see cref="T:IDictionary{string, string}"/> that user can specify during <see cref="IndexWriter.Commit()"/> </summary>
         private IDictionary<string, string> userData = Collections.EmptyMap<string, string>();
 
         private List<SegmentCommitInfo> segments = new List<SegmentCommitInfo>();
 
         /// <summary>
-        /// If non-null, information about loading segments_N files </summary>
-        /// will be printed here.  <seealso cref= #setInfoStream. </seealso>
+        /// If non-null, information about loading segments_N files 
+        /// will be printed here.</summary> 
+        /// <seealso cref="InfoStream"/>
         private static TextWriter infoStream = null;
 
         /// <summary>
         /// Sole constructor. Typically you call this and then
-        ///  use {@link #read(Directory) or
-        ///  #read(Directory,String)} to populate each {@link
-        ///  SegmentCommitInfo}.  Alternatively, you can add/remove your
-        ///  own <seealso cref="SegmentCommitInfo"/>s.
+        /// use <see cref="Read(Directory)"/> or
+        /// <see cref="Read(Directory, string)"/> to populate each
+        /// <see cref="SegmentCommitInfo"/>.  Alternatively, you can add/remove your
+        /// own <see cref="SegmentCommitInfo"/>s.
         /// </summary>
         public SegmentInfos()
         {
         }
 
         /// <summary>
-        /// Returns <seealso cref="SegmentCommitInfo"/> at the provided
-        ///  index.
+        /// Returns <see cref="SegmentCommitInfo"/> at the provided
+        /// index.
         /// </summary>
         public SegmentCommitInfo Info(int i)
         {
@@ -183,7 +184,7 @@ namespace Lucene.Net.Index
         /// Get the generation of the most recent commit to the
         /// list of index files (N in the segments_N file).
         /// </summary>
-        /// <param name="files"> -- array of file names to check </param>
+        /// <param name="files"> array of file names to check </param>
         public static long GetLastCommitGeneration(string[] files)
         {
             if (files == null)
@@ -209,7 +210,7 @@ namespace Lucene.Net.Index
         /// Get the generation of the most recent commit to the
         /// index in this directory (N in the segments_N file).
         /// </summary>
-        /// <param name="directory"> -- directory to search for the latest segments_N file </param>
+        /// <param name="directory"> directory to search for the latest segments_N file </param>
         public static long GetLastCommitGeneration(Directory directory)
         {
             try
@@ -226,7 +227,7 @@ namespace Lucene.Net.Index
         /// Get the filename of the segments_N file for the most
         /// recent commit in the list of index files.
         /// </summary>
-        /// <param name="files"> -- array of file names to check </param>
+        /// <param name="files"> array of file names to check </param>
 
         public static string GetLastCommitSegmentsFileName(string[] files)
         {
@@ -237,7 +238,7 @@ namespace Lucene.Net.Index
         /// Get the filename of the segments_N file for the most
         /// recent commit to the index in this Directory.
         /// </summary>
-        /// <param name="directory"> -- directory to search for the latest segments_N file </param>
+        /// <param name="directory"> directory to search for the latest segments_N file </param>
         public static string GetLastCommitSegmentsFileName(Directory directory)
         {
             return IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", GetLastCommitGeneration(directory));
@@ -272,14 +273,13 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// A utility for writing the <seealso cref="IndexFileNames#SEGMENTS_GEN"/> file to a
-        /// <seealso cref="Directory"/>.
-        ///
-        /// <p>
+        /// A utility for writing the <see cref="IndexFileNames.SEGMENTS_GEN"/> file to a
+        /// <see cref="Directory"/>.
+        /// <para/>
         /// <b>NOTE:</b> this is an internal utility which is kept public so that it's
         /// accessible by code from other packages. You should avoid calling this
         /// method unless you're absolutely sure what you're doing!
-        ///
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public static void WriteSegmentsGen(Directory dir, long generation)
@@ -335,11 +335,11 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Read a particular segmentFileName.  Note that this may
-        /// throw an IOException if a commit is in process.
+        /// Read a particular <paramref name="segmentFileName"/>.  Note that this may
+        /// throw an <see cref="IOException"/> if a commit is in process.
         /// </summary>
-        /// <param name="directory"> -- directory containing the segments file </param>
-        /// <param name="segmentFileName"> -- segment file to load </param>
+        /// <param name="directory"> directory containing the segments file </param>
+        /// <param name="segmentFileName"> segment file to load </param>
         /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
         /// <exception cref="IOException"> if there is a low-level IO error </exception>
         public void Read(Directory directory, string segmentFileName)
@@ -457,8 +457,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Find the latest commit ({@code segments_N file}) and
-        ///  load all <seealso cref="SegmentCommitInfo"/>s.
+        /// Find the latest commit (<c>segments_N</c> file) and
+        /// load all <see cref="SegmentCommitInfo"/>s.
         /// </summary>
         public void Read(Directory directory)
         {
@@ -695,9 +695,8 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Returns a copy of this instance, also copying each
-        /// SegmentInfo.
+        /// <see cref="SegmentInfo"/>.
         /// </summary>
-
         public object Clone()
         {
             var sis = (SegmentInfos)base.MemberwiseClone();
@@ -721,7 +720,7 @@ namespace Lucene.Net.Index
 
 
         /// <summary>
-        /// Counts how often the index has been changed.
+        /// Version number when this <see cref="SegmentInfos"/> was generated.
         /// </summary>
         public long Version { get; internal set; }
 
@@ -765,19 +764,21 @@ namespace Lucene.Net.Index
             }
         }
 
-        /* Advanced configuration of retry logic in loading
-           segments_N file */
+        /// <summary>
+        /// Advanced configuration of retry logic in loading
+        /// segments_N file
+        /// </summary>
         private static int defaultGenLookaheadCount = 10;
 
         /// <summary>
         /// Gets or Sets the <see cref="defaultGenLookaheadCount"/>.
-        /// 
+        /// <para/>
         /// Advanced: set how many times to try incrementing the
         /// gen when loading the segments file.  this only runs if
         /// the primary (listing directory) and secondary (opening
         /// segments.gen file) methods fail to find the segments
         /// file.
-        ///
+        /// <para/>
         /// @lucene.experimental
         /// </summary>
         public static int DefaultGenLookaheadCount // LUCENENET specific: corrected spelling issue with the getter
@@ -793,8 +794,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Prints the given message to the infoStream. Note, this method does not
-        /// check for null infoStream. It assumes this check has been performed by the
+        /// Prints the given message to the <see cref="InfoStream"/>. Note, this method does not
+        /// check for <c>null</c> <see cref="InfoStream"/>. It assumes this check has been performed by the
         /// caller, which is recommended to avoid the (usually) expensive message
         /// creation.
         /// </summary>
@@ -805,7 +806,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Utility class for executing code that needs to do
-        /// something with the current segments file.  this is
+        /// something with the current segments file.  This is
         /// necessary with lock-less commits because from the time
         /// you locate the current segments file name, until you
         /// actually open it, read its contents, or check modified
@@ -824,8 +825,8 @@ namespace Lucene.Net.Index
             }
 
             /// <summary>
-            /// Locate the most recent {@code segments} file and
-            ///  run <seealso cref="#doBody"/> on it.
+            /// Locate the most recent <c>segments</c> file and
+            /// run <see cref="DoBody(string)"/> on it.
             /// </summary>
             public virtual object Run()
             {
@@ -833,7 +834,7 @@ namespace Lucene.Net.Index
             }
 
             /// <summary>
-            /// Run <seealso cref="#doBody"/> on the provided commit. </summary>
+            /// Run <see cref="DoBody(string)"/> on the provided commit. </summary>
             public virtual object Run(IndexCommit commit)
             {
                 if (commit != null)
@@ -1097,7 +1098,7 @@ namespace Lucene.Net.Index
 
             /// <summary>
             /// Subclass must implement this.  The assumption is an
-            /// IOException will be thrown if something goes wrong
+            /// <see cref="IOException"/> will be thrown if something goes wrong
             /// during the processing that could have been caused by
             /// a writer committing.
             /// </summary>
@@ -1130,16 +1131,15 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Call this to start a commit.  this writes the new
-        ///  segments file, but writes an invalid checksum at the
-        ///  end, so that it is not visible to readers.  Once this
-        ///  is called you must call <seealso cref="#finishCommit"/> to complete
-        ///  the commit or <seealso cref="#rollbackCommit"/> to abort it.
-        ///  <p>
-        ///  Note: <seealso cref="#changed()"/> should be called prior to this
-        ///  method if changes have been made to this <seealso cref="SegmentInfos"/> instance
-        ///  </p>
-        ///
+        /// Call this to start a commit.  This writes the new
+        /// segments file, but writes an invalid checksum at the
+        /// end, so that it is not visible to readers.  Once this
+        /// is called you must call <see cref="FinishCommit(Directory)"/> to complete
+        /// the commit or <see cref="RollbackCommit(Directory)"/> to abort it.
+        /// <para>
+        /// Note: <see cref="Changed()"/> should be called prior to this
+        /// method if changes have been made to this <see cref="SegmentInfos"/> instance
+        /// </para>
         /// </summary>
         internal void PrepareCommit(Directory dir)
         {
@@ -1151,13 +1151,13 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns all file names referenced by SegmentInfo
-        ///  instances matching the provided Directory (ie files
-        ///  associated with any "external" segments are skipped).
-        ///  The returned collection is recomputed on each
-        ///  invocation.
+        /// Returns all file names referenced by <see cref="SegmentInfo"/>
+        /// instances matching the provided <see cref="Directory"/> (ie files
+        /// associated with any "external" segments are skipped).
+        /// The returned collection is recomputed on each
+        /// invocation.
         /// </summary>
-        public ICollection<string> Files(Directory dir, bool includeSegmentsFile)
+        public ICollection<string> Files(Directory dir, bool includeSegmentsFile) // LUCENENET TODO: API Rename GetFiles()
         {
             var files = new HashSet<string>();
             if (includeSegmentsFile)
@@ -1261,13 +1261,12 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Writes & syncs to the Directory dir, taking care to
-        ///  remove the segments file on exception
-        ///  <p>
-        ///  Note: <seealso cref="#changed()"/> should be called prior to this
-        ///  method if changes have been made to this <seealso cref="SegmentInfos"/> instance
-        ///  </p>
-        ///
+        /// Writes &amp; syncs to the Directory dir, taking care to
+        /// remove the segments file on exception
+        /// <para>
+        /// Note: <see cref="Changed()"/> should be called prior to this
+        /// method if changes have been made to this <see cref="SegmentInfos"/> instance
+        /// </para>
         /// </summary>
         internal void Commit(Directory dir)
         {
@@ -1295,16 +1294,16 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Return {@code userData} saved with this commit.
+        /// Gets <see cref="userData"/> saved with this commit.
         /// </summary>
-        /// <seealso cref= IndexWriter#commit() </seealso>
+        /// <seealso cref="IndexWriter.Commit()"/>
         public IDictionary<string, string> UserData
         {
             get
             {
                 return userData;
             }
-            set
+            internal set
             {
                 if (value == null)
                 {
@@ -1319,8 +1318,8 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Replaces all segments in this instance, but keeps
-        ///  generation, version, counter so that future commits
-        ///  remain write once.
+        /// generation, version, counter so that future commits
+        /// remain write once.
         /// </summary>
         internal void Replace(SegmentInfos other)
         {
@@ -1330,7 +1329,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Returns sum of all segment's docCounts.  Note that
-        ///  this does not include deletions
+        /// this does not include deletions
         /// </summary>
         public int TotalDocCount
         {
@@ -1339,7 +1338,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Call this before committing if changes have been made to the
-        ///  segments.
+        /// segments.
         /// </summary>
         public void Changed()
         {
@@ -1347,7 +1346,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// applies all changes caused by committing a merge to this SegmentInfos </summary>
+        /// applies all changes caused by committing a merge to this <see cref="SegmentInfos"/> </summary>
         internal void ApplyMergeChanges(MergePolicy.OneMerge merge, bool dropSegment)
         {
             var mergedAway = new HashSet<SegmentCommitInfo>(merge.Segments);
@@ -1404,6 +1403,9 @@ namespace Lucene.Net.Index
             this.AddAll(infos);
         }
 
+        /// <summary>
+        /// Returns an <b>unmodifiable</b> <see cref="T:IEnumerator{SegmentCommitInfo}"/> of contained segments in order.
+        /// </summary>
         public IEnumerator<SegmentCommitInfo> GetEnumerator()
         {
             return AsList().GetEnumerator();
@@ -1415,14 +1417,15 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns all contained segments as an <b>unmodifiable</b> <seealso cref="List"/> view. </summary>
+        /// Returns all contained segments as an <b>unmodifiable</b> <see cref="T:IList{SegmentCommitInfo}"/> view. </summary>
         public IList<SegmentCommitInfo> AsList()
         {
             return Collections.UnmodifiableList<SegmentCommitInfo>(segments);
         }
 
         /// <summary>
-        /// Returns number of <seealso cref="SegmentCommitInfo"/>s. 
+        /// Returns number of <see cref="SegmentCommitInfo"/>s.
+        /// <para/>
         /// NOTE: This was size() in Lucene.
         /// </summary>
         public int Count
@@ -1431,14 +1434,14 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Appends the provided <seealso cref="SegmentCommitInfo"/>. </summary>
+        /// Appends the provided <see cref="SegmentCommitInfo"/>. </summary>
         public void Add(SegmentCommitInfo si)
         {
             segments.Add(si);
         }
 
         /// <summary>
-        /// Appends the provided <seealso cref="SegmentCommitInfo"/>s. </summary>
+        /// Appends the provided <see cref="SegmentCommitInfo"/>s. </summary>
         public void AddAll(IEnumerable<SegmentCommitInfo> sis)
         {
             foreach (var si in sis)
@@ -1448,16 +1451,16 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Clear all <seealso cref="SegmentCommitInfo"/>s. </summary>
+        /// Clear all <see cref="SegmentCommitInfo"/>s. </summary>
         public void Clear()
         {
             segments.Clear();
         }
 
         /// <summary>
-        /// Remove the provided <seealso cref="SegmentCommitInfo"/>.
+        /// Remove the provided <see cref="SegmentCommitInfo"/>.
         ///
-        /// <p><b>WARNING</b>: O(N) cost
+        /// <para/><b>WARNING</b>: O(N) cost
         /// </summary>
         public void Remove(SegmentCommitInfo si)
         {
@@ -1465,10 +1468,10 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Remove the <seealso cref="SegmentCommitInfo"/> at the
+        /// Remove the <see cref="SegmentCommitInfo"/> at the
         /// provided index.
         ///
-        /// <p><b>WARNING</b>: O(N) cost
+        /// <para/><b>WARNING</b>: O(N) cost
         /// </summary>
         internal void Remove(int index)
         {
@@ -1476,10 +1479,10 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Return true if the provided {@link
-        ///  SegmentCommitInfo} is contained.
+        /// Return true if the provided 
+        /// <see cref="SegmentCommitInfo"/> is contained.
         ///
-        /// <p><b>WARNING</b>: O(N) cost
+        /// <para/><b>WARNING</b>: O(N) cost
         /// </summary>
         internal bool Contains(SegmentCommitInfo si)
         {
@@ -1487,10 +1490,10 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns index of the provided {@link
-        ///  SegmentCommitInfo}.
+        /// Returns index of the provided
+        /// <see cref="SegmentCommitInfo"/>.
         ///
-        /// <p><b>WARNING</b>: O(N) cost
+        /// <para/><b>WARNING</b>: O(N) cost
         /// </summary>
         internal int IndexOf(SegmentCommitInfo si)
         {

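As a quick orientation for the APIs documented above, here is a minimal sketch
(not part of this commit) that opens an index and inspects its latest commit
via SegmentInfos. The path is a placeholder, and the index is assumed to
already contain at least one commit.

    using System;
    using Lucene.Net.Index;
    using Lucene.Net.Store;

    public static class SegmentInfosDemo
    {
        public static void Main()
        {
            var path = new System.IO.DirectoryInfo("path/to/index"); // placeholder
            using (Directory dir = FSDirectory.Open(path))
            {
                // N in the segments_N file of the most recent commit.
                Console.WriteLine("Last gen: " + SegmentInfos.GetLastCommitGeneration(dir));

                // Find the latest commit and load all SegmentCommitInfos.
                var infos = new SegmentInfos();
                infos.Read(dir);

                Console.WriteLine("Version " + infos.Version + ", " + infos.Count + " segments");
                foreach (SegmentCommitInfo si in infos)
                {
                    Console.WriteLine(si.Info.Name + ": " + si.Info.DocCount
                        + " docs, " + si.DelCount + " deleted");
                }
            }
        }
    }
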
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SegmentMerger.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SegmentMerger.cs b/src/Lucene.Net/Index/SegmentMerger.cs
index fab5dee..57e1fa7 100644
--- a/src/Lucene.Net/Index/SegmentMerger.cs
+++ b/src/Lucene.Net/Index/SegmentMerger.cs
@@ -36,11 +36,11 @@ namespace Lucene.Net.Index
     using TermVectorsWriter = Lucene.Net.Codecs.TermVectorsWriter;
 
     /// <summary>
-    /// The SegmentMerger class combines two or more Segments, represented by an
-    /// IndexReader, into a single Segment.  Call the merge method to combine the
+    /// The <see cref="SegmentMerger"/> class combines two or more Segments, represented by an
+    /// <see cref="IndexReader"/>, into a single Segment.  Call the merge method to combine the
     /// segments.
     /// </summary>
-    /// <seealso cref= #merge </seealso>
+    /// <seealso cref="Merge()"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -77,7 +77,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// True if any merging should happen </summary>
+        /// <c>True</c> if any merging should happen </summary>
         internal bool ShouldMerge
         {
             get { return mergeState.SegmentInfo.DocCount > 0; }
@@ -87,7 +87,7 @@ namespace Lucene.Net.Index
         /// Merges the readers into the directory passed to the constructor </summary>
         /// <returns> The number of documents that were merged </returns>
         /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
+        /// <exception cref="System.IO.IOException"> if there is a low-level IO error </exception>
         internal MergeState Merge()
         {
             if (!ShouldMerge)
@@ -381,7 +381,7 @@ namespace Lucene.Net.Index
         ///
         /// <returns> The number of documents in all of the readers </returns>
         /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
+        /// <exception cref="System.IO.IOException"> if there is a low-level IO error </exception>
         private int MergeFields()
         {
             StoredFieldsWriter fieldsWriter = codec.StoredFieldsFormat.FieldsWriter(directory, mergeState.SegmentInfo, context);
@@ -398,7 +398,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Merge the TermVectors from each of the segments into the new one. </summary>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
+        /// <exception cref="System.IO.IOException"> if there is a low-level IO error </exception>
         private int MergeVectors()
         {
             TermVectorsWriter termVectorsWriter = codec.TermVectorsFormat.VectorsWriter(directory, mergeState.SegmentInfo, context);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SegmentReadState.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SegmentReadState.cs b/src/Lucene.Net/Index/SegmentReadState.cs
index 3770649..e80e340 100644
--- a/src/Lucene.Net/Index/SegmentReadState.cs
+++ b/src/Lucene.Net/Index/SegmentReadState.cs
@@ -26,6 +26,7 @@ namespace Lucene.Net.Index
 
     /// <summary>
     /// Holder class for common parameters used during read.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -34,57 +35,57 @@ namespace Lucene.Net.Index
     public class SegmentReadState
     {
         /// <summary>
-        /// <seealso cref="Directory"/> where this segment is read from. </summary>
+        /// <see cref="Store.Directory"/> where this segment is read from. </summary>
         public Directory Directory { get; private set; }
 
         /// <summary>
-        /// <seealso cref="SegmentInfo"/> describing this segment. </summary>
+        /// <see cref="Index.SegmentInfo"/> describing this segment. </summary>
         public SegmentInfo SegmentInfo { get; private set; }
 
         /// <summary>
-        /// <seealso cref="FieldInfos"/> describing all fields in this
-        ///  segment.
+        /// <see cref="Index.FieldInfos"/> describing all fields in this
+        /// segment.
         /// </summary>
         public FieldInfos FieldInfos { get; private set; }
 
         /// <summary>
-        /// <seealso cref="IOContext"/> to pass to {@link
-        ///  Directory#openInput(String,IOContext)}.
+        /// <see cref="IOContext"/> to pass to 
+        /// <see cref="Directory.OpenInput(string, IOContext)"/>.
         /// </summary>
         public IOContext Context { get; private set; }
 
         /// <summary>
-        /// The {@code termInfosIndexDivisor} to use, if
-        ///  appropriate (not all <seealso cref="PostingsFormat"/>s support
-        ///  it; in particular the current default does not).
+        /// The <c>termInfosIndexDivisor</c> to use, if
+        /// appropriate (not all <see cref="PostingsFormat"/>s support
+        /// it; in particular the current default does not).
         ///
-        /// <p>  NOTE: if this is &lt; 0, that means "defer terms index
-        ///  load until needed".  But if the codec must load the
-        ///  terms index on init (preflex is the only once currently
-        ///  that must do so), then it should negate this value to
-        ///  get the app's terms divisor
+        /// <para/>  NOTE: if this is &lt; 0, that means "defer terms index
+        /// load until needed".  But if the codec must load the
+        /// terms index on init (preflex is the only one currently
+        /// that must do so), then it should negate this value to
+        /// get the app's terms divisor.
         /// </summary>
         public int TermsIndexDivisor { get; set; } 
 
         /// <summary>
         /// Unique suffix for any postings files read for this
-        ///  segment.  <seealso cref="PerFieldPostingsFormat"/> sets this for
-        ///  each of the postings formats it wraps.  If you create
-        ///  a new <seealso cref="PostingsFormat"/> then any files you
-        ///  write/read must be derived using this suffix (use
-        ///  <seealso cref="IndexFileNames#segmentFileName(String,String,String)"/>).
+        /// segment.  <see cref="PerFieldPostingsFormat"/> sets this for
+        /// each of the postings formats it wraps.  If you create
+        /// a new <see cref="PostingsFormat"/> then any files you
+        /// write/read must be derived using this suffix (use
+        /// <see cref="IndexFileNames.SegmentFileName(string, string, string)"/>).
         /// </summary>
         public string SegmentSuffix { get; private set; }
 
         /// <summary>
-        /// Create a {@code SegmentReadState}. </summary>
+        /// Create a <see cref="SegmentReadState"/>. </summary>
         public SegmentReadState(Directory dir, SegmentInfo info, FieldInfos fieldInfos, IOContext context, int termsIndexDivisor)
             : this(dir, info, fieldInfos, context, termsIndexDivisor, "")
         {
         }
 
         /// <summary>
-        /// Create a {@code SegmentReadState}. </summary>
+        /// Create a <see cref="SegmentReadState"/>. </summary>
         public SegmentReadState(Directory dir, SegmentInfo info, FieldInfos fieldInfos, IOContext context, int termsIndexDivisor, string segmentSuffix)
         {
             this.Directory = dir;
@@ -96,7 +97,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Create a {@code SegmentReadState}. </summary>
+        /// Create a <see cref="SegmentReadState"/>. </summary>
         public SegmentReadState(SegmentReadState other, string newSegmentSuffix)
         {
             this.Directory = other.Directory;

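The SegmentSuffix remarks above are easier to see in code. Below is a hedged
sketch (not from this commit) of a custom codec component deriving its input
file name from a SegmentReadState; the type name and the "pst" extension are
invented for illustration.

    using Lucene.Net.Index;
    using Lucene.Net.Store;

    internal static class PostingsFileOpener
    {
        public static IndexInput OpenPostingsInput(SegmentReadState state)
        {
            // Derive the name from the segment name and suffix, as the
            // SegmentSuffix documentation requires ("pst" is made up).
            string fileName = IndexFileNames.SegmentFileName(
                state.SegmentInfo.Name, state.SegmentSuffix, "pst");
            return state.Directory.OpenInput(fileName, state.Context);
        }
    }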

[5/9] lucenenet git commit: SWEEP: Lucene.Net.Index: Fixed up documentation comments for types starting with M-Z

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SegmentReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SegmentReader.cs b/src/Lucene.Net/Index/SegmentReader.cs
index a795209..c5baf10 100644
--- a/src/Lucene.Net/Index/SegmentReader.cs
+++ b/src/Lucene.Net/Index/SegmentReader.cs
@@ -36,10 +36,11 @@ namespace Lucene.Net.Index
     using TermVectorsReader = Lucene.Net.Codecs.TermVectorsReader;
 
     /// <summary>
-    /// IndexReader implementation over a single segment.
-    /// <p>
+    /// <see cref="IndexReader"/> implementation over a single segment.
+    /// <para/>
     /// Instances pointing to the same segment (but with different deletes, etc)
     /// may share the same core data.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -94,9 +95,9 @@ namespace Lucene.Net.Index
         private readonly IList<long?> dvGens = new List<long?>();
 
         /// <summary>
-        /// Constructs a new SegmentReader with a new core. </summary>
+        /// Constructs a new <see cref="SegmentReader"/> with a new core. </summary>
         /// <exception cref="CorruptIndexException"> if the index is corrupt </exception>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
+        /// <exception cref="System.IO.IOException"> if there is a low-level IO error </exception>
         // TODO: why is this public?
         public SegmentReader(SegmentCommitInfo si, int termInfosIndexDivisor, IOContext context)
         {
@@ -149,9 +150,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Create new SegmentReader sharing core from a previous
-        ///  SegmentReader and loading new live docs from a new
-        ///  deletes file.  Used by openIfChanged.
+        /// Create new <see cref="SegmentReader"/> sharing core from a previous
+        /// <see cref="SegmentReader"/> and loading new live docs from a new
+        /// deletes file. Used by <see cref="DirectoryReader.OpenIfChanged(DirectoryReader)"/>.
         /// </summary>
         internal SegmentReader(SegmentCommitInfo si, SegmentReader sr)
             : this(si, sr, si.Info.Codec.LiveDocsFormat.ReadLiveDocs(si.Info.Dir, si, IOContext.READ_ONCE), si.Info.DocCount - si.DelCount)
@@ -159,10 +160,10 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Create new SegmentReader sharing core from a previous
-        ///  SegmentReader and using the provided in-memory
-        ///  liveDocs.  Used by IndexWriter to provide a new NRT
-        ///  reader
+        /// Create new <see cref="SegmentReader"/> sharing core from a previous
+        /// <see cref="SegmentReader"/> and using the provided in-memory
+        /// liveDocs.  Used by <see cref="IndexWriter"/> to provide a new NRT
+        /// reader
         /// </summary>
         internal SegmentReader(SegmentCommitInfo si, SegmentReader sr, IBits liveDocs, int numDocs)
         {
@@ -231,8 +232,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Reads the most recent <seealso cref="FieldInfos"/> of the given segment info.
-        ///
+        /// Reads the most recent <see cref="Index.FieldInfos"/> of the given segment info.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         internal static FieldInfos ReadFieldInfos(SegmentCommitInfo info)
@@ -329,9 +330,10 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Expert: retrieve thread-private {@link
-        ///  StoredFieldsReader}
-        ///  @lucene.internal
+        /// Expert: retrieve thread-private 
+        /// <see cref="StoredFieldsReader"/>
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
         public StoredFieldsReader FieldsReader
         {
@@ -376,9 +378,10 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Expert: retrieve thread-private {@link
-        ///  TermVectorsReader}
-        ///  @lucene.internal
+        /// Expert: retrieve thread-private
+        /// <see cref="Codecs.TermVectorsReader"/>
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
         public TermVectorsReader TermVectorsReader
         {
@@ -427,7 +430,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Return the SegmentInfoPerCommit of the segment this reader is reading.
+        /// Return the <see cref="SegmentCommitInfo"/> of the segment this reader is reading.
         /// </summary>
         public SegmentCommitInfo SegmentInfo
         {
@@ -474,7 +477,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Returns term infos index divisor originally passed to
-        ///  <seealso cref="#SegmentReader(SegmentCommitInfo, int, IOContext)"/>.
+        /// <see cref="SegmentReader(SegmentCommitInfo, int, IOContext)"/>.
         /// </summary>
         public int TermInfosIndexDivisor
         {
@@ -661,29 +664,29 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Called when the shared core for this SegmentReader
-        /// is closed.
-        /// <p>
-        /// this listener is called only once all SegmentReaders
-        /// sharing the same core are closed.  At this point it
+        /// Called when the shared core for this <see cref="SegmentReader"/>
+        /// is disposed.
+        /// <para>
+        /// This listener is called only once all <see cref="SegmentReader"/>s
+        /// sharing the same core are disposed.  At this point it
         /// is safe for apps to evict this reader from any caches
-        /// keyed on <seealso cref="#getCoreCacheKey"/>.  this is the same
-        /// interface that <seealso cref="IFieldCache"/> uses, internally,
-        /// to evict entries.</p>
+        /// keyed on <see cref="CoreCacheKey"/>.  This is the same
+        /// interface that <see cref="Search.IFieldCache"/> uses, internally,
+        /// to evict entries.</para>
         ///
         /// @lucene.experimental
         /// </summary>
-        public interface ICoreClosedListener
+        public interface ICoreClosedListener // LUCENENET TODO: API Rename ICoreDisposedListener
         {
             /// <summary>
-            /// Invoked when the shared core of the original {@code
-            ///  SegmentReader} has closed.
+            /// Invoked when the shared core of the original 
+            /// <see cref="SegmentReader"/> has disposed.
             /// </summary>
-            void OnClose(object ownerCoreCacheKey);
+            void OnClose(object ownerCoreCacheKey); // LUCENENET TODO: API Rename OnDispose()
         }
 
         /// <summary>
-        /// Expert: adds a CoreClosedListener to this reader's shared core </summary>
+        /// Expert: adds a <see cref="ICoreClosedListener"/> to this reader's shared core </summary>
         public void AddCoreClosedListener(ICoreClosedListener listener)
         {
             EnsureOpen();
@@ -691,7 +694,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Expert: removes a CoreClosedListener from this reader's shared core </summary>
+        /// Expert: removes a <see cref="ICoreClosedListener"/> from this reader's shared core </summary>
         public void RemoveCoreClosedListener(ICoreClosedListener listener)
         {
             EnsureOpen();

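The ICoreClosedListener contract above lends itself to a short sketch (not
part of the commit): evicting entries from an application-level cache, keyed
on CoreCacheKey, once the shared core is disposed.

    using System.Collections.Concurrent;
    using Lucene.Net.Index;

    public class EvictingCacheListener : SegmentReader.ICoreClosedListener
    {
        private readonly ConcurrentDictionary<object, string> cache;

        public EvictingCacheListener(ConcurrentDictionary<object, string> cache)
        {
            this.cache = cache;
        }

        // Called once, when the last SegmentReader sharing the core is disposed.
        public void OnClose(object ownerCoreCacheKey)
        {
            string removed;
            cache.TryRemove(ownerCoreCacheKey, out removed);
        }
    }

A consumer would register the listener with segmentReader.AddCoreClosedListener(...)
and key its cache entries on segmentReader.CoreCacheKey.
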
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SegmentWriteState.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SegmentWriteState.cs b/src/Lucene.Net/Index/SegmentWriteState.cs
index 5a8ae5b..0d2f091 100644
--- a/src/Lucene.Net/Index/SegmentWriteState.cs
+++ b/src/Lucene.Net/Index/SegmentWriteState.cs
@@ -28,6 +28,7 @@ namespace Lucene.Net.Index
 
     /// <summary>
     /// Holder class for common parameters used during write.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -36,33 +37,33 @@ namespace Lucene.Net.Index
     public class SegmentWriteState
     {
         /// <summary>
-        /// <seealso cref="InfoStream"/> used for debugging messages. </summary>
+        /// <see cref="Util.InfoStream"/> used for debugging messages. </summary>
         public InfoStream InfoStream { get; private set; }
 
         /// <summary>
-        /// <seealso cref="Directory"/> where this segment will be written
-        ///  to.
+        /// <see cref="Store.Directory"/> where this segment will be written
+        /// to.
         /// </summary>
         public Directory Directory { get; private set; }
 
         /// <summary>
-        /// <seealso cref="SegmentInfo"/> describing this segment. </summary>
+        /// <see cref="Index.SegmentInfo"/> describing this segment. </summary>
         public SegmentInfo SegmentInfo { get; private set; }
 
         /// <summary>
-        /// <seealso cref="FieldInfos"/> describing all fields in this
-        ///  segment.
+        /// <see cref="Index.FieldInfos"/> describing all fields in this
+        /// segment.
         /// </summary>
         public FieldInfos FieldInfos { get; private set; }
 
         /// <summary>
         /// Number of deleted documents set while flushing the
-        ///  segment.
+        /// segment.
         /// </summary>
         public int DelCountOnFlush { get; set; }
 
         /// <summary>
-        /// Deletes and updates to apply while we are flushing the segment. A Term is
+        /// Deletes and updates to apply while we are flushing the segment. A <see cref="Term"/> is
         /// enrolled in here if it was deleted/updated at one point, and it's mapped to
         /// the docIDUpto, meaning any docID &lt; docIDUpto containing this term should
         /// be deleted/updated.
@@ -70,18 +71,18 @@ namespace Lucene.Net.Index
         public BufferedUpdates SegUpdates { get; private set; }
 
         /// <summary>
-        /// <seealso cref="IMutableBits"/> recording live documents; this is
-        ///  only set if there is one or more deleted documents.
+        /// <see cref="IMutableBits"/> recording live documents; this is
+        /// only set if there is at least one deleted document.
         /// </summary>
         public IMutableBits LiveDocs { get; set; }
 
         /// <summary>
         /// Unique suffix for any postings files written for this
-        ///  segment.  <seealso cref="PerFieldPostingsFormat"/> sets this for
-        ///  each of the postings formats it wraps.  If you create
-        ///  a new <seealso cref="PostingsFormat"/> then any files you
-        ///  write/read must be derived using this suffix (use
-        ///  <seealso cref="IndexFileNames#segmentFileName(String,String,String)"/>).
+        /// segment.  <see cref="PerFieldPostingsFormat"/> sets this for
+        /// each of the postings formats it wraps.  If you create
+        /// a new <see cref="PostingsFormat"/> then any files you
+        /// write/read must be derived using this suffix (use
+        /// <see cref="IndexFileNames.SegmentFileName(string,string,string)"/>).
         /// </summary>
         public string SegmentSuffix { get; private set; }
 
@@ -95,8 +96,8 @@ namespace Lucene.Net.Index
         public int TermIndexInterval { get; set; } // TODO: this should be private to the codec, not settable here or in IWC
 
         /// <summary>
-        /// <seealso cref="IOContext"/> for all writes; you should pass this
-        ///  to <seealso cref="Directory#createOutput(String,IOContext)"/>.
+        /// <see cref="IOContext"/> for all writes; you should pass this
+        /// to <see cref="Directory.CreateOutput(string, IOContext)"/>.
         /// </summary>
         public IOContext Context { get; private set; }
 
@@ -110,8 +111,8 @@ namespace Lucene.Net.Index
         /// <summary>
         /// Constructor which takes segment suffix.
         /// </summary>
-        /// <seealso cref= #SegmentWriteState(InfoStream, Directory, SegmentInfo, FieldInfos, int,
-        ///      BufferedUpdates, IOContext) </seealso>
+        /// <seealso cref="SegmentWriteState(InfoStream, Directory, SegmentInfo, FieldInfos, int,
+        ///      BufferedUpdates, IOContext)"/>
         public SegmentWriteState(InfoStream infoStream, Directory directory, SegmentInfo segmentInfo, FieldInfos fieldInfos, int termIndexInterval, BufferedUpdates segUpdates, IOContext context, string segmentSuffix)
         {
             this.InfoStream = infoStream;
@@ -125,7 +126,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Create a shallow copy of <seealso cref="SegmentWriteState"/> with a new segment suffix. </summary>
+        /// Create a shallow copy of <see cref="SegmentWriteState"/> with a new segment suffix. </summary>
         public SegmentWriteState(SegmentWriteState state, string segmentSuffix)
         {
             InfoStream = state.InfoStream;

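For symmetry with the SegmentReadState sketch earlier, here is the
hypothetical write-side counterpart; again the type name and "pst" extension
are invented.

    using Lucene.Net.Index;
    using Lucene.Net.Store;

    internal static class PostingsFileCreator
    {
        public static IndexOutput CreatePostingsOutput(SegmentWriteState state)
        {
            // Derive the name with SegmentFileName, then create the output
            // with the state's IOContext, as the docs above direct.
            string fileName = IndexFileNames.SegmentFileName(
                state.SegmentInfo.Name, state.SegmentSuffix, "pst");
            return state.Directory.CreateOutput(fileName, state.Context);
        }
    }
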
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SerialMergeScheduler.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SerialMergeScheduler.cs b/src/Lucene.Net/Index/SerialMergeScheduler.cs
index c150594..14dea0e 100644
--- a/src/Lucene.Net/Index/SerialMergeScheduler.cs
+++ b/src/Lucene.Net/Index/SerialMergeScheduler.cs
@@ -20,8 +20,8 @@ namespace Lucene.Net.Index
      */
 
     /// <summary>
-    /// A <seealso cref="MergeScheduler"/> that simply does each merge
-    ///  sequentially, using the current thread.
+    /// A <see cref="MergeScheduler"/> that simply does each merge
+    /// sequentially, using the current thread.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SimpleMergedSegmentWarmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SimpleMergedSegmentWarmer.cs b/src/Lucene.Net/Index/SimpleMergedSegmentWarmer.cs
index 1090df8..26989ce 100644
--- a/src/Lucene.Net/Index/SimpleMergedSegmentWarmer.cs
+++ b/src/Lucene.Net/Index/SimpleMergedSegmentWarmer.cs
@@ -35,8 +35,8 @@ namespace Lucene.Net.Index
         private readonly InfoStream infoStream;
 
         /// <summary>
-        /// Creates a new SimpleMergedSegmentWarmer </summary>
-        /// <param name="infoStream"> InfoStream to log statistics about warming. </param>
+        /// Creates a new <see cref="SimpleMergedSegmentWarmer"/> </summary>
+        /// <param name="infoStream"> <see cref="InfoStream"/> to log statistics about warming. </param>
         public SimpleMergedSegmentWarmer(InfoStream infoStream)
         {
             this.infoStream = infoStream;

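Both SerialMergeScheduler and SimpleMergedSegmentWarmer are installed through
IndexWriterConfig. A rough configuration sketch follows; the property-style
setters (MergeScheduler, MergedSegmentWarmer) and InfoStream.Default mirror
Lucene.Net conventions and should be treated as assumptions rather than a
definitive API reference.

    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Index;
    using Lucene.Net.Store;
    using Lucene.Net.Util;

    public static class WriterSetup
    {
        public static IndexWriter Create(Directory dir)
        {
            var analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48);
            var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)
            {
                // Run merges sequentially on the calling thread.
                MergeScheduler = new SerialMergeScheduler(),
                // Log statistics while warming newly merged segments.
                MergedSegmentWarmer = new SimpleMergedSegmentWarmer(InfoStream.Default)
            };
            return new IndexWriter(dir, config);
        }
    }
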
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SingleTermsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SingleTermsEnum.cs b/src/Lucene.Net/Index/SingleTermsEnum.cs
index 7f35e75..bb89193 100644
--- a/src/Lucene.Net/Index/SingleTermsEnum.cs
+++ b/src/Lucene.Net/Index/SingleTermsEnum.cs
@@ -23,11 +23,11 @@ namespace Lucene.Net.Index
     using BytesRef = Lucene.Net.Util.BytesRef;
 
     /// <summary>
-    /// Subclass of FilteredTermsEnum for enumerating a single term.
+    /// Subclass of <see cref="FilteredTermsEnum"/> for enumerating a single term.
     /// <para/>
     /// For example, this can be used by <see cref="Search.MultiTermQuery"/>s
     /// that need only visit one term, but want to preserve
-    /// MultiTermQuery semantics such as <see cref="Search.MultiTermQuery.MultiTermRewriteMethod"/>.
+    /// <see cref="Search.MultiTermQuery"/> semantics such as <see cref="Search.MultiTermQuery.MultiTermRewriteMethod"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -37,8 +37,8 @@ namespace Lucene.Net.Index
         private readonly BytesRef singleRef;
 
         /// <summary>
-        /// Creates a new <code>SingleTermsEnum</code>.
-        /// <p>
+        /// Creates a new <see cref="SingleTermsEnum"/>.
+        /// <para/>
         /// After calling the constructor the enumeration is already pointing to the term,
         /// if it exists.
         /// </summary>

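A small usage sketch (not from this commit) for SingleTermsEnum; the
Terms.GetIterator call reflects Lucene.Net's Terms API and is an assumption
here.

    using Lucene.Net.Index;
    using Lucene.Net.Util;

    public static class SingleTermDemo
    {
        // Enumerates exactly one term (if present) with TermsEnum semantics.
        public static TermsEnum ForTerm(AtomicReader reader, string field, string text)
        {
            Terms terms = reader.GetTerms(field);
            if (terms == null) return TermsEnum.EMPTY; // field absent
            return new SingleTermsEnum(terms.GetIterator(null), new BytesRef(text));
        }
    }
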
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SingletonSortedSetDocValues.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SingletonSortedSetDocValues.cs b/src/Lucene.Net/Index/SingletonSortedSetDocValues.cs
index cb829fb..22cc8c4 100644
--- a/src/Lucene.Net/Index/SingletonSortedSetDocValues.cs
+++ b/src/Lucene.Net/Index/SingletonSortedSetDocValues.cs
@@ -24,9 +24,9 @@ namespace Lucene.Net.Index
 
     /// <summary>
     /// Exposes multi-valued view over a single-valued instance.
-    /// <p>
-    /// this can be used if you want to have one multi-valued implementation
-    /// against e.g. FieldCache.getDocTermOrds that also works for single-valued
+    /// <para/>
+    /// This can be used if you want to have one multi-valued implementation
+    /// against e.g. <see cref="Search.IFieldCache.GetDocTermOrds(AtomicReader, string)"/> that also works for single-valued
     /// fields.
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -39,7 +39,7 @@ namespace Lucene.Net.Index
         private bool set;
 
         /// <summary>
-        /// Creates a multi-valued view over the provided SortedDocValues </summary>
+        /// Creates a multi-valued view over the provided <see cref="Index.SortedDocValues"/> </summary>
         public SingletonSortedSetDocValues(SortedDocValues @in)
         {
             this.@in = @in;
@@ -47,7 +47,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Return the wrapped <seealso cref="SortedDocValues"/> </summary>
+        /// Return the wrapped <see cref="Index.SortedDocValues"/> </summary>
         public SortedDocValues SortedDocValues
         {
             get

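A short sketch (not from the commit) of the multi-valued view described above,
wrapping a single-valued sorted field obtained from an AtomicReader.

    using Lucene.Net.Index;

    public static class SingletonView
    {
        // Exposes a single-valued sorted field through the multi-valued API.
        public static SortedSetDocValues AsSortedSet(AtomicReader reader, string field)
        {
            SortedDocValues single = reader.GetSortedDocValues(field);
            if (single == null) return null; // field has no sorted values
            return new SingletonSortedSetDocValues(single);
        }
    }
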
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SlowCompositeReaderWrapper.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SlowCompositeReaderWrapper.cs b/src/Lucene.Net/Index/SlowCompositeReaderWrapper.cs
index 8d80e63..7be60e5 100644
--- a/src/Lucene.Net/Index/SlowCompositeReaderWrapper.cs
+++ b/src/Lucene.Net/Index/SlowCompositeReaderWrapper.cs
@@ -27,17 +27,17 @@ namespace Lucene.Net.Index
     using OrdinalMap = Lucene.Net.Index.MultiDocValues.OrdinalMap;
 
     /// <summary>
-    /// this class forces a composite reader (eg a {@link
-    /// MultiReader} or <seealso cref="DirectoryReader"/>) to emulate an
-    /// atomic reader.  this requires implementing the postings
-    /// APIs on-the-fly, using the static methods in {@link
-    /// MultiFields}, <seealso cref="MultiDocValues"/>, by stepping through
+    /// This class forces a composite reader (eg a 
+    /// <see cref="MultiReader"/> or <see cref="DirectoryReader"/>) to emulate an
+    /// atomic reader.  This requires implementing the postings
+    /// APIs on-the-fly, using the static methods in 
+    /// <see cref="MultiFields"/>, <see cref="MultiDocValues"/>, by stepping through
     /// the sub-readers to merge fields/terms, appending docs, etc.
     ///
-    /// <p><b>NOTE</b>: this class almost always results in a
+    /// <para/><b>NOTE</b>: This class almost always results in a
     /// performance hit.  If this is important to your use case,
     /// you'll get better performance by gathering the sub readers using
-    /// <seealso cref="IndexReader#getContext()"/> to get the
+    /// <see cref="IndexReader.Context"/> to get the
     /// atomic leaves and then operate per-AtomicReader,
     /// instead of using this class.
     /// </summary>
@@ -51,8 +51,8 @@ namespace Lucene.Net.Index
         private readonly IBits liveDocs;
 
         /// <summary>
-        /// this method is sugar for getting an <seealso cref="AtomicReader"/> from
-        /// an <seealso cref="IndexReader"/> of any kind. If the reader is already atomic,
+        /// This method is sugar for getting an <see cref="AtomicReader"/> from
+        /// an <see cref="IndexReader"/> of any kind. If the reader is already atomic,
         /// it is returned unchanged, otherwise wrapped by this class.
         /// </summary>
         public static AtomicReader Wrap(IndexReader reader)

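Wrap(IndexReader) is the whole public surface here. A minimal usage sketch,
keeping the documented performance caveat in mind:

    using Lucene.Net.Index;
    using Lucene.Net.Store;

    public static class CompositeWrapDemo
    {
        public static void PrintMaxDoc(Directory dir)
        {
            using (DirectoryReader composite = DirectoryReader.Open(dir))
            {
                // Emulates an atomic reader over all sub-readers; convenient,
                // but almost always slower than iterating composite.Leaves.
                AtomicReader atomic = SlowCompositeReaderWrapper.Wrap(composite);
                System.Console.WriteLine(atomic.MaxDoc);
            }
        }
    }
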
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SnapshotDeletionPolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SnapshotDeletionPolicy.cs b/src/Lucene.Net/Index/SnapshotDeletionPolicy.cs
index 0cd7e80..8015092 100644
--- a/src/Lucene.Net/Index/SnapshotDeletionPolicy.cs
+++ b/src/Lucene.Net/Index/SnapshotDeletionPolicy.cs
@@ -24,20 +24,20 @@ namespace Lucene.Net.Index
     using Directory = Lucene.Net.Store.Directory;
 
     /// <summary>
-    /// An <seealso cref="IndexDeletionPolicy"/> that wraps any other
-    /// <seealso cref="IndexDeletionPolicy"/> and adds the ability to hold and later release
-    /// snapshots of an index. While a snapshot is held, the <seealso cref="IndexWriter"/> will
+    /// An <see cref="IndexDeletionPolicy"/> that wraps any other
+    /// <see cref="IndexDeletionPolicy"/> and adds the ability to hold and later release
+    /// snapshots of an index. While a snapshot is held, the <see cref="IndexWriter"/> will
     /// not remove any files associated with it even if the index is otherwise being
     /// actively, arbitrarily changed. Because we wrap another arbitrary
-    /// <seealso cref="IndexDeletionPolicy"/>, this gives you the freedom to continue using
-    /// whatever <seealso cref="IndexDeletionPolicy"/> you would normally want to use with your
+    /// <see cref="IndexDeletionPolicy"/>, this gives you the freedom to continue using
+    /// whatever <see cref="IndexDeletionPolicy"/> you would normally want to use with your
     /// index.
     ///
-    /// <p>
-    /// this class maintains all snapshots in-memory, and so the information is not
+    /// <para/>
+    /// This class maintains all snapshots in-memory, and so the information is not
     /// persisted and not protected against system failures. If persistence is
-    /// important, you can use <seealso cref="PersistentSnapshotDeletionPolicy"/>.
-    ///
+    /// important, you can use <see cref="PersistentSnapshotDeletionPolicy"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -47,20 +47,20 @@ namespace Lucene.Net.Index
     {
         /// <summary>
         /// Records how many snapshots are held against each
-        ///  commit generation
+        /// commit generation
         /// </summary>
         protected IDictionary<long, int> m_refCounts = new Dictionary<long, int>();
 
         /// <summary>
-        /// Used to map gen to IndexCommit. </summary>
+        /// Used to map gen to <see cref="IndexCommit"/>. </summary>
         protected IDictionary<long?, IndexCommit> m_indexCommits = new Dictionary<long?, IndexCommit>();
 
         /// <summary>
-        /// Wrapped <seealso cref="IndexDeletionPolicy"/> </summary>
+        /// Wrapped <see cref="IndexDeletionPolicy"/> </summary>
         private IndexDeletionPolicy primary;
 
         /// <summary>
-        /// Most recently committed <seealso cref="IndexCommit"/>. </summary>
+        /// Most recently committed <see cref="IndexCommit"/>. </summary>
         protected IndexCommit m_lastCommit;
 
         /// <summary>
@@ -68,8 +68,8 @@ namespace Lucene.Net.Index
         private bool initCalled;
 
         /// <summary>
-        /// Sole constructor, taking the incoming {@link
-        ///  IndexDeletionPolicy} to wrap.
+        /// Sole constructor, taking the incoming 
+        /// <see cref="IndexDeletionPolicy"/> to wrap.
         /// </summary>
         public SnapshotDeletionPolicy(IndexDeletionPolicy primary)
         {
@@ -109,7 +109,7 @@ namespace Lucene.Net.Index
         /// Release a snapshotted commit.
         /// </summary>
         /// <param name="commit">
-        ///          the commit previously returned by <seealso cref="#snapshot"/> </param>
+        ///          the commit previously returned by <see cref="Snapshot()"/> </param>
         public virtual void Release(IndexCommit commit)
         {
             lock (this)
@@ -147,7 +147,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Increments the refCount for this <seealso cref="IndexCommit"/>. </summary>
+        /// Increments the refCount for this <see cref="IndexCommit"/>. </summary>
         protected internal virtual void IncRef(IndexCommit ic)
         {
             lock (this)
@@ -170,20 +170,20 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Snapshots the last commit and returns it. Once a commit is 'snapshotted,' it is protected
-        /// from deletion (as long as this <seealso cref="IndexDeletionPolicy"/> is used). The
-        /// snapshot can be removed by calling <seealso cref="#release(IndexCommit)"/> followed
-        /// by a call to <seealso cref="IndexWriter#deleteUnusedFiles()"/>.
+        /// from deletion (as long as this <see cref="IndexDeletionPolicy"/> is used). The
+        /// snapshot can be removed by calling <see cref="Release(IndexCommit)"/> followed
+        /// by a call to <see cref="IndexWriter.DeleteUnusedFiles()"/>.
         ///
-        /// <p>
+        /// <para/>
         /// <b>NOTE:</b> while the snapshot is held, the files it references will not
         /// be deleted, which will consume additional disk space in your index. If you
         /// take a snapshot at a particularly bad time (say just before you call
-        /// forceMerge) then in the worst case this could consume an extra 1X of your
+        /// <see cref="IndexWriter.ForceMerge(int)"/>) then in the worst case this could consume an extra 1X of your
         /// total index size, until you release the snapshot.
         /// </summary>
-        /// <exception cref="IllegalStateException">
+        /// <exception cref="InvalidOperationException">
         ///           if this index does not have any commits yet </exception>
-        /// <returns> the <seealso cref="IndexCommit"/> that was snapshotted. </returns>
+        /// <returns> the <see cref="IndexCommit"/> that was snapshotted. </returns>
         public virtual IndexCommit Snapshot()
         {
             lock (this)
@@ -205,7 +205,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns all IndexCommits held by at least one snapshot. </summary>
+        /// Returns all <see cref="IndexCommit"/>s held by at least one snapshot. </summary>
         public virtual IList<IndexCommit> GetSnapshots()
         {
             lock (this)
@@ -234,9 +234,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Retrieve an <seealso cref="IndexCommit"/> from its generation;
-        ///  returns null if this IndexCommit is not currently
-        ///  snapshotted
+        /// Retrieve an <see cref="IndexCommit"/> from its generation;
+        /// returns <c>null</c> if this <see cref="IndexCommit"/> is not currently
+        /// snapshotted
         /// </summary>
         public virtual IndexCommit GetIndexCommit(long gen)
         {
@@ -260,8 +260,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Wraps each <seealso cref="IndexCommit"/> as a {@link
-        ///  SnapshotCommitPoint}.
+        /// Wraps each <see cref="IndexCommit"/> as a 
+        /// <see cref="SnapshotCommitPoint"/>.
         /// </summary>
         private IList<IndexCommit> WrapCommits<T>(IList<T> commits)
             where T : IndexCommit
@@ -275,20 +275,20 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Wraps a provided <seealso cref="IndexCommit"/> and prevents it
-        ///  from being deleted.
+        /// Wraps a provided <see cref="IndexCommit"/> and prevents it
+        /// from being deleted.
         /// </summary>
         private class SnapshotCommitPoint : IndexCommit
         {
             private readonly SnapshotDeletionPolicy outerInstance;
 
             /// <summary>
-            /// The <seealso cref="IndexCommit"/> we are preventing from deletion. </summary>
+            /// The <see cref="IndexCommit"/> we are preventing from deletion. </summary>
             protected IndexCommit m_cp;
 
             /// <summary>
-            /// Creates a {@code SnapshotCommitPoint} wrapping the provided
-            ///  <seealso cref="IndexCommit"/>.
+            /// Creates a <see cref="SnapshotCommitPoint"/> wrapping the provided
+            /// <see cref="IndexCommit"/>.
             /// </summary>
             protected internal SnapshotCommitPoint(SnapshotDeletionPolicy outerInstance, IndexCommit cp)
             {

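For illustration, a minimal sketch of the snapshot/release workflow described in the doc comments above, assuming an existing Store.Directory (`dir`) and Analyzer (`analyzer`); names and setup here are placeholders, not part of this commit:

    // Sketch: protect a commit during a backup, then allow cleanup again.
    var policy = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)
    {
        IndexDeletionPolicy = policy
    };
    using (var writer = new IndexWriter(dir, config))
    {
        IndexCommit commit = policy.Snapshot();  // files in this commit are now protected
        try
        {
            // e.g. copy the files listed in commit.FileNames to a backup location
        }
        finally
        {
            policy.Release(commit);        // stop protecting the commit...
            writer.DeleteUnusedFiles();    // ...and let the writer delete its files
        }
    }
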
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SortedDocValues.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SortedDocValues.cs b/src/Lucene.Net/Index/SortedDocValues.cs
index 0ed7556..99417d8 100644
--- a/src/Lucene.Net/Index/SortedDocValues.cs
+++ b/src/Lucene.Net/Index/SortedDocValues.cs
@@ -22,9 +22,9 @@ namespace Lucene.Net.Index
     using BytesRef = Lucene.Net.Util.BytesRef;
 
     /// <summary>
-    /// A per-document byte[] with presorted values.
-    /// <p>
-    /// Per-Document values in a SortedDocValues are deduplicated, dereferenced,
+    /// A per-document <see cref="T:byte[]"/> with presorted values.
+    /// <para/>
+    /// Per-Document values in a <see cref="SortedDocValues"/> are deduplicated, dereferenced,
     /// and sorted into a dictionary of unique values. A pointer to the
     /// dictionary value (ordinal) can be retrieved for each document. Ordinals
     /// are dense and in increasing sorted order.
@@ -52,14 +52,14 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Retrieves the value for the specified ordinal. </summary>
-        /// <param name="ord"> ordinal to lookup (must be &gt;= 0 and &lt <seealso cref="#getValueCount()"/>) </param>
+        /// <param name="ord"> ordinal to lookup (must be &gt;= 0 and &lt; <see cref="ValueCount"/>) </param>
         /// <param name="result"> will be populated with the ordinal's value </param>
-        /// <seealso cref= #getOrd(int)  </seealso>
+        /// <seealso cref="GetOrd(int)"/>
         public abstract void LookupOrd(int ord, BytesRef result);
 
         /// <summary>
         /// Returns the number of unique values. </summary>
-        /// <returns> number of unique values in this SortedDocValues. this is
+        /// <returns> Number of unique values in this <see cref="SortedDocValues"/>. This is
         ///         also equivalent to one plus the maximum ordinal. </returns>
         public abstract int ValueCount { get; }
 
@@ -79,12 +79,11 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// If {@code key} exists, returns its ordinal, else
-        ///  returns {@code -insertionPoint-1}, like {@code
-        ///  Arrays.binarySearch}.
+        /// If <paramref name="key"/> exists, returns its ordinal, else
+        /// returns <c>-insertionPoint-1</c>, like 
+        /// <see cref="Array.BinarySearch(Array, int, int, object)"/>
         /// </summary>
-        ///  <param name="key"> Key to look up
-        ///  </param>
+        /// <param name="key"> Key to look up</param>
         public virtual int LookupTerm(BytesRef key)
         {
             BytesRef spare = new BytesRef();
@@ -115,8 +114,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns a <seealso cref="TermsEnum"/> over the values.
-        /// The enum supports <seealso cref="TermsEnum#ord()"/> and <seealso cref="TermsEnum#seekExact(long)"/>.
+        /// Returns a <see cref="TermsEnum"/> over the values.
+        /// The enum supports <see cref="TermsEnum.Ord"/> and <see cref="TermsEnum.SeekExact(long)"/>.
         /// </summary>
         public virtual TermsEnum GetTermsEnum()
         {

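As a usage sketch of the LookupTerm contract above, the return value decodes exactly like Array.BinarySearch (`sdv` is an assumed SortedDocValues instance):

    var key = new BytesRef("quick");
    int ord = sdv.LookupTerm(key);
    if (ord >= 0)
    {
        var value = new BytesRef();
        sdv.LookupOrd(ord, value);        // value now holds the term bytes for this ordinal
    }
    else
    {
        int insertionPoint = -ord - 1;    // where the key would sort among the unique values
    }
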
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SortedDocValuesTermsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SortedDocValuesTermsEnum.cs b/src/Lucene.Net/Index/SortedDocValuesTermsEnum.cs
index d9ff7ef..8ea7576 100644
--- a/src/Lucene.Net/Index/SortedDocValuesTermsEnum.cs
+++ b/src/Lucene.Net/Index/SortedDocValuesTermsEnum.cs
@@ -25,8 +25,8 @@ namespace Lucene.Net.Index
     using BytesRef = Lucene.Net.Util.BytesRef;
 
     /// <summary>
-    /// Implements a <seealso cref="TermsEnum"/> wrapping a provided
-    /// <seealso cref="SortedDocValues"/>.
+    /// Implements a <see cref="TermsEnum"/> wrapping a provided
+    /// <see cref="SortedDocValues"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -38,7 +38,7 @@ namespace Lucene.Net.Index
         private readonly BytesRef term = new BytesRef();
 
         /// <summary>
-        /// Creates a new TermsEnum over the provided values </summary>
+        /// Creates a new <see cref="TermsEnum"/> over the provided values </summary>
         public SortedDocValuesTermsEnum(SortedDocValues values)
         {
             this.values = values;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SortedDocValuesWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SortedDocValuesWriter.cs b/src/Lucene.Net/Index/SortedDocValuesWriter.cs
index fd5ae82..b97c2c4 100644
--- a/src/Lucene.Net/Index/SortedDocValuesWriter.cs
+++ b/src/Lucene.Net/Index/SortedDocValuesWriter.cs
@@ -25,8 +25,8 @@ namespace Lucene.Net.Index
      */
 
     /// <summary>
-    /// Buffers up pending byte[] per doc, deref and sorting via
-    ///  int ord, then flushes when segment flushes.
+    /// Buffers up pending <see cref="T:byte[]"/> per doc, deref and sorting via
+    /// int ord, then flushes when segment flushes.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -154,143 +154,5 @@ namespace Lucene.Net.Index
                 yield return ord == -1 ? ord : ordMap[ord];
             }
         }
-
-        /*
-	  private class IterableAnonymousInnerClassHelper : IEnumerable<BytesRef>
-	  {
-		  private readonly SortedDocValuesWriter OuterInstance;
-
-		  private int ValueCount;
-		  private int[] SortedValues;
-
-		  public IterableAnonymousInnerClassHelper(SortedDocValuesWriter outerInstance, int valueCount, int[] sortedValues)
-		  {
-			  this.OuterInstance = outerInstance;
-			  this.ValueCount = valueCount;
-			  this.SortedValues = sortedValues;
-		  }
-
-									// ord -> value
-		  public virtual IEnumerator<BytesRef> GetEnumerator()
-		  {
-			return new ValuesIterator(OuterInstance, SortedValues, ValueCount);
-		  }
-	  }
-
-	  private class IterableAnonymousInnerClassHelper2 : IEnumerable<Number>
-	  {
-		  private readonly SortedDocValuesWriter OuterInstance;
-
-		  private int MaxDoc;
-		  private int[] OrdMap;
-
-		  public IterableAnonymousInnerClassHelper2(SortedDocValuesWriter outerInstance, int maxDoc, int[] ordMap)
-		  {
-			  this.OuterInstance = outerInstance;
-			  this.MaxDoc = maxDoc;
-			  this.OrdMap = ordMap;
-		  }
-
-		  public virtual IEnumerator<Number> GetEnumerator()
-		  {
-			return new OrdsIterator(OuterInstance, OrdMap, MaxDoc);
-		  }
-	  }
-
-	  public override void Abort()
-	  {
-	  }
-
-	  // iterates over the unique values we have in ram
-	  private class ValuesIterator : IEnumerator<BytesRef>
-	  {
-		  private readonly SortedDocValuesWriter OuterInstance;
-
-		internal readonly int[] SortedValues;
-		internal readonly BytesRef Scratch = new BytesRef();
-		internal readonly int ValueCount;
-		internal int OrdUpto;
-
-		internal ValuesIterator(SortedDocValuesWriter outerInstance, int[] sortedValues, int valueCount)
-		{
-			this.OuterInstance = outerInstance;
-		  this.SortedValues = sortedValues;
-		  this.ValueCount = valueCount;
-		}
-
-		public override bool HasNext()
-		{
-		  return OrdUpto < ValueCount;
-		}
-
-		public override BytesRef Next()
-		{
-		  if (!HasNext())
-		  {
-			throw new Exception();
-		  }
-		  OuterInstance.Hash.Get(SortedValues[OrdUpto], Scratch);
-		  OrdUpto++;
-		  return Scratch;
-		}
-
-		public override void Remove()
-		{
-		  throw new System.NotSupportedException();
-		}
-	  }
-
-	  // iterates over the ords for each doc we have in ram
-	  private class OrdsIterator : IEnumerator<Number>
-	  {
-		  internal bool InstanceFieldsInitialized = false;
-
-		  internal virtual void InitializeInstanceFields()
-		  {
-			  Iter = OuterInstance.Pending.Iterator();
-		  }
-
-		  private readonly SortedDocValuesWriter OuterInstance;
-
-		internal AppendingDeltaPackedLongBuffer.Iterator Iter;
-		internal readonly int[] OrdMap;
-		internal readonly int MaxDoc;
-		internal int DocUpto;
-
-		internal OrdsIterator(SortedDocValuesWriter outerInstance, int[] ordMap, int maxDoc)
-		{
-			this.OuterInstance = outerInstance;
-
-			if (!InstanceFieldsInitialized)
-			{
-				InitializeInstanceFields();
-				InstanceFieldsInitialized = true;
-			}
-		  this.OrdMap = ordMap;
-		  this.MaxDoc = maxDoc;
-		  Debug.Assert(outerInstance.Pending.Size() == maxDoc);
-		}
-
-		public override bool HasNext()
-		{
-		  return DocUpto < MaxDoc;
-		}
-
-		public override Number Next()
-		{
-		  if (!HasNext())
-		  {
-			throw new Exception();
-		  }
-		  int ord = (int) Iter.next();
-		  DocUpto++;
-		  return ord == -1 ? ord : OrdMap[ord];
-		}
-
-		public override void Remove()
-		{
-		  throw new System.NotSupportedException();
-		}
-	  }*/
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SortedSetDocValues.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SortedSetDocValues.cs b/src/Lucene.Net/Index/SortedSetDocValues.cs
index e7844d0..f07d218 100644
--- a/src/Lucene.Net/Index/SortedSetDocValues.cs
+++ b/src/Lucene.Net/Index/SortedSetDocValues.cs
@@ -22,9 +22,9 @@ namespace Lucene.Net.Index
     using BytesRef = Lucene.Net.Util.BytesRef;
 
     /// <summary>
-    /// A per-document set of presorted byte[] values.
-    /// <p>
-    /// Per-Document values in a SortedDocValues are deduplicated, dereferenced,
+    /// A per-document set of presorted <see cref="T:byte[]"/> values.
+    /// <para/>
+    /// Per-Document values in a <see cref="SortedDocValues"/> are deduplicated, dereferenced,
     /// and sorted into a dictionary of unique values. A pointer to the
     /// dictionary value (ordinal) can be retrieved for each document. Ordinals
     /// are dense and in increasing sorted order.
@@ -43,15 +43,15 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// When returned by <seealso cref="#nextOrd()"/> it means there are no more
+        /// When returned by <see cref="NextOrd()"/> it means there are no more
         /// ordinals for the document.
         /// </summary>
         public static readonly long NO_MORE_ORDS = -1;
 
         /// <summary>
         /// Returns the next ordinal for the current document (previously
-        /// set by <seealso cref="#setDocument(int)"/>. </summary>
-        /// <returns> next ordinal for the document, or <seealso cref="#NO_MORE_ORDS"/>.
+        /// set by <see cref="SetDocument(int)"/>. </summary>
+        /// <returns> Next ordinal for the document, or <see cref="NO_MORE_ORDS"/>.
         ///         ordinals are dense, start at 0, then increment by 1 for
         ///         the next value in sorted order.  </returns>
         public abstract long NextOrd();
@@ -65,22 +65,21 @@ namespace Lucene.Net.Index
         /// Retrieves the value for the specified ordinal. </summary>
         /// <param name="ord"> ordinal to lookup </param>
         /// <param name="result"> will be populated with the ordinal's value </param>
-        /// <seealso cref= #nextOrd </seealso>
+        /// <seealso cref="NextOrd()"/>
         public abstract void LookupOrd(long ord, BytesRef result);
 
         /// <summary>
         /// Returns the number of unique values. </summary>
-        /// <returns> number of unique values in this SortedDocValues. this is
+        /// <returns> Number of unique values in this <see cref="SortedDocValues"/>. This is
         ///         also equivalent to one plus the maximum ordinal. </returns>
         public abstract long ValueCount { get; }
 
         /// <summary>
-        /// If {@code key} exists, returns its ordinal, else
-        ///  returns {@code -insertionPoint-1}, like {@code
-        ///  Arrays.binarySearch}.
+        /// If <paramref name="key"/> exists, returns its ordinal, else
+        /// returns <c>-insertionPoint-1</c>, like
+        /// <see cref="Array.BinarySearch(Array, int, int, object)"/>.
         /// </summary>
-        ///  <param name="key"> Key to look up
-        ///  </param>
+        /// <param name="key"> Key to look up</param>
         public virtual long LookupTerm(BytesRef key)
         {
             BytesRef spare = new BytesRef();
@@ -111,8 +110,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns a <seealso cref="TermsEnum"/> over the values.
-        /// The enum supports <seealso cref="TermsEnum#ord()"/> and <seealso cref="TermsEnum#seekExact(long)"/>.
+        /// Returns a <see cref="TermsEnum"/> over the values.
+        /// The enum supports <see cref="TermsEnum.Ord"/> and <see cref="TermsEnum.SeekExact(long)"/>.
         /// </summary>
         public virtual TermsEnum GetTermsEnum()
         {

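A short sketch of consuming per-document ordinals via the API above (`ssdv` and `docId` are assumed to exist):

    ssdv.SetDocument(docId);
    var scratch = new BytesRef();
    long ord;
    while ((ord = ssdv.NextOrd()) != SortedSetDocValues.NO_MORE_ORDS)
    {
        ssdv.LookupOrd(ord, scratch);     // values arrive in increasing ordinal (sorted) order
    }
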
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SortedSetDocValuesTermsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SortedSetDocValuesTermsEnum.cs b/src/Lucene.Net/Index/SortedSetDocValuesTermsEnum.cs
index c2502cb..2ad6e82 100644
--- a/src/Lucene.Net/Index/SortedSetDocValuesTermsEnum.cs
+++ b/src/Lucene.Net/Index/SortedSetDocValuesTermsEnum.cs
@@ -25,8 +25,8 @@ namespace Lucene.Net.Index
     using BytesRef = Lucene.Net.Util.BytesRef;
 
     /// <summary>
-    /// Implements a <seealso cref="TermsEnum"/> wrapping a provided
-    /// <seealso cref="SortedSetDocValues"/>.
+    /// Implements a <see cref="TermsEnum"/> wrapping a provided
+    /// <see cref="SortedSetDocValues"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -38,7 +38,7 @@ namespace Lucene.Net.Index
         private readonly BytesRef term = new BytesRef();
 
         /// <summary>
-        /// Creates a new TermsEnum over the provided values </summary>
+        /// Creates a new <see cref="TermsEnum"/> over the provided values </summary>
         public SortedSetDocValuesTermsEnum(SortedSetDocValues values)
         {
             this.values = values;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/SortedSetDocValuesWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/SortedSetDocValuesWriter.cs b/src/Lucene.Net/Index/SortedSetDocValuesWriter.cs
index c7db93e..a37ebce 100644
--- a/src/Lucene.Net/Index/SortedSetDocValuesWriter.cs
+++ b/src/Lucene.Net/Index/SortedSetDocValuesWriter.cs
@@ -34,8 +34,8 @@ namespace Lucene.Net.Index
     using RamUsageEstimator = Lucene.Net.Util.RamUsageEstimator;
 
     /// <summary>
-    /// Buffers up pending byte[]s per doc, deref and sorting via
-    ///  int ord, then flushes when segment flushes.
+    /// Buffers up pending <see cref="T:byte[]"/>s per doc, deref and sorting via
+    /// int ord, then flushes when segment flushes.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -235,230 +235,5 @@ namespace Lucene.Net.Index
                 yield return ord;
             }
         }
-
-        /*
-	  private class IterableAnonymousInnerClassHelper : IEnumerable<BytesRef>
-	  {
-		  private readonly SortedSetDocValuesWriter OuterInstance;
-
-		  private int ValueCount;
-		  private int[] SortedValues;
-
-		  public IterableAnonymousInnerClassHelper(SortedSetDocValuesWriter outerInstance, int valueCount, int[] sortedValues)
-		  {
-			  this.OuterInstance = outerInstance;
-			  this.ValueCount = valueCount;
-			  this.SortedValues = sortedValues;
-		  }
-
-									// ord -> value
-		  public virtual IEnumerator<BytesRef> GetEnumerator()
-		  {
-			return new ValuesIterator(OuterInstance, SortedValues, ValueCount);
-		  }
-	  }
-
-	  private class IterableAnonymousInnerClassHelper2 : IEnumerable<Number>
-	  {
-		  private readonly SortedSetDocValuesWriter OuterInstance;
-
-		  private int MaxDoc;
-
-		  public IterableAnonymousInnerClassHelper2(SortedSetDocValuesWriter outerInstance, int maxDoc)
-		  {
-			  this.OuterInstance = outerInstance;
-			  this.MaxDoc = maxDoc;
-		  }
-
-		  public virtual IEnumerator<Number> GetEnumerator()
-		  {
-			return new OrdCountIterator(OuterInstance, MaxDoc);
-		  }
-	  }
-
-	  private class IterableAnonymousInnerClassHelper3 : IEnumerable<Number>
-	  {
-		  private readonly SortedSetDocValuesWriter OuterInstance;
-
-		  private int MaxCountPerDoc;
-		  private int[] OrdMap;
-
-		  public IterableAnonymousInnerClassHelper3(SortedSetDocValuesWriter outerInstance, int maxCountPerDoc, int[] ordMap)
-		  {
-			  this.OuterInstance = outerInstance;
-			  this.MaxCountPerDoc = maxCountPerDoc;
-			  this.OrdMap = ordMap;
-		  }
-
-		  public virtual IEnumerator<Number> GetEnumerator()
-		  {
-			return new OrdsIterator(OuterInstance, OrdMap, MaxCountPerDoc);
-		  }
-	  }
-
-	  public override void Abort()
-	  {
-	  }
-
-	  // iterates over the unique values we have in ram
-	  private class ValuesIterator : IEnumerator<BytesRef>
-	  {
-		  private readonly SortedSetDocValuesWriter OuterInstance;
-
-		internal readonly int[] SortedValues;
-		internal readonly BytesRef Scratch = new BytesRef();
-		internal readonly int ValueCount;
-		internal int OrdUpto;
-
-		internal ValuesIterator(SortedSetDocValuesWriter outerInstance, int[] sortedValues, int valueCount)
-		{
-			this.OuterInstance = outerInstance;
-		  this.SortedValues = sortedValues;
-		  this.ValueCount = valueCount;
-		}
-
-		public override bool HasNext()
-		{
-		  return OrdUpto < ValueCount;
-		}
-
-		public override BytesRef Next()
-		{
-		  if (!HasNext())
-		  {
-			throw new Exception();
-		  }
-		  OuterInstance.Hash.Get(SortedValues[OrdUpto], Scratch);
-		  OrdUpto++;
-		  return Scratch;
-		}
-
-		public override void Remove()
-		{
-		  throw new System.NotSupportedException();
-		}
-	  }
-
-	  // iterates over the ords for each doc we have in ram
-	  private class OrdsIterator : IEnumerator<Number>
-	  {
-		  internal bool InstanceFieldsInitialized = false;
-
-		  internal virtual void InitializeInstanceFields()
-		  {
-			  Iter = OuterInstance.Pending.Iterator();
-			  Counts = OuterInstance.PendingCounts.Iterator();
-		  }
-
-		  private readonly SortedSetDocValuesWriter OuterInstance;
-
-		internal AppendingPackedLongBuffer.Iterator Iter;
-		internal AppendingDeltaPackedLongBuffer.Iterator Counts;
-		internal readonly int[] OrdMap;
-		internal readonly long NumOrds;
-		internal long OrdUpto;
-
-		internal readonly int[] CurrentDoc;
-		internal int CurrentUpto;
-		internal int CurrentLength;
-
-		internal OrdsIterator(SortedSetDocValuesWriter outerInstance, int[] ordMap, int maxCount)
-		{
-			this.OuterInstance = outerInstance;
-
-			if (!InstanceFieldsInitialized)
-			{
-				InitializeInstanceFields();
-				InstanceFieldsInitialized = true;
-			}
-		  this.CurrentDoc = new int[maxCount];
-		  this.OrdMap = ordMap;
-		  this.NumOrds = outerInstance.Pending.Size();
-		}
-
-		public override bool HasNext()
-		{
-		  return OrdUpto < NumOrds;
-		}
-
-		public override Number Next()
-		{
-		  if (!HasNext())
-		  {
-			throw new Exception();
-		  }
-		  while (CurrentUpto == CurrentLength)
-		  {
-			// refill next doc, and sort remapped ords within the doc.
-			CurrentUpto = 0;
-			CurrentLength = (int) Counts.Next();
-			for (int i = 0; i < CurrentLength; i++)
-			{
-			  CurrentDoc[i] = OrdMap[(int) Iter.Next()];
-			}
-			Array.Sort(CurrentDoc, 0, CurrentLength);
-		  }
-		  int ord = CurrentDoc[CurrentUpto];
-		  CurrentUpto++;
-		  OrdUpto++;
-		  // TODO: make reusable Number
-		  return ord;
-		}
-
-		public override void Remove()
-		{
-		  throw new System.NotSupportedException();
-		}
-	  }
-
-	  private class OrdCountIterator : IEnumerator<Number>
-	  {
-		  internal bool InstanceFieldsInitialized = false;
-
-		  internal virtual void InitializeInstanceFields()
-		  {
-			  Iter = OuterInstance.PendingCounts.Iterator();
-		  }
-
-		  private readonly SortedSetDocValuesWriter OuterInstance;
-
-		internal AppendingDeltaPackedLongBuffer.Iterator Iter;
-		internal readonly int MaxDoc;
-		internal int DocUpto;
-
-		internal OrdCountIterator(SortedSetDocValuesWriter outerInstance, int maxDoc)
-		{
-			this.OuterInstance = outerInstance;
-
-			if (!InstanceFieldsInitialized)
-			{
-				InitializeInstanceFields();
-				InstanceFieldsInitialized = true;
-			}
-		  this.MaxDoc = maxDoc;
-		  Debug.Assert(outerInstance.PendingCounts.Size() == maxDoc);
-		}
-
-		public override bool HasNext()
-		{
-		  return DocUpto < MaxDoc;
-		}
-
-		public override Number Next()
-		{
-		  if (!HasNext())
-		  {
-			throw new Exception();
-		  }
-		  DocUpto++;
-		  // TODO: make reusable Number
-		  return Iter.Next();
-		}
-
-		public override void Remove()
-		{
-		  throw new System.NotSupportedException();
-		}
-	  }*/
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/StandardDirectoryReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/StandardDirectoryReader.cs b/src/Lucene.Net/Index/StandardDirectoryReader.cs
index 2893acf..303d2ca 100644
--- a/src/Lucene.Net/Index/StandardDirectoryReader.cs
+++ b/src/Lucene.Net/Index/StandardDirectoryReader.cs
@@ -38,7 +38,7 @@ namespace Lucene.Net.Index
         private readonly bool applyAllDeletes;
 
         /// <summary>
-        /// called only from static open() methods </summary>
+        /// Called only from static <c>Open()</c> methods </summary>
         internal StandardDirectoryReader(Directory directory, AtomicReader[] readers, IndexWriter writer, SegmentInfos sis, int termInfosIndexDivisor, bool applyAllDeletes)
             : base(directory, readers)
         {
@@ -49,7 +49,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// called from DirectoryReader.open(...) methods </summary>
+        /// Called from <c>DirectoryReader.Open(...)</c> methods </summary>
         internal static DirectoryReader Open(Directory directory, IndexCommit commit, int termInfosIndexDivisor)
         {
             return (DirectoryReader)new FindSegmentsFileAnonymousInnerClassHelper(directory, termInfosIndexDivisor).Run(commit);
@@ -171,7 +171,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// this constructor is only used for <seealso cref="#doOpenIfChanged(SegmentInfos)"/> </summary>
+        /// This constructor is only used for <see cref="DoOpenIfChanged(SegmentInfos)"/> </summary>
         private static DirectoryReader Open(Directory directory, SegmentInfos infos, IList<AtomicReader> oldReaders, int termInfosIndexDivisor)
         {
             // we put the old SegmentReaders in a map, that allows us

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/StoredFieldVisitor.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/StoredFieldVisitor.cs b/src/Lucene.Net/Index/StoredFieldVisitor.cs
index 381dc61..16a34ff 100644
--- a/src/Lucene.Net/Index/StoredFieldVisitor.cs
+++ b/src/Lucene.Net/Index/StoredFieldVisitor.cs
@@ -18,24 +18,22 @@ namespace Lucene.Net.Index
      */
 
     /// <summary>
-    /// Expert: provides a low-level means of accessing the stored field
-    /// values in an index.  See {@link IndexReader#document(int,
-    /// StoredFieldVisitor)}.
+    /// Expert: Provides a low-level means of accessing the stored field
+    /// values in an index.  See <see cref="IndexReader.Document(int, StoredFieldVisitor)"/>.
     ///
-    /// <p><b>NOTE</b>: a {@code StoredFieldVisitor} implementation
+    /// <para/><b>NOTE</b>: a <see cref="StoredFieldVisitor"/> implementation
     /// should not try to load or visit other stored documents in
     /// the same reader because the implementation of stored
    /// fields for most codecs is not reentrant and you will see
     /// strange exceptions as a result.
     ///
-    /// <p>See <seealso cref="DocumentStoredFieldVisitor"/>, which is a
-    /// <code>StoredFieldVisitor</code> that builds the
-    /// <seealso cref="Document"/> containing all stored fields.  this is
-    /// used by <seealso cref="IndexReader#document(int)"/>.
-    ///
+    /// <para/>See <see cref="Documents.DocumentStoredFieldVisitor"/>, which is a
+    /// <see cref="StoredFieldVisitor"/> that builds the
+    /// <see cref="Documents.Document"/> containing all stored fields.  This is
+    /// used by <see cref="IndexReader.Document(int)"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
-
     public abstract class StoredFieldVisitor
     {
         /// <summary>
@@ -54,31 +52,31 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Process a string field </summary>
+        /// Process a <see cref="string"/> field </summary>
         public virtual void StringField(FieldInfo fieldInfo, string value)
         {
         }
 
         /// <summary>
-        /// Process a int numeric field. </summary>
+        /// Process a <see cref="int"/> numeric field. </summary>
         public virtual void Int32Field(FieldInfo fieldInfo, int value) // LUCENENET specific: Renamed from IntField to Int32Field per .NET conventions
         {
         }
 
         /// <summary>
-        /// Process a long numeric field. </summary>
+        /// Process a <see cref="long"/> numeric field. </summary>
         public virtual void Int64Field(FieldInfo fieldInfo, long value) // LUCENENET specific: Renamed from LongField to Int64Field per .NET conventions
         {
         }
 
         /// <summary>
-        /// Process a float numeric field. </summary>
+        /// Process a <see cref="float"/> numeric field. </summary>
         public virtual void SingleField(FieldInfo fieldInfo, float value) // LUCENENET specific: Renamed from FloatField to SingleField per .NET conventions
         {
         }
 
         /// <summary>
-        /// Process a double numeric field. </summary>
+        /// Process a <see cref="double"/> numeric field. </summary>
         public virtual void DoubleField(FieldInfo fieldInfo, double value)
         {
         }
@@ -86,14 +84,14 @@ namespace Lucene.Net.Index
         /// <summary>
         /// Hook before processing a field.
         /// Before a field is processed, this method is invoked so that
-        /// subclasses can return a <seealso cref="Status"/> representing whether
+        /// subclasses can return a <see cref="Status"/> representing whether
         /// they need that particular field or not, or to stop processing
         /// entirely.
         /// </summary>
         public abstract Status NeedsField(FieldInfo fieldInfo);
 
         /// <summary>
-        /// Enumeration of possible return values for <seealso cref="#needsField"/>.
+        /// Enumeration of possible return values for <see cref="NeedsField(FieldInfo)"/>.
         /// </summary>
         public enum Status
         {

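To make the NeedsField/Status contract concrete, here is a minimal hypothetical visitor that loads a single string field and skips everything else; it would be passed to IndexReader.Document(int, StoredFieldVisitor). The class and field names are illustrative only:

    internal sealed class SingleStringFieldVisitor : StoredFieldVisitor
    {
        private readonly string fieldName;
        public string Value { get; private set; }

        public SingleStringFieldVisitor(string fieldName)
        {
            this.fieldName = fieldName;
        }

        public override Status NeedsField(FieldInfo fieldInfo)
        {
            // Visit only the field we care about; skip the rest of each document.
            return fieldInfo.Name == fieldName ? Status.YES : Status.NO;
        }

        public override void StringField(FieldInfo fieldInfo, string value)
        {
            Value = value;
        }
    }

    // usage: reader.Document(docId, new SingleStringFieldVisitor("title"));
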
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/StoredFieldsProcessor.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/StoredFieldsProcessor.cs b/src/Lucene.Net/Index/StoredFieldsProcessor.cs
index 6196078..bd16042 100644
--- a/src/Lucene.Net/Index/StoredFieldsProcessor.cs
+++ b/src/Lucene.Net/Index/StoredFieldsProcessor.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Index
     using StoredFieldsWriter = Lucene.Net.Codecs.StoredFieldsWriter;
 
     /// <summary>
-    /// this is a StoredFieldsConsumer that writes stored fields. </summary>
+    /// This is a <see cref="StoredFieldsConsumer"/> that writes stored fields. </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/TaskMergeScheduler.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/TaskMergeScheduler.cs b/src/Lucene.Net/Index/TaskMergeScheduler.cs
index d87c4dc..3495356 100644
--- a/src/Lucene.Net/Index/TaskMergeScheduler.cs
+++ b/src/Lucene.Net/Index/TaskMergeScheduler.cs
@@ -28,13 +28,13 @@ namespace Lucene.Net.Index
      */
 
     /// <summary>
-    ///  A <seealso cref="MergeScheduler"/> that runs each merge using
-    ///  Tasks on the default TaskScheduler.
+    /// A <see cref="MergeScheduler"/> that runs each merge using
+    /// <see cref="Task"/>s on the default <see cref="TaskScheduler"/>.
     /// 
-    ///  <p>If more than <seealso cref="#GetMaxMergeCount"/> merges are
-    ///  requested then this class will forcefully throttle the
-    ///  incoming threads by pausing until one more more merges
-    ///  complete.</p>
+    /// <para>If more than <see cref="MaxMergeCount"/> merges are
+    /// requested then this class will forcefully throttle the
+    /// incoming threads by pausing until one or more merges
+    /// complete.</para>
     ///  
     /// LUCENENET specific
     /// </summary>
@@ -49,27 +49,27 @@ namespace Lucene.Net.Index
         private readonly ReaderWriterLockSlim _lock = new ReaderWriterLockSlim();
         private readonly ManualResetEventSlim _manualResetEvent = new ManualResetEventSlim();
         /// <summary>
-        /// List of currently active <seealso cref="MergeThread"/>s.</summary>
+        /// List of currently active <see cref="MergeThread"/>s.</summary>
         private readonly IList<MergeThread> _mergeThreads = new List<MergeThread>();
 
         /// <summary>
-        /// How many <seealso cref="MergeThread"/>s have kicked off (this is use
-        ///  to name them).
+        /// How many <see cref="MergeThread"/>s have kicked off (this is use
+        /// to name them).
         /// </summary>
         private int _mergeThreadCount;
 
         /// <summary>
-        /// <seealso cref="Directory"/> that holds the index. </summary>
+        /// <see cref="Directory"/> that holds the index. </summary>
         private Directory _directory;
 
         /// <summary>
-        /// <seealso cref="IndexWriter"/> that owns this instance.
+        /// <seea cref="IndexWriter"/> that owns this instance.
         /// </summary>
         private IndexWriter _writer;
 
         /// <summary>
         /// Sole constructor, with all settings set to default
-        ///  values.
+        /// values.
         /// </summary>
         public TaskMergeScheduler() : base()
         {
@@ -80,14 +80,14 @@ namespace Lucene.Net.Index
         /// <summary>
         /// Sets the maximum number of merge threads and simultaneous merges allowed.
         /// </summary>
-        /// <param name="maxMergeCount"> the max # simultaneous merges that are allowed.
+        /// <param name="maxMergeCount"> The max # simultaneous merges that are allowed.
         ///       If a merge is necessary yet we already have this many
         ///       threads running, the incoming thread (that is calling
         ///       add/updateDocument) will block until a merge thread
         ///       has completed.  Note that we will only run the
-        ///       smallest <code>maxThreadCount</code> merges at a time. </param>
-        /// <param name="maxThreadCount"> the max # simultaneous merge threads that should
-        ///       be running at once.  this must be &lt;= <code>maxMergeCount</code> </param>
+        ///       smallest <paramref name="maxThreadCount"/> merges at a time. </param>
+        /// <param name="maxThreadCount"> The max # simultaneous merge threads that should
+        ///       be running at once.  This must be &lt;= <paramref name="maxMergeCount"/> </param>
         public void SetMaxMergesAndThreads(int maxMergeCount, int maxThreadCount)
         {
             // This is handled by TaskScheduler.Default.MaximumConcurrencyLevel
@@ -97,11 +97,11 @@ namespace Lucene.Net.Index
         /// Max number of merge threads allowed to be running at
         /// once.  When there are more merges then this, we
         /// forcefully pause the larger ones, letting the smaller
-        /// ones run, up until maxMergeCount merges at which point
+        /// ones run, up until <see cref="MaxMergeCount"/> merges at which point
         /// we forcefully pause incoming threads (that presumably
         /// are the ones causing so much merging).
         /// </summary>
-        /// <seealso cref= #setMaxMergesAndThreads(int, int)  </seealso>
+        /// <seealso cref="SetMaxMergesAndThreads(int, int)"/>
         public int MaxThreadCount { get; private set; }
 
         /// <summary>
@@ -134,8 +134,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Called whenever the running merges have changed, to pause & unpause
-        /// threads. this method sorts the merge threads by their merge size in
+        /// Called whenever the running merges have changed, to pause &amp; unpause
+        /// threads. This method sorts the merge threads by their merge size in
         /// descending order and then pauses/unpauses threads from first to last --
         /// that way, smaller merges are guaranteed to run before larger ones.
         /// </summary>
@@ -153,14 +153,14 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns true if verbosing is enabled. this method is usually used in
-        /// conjunction with <seealso cref="#message(String)"/>, like that:
+        /// Returns <c>true</c> if verbose output is enabled. This method is usually used in
+        /// conjunction with <see cref="Message(string)"/>, like this:
         ///
-        /// <pre class="prettyprint">
-        /// if (verbose()) {
-        ///   message(&quot;your message&quot;);
+        /// <code>
+        /// if (Verbose) {
+        ///     Message(&quot;your message&quot;);
         /// }
-        /// </pre>
+        /// </code>
         /// </summary>
         protected bool Verbose
         {
@@ -168,8 +168,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Outputs the given message - this method assumes <seealso cref="#verbose()"/> was
-        /// called and returned true.
+        /// Outputs the given message - this method assumes <see cref="Verbose"/> was
+        /// called and returned <c>true</c>.
         /// </summary>
         protected virtual void Message(string message)
         {
@@ -184,7 +184,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Wait for any running merge threads to finish. 
-        /// This call is not interruptible as used by <seealso cref="#Dispose()"/>.
+        /// This call is not interruptible as used by <see cref="MergeScheduler.Dispose()"/>.
         /// </summary>
         public virtual void Sync()
         {
@@ -217,7 +217,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Returns the number of merge threads that are alive. Note that this number
-        /// is &lt;= <seealso cref="#mergeThreads"/> size.
+        /// is &lt;= <see cref="_mergeThreads"/> size.
         /// </summary>
         private int MergeThreadCount
         {
@@ -347,7 +347,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Create and return a new MergeThread </summary>
+        /// Create and return a new <see cref="MergeThread"/> </summary>
         private MergeThread CreateTask(IndexWriter writer, MergePolicy.OneMerge merge)
         {
             var count = Interlocked.Increment(ref _mergeThreadCount);
@@ -358,7 +358,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Called when an exception is hit in a background merge
-        ///  thread
+        /// thread
         /// </summary>
         protected virtual void HandleMergeException(Exception exc)
         {
@@ -424,7 +424,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Runs a merge thread, which may run one or more merges
-        ///  in sequence.
+        /// in sequence.
         /// </summary>
         internal class MergeThread : IDisposable
         {
@@ -491,8 +491,8 @@ namespace Lucene.Net.Index
             }
 
             /// <summary>
-            /// Return the current merge, or null if this {@code
-            ///  MergeThread} is done.
+            /// Return the current merge, or <c>null</c> if this 
+            /// <see cref="MergeThread"/> is done.
             /// </summary>
             public virtual MergePolicy.OneMerge CurrentMerge
             {

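A hedged sketch of wiring this LUCENENET-specific scheduler into a writer, assuming IndexWriterConfig exposes a settable MergeScheduler as in Lucene 4.8 (`dir` and `analyzer` are placeholders):

    var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)
    {
        MergeScheduler = new TaskMergeScheduler()  // merges run as Tasks on TaskScheduler.Default
    };
    using (var writer = new IndexWriter(dir, config))
    {
        // AddDocument/UpdateDocument may block here when too many merges are queued,
        // per the throttling behavior described above.
    }
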
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/Term.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/Term.cs b/src/Lucene.Net/Index/Term.cs
index 1cf8dd7..19d5ee2 100644
--- a/src/Lucene.Net/Index/Term.cs
+++ b/src/Lucene.Net/Index/Term.cs
@@ -24,12 +24,12 @@ namespace Lucene.Net.Index
     using BytesRef = Lucene.Net.Util.BytesRef;
 
     /// <summary>
-    ///  A Term represents a word from text.  this is the unit of search.  It is
-    ///  composed of two elements, the text of the word, as a string, and the name of
-    ///  the field that the text occurred in.
-    ///
-    ///  Note that terms may represent more than words from text fields, but also
-    ///  things like dates, email addresses, urls, etc.
+    /// A <see cref="Term"/> represents a word from text.  This is the unit of search.  It is
+    /// composed of two elements, the text of the word, as a string, and the name of
+    /// the field that the text occurred in.
+    /// <para/>
+    /// Note that terms may represent more than words from text fields, but also
+    /// things like dates, email addresses, urls, etc.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -37,14 +37,14 @@ namespace Lucene.Net.Index
     public sealed class Term : IComparable<Term>, IEquatable<Term> // LUCENENET specific - class implements IEquatable<T>
     {
         /// <summary>
-        /// Constructs a Term with the given field and bytes.
-        /// <p>Note that a null field or null bytes value results in undefined
+        /// Constructs a <see cref="Term"/> with the given field and bytes.
+        /// <para/>Note that a <c>null</c> field or <c>null</c> bytes value results in undefined
         /// behavior for most Lucene APIs that accept a Term parameter.
         ///
-        /// <p>WARNING: the provided BytesRef is not copied, but used directly.
+        /// <para/>WARNING: the provided <see cref="BytesRef"/> is not copied, but used directly.
         /// Therefore the bytes should not be modified after construction, for
-        /// example, you should clone a copy by <seealso cref="BytesRef#deepCopyOf"/>
-        /// rather than pass reused bytes from a TermsEnum.
+        /// example, you should clone a copy by <see cref="BytesRef.DeepCopyOf(BytesRef)"/>
+        /// rather than pass reused bytes from a <see cref="TermsEnum"/>.
         /// </summary>
         public Term(string fld, BytesRef bytes)
         {
@@ -53,9 +53,9 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Constructs a Term with the given field and text.
-        /// <p>Note that a null field or null text value results in undefined
-        /// behavior for most Lucene APIs that accept a Term parameter.
+        /// Constructs a <see cref="Term"/> with the given field and text.
+        /// <para/>Note that a <c>null</c> field or <c>null</c> text value results in undefined
+        /// behavior for most Lucene APIs that accept a <see cref="Term"/> parameter.
         /// </summary>
         public Term(string fld, string text)
             : this(fld, new BytesRef(text))
@@ -63,8 +63,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Constructs a Term with the given field and empty text.
-        /// this serves two purposes: 1) reuse of a Term with the same field.
+        /// Constructs a <see cref="Term"/> with the given field and empty text.
+        /// this serves two purposes: 1) reuse of a <see cref="Term"/> with the same field.
         /// 2) pattern for a query.
         /// </summary>
         /// <param name="fld"> field's name </param>
@@ -74,15 +74,15 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns the field of this term.   The field indicates
-        ///  the part of a document which this term came from.
+        /// Returns the field of this term.  The field indicates
+        /// the part of a document which this term came from.
         /// </summary>
         public string Field { get; internal set; }
 
         /// <summary>
         /// Returns the text of this term.  In the case of words, this is simply the
-        ///  text of the word.  In the case of dates and other types, this is an
-        ///  encoding of the object as a string.
+        /// text of the word.  In the case of dates and other types, this is an
+        /// encoding of the object as a string.
         /// </summary>
         public string Text()
         {
@@ -129,10 +129,10 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Compares two terms, returning a negative integer if this
-        ///  term belongs before the argument, zero if this term is equal to the
-        ///  argument, and a positive integer if this term belongs after the argument.
-        ///
-        ///  The ordering of terms is first by field, then by text.
+        /// term belongs before the argument, zero if this term is equal to the
+        /// argument, and a positive integer if this term belongs after the argument.
+        /// <para/>
+        /// The ordering of terms is first by field, then by text.
         /// </summary>
         public int CompareTo(Term other)
         {
@@ -148,8 +148,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Resets the field and text of a Term.
-        /// <p>WARNING: the provided BytesRef is not copied, but used directly.
+        /// Resets the field and text of a <see cref="Term"/>.
+        /// <para/>WARNING: the provided <see cref="BytesRef"/> is not copied, but used directly.
         /// Therefore the bytes should not be modified after construction, for
         /// example, you should clone a copy rather than pass reused bytes from
         /// a TermsEnum.

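The BytesRef-reuse warning above translates into code roughly like this (`termsEnum` is an assumed TermsEnum positioned on a term):

    // Safe: deep-copy the enum's reused bytes before constructing a Term.
    var safeTerm = new Term("body", BytesRef.DeepCopyOf(termsEnum.Term));

    // Ordering is by field first, then text:
    var a = new Term("author", "smith");
    var b = new Term("body", "apple");
    bool aFirst = a.CompareTo(b) < 0;  // true: "author" sorts before "body"
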
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/TermContext.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/TermContext.cs b/src/Lucene.Net/Index/TermContext.cs
index 945d928..b389c60 100644
--- a/src/Lucene.Net/Index/TermContext.cs
+++ b/src/Lucene.Net/Index/TermContext.cs
@@ -24,12 +24,12 @@ namespace Lucene.Net.Index
     using BytesRef = Lucene.Net.Util.BytesRef;
 
     /// <summary>
-    /// Maintains a <seealso cref="IndexReader"/> <seealso cref="TermState"/> view over
-    /// <seealso cref="IndexReader"/> instances containing a single term. The
-    /// <seealso cref="TermContext"/> doesn't track if the given <seealso cref="TermState"/>
-    /// objects are valid, neither if the <seealso cref="TermState"/> instances refer to the
+    /// Maintains a <see cref="IndexReader"/> <see cref="TermState"/> view over
+    /// <see cref="IndexReader"/> instances containing a single term. The
+    /// <see cref="TermContext"/> doesn't track if the given <see cref="TermState"/>
+    /// objects are valid, neither if the <see cref="TermState"/> instances refer to the
     /// same terms in the associated readers.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -38,11 +38,11 @@ namespace Lucene.Net.Index
     public sealed class TermContext
     {
         /// <summary>
-        /// Holds the <seealso cref="IndexReaderContext"/> of the top-level
-        ///  <seealso cref="IndexReader"/>, used internally only for
-        ///  asserting.
-        ///
-        ///  @lucene.internal
+        /// Holds the <see cref="IndexReaderContext"/> of the top-level
+        /// <see cref="IndexReader"/>, used internally only for
+        /// asserting.
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
         public IndexReaderContext TopReaderContext { get; private set; }
 
@@ -53,7 +53,7 @@ namespace Lucene.Net.Index
         //public static boolean DEBUG = BlockTreeTermsWriter.DEBUG;
 
         /// <summary>
-        /// Creates an empty <seealso cref="TermContext"/> from a <seealso cref="IndexReaderContext"/>
+        /// Creates an empty <see cref="TermContext"/> from a <see cref="IndexReaderContext"/>
         /// </summary>
         public TermContext(IndexReaderContext context)
         {
@@ -73,8 +73,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Creates a <seealso cref="TermContext"/> with an initial <seealso cref="TermState"/>,
-        /// <seealso cref="IndexReader"/> pair.
+        /// Creates a <see cref="TermContext"/> with an initial <see cref="TermState"/>,
+        /// <see cref="IndexReader"/> pair.
         /// </summary>
         public TermContext(IndexReaderContext context, TermState state, int ord, int docFreq, long totalTermFreq)
             : this(context)
@@ -83,11 +83,11 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Creates a <seealso cref="TermContext"/> from a top-level <seealso cref="IndexReaderContext"/> and the
-        /// given <seealso cref="Term"/>. this method will lookup the given term in all context's leaf readers
-        /// and register each of the readers containing the term in the returned <seealso cref="TermContext"/>
+        /// Creates a <see cref="TermContext"/> from a top-level <see cref="IndexReaderContext"/> and the
+        /// given <see cref="Term"/>. this method will lookup the given term in all context's leaf readers
+        /// and register each of the readers containing the term in the returned <see cref="TermContext"/>
         /// using the leaf reader's ordinal.
-        /// <p>
+        /// <para/>
         /// Note: the given context must be a top-level context.
         /// </summary>
         public static TermContext Build(IndexReaderContext context, Term term)
@@ -120,8 +120,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Clears the <seealso cref="TermContext"/> internal state and removes all
-        /// registered <seealso cref="TermState"/>s
+        /// Clears the <see cref="TermContext"/> internal state and removes all
+        /// registered <see cref="TermState"/>s
         /// </summary>
         public void Clear()
         {
@@ -130,8 +130,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Registers and associates a <seealso cref="TermState"/> with an leaf ordinal. The leaf ordinal
-        /// should be derived from a <seealso cref="IndexReaderContext"/>'s leaf ord.
+        /// Registers and associates a <see cref="TermState"/> with an leaf ordinal. The leaf ordinal
+        /// should be derived from a <see cref="IndexReaderContext"/>'s leaf ord.
         /// </summary>
         public void Register(TermState state, int ord, int docFreq, long totalTermFreq)
         {
@@ -151,13 +151,13 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Returns the <seealso cref="TermState"/> for an leaf ordinal or <code>null</code> if no
-        /// <seealso cref="TermState"/> for the ordinal was registered.
+        /// Returns the <see cref="TermState"/> for an leaf ordinal or <c>null</c> if no
+        /// <see cref="TermState"/> for the ordinal was registered.
         /// </summary>
         /// <param name="ord">
-        ///          the readers leaf ordinal to get the <seealso cref="TermState"/> for. </param>
-        /// <returns> the <seealso cref="TermState"/> for the given readers ord or <code>null</code> if no
-        ///         <seealso cref="TermState"/> for the reader was registered </returns>
+        ///          The readers leaf ordinal to get the <see cref="TermState"/> for. </param>
+        /// <returns> The <see cref="TermState"/> for the given readers ord or <c>null</c> if no
+        ///         <see cref="TermState"/> for the reader was registered </returns>
         public TermState Get(int ord)
         {
             Debug.Assert(ord >= 0 && ord < states.Length);
@@ -165,10 +165,10 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        ///  Returns the accumulated term frequency of all <seealso cref="TermState"/>
-        ///         instances passed to <seealso cref="#register(TermState, int, int, long)"/>. </summary>
-        /// <returns> the accumulated term frequency of all <seealso cref="TermState"/>
-        ///         instances passed to <seealso cref="#register(TermState, int, int, long)"/>. </returns>
+        ///  Returns the accumulated term frequency of all <see cref="TermState"/>
+        ///         instances passed to <see cref="Register(TermState, int, int, long)"/>. </summary>
+        /// <returns> the accumulated term frequency of all <see cref="TermState"/>
+        ///         instances passed to <see cref="Register(TermState, int, int, long)"/>. </returns>
         public long TotalTermFreq
         {
             get { return totalTermFreq; }
@@ -176,11 +176,12 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Expert: only available for queries that want to lie about docfreq
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public int DocFreq
         {
-            set
+            internal set
             {
                 this.docFreq = value;
             }

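A small sketch of the Build pattern documented above (`reader` is an assumed IndexReader; note the context passed in must be top-level):

    var term = new Term("body", "lucene");
    IndexReaderContext topContext = reader.Context;     // top-level context, as required
    TermContext termContext = TermContext.Build(topContext, term);
    int docFreq = termContext.DocFreq;                  // aggregated over registered leaves
    long totalTermFreq = termContext.TotalTermFreq;
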
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/TermState.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/TermState.cs b/src/Lucene.Net/Index/TermState.cs
index c5dd839..5ecff88 100644
--- a/src/Lucene.Net/Index/TermState.cs
+++ b/src/Lucene.Net/Index/TermState.cs
@@ -21,11 +21,12 @@ namespace Lucene.Net.Index
 
     /// <summary>
     /// Encapsulates all required internal state to position the associated
-    /// <seealso cref="TermsEnum"/> without re-seeking.
+    /// <see cref="TermsEnum"/> without re-seeking.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// <seealso cref= TermsEnum#seekExact(Lucene.Net.Util.BytesRef, TermState) </seealso>
-    /// <seealso cref= TermsEnum#termState()
-    /// @lucene.experimental </seealso>
+    /// <seealso cref="TermsEnum.SeekExact(Lucene.Net.Util.BytesRef, TermState)"/>
+    /// <seealso cref="TermsEnum.GetTermState()"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -33,17 +34,17 @@ namespace Lucene.Net.Index
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected TermState()
         {
         }
 
         /// <summary>
-        /// Copies the content of the given <seealso cref="TermState"/> to this instance
+        /// Copies the content of the given <see cref="TermState"/> to this instance
         /// </summary>
         /// <param name="other">
-        ///          the TermState to copy </param>
+        ///          the <see cref="TermState"/> to copy </param>
         public abstract void CopyFrom(TermState other);
 
         public virtual object Clone()

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/TermVectorsConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/TermVectorsConsumer.cs b/src/Lucene.Net/Index/TermVectorsConsumer.cs
index 04ba05d..f1c8019 100644
--- a/src/Lucene.Net/Index/TermVectorsConsumer.cs
+++ b/src/Lucene.Net/Index/TermVectorsConsumer.cs
@@ -88,7 +88,7 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Fills in no-term-vectors for all docs we haven't seen
-        ///  since the last doc that had term vectors.
+        /// since the last doc that had term vectors.
         /// </summary>
         internal void Fill(int docID)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/646db0ce/src/Lucene.Net/Index/TermVectorsConsumerPerField.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/TermVectorsConsumerPerField.cs b/src/Lucene.Net/Index/TermVectorsConsumerPerField.cs
index f24e457..16dacea 100644
--- a/src/Lucene.Net/Index/TermVectorsConsumerPerField.cs
+++ b/src/Lucene.Net/Index/TermVectorsConsumerPerField.cs
@@ -156,9 +156,10 @@ namespace Lucene.Net.Index
 
         /// <summary>
         /// Called once per field per document if term vectors
-        ///  are enabled, to write the vectors to
-        ///  RAMOutputStream, which is then quickly flushed to
-        ///  the real term vectors files in the Directory. 	  /// </summary>
+        /// are enabled, to write the vectors to
+        /// RAMOutputStream, which is then quickly flushed to
+        /// the real term vectors files in the Directory.
+        /// </summary>
         internal override void Finish()
         {
             if (!doVectors || termsHashPerField.bytesHash.Count == 0)