Posted to commits@lucenenet.apache.org by ni...@apache.org on 2017/06/01 22:48:58 UTC

[2/9] lucenenet git commit: SWEEP: Changed <item> to <item><description></description></item> in documentation comments

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs
index 04b6c1d..e36aee8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/RSLPStemmerBase.cs
@@ -44,14 +44,14 @@ namespace Lucene.Net.Analysis.Pt
     /// <code>{ "suffix", N, "replacement", { "exception1", "exception2", ...}}</code>
     /// where:
     /// <list type="bullet">
-    ///   <item><c>suffix</c> is the suffix to be removed (such as "inho").</item>
-    ///   <item><c>N</c> is the min stem size, where stem is defined as the candidate stem 
-    ///       after removing the suffix (but before appending the replacement!)</item>
-    ///   <item><c>replacement</c> is an optional string to append after removing the suffix.
-    ///       This can be the empty string.</item>
-    ///   <item><c>exceptions</c> is an optional list of exceptions, patterns that should 
+    ///   <item><description><c>suffix</c> is the suffix to be removed (such as "inho").</description></item>
+    ///   <item><description><c>N</c> is the min stem size, where stem is defined as the candidate stem 
+    ///       after removing the suffix (but before appending the replacement!)</description></item>
+    ///   <item><description><c>replacement</c> is an optional string to append after removing the suffix.
+    ///       This can be the empty string.</description></item>
+    ///   <item><description><c>exceptions</c> is an optional list of exceptions, patterns that should 
     ///       not be stemmed. These patterns can be specified as whole word or suffix (ends-with) 
-    ///       patterns, depending upon the exceptions format flag in the step header.</item>
+    ///       patterns, depending upon the exceptions format flag in the step header.</description></item>
     /// </list>
     /// </para>
     /// <para>
@@ -61,17 +61,17 @@ namespace Lucene.Net.Analysis.Pt
     /// </blockquote>
     /// where:
     /// <list type="bullet">
-    ///   <item><c>name</c> is a name for the step (such as "Plural").</item>
-    ///   <item><c>N</c> is the min word size. Words that are less than this length bypass
+    ///   <item><description><c>name</c> is a name for the step (such as "Plural").</description></item>
+    ///   <item><description><c>N</c> is the min word size. Words that are less than this length bypass
     ///       the step completely, as an optimization. Note: N can be zero, in this case this 
     ///       implementation will automatically calculate the appropriate value from the underlying 
-    ///       rules.</item>
-    ///   <item><c>B</c> is a "boolean" flag specifying how exceptions in the rules are matched.
+    ///       rules.</description></item>
+    ///   <item><description><c>B</c> is a "boolean" flag specifying how exceptions in the rules are matched.
     ///       A value of 1 indicates whole-word pattern matching, a value of 0 indicates that 
-    ///       exceptions are actually suffixes and should be matched with ends-with.</item>
-    ///   <item><c>conds</c> are an optional list of conditions to enter the step at all. If
+    ///       exceptions are actually suffixes and should be matched with ends-with.</description></item>
+    ///   <item><description><c>conds</c> are an optional list of conditions to enter the step at all. If
     ///       the list is non-empty, then a word must end with one of these conditions or it will
-    ///       bypass the step completely as an optimization.</item>
+    ///       bypass the step completely as an optimization.</description></item>
     /// </list>
     /// </para>
     /// <a href="http://www.inf.ufrgs.br/~viviane/rslp/index.htm">RSLP description</a>
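
For orientation, a single rule in the format documented above might look like the following sketch; the min stem size and the exception words are invented for illustration:

    { "inho", 3, "", { "caminho", "carinho" } }

Read as: strip the suffix "inho" whenever the remaining candidate stem is at least 3 characters long, append nothing, but leave words matching the listed exceptions unstemmed.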

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs
index 4b48902..4440167 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Reverse/ReverseStringFilter.cs
@@ -33,7 +33,7 @@ namespace Lucene.Net.Analysis.Reverse
     /// compatibility when creating <see cref="ReverseStringFilter"/>, or when using any of
     /// its static methods:
     /// <list type="bullet">
-    ///     <item> As of 3.1, supplementary characters are handled correctly</item>
+    ///     <item><description> As of 3.1, supplementary characters are handled correctly</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
index ad8e0ea..bb086a7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
@@ -38,8 +38,8 @@ namespace Lucene.Net.Analysis.Ru
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="RussianAnalyzer"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.1, <see cref="StandardTokenizer"/> is used, Snowball stemming is done with
-    ///        <see cref="SnowballFilter"/>, and Snowball stopwords are used by default.</item>
+    ///     <item><description> As of 3.1, <see cref="StandardTokenizer"/> is used, Snowball stemming is done with
+    ///        <see cref="SnowballFilter"/>, and Snowball stopwords are used by default.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
index bcaa1d6..a6d2be2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballAnalyzer.cs
@@ -37,7 +37,7 @@ namespace Lucene.Net.Analysis.Snowball
     /// <para><b>NOTE</b>: This class uses the same <see cref="LuceneVersion"/>
     /// dependent settings as <see cref="StandardAnalyzer"/>, with the following addition:
     /// <list type="bullet">
-    ///   <item> As of 3.1, uses <see cref="TurkishLowerCaseFilter"/> for Turkish language.</item>
+    ///   <item><description> As of 3.1, uses <see cref="TurkishLowerCaseFilter"/> for Turkish language.</description></item>
     /// </list>
     /// </para> </summary>
     /// @deprecated (3.1) Use the language-specific analyzer in modules/analysis instead. 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs
index d9f8672..8ac2021 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Snowball/SnowballFilter.cs
@@ -28,8 +28,8 @@ namespace Lucene.Net.Analysis.Snowball
     /// Available stemmers are listed in Lucene.Net.Tartarus.Snowball.Ext.
     /// <para><b>NOTE</b>: <see cref="SnowballFilter"/> expects lowercased text.
     /// <list type="bullet">
-    ///     <item>For the Turkish language, see <see cref="Tr.TurkishLowerCaseFilter"/>.</item>
-    ///     <item>For other languages, see <see cref="Core.LowerCaseFilter"/>.</item>
+    ///     <item><description>For the Turkish language, see <see cref="Tr.TurkishLowerCaseFilter"/>.</description></item>
+    ///     <item><description>For other languages, see <see cref="Core.LowerCaseFilter"/>.</description></item>
     /// </list>
     /// </para>
     /// 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs
index 888431b..f5b42e0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicAnalyzer.cs
@@ -30,12 +30,12 @@ namespace Lucene.Net.Analysis.Standard
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="ClassicAnalyzer"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.1, <see cref="StopFilter"/> correctly handles Unicode 4.0
-    ///         supplementary characters in stopwords</item>
-    ///     <item> As of 2.9, <see cref="StopFilter"/> preserves position
-    ///        increments</item>
-    ///     <item> As of 2.4, <see cref="Token"/>s incorrectly identified as acronyms
-    ///        are corrected (see <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>)</item>
+    ///     <item><description> As of 3.1, <see cref="StopFilter"/> correctly handles Unicode 4.0
+    ///         supplementary characters in stopwords</description></item>
+    ///     <item><description> As of 2.9, <see cref="StopFilter"/> preserves position
+    ///        increments</description></item>
+    ///     <item><description> As of 2.4, <see cref="Token"/>s incorrectly identified as acronyms
+    ///        are corrected (see <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>)</description></item>
     /// </list>
     /// 
     /// <see cref="ClassicAnalyzer"/> was named <see cref="StandardAnalyzer"/> in Lucene versions prior to 3.1. 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
index 111e23f..1bd65af 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/ClassicTokenizer.cs
@@ -27,11 +27,11 @@ namespace Lucene.Net.Analysis.Standard
     /// <para> This should be a good tokenizer for most European-language documents:
     /// 
     /// <list type="bullet">
-    ///     <item>Splits words at punctuation characters, removing punctuation. However, a 
-    ///         dot that's not followed by whitespace is considered part of a token.</item>
-    ///     <item>Splits words at hyphens, unless there's a number in the token, in which case
-    ///         the whole token is interpreted as a product number and is not split.</item>
-    ///     <item>Recognizes email addresses and internet hostnames as one token.</item>
+    ///     <item><description>Splits words at punctuation characters, removing punctuation. However, a 
+    ///         dot that's not followed by whitespace is considered part of a token.</description></item>
+    ///     <item><description>Splits words at hyphens, unless there's a number in the token, in which case
+    ///         the whole token is interpreted as a product number and is not split.</description></item>
+    ///     <item><description>Recognizes email addresses and internet hostnames as one token.</description></item>
     /// </list>
     /// 
     /// </para>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs
index d7f8515..ca6c60c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardAnalyzer.cs
@@ -30,17 +30,17 @@ namespace Lucene.Net.Analysis.Standard
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="StandardAnalyzer"/>:
     /// <list type="bullet">
-    ///   <item> As of 3.4, Hiragana and Han characters are no longer wrongly split
+    ///   <item><description> As of 3.4, Hiragana and Han characters are no longer wrongly split
     ///        from their combining characters. If you use a previous version number,
-    ///        you get the exact broken behavior for backwards compatibility.</item>
-    ///   <item> As of 3.1, <see cref="StandardTokenizer"/> implements Unicode text segmentation,
+    ///        you get the exact broken behavior for backwards compatibility.</description></item>
+    ///   <item><description> As of 3.1, <see cref="StandardTokenizer"/> implements Unicode text segmentation,
     ///        and <see cref="StopFilter"/> correctly handles Unicode 4.0 supplementary characters
     ///        in stopwords.  <see cref="ClassicTokenizer"/> and <see cref="ClassicAnalyzer"/> 
     ///        are the pre-3.1 implementations of <see cref="StandardTokenizer"/> and
-    ///        <see cref="StandardAnalyzer"/>.</item>
-    ///   <item> As of 2.9, <see cref="StopFilter"/> preserves position increments</item>
-    ///   <item> As of 2.4, <see cref="Token"/>s incorrectly identified as acronyms
-    ///        are corrected (see <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>)</item>
+    ///        <see cref="StandardAnalyzer"/>.</description></item>
+    ///   <item><description> As of 2.9, <see cref="StopFilter"/> preserves position increments</description></item>
+    ///   <item><description> As of 2.4, <see cref="Token"/>s incorrectly identified as acronyms
+    ///        are corrected (see <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>)</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
index 3fa7bb8..5d89a29 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizer.cs
@@ -41,12 +41,12 @@ namespace Lucene.Net.Analysis.Standard
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="StandardTokenizer"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.4, Hiragana and Han characters are no longer wrongly split
+    ///     <item><description> As of 3.4, Hiragana and Han characters are no longer wrongly split
     ///         from their combining characters. If you use a previous version number,
-    ///         you get the exact broken behavior for backwards compatibility.</item>
-    ///     <item> As of 3.1, StandardTokenizer implements Unicode text segmentation.
+    ///         you get the exact broken behavior for backwards compatibility.</description></item>
+    ///     <item><description> As of 3.1, StandardTokenizer implements Unicode text segmentation.
     ///         If you use a previous version number, you get the exact behavior of
-    ///         <see cref="ClassicTokenizer"/> for backwards compatibility.</item>
+    ///         <see cref="ClassicTokenizer"/> for backwards compatibility.</description></item>
     /// </list>
     /// </para>
     /// </summary>
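
For orientation (not part of this commit), a minimal C# sketch that constructs the tokenizer with an explicit LuceneVersion, as the compatibility notes above require, and drains it through the attribute API:

    using System;
    using System.IO;
    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Analysis.TokenAttributes;
    using Lucene.Net.Util;

    using (var tokenizer = new StandardTokenizer(LuceneVersion.LUCENE_48, new StringReader("Hello world")))
    {
        // term text is exposed through ICharTermAttribute
        var termAtt = tokenizer.AddAttribute<ICharTermAttribute>();
        tokenizer.Reset();
        while (tokenizer.IncrementToken())
        {
            Console.WriteLine(termAtt.ToString()); // "Hello", then "world"
        }
        tokenizer.End();
    }

Passing an older LuceneVersion instead of LUCENE_48 deliberately reproduces the pre-3.1/pre-3.4 behaviors listed above.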

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerImpl.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerImpl.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerImpl.cs
index aa66336..edfcbb8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerImpl.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/StandardTokenizerImpl.cs
@@ -29,14 +29,14 @@ namespace Lucene.Net.Analysis.Standard
     /// <para/>
     /// Tokens produced are of the following types:
     /// <list type="bullet">
-    ///     <item>&lt;ALPHANUM&gt;: A sequence of alphabetic and numeric characters</item>
-    ///     <item>&lt;NUM&gt;: A number</item>
-    ///     <item>&lt;SOUTHEAST_ASIAN&gt;: A sequence of characters from South and Southeast
-    ///         Asian languages, including Thai, Lao, Myanmar, and Khmer</item>
-    ///     <item>&lt;IDEOGRAPHIC&gt;: A single CJKV ideographic character</item>
-    ///     <item>&lt;HIRAGANA&gt;: A single hiragana character</item>
-    ///     <item>&lt;KATAKANA&gt;: A sequence of katakana characters</item>
-    ///     <item>&lt;HANGUL&gt;: A sequence of Hangul characters</item>
+    ///     <item><description>&lt;ALPHANUM&gt;: A sequence of alphabetic and numeric characters</description></item>
+    ///     <item><description>&lt;NUM&gt;: A number</description></item>
+    ///     <item><description>&lt;SOUTHEAST_ASIAN&gt;: A sequence of characters from South and Southeast
+    ///         Asian languages, including Thai, Lao, Myanmar, and Khmer</description></item>
+    ///     <item><description>&lt;IDEOGRAPHIC&gt;: A single CJKV ideographic character</description></item>
+    ///     <item><description>&lt;HIRAGANA&gt;: A single hiragana character</description></item>
+    ///     <item><description>&lt;KATAKANA&gt;: A sequence of katakana characters</description></item>
+    ///     <item><description>&lt;HANGUL&gt;: A sequence of Hangul characters</description></item>
     /// </list>
     /// </summary>
     public sealed class StandardTokenizerImpl : IStandardTokenizerInterface

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
index 292b7bd..31642c6 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizer.cs
@@ -33,21 +33,21 @@ namespace Lucene.Net.Analysis.Standard
     /// <para/>
     /// Tokens produced are of the following types:
     /// <list type="bullet">
-    ///     <item>&lt;ALPHANUM&gt;: A sequence of alphabetic and numeric characters</item>
-    ///     <item>&lt;NUM&gt;: A number</item>
-    ///     <item>&lt;URL&gt;: A URL</item>
-    ///     <item>&lt;EMAIL&gt;: An email address</item>
-    ///     <item>&lt;SOUTHEAST_ASIAN&gt;: A sequence of characters from South and Southeast
-    ///         Asian languages, including Thai, Lao, Myanmar, and Khmer</item>
-    ///     <item>&lt;IDEOGRAPHIC&gt;: A single CJKV ideographic character</item>
-    ///     <item>&lt;HIRAGANA&gt;: A single hiragana character</item>
+    ///     <item><description>&lt;ALPHANUM&gt;: A sequence of alphabetic and numeric characters</description></item>
+    ///     <item><description>&lt;NUM&gt;: A number</description></item>
+    ///     <item><description>&lt;URL&gt;: A URL</description></item>
+    ///     <item><description>&lt;EMAIL&gt;: An email address</description></item>
+    ///     <item><description>&lt;SOUTHEAST_ASIAN&gt;: A sequence of characters from South and Southeast
+    ///         Asian languages, including Thai, Lao, Myanmar, and Khmer</description></item>
+    ///     <item><description>&lt;IDEOGRAPHIC&gt;: A single CJKV ideographic character</description></item>
+    ///     <item><description>&lt;HIRAGANA&gt;: A single hiragana character</description></item>
     /// </list>
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="UAX29URLEmailTokenizer"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.4, Hiragana and Han characters are no longer wrongly split
+    ///     <item><description> As of 3.4, Hiragana and Han characters are no longer wrongly split
     ///         from their combining characters. If you use a previous version number,
-    ///         you get the exact broken behavior for backwards compatibility.</item>
+    ///         you get the exact broken behavior for backwards compatibility.</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerImpl.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerImpl.cs b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerImpl.cs
index 547a62c..c95f064 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerImpl.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Standard/UAX29URLEmailTokenizerImpl.cs
@@ -29,16 +29,16 @@ namespace Lucene.Net.Analysis.Standard
     /// <para/>
     /// Tokens produced are of the following types:
     /// <list type="bullet">
-    ///     <item>&lt;ALPHANUM&gt;: A sequence of alphabetic and numeric characters</item>
-    ///     <item>&lt;NUM&gt;: A number</item>
-    ///     <item>&lt;URL&gt;: A URL</item>
-    ///     <item>&lt;EMAIL&gt;: An email address</item>
-    ///     <item>&lt;SOUTHEAST_ASIAN&gt;: A sequence of characters from South and Southeast
-    ///         Asian languages, including Thai, Lao, Myanmar, and Khmer</item>
-    ///     <item>&lt;IDEOGRAPHIC&gt;: A single CJKV ideographic character</item>
-    ///     <item>&lt;HIRAGANA&gt;: A single hiragana character</item>
-    ///     <item>&lt;KATAKANA&gt;: A sequence of katakana characters</item>
-    ///     <item>&lt;HANGUL&gt;: A sequence of Hangul characters</item>
+    ///     <item><description>&lt;ALPHANUM&gt;: A sequence of alphabetic and numeric characters</description></item>
+    ///     <item><description>&lt;NUM&gt;: A number</description></item>
+    ///     <item><description>&lt;URL&gt;: A URL</description></item>
+    ///     <item><description>&lt;EMAIL&gt;: An email address</description></item>
+    ///     <item><description>&lt;SOUTHEAST_ASIAN&gt;: A sequence of characters from South and Southeast
+    ///         Asian languages, including Thai, Lao, Myanmar, and Khmer</description></item>
+    ///     <item><description>&lt;IDEOGRAPHIC&gt;: A single CJKV ideographic character</description></item>
+    ///     <item><description>&lt;HIRAGANA&gt;: A single hiragana character</description></item>
+    ///     <item><description>&lt;KATAKANA&gt;: A sequence of katakana characters</description></item>
+    ///     <item><description>&lt;HANGUL&gt;: A sequence of Hangul characters</description></item>
     /// </list>
     /// </summary>
     public sealed class UAX29URLEmailTokenizerImpl : IStandardTokenizerInterface

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SolrSynonymParser.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SolrSynonymParser.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SolrSynonymParser.cs
index 7016143..74b969c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SolrSynonymParser.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SolrSynonymParser.cs
@@ -26,21 +26,21 @@ namespace Lucene.Net.Analysis.Synonym
     /// <summary>
     /// Parser for the Solr synonyms format.
     /// <list type="bullet">
-    ///     <item> Blank lines and lines starting with '#' are comments.</item>
-    ///     <item> Explicit mappings match any token sequence on the LHS of "=>"
+    ///     <item><description> Blank lines and lines starting with '#' are comments.</description></item>
+    ///     <item><description> Explicit mappings match any token sequence on the LHS of "=>"
     ///         and replace with all alternatives on the RHS.  These types of mappings
     ///         ignore the expand parameter in the constructor.
     ///         Example:
     ///         <code>i-pod, i pod => ipod</code>
-    ///     </item>
-    ///     <item> Equivalent synonyms may be separated with commas and give
+    ///     </description></item>
+    ///     <item><description> Equivalent synonyms may be separated with commas and give
     ///         no explicit mapping.  In this case the mapping behavior will
     ///         be taken from the expand parameter in the constructor.  This allows
     ///         the same synonym file to be used in different synonym handling strategies.
     ///         Example:
     ///         <code>ipod, i-pod, i pod</code>
-    ///     </item>
-    ///     <item> Multiple synonym mapping entries are merged.
+    ///     </description></item>
+    ///     <item><description> Multiple synonym mapping entries are merged.
     ///         Example:
     ///         <code>
     ///             foo => foo bar
@@ -48,7 +48,7 @@ namespace Lucene.Net.Analysis.Synonym
     ///             is equivalent to
     ///             foo => foo bar, baz
     ///         </code>
-    ///     </item>
+    ///     </description></item>
     /// </list>
     /// @lucene.experimental
     /// </summary>
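
As a hedged usage sketch (not part of the commit), building a SynonymMap from the Solr format documented above could look like this in C#, reusing the examples from the comment:

    using System.IO;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Synonym;
    using Lucene.Net.Util;

    var analyzer = new WhitespaceAnalyzer(LuceneVersion.LUCENE_48);
    var parser = new SolrSynonymParser(dedup: true, expand: true, analyzer);
    parser.Parse(new StringReader("i-pod, i pod => ipod\nfoo => foo bar\nfoo => baz"));
    SynonymMap map = parser.Build(); // hand this to a SynonymFilter in the analysis chain

The two "foo" entries are merged into foo => foo bar, baz, per the last bullet above.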

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
index 80699e6..d08941c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Synonym/SynonymFilterFactory.cs
@@ -49,9 +49,9 @@ namespace Lucene.Net.Analysis.Synonym
     /// <see cref="SynonymMap.Parser"/> class name. The default is <c>solr</c>.
     /// A custom <see cref="SynonymMap.Parser"/> is expected to have a constructor taking:
     /// <list type="bullet">
-    ///     <item><c><see cref="bool"/> dedup</c> - true if duplicates should be ignored, false otherwise</item>
-    ///     <item><c><see cref="bool"/> expand</c> - true if conflation groups should be expanded, false if they are one-directional</item>
-    ///     <item><c><see cref="Analyzer"/> analyzer</c> - an analyzer used for each raw synonym</item>
+    ///     <item><description><c><see cref="bool"/> dedup</c> - true if duplicates should be ignored, false otherwise</description></item>
+    ///     <item><description><c><see cref="bool"/> expand</c> - true if conflation groups should be expanded, false if they are one-directional</description></item>
+    ///     <item><description><c><see cref="Analyzer"/> analyzer</c> - an analyzer used for each raw synonym</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
index 0885069..5b84fde 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis.Th
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="ThaiAnalyzer"/>:
     /// <list type="bullet">
-    ///     <item> As of 3.6, a set of Thai stopwords is used by default</item>
+    ///     <item><description> As of 3.6, a set of Thai stopwords is used by default</description></item>
     /// </list>
     /// </para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
index a3ec443..6761686 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
@@ -33,10 +33,10 @@ namespace Lucene.Net.Analysis.Util
     /// <para>
     /// The typical lifecycle for a factory consumer is:
     /// <list type="bullet">
-    ///     <item>Create factory via its constructor (or via XXXFactory.ForName)</item>
-    ///     <item>(Optional) If the factory uses resources such as files, 
-    ///         <see cref="IResourceLoaderAware.Inform(IResourceLoader)"/> is called to initialize those resources.</item>
-    ///     <item>Consumer calls create() to obtain instances.</item>
+    ///     <item><description>Create factory via its constructor (or via XXXFactory.ForName)</description></item>
+    ///     <item><description>(Optional) If the factory uses resources such as files, 
+    ///         <see cref="IResourceLoaderAware.Inform(IResourceLoader)"/> is called to initialize those resources.</description></item>
+    ///     <item><description>Consumer calls create() to obtain instances.</description></item>
     /// </list>
     /// </para>
     /// </summary>
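
The lifecycle above, sketched in C#; the "lowercase" registration name and the argument keys are illustrative assumptions:

    using System.Collections.Generic;
    using System.IO;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Util;
    using Lucene.Net.Util;

    // 1. create the factory via ForName (the factory consumes its args)
    var args = new Dictionary<string, string> { { "luceneMatchVersion", "4.8" } };
    TokenFilterFactory factory = TokenFilterFactory.ForName("lowercase", args);

    // 2. (optional) if the factory were IResourceLoaderAware, call Inform(loader) here

    // 3. obtain instances via Create()
    var tokenizer = new WhitespaceTokenizer(LuceneVersion.LUCENE_48, new StringReader("MIXED Case"));
    var lowercased = factory.Create(tokenizer);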

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
index e006ea5..447fb98 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharArrayMap.cs
@@ -41,8 +41,8 @@ namespace Lucene.Net.Analysis.Util
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="CharArrayMap"/>:
     /// <list type="bullet">
-    ///   <item> As of 3.1, supplementary characters are
-    ///       properly lowercased.</item>
+    ///   <item><description> As of 3.1, supplementary characters are
+    ///       properly lowercased.</description></item>
     /// </list>
     /// Before 3.1 supplementary characters could not be
     /// lowercased correctly due to the lack of Unicode 4

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
index 9ef33c4..e3ba728 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/CharTokenizer.cs
@@ -29,9 +29,9 @@ namespace Lucene.Net.Analysis.Util
     /// You must specify the required <see cref="LuceneVersion"/> compatibility
     /// when creating <see cref="CharTokenizer"/>:
     /// <list type="bullet">
-    ///     <item>As of 3.1, <see cref="CharTokenizer"/> uses an int based API to normalize and
+    ///     <item><description>As of 3.1, <see cref="CharTokenizer"/> uses an int based API to normalize and
     ///         detect token codepoints. See <see cref="IsTokenChar(int)"/> and
-    ///         <see cref="Normalize(int)"/> for details.</item>
+    ///         <see cref="Normalize(int)"/> for details.</description></item>
     /// </list>
     /// </para>
     /// <para>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
index 5687823..631879d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/WordlistLoader.cs
@@ -134,9 +134,9 @@ namespace Lucene.Net.Analysis.Util
         /// <para>
         /// The snowball format is the following:
         /// <list type="bullet">
-        ///     <item>Lines may contain multiple words separated by whitespace.</item>
-        ///     <item>The comment character is the vertical line (&#124;).</item>
-        ///     <item>Lines may contain trailing comments.</item>
+        ///     <item><description>Lines may contain multiple words separated by whitespace.</description></item>
+        ///     <item><description>The comment character is the vertical line (&#124;).</description></item>
+        ///     <item><description>Lines may contain trailing comments.</description></item>
         /// </list>
         /// </para>
         /// </summary>
@@ -177,9 +177,9 @@ namespace Lucene.Net.Analysis.Util
         /// <para>
         /// The snowball format is the following:
         /// <list type="bullet">
-        ///     <item>Lines may contain multiple words separated by whitespace.</item>
-        ///     <item>The comment character is the vertical line (&#124;).</item>
-        ///     <item>Lines may contain trailing comments.</item>
+        ///     <item><description>Lines may contain multiple words separated by whitespace.</description></item>
+        ///     <item><description>The comment character is the vertical line (&#124;).</description></item>
+        ///     <item><description>Lines may contain trailing comments.</description></item>
         /// </list>
         /// </para>
         /// </summary>
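
A tiny stopword file in the snowball format just described (contents invented):

    | comment lines start at a vertical line
    the a an
    of | a trailing comment after the words

Every whitespace-separated token outside a comment becomes an entry in the resulting set.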

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs
index bc2fa88..b1d289d 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationAttributeFactory.cs
@@ -38,16 +38,16 @@ namespace Lucene.Net.Collation
     ///   same at query time):
     /// </para>
     /// <list type="number">
-    ///   <item>JVM vendor</item>
-    ///   <item>JVM version, including patch version</item>
-    ///   <item>
+    ///   <item><description>JVM vendor</description></item>
+    ///   <item><description>JVM version, including patch version</description></item>
+    ///   <item><description>
     ///     The language (and country and variant, if specified) of the Locale
     ///     used when constructing the collator via
     ///     <see cref="Collator.Create(System.Globalization.CultureInfo)"/>.
-    ///   </item>
-    ///   <item>
+    ///   </description></item>
+    ///   <item><description>
     ///     The collation strength used - see <see cref="Collator.Strength"/>
-    ///   </item>
+    ///   </description></item>
     /// </list> 
     /// <para>
     ///   The <c>ICUCollationAttributeFactory</c> in the analysis-icu package 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
index 92ce4a0..4028f0c 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyAnalyzer.cs
@@ -45,16 +45,16 @@ namespace Lucene.Net.Collation
     ///   same at query time):
     /// </para>
     /// <list type="number">
-    ///   <item>JVM vendor</item>
-    ///   <item>JVM version, including patch version</item>
-    ///   <item>
+    ///   <item><description>JVM vendor</description></item>
+    ///   <item><description>JVM version, including patch version</description></item>
+    ///   <item><description>
     ///     The language (and country and variant, if specified) of the Locale
     ///     used when constructing the collator via
     ///     <see cref="Collator.Create(System.Globalization.CultureInfo)"/>.
-    ///   </item>
-    ///   <item>
+    ///   </description></item>
+    ///   <item><description>
     ///     The collation strength used - see <see cref="Collator.Strength"/>
-    ///   </item>
+    ///   </description></item>
     /// </list> 
     /// <para>
     ///   The <c>ICUCollationKeyAnalyzer</c> in the analysis-icu package 
@@ -76,8 +76,8 @@ namespace Lucene.Net.Collation
     /// <para>You must specify the required <see cref="LuceneVersion"/>
     /// compatibility when creating <see cref="CollationKeyAnalyzer"/>:
     /// <list type="bullet">
-    ///   <item> As of 4.0, Collation Keys are directly encoded as bytes. Previous
-    ///   versions will encode the bytes with <see cref="IndexableBinaryStringTools"/>.</item>
+    ///   <item><description> As of 4.0, Collation Keys are directly encoded as bytes. Previous
+    ///   versions will encode the bytes with <see cref="IndexableBinaryStringTools"/>.</description></item>
     /// </list>
     /// </para>
     /// </summary>
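
A construction sketch in C# matching the notes above; the Icu.Collation namespace for Collator is an assumption about the icu.net dependency:

    using System.Globalization;
    using Icu.Collation;
    using Lucene.Net.Collation;
    using Lucene.Net.Util;

    var collator = Collator.Create(new CultureInfo("de-DE"));
    var analyzer = new CollationKeyAnalyzer(LuceneVersion.LUCENE_48, collator);
    // the same collator settings must be used at index and query time,
    // per the effective-sort-order list above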

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
index 4e053d7..d498f84 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilter.cs
@@ -40,16 +40,16 @@ namespace Lucene.Net.Collation
     ///   same at query time):
     /// </para>
     /// <list type="number">
-    ///   <item>JVM vendor</item>
-    ///   <item>JVM version, including patch version</item>
-    ///   <item>
+    ///   <item><description>JVM vendor</description></item>
+    ///   <item><description>JVM version, including patch version</description></item>
+    ///   <item><description>
     ///     The language (and country and variant, if specified) of the Locale
     ///     used when constructing the collator via
     ///     <see cref="Collator.Create(System.Globalization.CultureInfo)"/>.
-    ///   </item>
-    ///   <item>
+    ///   </description></item>
+    ///   <item><description>
     ///     The collation strength used - see <see cref="Collator.Strength"/>
-    ///   </item>
+    ///   </description></item>
     /// </list> 
     /// <para>
     ///   The <c>ICUCollationKeyFilter</c> in the analysis-icu package 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
index 6599f17..5293cb1 100644
--- a/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Collation/CollationKeyFilterFactory.cs
@@ -34,26 +34,26 @@ namespace Lucene.Net.Collation
     /// <para>
     /// This factory can be created in two ways: 
     /// <list type="bullet">
-    ///  <item>Based upon a system collator associated with a <see cref="System.Globalization.CultureInfo"/>.</item>
-    ///  <item>Based upon a tailored ruleset.</item>
+    ///  <item><description>Based upon a system collator associated with a <see cref="System.Globalization.CultureInfo"/>.</description></item>
+    ///  <item><description>Based upon a tailored ruleset.</description></item>
     /// </list>
     /// </para>
     /// <para>
     /// Using a System collator:
     /// <list type="bullet">
-    ///  <item>language: ISO-639 language code (mandatory)</item>
-    ///  <item>country: ISO-3166 country code (optional)</item>
-    ///  <item>variant: vendor or browser-specific code (optional)</item>
-    ///  <item>strength: 'primary','secondary','tertiary', or 'identical' (optional)</item>
-    ///  <item>decomposition: 'no','canonical', or 'full' (optional)</item>
+    ///  <item><description>language: ISO-639 language code (mandatory)</description></item>
+    ///  <item><description>country: ISO-3166 country code (optional)</description></item>
+    ///  <item><description>variant: vendor or browser-specific code (optional)</description></item>
+    ///  <item><description>strength: 'primary','secondary','tertiary', or 'identical' (optional)</description></item>
+    ///  <item><description>decomposition: 'no','canonical', or 'full' (optional)</description></item>
     /// </list>
     /// </para>
     /// <para>
     /// Using a Tailored ruleset:
     /// <list type="bullet">
-    ///  <item>custom: UTF-8 text file containing rules supported by RuleBasedCollator (mandatory)</item>
-    ///  <item>strength: 'primary','secondary','tertiary', or 'identical' (optional)</item>
-    ///  <item>decomposition: 'no','canonical', or 'full' (optional)</item>
+    ///  <item><description>custom: UTF-8 text file containing rules supported by RuleBasedCollator (mandatory)</description></item>
+    ///  <item><description>strength: 'primary','secondary','tertiary', or 'identical' (optional)</description></item>
+    ///  <item><description>decomposition: 'no','canonical', or 'full' (optional)</description></item>
     /// </list>
     /// 
     /// <code>
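
For illustration, a Solr-style registration using the system-collator parameters listed above might look like this (hypothetical values, not the factory's own sample):

    <filter class="solr.CollationKeyFilterFactory"
            language="de" country="DE" strength="primary"/>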

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Facet/Taxonomy/ParallelTaxonomyArrays.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Facet/Taxonomy/ParallelTaxonomyArrays.cs b/src/Lucene.Net.Facet/Taxonomy/ParallelTaxonomyArrays.cs
index c5db947..6dc81c9 100644
--- a/src/Lucene.Net.Facet/Taxonomy/ParallelTaxonomyArrays.cs
+++ b/src/Lucene.Net.Facet/Taxonomy/ParallelTaxonomyArrays.cs
@@ -23,12 +23,12 @@ namespace Lucene.Net.Facet.Taxonomy
     /// <summary>
     /// Returns 3 arrays for traversing the taxonomy:
     /// <list type="bullet">
-    /// <item> <see cref="Parents"/>: <c>Parents[i]</c> denotes the parent of category
-    /// ordinal <c>i</c>.</item>
-    /// <item> <see cref="Children"/>: <c>Children[i]</c> denotes a child of category ordinal
-    /// <c>i</c>.</item>
-    /// <item> <see cref="Siblings"/>: <c>Siblings[i]</c> denotes the sibling of category
-    /// ordinal <c>i</c>.</item>
+    /// <item><description> <see cref="Parents"/>: <c>Parents[i]</c> denotes the parent of category
+    /// ordinal <c>i</c>.</description></item>
+    /// <item><description> <see cref="Children"/>: <c>Children[i]</c> denotes a child of category ordinal
+    /// <c>i</c>.</description></item>
+    /// <item><description> <see cref="Siblings"/>: <c>Siblings[i]</c> denotes the sibling of category
+    /// ordinal <c>i</c>.</description></item>
     /// </list>
     /// 
     /// To traverse the taxonomy tree, you typically start with <c>Children[0]</c>
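
Concretely, a hedged traversal sketch in C#, assuming the usual TaxonomyReader ordinal constants (ROOT_ORDINAL, INVALID_ORDINAL):

    int[] children = arrays.Children; // 'arrays' is a ParallelTaxonomyArrays
    int[] siblings = arrays.Siblings;
    // walk the immediate children of the root category
    for (int ord = children[TaxonomyReader.ROOT_ORDINAL];
         ord != TaxonomyReader.INVALID_ORDINAL;
         ord = siblings[ord])
    {
        // visit category ordinal 'ord'
    }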

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Facet/Taxonomy/TaxonomyReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Facet/Taxonomy/TaxonomyReader.cs b/src/Lucene.Net.Facet/Taxonomy/TaxonomyReader.cs
index cbe3742..ca4d6db 100644
--- a/src/Lucene.Net.Facet/Taxonomy/TaxonomyReader.cs
+++ b/src/Lucene.Net.Facet/Taxonomy/TaxonomyReader.cs
@@ -31,15 +31,15 @@ namespace Lucene.Net.Facet.Taxonomy
     /// A TaxonomyReader holds a list of categories. Each category has a serial
     /// number which we call an "ordinal", and a hierarchical "path" name:
     /// <list type="bullet">
-    /// <item>
+    /// <item><description>
     /// The ordinal is an integer that starts at 0 for the first category (which is
     /// always the root category), and grows contiguously as more categories are
     /// added; Note that once a category is added, it can never be deleted.
-    /// </item>
-    /// <item>
+    /// </description></item>
+    /// <item><description>
     /// The path is a CategoryPath object specifying the category's position in the
     /// hierarchy.
-    /// </item>
+    /// </description></item>
     /// </list>
     /// </para>
     /// <b>Notes about concurrent access to the taxonomy:</b>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs b/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs
index 05f8e92..c6845e9 100644
--- a/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs
+++ b/src/Lucene.Net.Highlighter/Highlight/TokenSources.cs
@@ -155,35 +155,35 @@ namespace Lucene.Net.Search.Highlight
         /// <para/>
         /// In my tests the speeds to recreate 1000 token streams using this method are:
         /// <list type="bullet">
-        ///     <item>
+        ///     <item><description>
         ///     with TermVector offset only data stored - 420  milliseconds 
-        ///     </item>
-        ///     <item>
+        ///     </description></item>
+        ///     <item><description>
         ///     with TermVector offset AND position data stored - 271 milliseconds
         ///     (nb timings for TermVector with position data are based on a tokenizer with contiguous
         ///     positions - no overlaps or gaps)
-        ///     </item>
-        ///     <item>
+        ///     </description></item>
+        ///     <item><description>
         ///     The cost of not using TermPositionVector to store
         ///     pre-parsed content and using an analyzer to re-parse the original content:
         ///     - reanalyzing the original content - 980 milliseconds
-        ///     </item>
+        ///     </description></item>
         /// </list>
         /// 
         /// The re-analyze timings will typically vary depending on -
         /// <list type="number">
-        ///     <item>
+        ///     <item><description>
         ///     The complexity of the analyzer code (timings above were using a
         ///     stemmer/lowercaser/stopword combo)
-        ///     </item>
-        ///     <item>
+        ///     </description></item>
+        ///     <item><description>
         ///     The  number of other fields (Lucene reads ALL fields off the disk 
         ///     when accessing just one document field - can cost dear!)
-        ///     </item>
-        ///     <item>
+        ///     </description></item>
+        ///     <item><description>
         ///     Use of compression on field storage - could be faster due to compression (less disk IO)
         ///     or slower (more CPU burn) depending on the content.
-        ///     </item>
+        ///     </description></item>
         /// </list>
         /// </summary>
         /// <param name="tpv"></param>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Highlighter/PostingsHighlight/PassageScorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Highlighter/PostingsHighlight/PassageScorer.cs b/src/Lucene.Net.Highlighter/PostingsHighlight/PassageScorer.cs
index de0fd45..fc64a70 100644
--- a/src/Lucene.Net.Highlighter/PostingsHighlight/PassageScorer.cs
+++ b/src/Lucene.Net.Highlighter/PostingsHighlight/PassageScorer.cs
@@ -43,9 +43,9 @@ namespace Lucene.Net.Search.PostingsHighlight
         /// <summary>
         /// Creates <see cref="PassageScorer"/> with these default values:
         /// <list type="bullet">
-        ///     <item><c>k1 = 1.2</c></item>
-        ///     <item><c>b = 0.75</c></item>
-        ///     <item><c>pivot = 87</c></item>
+        ///     <item><description><c>k1 = 1.2</c></description></item>
+        ///     <item><description><c>b = 0.75</c></description></item>
+        ///     <item><description><c>pivot = 87</c></description></item>
         /// </list>
         /// </summary>
         public PassageScorer()

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs b/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
index 6211042..7562228 100644
--- a/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
+++ b/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
@@ -46,10 +46,10 @@ namespace Lucene.Net.Search.PostingsHighlight
     /// <para/>
     /// You can customize the behavior by subclassing this highlighter, some important hooks:
     /// <list type="bullet">
-    ///     <item><see cref="GetBreakIterator(string)"/>: Customize how the text is divided into passages.</item>
-    ///     <item><see cref="GetScorer(string)"/>: Customize how passages are ranked.</item>
-    ///     <item><see cref="GetFormatter(string)"/>: Customize how snippets are formatted.</item>
-    ///     <item><see cref="GetIndexAnalyzer(string)"/>: Enable highlighting of MultiTermQuerys such as <see cref="WildcardQuery"/>.</item>
+    ///     <item><see cref="GetBreakIterator(string)"/>: Customize how the text is divided into passages.</description></item>
+    ///     <item><see cref="GetScorer(string)"/>: Customize how passages are ranked.</description></item>
+    ///     <item><see cref="GetFormatter(string)"/>: Customize how snippets are formatted.</description></item>
+    ///     <item><see cref="GetIndexAnalyzer(string)"/>: Enable highlighting of MultiTermQuerys such as <see cref="WildcardQuery"/>.</description></item>
     /// </list>
     /// <para/>
     /// <b>WARNING</b>: The code is very new and probably still has some exciting bugs!

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Misc/Index/Sorter/BlockJoinComparatorSource.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Misc/Index/Sorter/BlockJoinComparatorSource.cs b/src/Lucene.Net.Misc/Index/Sorter/BlockJoinComparatorSource.cs
index 1ee6f1c..db9b8d1 100644
--- a/src/Lucene.Net.Misc/Index/Sorter/BlockJoinComparatorSource.cs
+++ b/src/Lucene.Net.Misc/Index/Sorter/BlockJoinComparatorSource.cs
@@ -28,9 +28,9 @@ namespace Lucene.Net.Index.Sorter
     /// Note that this class is intended to be used with <see cref="SortingMergePolicy"/>,
     /// and for other purposes has some limitations:
     /// <list type="bullet">
-    ///    <item>Cannot yet be used with <see cref="IndexSearcher.SearchAfter(ScoreDoc, Query, Filter, int, Sort)">
-    ///    IndexSearcher.SearchAfter</see></item>
-    ///    <item>Filling sort field values is not yet supported.</item>
+    ///    <item><description>Cannot yet be used with <see cref="IndexSearcher.SearchAfter(ScoreDoc, Query, Filter, int, Sort)">
+    ///    IndexSearcher.SearchAfter</see></description></item>
+    ///    <item><description>Filling sort field values is not yet supported.</description></item>
     /// </list>
     /// @lucene.experimental
     /// </para>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Queries/CustomScoreQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/CustomScoreQuery.cs b/src/Lucene.Net.Queries/CustomScoreQuery.cs
index 1ee6639..e997f3c 100644
--- a/src/Lucene.Net.Queries/CustomScoreQuery.cs
+++ b/src/Lucene.Net.Queries/CustomScoreQuery.cs
@@ -29,8 +29,8 @@ namespace Lucene.Net.Queries
     /// <summary>
     /// Query that sets document score as a programmatic function of several (sub) scores:
     /// <list type="bullet">
-    ///    <item>the score of its subQuery (any query)</item>
-    ///    <item>(optional) the score of its <see cref="FunctionQuery"/> (or queries).</item>
+    ///    <item><description>the score of its subQuery (any query)</description></item>
+    ///    <item><description>(optional) the score of its <see cref="FunctionQuery"/> (or queries).</description></item>
     /// </list>
     /// Subclasses can modify the computation by overriding <see cref="GetCustomScoreProvider"/>.
     /// 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs b/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
index 16b2fe0..f9d0312 100644
--- a/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
+++ b/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
@@ -86,11 +86,11 @@ namespace Lucene.Net.Queries.Mlt
     /// <para/>
     /// Thus you:
     /// <list type="bullet">
-    ///     <item>do your normal, Lucene setup for searching,</item>
-    ///     <item>create a MoreLikeThis,</item>
-    ///     <item>get the text of the doc you want to find similarities to</item>
-    ///     <item>then call one of the <see cref="Like"/> calls to generate a similarity query</item>
-    ///     <item>call the searcher to find the similar docs</item>
+    ///     <item><description>do your normal, Lucene setup for searching,</description></item>
+    ///     <item><description>create a MoreLikeThis,</description></item>
+    ///     <item><description>get the text of the doc you want to find similarities to</description></item>
+    ///     <item><description>then call one of the <see cref="Like"/> calls to generate a similarity query</description></item>
+    ///     <item><description>call the searcher to find the similar docs</description></item>
     /// </list>
     /// <para/>
     /// <b>More Advanced Usage</b>
@@ -103,15 +103,15 @@ namespace Lucene.Net.Queries.Mlt
     /// may want to call the other set methods to control how the similarity queries are
     /// generated:
     /// <list type="bullet">
-    ///     <item><see cref="MinTermFreq"/></item>
-    ///     <item><see cref="MinDocFreq"/></item>
-    ///     <item><see cref="MaxDocFreq"/></item>
-    ///     <item><see cref="SetMaxDocFreqPct(int)"/></item>
-    ///     <item><see cref="MinWordLen"/></item>
-    ///     <item><see cref="MaxWordLen"/></item>
-    ///     <item><see cref="MaxQueryTerms"/></item>
-    ///     <item><see cref="MaxNumTokensParsed"/></item>
-    ///     <item><see cref="StopWords"/></item>
+    ///     <item><description><see cref="MinTermFreq"/></description></item>
+    ///     <item><description><see cref="MinDocFreq"/></description></item>
+    ///     <item><description><see cref="MaxDocFreq"/></description></item>
+    ///     <item><description><see cref="SetMaxDocFreqPct(int)"/></description></item>
+    ///     <item><description><see cref="MinWordLen"/></description></item>
+    ///     <item><description><see cref="MaxWordLen"/></description></item>
+    ///     <item><description><see cref="MaxQueryTerms"/></description></item>
+    ///     <item><description><see cref="MaxNumTokensParsed"/></description></item>
+    ///     <item><description><see cref="StopWords"/></description></item>
     /// </list>
     /// </summary>
     /// <remarks>
@@ -650,12 +650,12 @@ namespace Lucene.Net.Queries.Mlt
         /// Each array has 6 elements.
         /// The elements are:
         /// <list type="bullet">
-        ///     <item>The word (<see cref="string"/>)</item>
-        ///     <item>The top field that this word comes from (<see cref="string"/>)</item>
-        ///     <item>The score for this word (<see cref="float"/>)</item>
-        ///     <item>The IDF value (<see cref="float"/>)</item>
-        ///     <item>The frequency of this word in the index (<see cref="int"/>)</item>
-        ///     <item>The frequency of this word in the source document (<see cref="int"/>)</item>
+        ///     <item><description>The word (<see cref="string"/>)</description></item>
+        ///     <item><description>The top field that this word comes from (<see cref="string"/>)</description></item>
+        ///     <item><description>The score for this word (<see cref="float"/>)</description></item>
+        ///     <item><description>The IDF value (<see cref="float"/>)</description></item>
+        ///     <item><description>The frequency of this word in the index (<see cref="int"/>)</description></item>
+        ///     <item><description>The frequency of this word in the source document (<see cref="int"/>)</description></item>
         /// </list>
         /// This is a somewhat "advanced" routine, and in general only the 1st entry in the array is of interest.
         /// This method is exposed so that you can identify the "interesting words" in a document.
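
A hedged sketch of the five steps listed above (the reader, searcher, analyzer, docId, and "body" field are assumptions):

    var mlt = new MoreLikeThis(reader)            // after the normal Lucene setup
    {
        Analyzer = analyzer,
        FieldNames = new[] { "body" },
        MinTermFreq = 1,
        MinDocFreq = 1
    };
    Query like = mlt.Like(docId);                 // similarity query for an indexed doc
    TopDocs similar = searcher.Search(like, 10);  // find the similar docs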

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.QueryParser/Classic/QueryParser.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.QueryParser/Classic/QueryParser.cs b/src/Lucene.Net.QueryParser/Classic/QueryParser.cs
index 070802a..3a35db8 100644
--- a/src/Lucene.Net.QueryParser/Classic/QueryParser.cs
+++ b/src/Lucene.Net.QueryParser/Classic/QueryParser.cs
@@ -34,19 +34,19 @@ namespace Lucene.Net.QueryParsers.Classic
     /// A Query is a series of clauses.
     /// A clause may be prefixed by:
     /// <list type="bullet">
-    /// <item> a plus (<c>+</c>) or a minus (<c>-</c>) sign, indicating
-    /// that the clause is required or prohibited respectively; or</item>
-    /// <item> a term followed by a colon, indicating the field to be searched.
-    /// This enables one to construct queries which search multiple fields.</item>
+    /// <item><description> a plus (<c>+</c>) or a minus (<c>-</c>) sign, indicating
+    /// that the clause is required or prohibited respectively; or</description></item>
+    /// <item><description> a term followed by a colon, indicating the field to be searched.
+    /// This enables one to construct queries which search multiple fields.</description></item>
     /// </list>
     /// 
     /// <para/>
     /// A clause may be either:
     /// <list type="bullet">
-    /// <item> a term, indicating all the documents that contain this term; or</item>
-    /// <item> a nested query, enclosed in parentheses.  Note that this may be used
+    /// <item><description> a term, indicating all the documents that contain this term; or</description></item>
+    /// <item><description> a nested query, enclosed in parentheses.  Note that this may be used
     /// with a <c>+</c>/<c>-</c> prefix to require any of a set of
-    /// terms.</item>
+    /// terms.</description></item>
     /// </list>
     /// 
     /// <para/>
@@ -95,7 +95,7 @@ namespace Lucene.Net.QueryParsers.Classic
     /// <b>NOTE</b>: You must specify the required <see cref="LuceneVersion" /> compatibility when
     /// creating QueryParser:
     /// <list type="bullet">
-    /// <item>As of 3.1, <see cref="QueryParserBase.AutoGeneratePhraseQueries"/> is false by default.</item>
+    /// <item><description>As of 3.1, <see cref="QueryParserBase.AutoGeneratePhraseQueries"/> is false by default.</description></item>
     /// </list>
     /// </summary>
     public class QueryParser : QueryParserBase
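
For example, a minimal sketch of the clause syntax described above (the analyzer and field names are assumptions):

    var analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48);
    var parser = new QueryParser(LuceneVersion.LUCENE_48, "body", analyzer);
    // "+" = required, "-" = prohibited, "title:" switches field, parentheses nest:
    Query q = parser.Parse("+lucene -jakarta title:(net port)");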

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.QueryParser/Flexible/Core/QueryParserHelper.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.QueryParser/Flexible/Core/QueryParserHelper.cs b/src/Lucene.Net.QueryParser/Flexible/Core/QueryParserHelper.cs
index 8bc7be4..f113c99 100644
--- a/src/Lucene.Net.QueryParser/Flexible/Core/QueryParserHelper.cs
+++ b/src/Lucene.Net.QueryParser/Flexible/Core/QueryParserHelper.cs
@@ -195,19 +195,19 @@ namespace Lucene.Net.QueryParsers.Flexible.Core
         /// In this method the three phases are executed:
         /// <para/>
         /// <list type="number">
-        ///     <item>
+        ///     <item><description>
         ///     the query string is parsed using the
         ///     text parser returned by <see cref="SyntaxParser"/>, the result is a query
         ///     node tree.
-        ///     </item>
-        ///     <item>
+        ///     </description></item>
+        ///     <item><description>
         ///     the query node tree is processed by the
         ///     processor returned by <see cref="QueryNodeProcessor"/>.
-        ///     </item>
-        ///     <item>
+        ///     </description></item>
+        ///     <item><description>
         ///     an object is built from the query node
         ///     tree using the builder returned by <see cref="QueryBuilder"/>.
-        ///     </item>
+        ///     </description></item>
         /// </list>
         /// </summary>
         /// <param name="query">the query string</param>
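
Spelled out against the members referenced above, the three phases amount to the following (a sketch as it would run inside the helper; null checks and error handling omitted):

    IQueryNode tree = SyntaxParser.Parse(query, defaultField); // 1. text -> query node tree
    tree = QueryNodeProcessor.Process(tree);                   // 2. transform the node tree
    object result = QueryBuilder.Build(tree);                  // 3. node tree -> query object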

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.QueryParser/Flexible/Standard/StandardQueryParser.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.QueryParser/Flexible/Standard/StandardQueryParser.cs b/src/Lucene.Net.QueryParser/Flexible/Standard/StandardQueryParser.cs
index 1940263..dedcd9f 100644
--- a/src/Lucene.Net.QueryParser/Flexible/Standard/StandardQueryParser.cs
+++ b/src/Lucene.Net.QueryParser/Flexible/Standard/StandardQueryParser.cs
@@ -54,25 +54,25 @@ namespace Lucene.Net.QueryParsers.Flexible.Standard
     /// <para/>
     /// A Query is a series of clauses. A clause may be prefixed by:
     /// <list type="bullet">
-    ///     <item>
+    ///     <item><description>
     ///     a plus (<c>+</c>) or a minus (<c>-</c>) sign, indicating that
     ///     the clause is required or prohibited respectively; or
-    ///     </item>
-    ///     <item>
+    ///     </description></item>
+    ///     <item><description>
     ///     a term followed by a colon, indicating the field to be searched. This
     ///     enables one to construct queries which search multiple fields.
-    ///     </item>
+    ///     </description></item>
     /// </list>
     /// 
     /// A clause may be either:
     /// <list type="bullet">
-    ///     <item>
+    ///     <item><description>
     ///     a term, indicating all the documents that contain this term; or
-    ///     </item>
-    ///     <item>
+    ///     </description></item>
+    ///     <item><description>
     ///     a nested query, enclosed in parentheses. Note that this may be used with
     ///     a <c>+</c>/<c>-</c> prefix to require any of a set of terms.
-    ///     </item>
+    ///     </description></item>
     /// </list>
     /// 
     /// Thus, in BNF, the query grammar is:
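
A minimal sketch of that clause syntax through this parser (the analyzer and field are assumptions):

    var parser = new StandardQueryParser(new StandardAnalyzer(LuceneVersion.LUCENE_48));
    Query q = parser.Parse("+lucene +(net OR port) -jakarta", "body");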

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.QueryParser/Simple/SimpleQueryParser.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.QueryParser/Simple/SimpleQueryParser.cs b/src/Lucene.Net.QueryParser/Simple/SimpleQueryParser.cs
index 01af234..7438bdd 100644
--- a/src/Lucene.Net.QueryParser/Simple/SimpleQueryParser.cs
+++ b/src/Lucene.Net.QueryParser/Simple/SimpleQueryParser.cs
@@ -69,14 +69,14 @@ namespace Lucene.Net.QueryParsers.Simple
     /// to decipher what it can; however, this may mean odd or unexpected results.
     /// <h4>Query Operators</h4>
     /// <list type="bullet">
-    ///  <item>'<c>+</c>' specifies <c>AND</c> operation: <c>token1+token2</c></item>
-    ///  <item>'<c>|</c>' specifies <c>OR</c> operation: <c>token1|token2</c></item>
-    ///  <item>'<c>-</c>' negates a single token: <c>-token0</c></item>
-    ///  <item>'<c>"</c>' creates phrases of terms: <c>"term1 term2 ..."</c></item>
-    ///  <item>'<c>*</c>' at the end of terms specifies prefix query: <c>term*</c></item>
-    ///  <item>'<c>~</c>N' at the end of terms specifies fuzzy query: <c>term~1</c></item>
-    ///  <item>'<c>~</c>N' at the end of phrases specifies near query: <c>"term1 term2"~5</c></item>
-    ///  <item>'<c>(</c>' and '<c>)</c>' specifies precedence: <c>token1 + (token2 | token3)</c></item>
+    ///  <item><description>'<c>+</c>' specifies <c>AND</c> operation: <c>token1+token2</c></description></item>
+    ///  <item><description>'<c>|</c>' specifies <c>OR</c> operation: <c>token1|token2</c></description></item>
+    ///  <item><description>'<c>-</c>' negates a single token: <c>-token0</c></description></item>
+    ///  <item><description>'<c>"</c>' creates phrases of terms: <c>"term1 term2 ..."</c></description></item>
+    ///  <item><description>'<c>*</c>' at the end of terms specifies prefix query: <c>term*</c></description></item>
+    ///  <item><description>'<c>~</c>N' at the end of terms specifies fuzzy query: <c>term~1</c></description></item>
+    ///  <item><description>'<c>~</c>N' at the end of phrases specifies near query: <c>"term1 term2"~5</c></description></item>
+    ///  <item><description>'<c>(</c>' and '<c>)</c>' specifies precedence: <c>token1 + (token2 | token3)</c></description></item>
     /// </list>
     /// <para/>
     /// The default operator is <c>OR</c> if no other operator is specified.
@@ -99,20 +99,20 @@ namespace Lucene.Net.QueryParsers.Simple
     /// beyond the first character do not need to be escaped.
     /// For example:
     /// <list type="bullet">
-    ///   <item><c>-term1</c>   -- Specifies <c>NOT</c> operation against <c>term1</c></item>
-    ///   <item><c>\-term1</c>  -- Searches for the term <c>-term1</c>.</item>
-    ///   <item><c>term-1</c>   -- Searches for the term <c>term-1</c>.</item>
-    ///   <item><c>term\-1</c>  -- Searches for the term <c>term-1</c>.</item>
+    ///   <item><description><c>-term1</c>   -- Specifies <c>NOT</c> operation against <c>term1</c></description></item>
+    ///   <item><description><c>\-term1</c>  -- Searches for the term <c>-term1</c>.</description></item>
+    ///   <item><description><c>term-1</c>   -- Searches for the term <c>term-1</c>.</description></item>
+    ///   <item><description><c>term\-1</c>  -- Searches for the term <c>term-1</c>.</description></item>
     /// </list>
     /// <para/>
     /// The '<c>*</c>' operator is a special case. On individual terms (not phrases) the last
     /// character of a term that is '<c>*</c>' must be escaped; however, any '<c>*</c>' characters
     /// before the last character do not need to be escaped:
     /// <list type="bullet">
-    ///   <item><c>term1*</c>  --  Searches for the prefix <c>term1</c></item>
-    ///   <item><c>term1\*</c> --  Searches for the term <c>term1*</c></item>
-    ///   <item><c>term*1</c>  --  Searches for the term <c>term*1</c></item>
-    ///   <item><c>term\*1</c> --  Searches for the term <c>term*1</c></item>
+    ///   <item><description><c>term1*</c>  --  Searches for the prefix <c>term1</c></description></item>
+    ///   <item><description><c>term1\*</c> --  Searches for the term <c>term1*</c></description></item>
+    ///   <item><description><c>term*1</c>  --  Searches for the term <c>term*1</c></description></item>
+    ///   <item><description><c>term\*1</c> --  Searches for the term <c>term*1</c></description></item>
     /// </list>
     /// <para/>
     /// Note that the above examples consider the terms before text processing.
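
A small sketch combining the operators above (the analyzer and field are assumptions):

    var parser = new SimpleQueryParser(new StandardAnalyzer(LuceneVersion.LUCENE_48), "body");
    // OR of a near-phrase, a prefix, and a negated token:
    Query q = parser.Parse("\"term1 term2\"~5 | term3* -term4");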

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.QueryParser/Surround/Query/SpanNearClauseFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.QueryParser/Surround/Query/SpanNearClauseFactory.cs b/src/Lucene.Net.QueryParser/Surround/Query/SpanNearClauseFactory.cs
index 7fba250..8fbc219 100644
--- a/src/Lucene.Net.QueryParser/Surround/Query/SpanNearClauseFactory.cs
+++ b/src/Lucene.Net.QueryParser/Surround/Query/SpanNearClauseFactory.cs
@@ -29,13 +29,13 @@ namespace Lucene.Net.QueryParsers.Surround.Query
     /// Operations:
     /// 
     /// <list type="bullet">
-    ///     <item>create for a field name and an indexreader.</item>
+    ///     <item><description>create for a field name and an indexreader.</description></item>
     /// 
-    ///     <item>add a weighted Term - this should add a corresponding SpanTermQuery, or increase the weight of an existing one.</item>
+    ///     <item><description>add a weighted Term - this should add a corresponding SpanTermQuery, or increase the weight of an existing one.</description></item>
     /// 
-    ///     <item>add a weighted subquery SpanNearQuery</item>
+    ///     <item><description>add a weighted subquery SpanNearQuery</description></item>
     /// 
-    ///     <item>create a clause for SpanNearQuery from the things added above.</item>
+    ///     <item><description>create a clause for SpanNearQuery from the things added above.</description></item>
     /// </list>
     /// <para/>
     /// For this, create an array of SpanQuery's from the added ones.
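
A hedged sketch of those operations (the constructor arguments and the BasicQueryFactory usage are assumptions drawn from this package):

    var factory = new SpanNearClauseFactory(reader, "body", new BasicQueryFactory());
    factory.AddTermWeighted(new Term("body", "lucene"), 1.0f); // SpanTermQuery, or weight bump
    SpanQuery clause = factory.MakeSpanClause();               // combine everything added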

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Sandbox/Queries/SortedSetSortField.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Sandbox/Queries/SortedSetSortField.cs b/src/Lucene.Net.Sandbox/Queries/SortedSetSortField.cs
index 78a27db..2069099 100644
--- a/src/Lucene.Net.Sandbox/Queries/SortedSetSortField.cs
+++ b/src/Lucene.Net.Sandbox/Queries/SortedSetSortField.cs
@@ -39,15 +39,15 @@ namespace Lucene.Net.Sandbox.Queries
     /// <para/>
     /// Limitations:
     /// <list type="bullet">
-    ///     <item>
+    ///     <item><description>
     ///     Fields containing <see cref="int.MaxValue"/> or more unique values
     ///     are unsupported.
-    ///     </item>
-    ///     <item>
+    ///     </description></item>
+    ///     <item><description>
     ///     Selectors other than the default <see cref="Selector.MIN"/> require 
     ///     optional codec support. However several codecs provided by Lucene,
     ///     including the current default codec, support this.
-    ///     </item>
+    ///     </description></item>
     /// </list>
     /// </summary>
     public class SortedSetSortField : SortField
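
For example, a sketch that sorts by the minimum value per document ("label" is a hypothetical SORTED_SET DocValues field; query and searcher are assumed in scope):

    var sort = new Sort(new SortedSetSortField("label", false)); // false = ascending
    TopDocs hits = searcher.Search(query, 10, sort);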

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Spatial/Prefix/PrefixTreeStrategy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Spatial/Prefix/PrefixTreeStrategy.cs b/src/Lucene.Net.Spatial/Prefix/PrefixTreeStrategy.cs
index 61ed934..ac3d79e 100644
--- a/src/Lucene.Net.Spatial/Prefix/PrefixTreeStrategy.cs
+++ b/src/Lucene.Net.Spatial/Prefix/PrefixTreeStrategy.cs
@@ -41,27 +41,27 @@ namespace Lucene.Net.Spatial.Prefix
     /// 
     /// <h4>Characteristics:</h4>
     /// <list type="bullet">
-    /// <item>Can index any shape; however only
+    /// <item><description>Can index any shape; however only
     /// <see cref="RecursivePrefixTreeStrategy">RecursivePrefixTreeStrategy</see>
-    /// can effectively search non-point shapes.</item>
-    /// <item>Can index a variable number of shapes per field value. This strategy
+    /// can effectively search non-point shapes.</description></item>
+    /// <item><description>Can index a variable number of shapes per field value. This strategy
     /// can do it via multiple calls to <see cref="CreateIndexableFields(IShape)"/>
     /// for a document or by giving it some sort of Shape aggregate (e.g. NTS
     /// WKT MultiPoint).  The shape's boundary is approximated to a grid precision.
-    /// </item>
-    /// <item>Can query with any shape.  The shape's boundary is approximated to a grid
-    /// precision.</item>
-    /// <item>Only <see cref="SpatialOperation.Intersects"/>
+    /// </description></item>
+    /// <item><description>Can query with any shape.  The shape's boundary is approximated to a grid
+    /// precision.</description></item>
+    /// <item><description>Only <see cref="SpatialOperation.Intersects"/>
     /// is supported.  If only points are indexed then this is effectively equivalent
-    /// to IsWithin.</item>
-    /// <item>The strategy supports <see cref="MakeDistanceValueSource(IPoint, double)"/>
+    /// to IsWithin.</description></item>
+    /// <item><description>The strategy supports <see cref="MakeDistanceValueSource(IPoint, double)"/>
     /// even for multi-valued data, so long as the indexed data is all points; the
     /// behavior is undefined otherwise.  However, <c>it will likely be removed in
     /// the future</c> in lieu of using another strategy with a more scalable
     /// implementation.  Use of this call is the only
     /// circumstance in which a cache is used.  The cache is simple but as such
     /// it doesn't scale to large numbers of points nor is it real-time-search
-    /// friendly.</item>
+    /// friendly.</description></item>
     /// </list>
     /// 
     /// <h4>Implementation:</h4>
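
A sketch through the concrete RecursivePrefixTreeStrategy (the Spatial4n context, grid depth, field name, and document are assumptions):

    SpatialContext ctx = SpatialContext.GEO;
    var grid = new GeohashPrefixTree(ctx, 11);          // 11 levels of grid precision
    var strategy = new RecursivePrefixTreeStrategy(grid, "location");
    foreach (var field in strategy.CreateIndexableFields(ctx.MakePoint(-80.93, 33.77)))
        doc.Add(field);                                 // may be called multiple times per doc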

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Spatial/SpatialStrategy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Spatial/SpatialStrategy.cs b/src/Lucene.Net.Spatial/SpatialStrategy.cs
index 348ca7e..bb39500 100644
--- a/src/Lucene.Net.Spatial/SpatialStrategy.cs
+++ b/src/Lucene.Net.Spatial/SpatialStrategy.cs
@@ -32,11 +32,11 @@ namespace Lucene.Net.Spatial
     /// Different implementations will support different features. A strategy should
     /// document these common elements:
     /// <list type="bullet">
-    ///     <item>Can it index more than one shape per field?</item>
-    ///     <item>What types of shapes can be indexed?</item>
-    ///     <item>What types of query shapes can be used?</item>
-    ///     <item>What types of query operations are supported? This might vary per shape.</item>
-    ///     <item>Does it use the <see cref="FieldCache"/>, or some other type of cache?  When?</item>
+    ///     <item><description>Can it index more than one shape per field?</description></item>
+    ///     <item><description>What types of shapes can be indexed?</description></item>
+    ///     <item><description>What types of query shapes can be used?</description></item>
+    ///     <item><description>What types of query operations are supported? This might vary per shape.</description></item>
+    ///     <item><description>Does it use the <see cref="FieldCache"/>, or some other type of cache?  When?</description></item>
     /// </list>
     /// If a strategy only supports certain shapes at index or query time, then in
     /// general it will throw an exception if given an incompatible one.  It will not
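
A sketch of the query path shared by the strategies (the ctx/strategy setup and the Spatial4n DistanceUtils radius conversion are assumptions):

    var circle = ctx.MakeCircle(-80.0, 33.0,
        DistanceUtils.Dist2Degrees(200, DistanceUtils.EARTH_MEAN_RADIUS_KM));
    var args = new SpatialArgs(SpatialOperation.Intersects, circle);
    Query q = strategy.MakeQuery(args);   // throws if the shape/operation is unsupported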

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Spatial/Vector/PointVectorStrategy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Spatial/Vector/PointVectorStrategy.cs b/src/Lucene.Net.Spatial/Vector/PointVectorStrategy.cs
index e8a0e6a..28a0050 100644
--- a/src/Lucene.Net.Spatial/Vector/PointVectorStrategy.cs
+++ b/src/Lucene.Net.Spatial/Vector/PointVectorStrategy.cs
@@ -32,11 +32,11 @@ namespace Lucene.Net.Spatial.Vector
     /// 
     /// <h4>Characteristics:</h4>
     /// <list type="bullet">
-    ///     <item>Only indexes points; just one per field value.</item>
-    ///     <item>Can query by a rectangle or circle.</item>
-    ///     <item><see cref="SpatialOperation.Intersects"/> and <see cref="SpatialOperation.IsWithin"/> is supported.</item>
-    ///     <item>Uses the FieldCache for <see cref="SpatialStrategy.MakeDistanceValueSource(IPoint)"/> and for
-    ///     searching with a Circle.</item>
+    ///     <item><description>Only indexes points; just one per field value.</description></item>
+    ///     <item><description>Can query by a rectangle or circle.</description></item>
+    ///     <item><description><see cref="SpatialOperation.Intersects"/> and <see cref="SpatialOperation.IsWithin"/> is supported.</description></item>
+    ///     <item><description>Uses the FieldCache for <see cref="SpatialStrategy.MakeDistanceValueSource(IPoint)"/> and for
+    ///     searching with a Circle.</description></item>
     /// </list>
     /// 
     /// <h4>Implementation:</h4>
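
A minimal sketch (the "location" field prefix, ctx, and doc are assumptions; the strategy derives an x and a y numeric field from the prefix):

    var strategy = new PointVectorStrategy(SpatialContext.GEO, "location");
    foreach (var field in strategy.CreateIndexableFields(ctx.MakePoint(-80.93, 33.77)))
        doc.Add(field);   // one point per field value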

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Suggest/Spell/LuceneLevenshteinDistance.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Suggest/Spell/LuceneLevenshteinDistance.cs b/src/Lucene.Net.Suggest/Spell/LuceneLevenshteinDistance.cs
index 5f8988b..4010f80 100644
--- a/src/Lucene.Net.Suggest/Spell/LuceneLevenshteinDistance.cs
+++ b/src/Lucene.Net.Suggest/Spell/LuceneLevenshteinDistance.cs
@@ -27,13 +27,13 @@ namespace Lucene.Net.Search.Spell
     ///  
     ///  Notes:
     ///  <list type="bullet">
-    ///    <item> This metric treats full unicode codepoints as characters</item>
-    ///    <item> This metric scales raw edit distances into a floating point score
-    ///         based upon the shortest of the two terms</item>
-    ///    <item> Transpositions of two adjacent codepoints are treated as primitive 
-    ///         edits.</item>
-    ///    <item> Edits are applied in parallel: for example, "ab" and "bca" have 
-    ///         distance 3.</item>
+    ///    <item><description> This metric treats full Unicode codepoints as characters</description></item>
+    ///    <item><description> This metric scales raw edit distances into a floating point score
+    ///         based upon the shorter of the two terms</description></item>
+    ///    <item><description> Transpositions of two adjacent codepoints are treated as primitive 
+    ///         edits.</description></item>
+    ///    <item><description> Edits are applied in parallel: for example, "ab" and "bca" have 
+    ///         distance 3.</description></item>
     ///  </list>
     ///  
     ///  NOTE: this class is not particularly efficient. It is only intended
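
A quick sketch of the scaled score (higher means more similar; 1.0 is identical):

    IStringDistance metric = new LuceneLevenshteinDistance();
    float sim = metric.GetDistance("lucene", "lucnee"); // one transposition = one edit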

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs b/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs
index 8c58e5a..0f85629 100644
--- a/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs
+++ b/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingSuggester.cs
@@ -65,21 +65,21 @@ namespace Lucene.Net.Search.Suggest.Analyzing
     /// There are some limitations:
     /// <list type="number">
     /// 
-    ///   <item> A lookup from a query like "net" in English won't
+    ///   <item><description> A lookup from a query like "net" in English won't
     ///        be any different than "net " (ie, user added a
     ///        trailing space) because analyzers don't reflect
     ///        when they've seen a token separator and when they
-    ///        haven't.</item>
+    ///        haven't.</description></item>
     /// 
-    ///   <item> If you're using <see cref="Analysis.Core.StopFilter"/>, and the user will
+    ///   <item><description> If you're using <see cref="Analysis.Core.StopFilter"/>, and the user will
     ///        type "fast apple", but so far all they've typed is
     ///        "fast a", again because the analyzer doesn't convey whether
     ///        it's seen a token separator after the "a",
     ///        <see cref="Analysis.Core.StopFilter"/> will remove that "a" causing
-    ///        far more matches than you'd expect.</item>
+    ///        far more matches than you'd expect.</description></item>
     /// 
-    ///   <item> Lookups with the empty string return no results
-    ///        instead of all results.</item>
+    ///   <item><description> Lookups with the empty string return no results
+    ///        instead of all results.</description></item>
     /// </list>
     /// 
     /// @lucene.experimental
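
A hedged usage sketch (the reader, analyzer, and "title" field are assumptions):

    var suggester = new AnalyzingSuggester(new StandardAnalyzer(LuceneVersion.LUCENE_48));
    suggester.Build(new LuceneDictionary(reader, "title"));
    var results = suggester.DoLookup("net", false, 5); // empty string: see limitation 3 above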

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Suggest/Suggest/DocumentDictionary.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Suggest/Suggest/DocumentDictionary.cs b/src/Lucene.Net.Suggest/Suggest/DocumentDictionary.cs
index 4026e36..af9fabe 100644
--- a/src/Lucene.Net.Suggest/Suggest/DocumentDictionary.cs
+++ b/src/Lucene.Net.Suggest/Suggest/DocumentDictionary.cs
@@ -31,19 +31,19 @@ namespace Lucene.Net.Search.Suggest
     /// </para>
     /// <b>NOTE:</b> 
     ///  <list type="bullet">
-    ///    <item>
+    ///    <item><description>
     ///      The term and (optionally) payload fields have to be
     ///      stored
-    ///    </item>
-    ///    <item>
+    ///    </description></item>
+    ///    <item><description>
     ///      The weight field can be stored or can be a <see cref="NumericDocValues"/>.
     ///      If the weight field is not defined, the value of the weight is <c>0</c>
-    ///    </item>
-    ///    <item>
+    ///    </description></item>
+    ///    <item><description>
     ///      if any of the term or (optionally) payload fields supplied
     ///      do not have a value for a document, then the document is 
     ///      skipped by the dictionary
-    ///    </item>
+    ///    </description></item>
     ///  </list>
     /// </summary>
     public class DocumentDictionary : IDictionary
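
For example (field names are assumptions; per the notes above, "title" must be stored and "weight" may be stored or a NumericDocValues field):

    var dict = new DocumentDictionary(reader, "title", "weight");
    suggester.Build(dict); // docs with no "title" value are skipped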

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Suggest/Suggest/DocumentValueSourceDictionary.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Suggest/Suggest/DocumentValueSourceDictionary.cs b/src/Lucene.Net.Suggest/Suggest/DocumentValueSourceDictionary.cs
index 401e46a..baf0e9f 100644
--- a/src/Lucene.Net.Suggest/Suggest/DocumentValueSourceDictionary.cs
+++ b/src/Lucene.Net.Suggest/Suggest/DocumentValueSourceDictionary.cs
@@ -33,15 +33,15 @@ namespace Lucene.Net.Search.Suggest
     /// </para>
     /// <b>NOTE:</b> 
     ///  <list type="bullet">
-    ///    <item>
+    ///    <item><description>
     ///      The term and (optionally) payload fields have to be
     ///      stored
-    ///    </item>
-    ///    <item>
+    ///    </description></item>
+    ///    <item><description>
     ///      if the term or (optionally) payload fields supplied
     ///      do not have a value for a document, then the document is 
     ///      rejected by the dictionary
-    ///    </item>
+    ///    </description></item>
     ///  </list>
     ///  <para>
     ///  In practice the <see cref="ValueSource"/> will likely be obtained
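
A sketch of supplying the weight as a ValueSource (the Int64FieldSource choice and both field names are assumptions):

    var weights = new Int64FieldSource("popularity"); // reads a numeric DocValues field
    var dict = new DocumentValueSourceDictionary(reader, "title", weights, "payload");
    suggester.Build(dict);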

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7099a846/src/Lucene.Net.Suggest/Suggest/FileDictionary.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Suggest/Suggest/FileDictionary.cs b/src/Lucene.Net.Suggest/Suggest/FileDictionary.cs
index e6f48da..35d6ab0 100644
--- a/src/Lucene.Net.Suggest/Suggest/FileDictionary.cs
+++ b/src/Lucene.Net.Suggest/Suggest/FileDictionary.cs
@@ -30,20 +30,20 @@ namespace Lucene.Net.Search.Suggest
     /// <para>Format allowed: 1 entry per line:</para>
     /// <para>An entry can be: </para>
     /// <list type="number">
-    /// <item>suggestion</item>
-    /// <item>suggestion <see cref="fieldDelimiter"/> weight</item>
-    /// <item>suggestion <see cref="fieldDelimiter"/> weight <see cref="fieldDelimiter"/> payload</item>
+    /// <item><description>suggestion</description></item>
+    /// <item><description>suggestion <see cref="fieldDelimiter"/> weight</description></item>
+    /// <item><description>suggestion <see cref="fieldDelimiter"/> weight <see cref="fieldDelimiter"/> payload</description></item>
     /// </list>
     /// where the default <see cref="fieldDelimiter"/> is <see cref="DEFAULT_FIELD_DELIMITER"/> (a tab)
     /// <para>
     /// <b>NOTE:</b> 
     /// <list type="number">
-    /// <item>In order to have payload enabled, the first entry has to have a payload</item>
-    /// <item>If the weight for an entry is not specified then a value of 1 is used</item>
-    /// <item>A payload cannot be specified without having the weight specified for an entry</item>
-    /// <item>If the payload for an entry is not specified (assuming payload is enabled) 
-    ///  then an empty payload is returned</item>
-    /// <item>An entry cannot have more than two <see cref="fieldDelimiter"/>s</item>
+    /// <item><description>In order to have payload enabled, the first entry has to have a payload</description></item>
+    /// <item><description>If the weight for an entry is not specified then a value of 1 is used</description></item>
+    /// <item><description>A payload cannot be specified without having the weight specified for an entry</description></item>
+    /// <item><description>If the payload for an entry is not specified (assuming payload is enabled) 
+    ///  then an empty payload is returned</description></item>
+    /// <item><description>An entry cannot have more than two <see cref="fieldDelimiter"/>s</description></item>
     /// </list>
     /// </para>
     /// <c>Example:</c><para/>