You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucenenet.apache.org by ni...@apache.org on 2017/06/06 00:11:34 UTC

[01/48] lucenenet git commit: Lucene.Net.Search.Similarities: Fixed up documentation comments

Repository: lucenenet
Updated Branches:
  refs/heads/master 1197b1aed -> 666de32b0


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/Similarity.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/Similarity.cs b/src/Lucene.Net/Search/Similarities/Similarity.cs
index 95b3c01..44cd6e0 100644
--- a/src/Lucene.Net/Search/Similarities/Similarity.cs
+++ b/src/Lucene.Net/Search/Similarities/Similarity.cs
@@ -30,71 +30,72 @@ namespace Lucene.Net.Search.Similarities
 
     /// <summary>
     /// Similarity defines the components of Lucene scoring.
-    /// <p>
+    /// <para/>
     /// Expert: Scoring API.
-    /// <p>
-    /// this is a low-level API, you should only extend this API if you want to implement
+    /// <para/>
+    /// This is a low-level API, you should only extend this API if you want to implement
     /// an information retrieval <i>model</i>.  If you are instead looking for a convenient way
     /// to alter Lucene's scoring, consider extending a higher-level implementation
-    /// such as <seealso cref="TFIDFSimilarity"/>, which implements the vector space model with this API, or
-    /// just tweaking the default implementation: <seealso cref="DefaultSimilarity"/>.
-    /// <p>
+    /// such as <see cref="TFIDFSimilarity"/>, which implements the vector space model with this API, or
+    /// just tweaking the default implementation: <see cref="DefaultSimilarity"/>.
+    /// <para/>
     /// Similarity determines how Lucene weights terms, and Lucene interacts with
     /// this class at both <a href="#indextime">index-time</a> and
     /// <a href="#querytime">query-time</a>.
-    /// <p>
+    /// <para/>
     /// <a name="indextime"/>
-    /// At indexing time, the indexer calls <seealso cref="#computeNorm(FieldInvertState)"/>, allowing
-    /// the Similarity implementation to set a per-document value for the field that will
-    /// be later accessible via <seealso cref="AtomicReader#getNormValues(String)"/>.  Lucene makes no assumption
+    /// At indexing time, the indexer calls <see cref="ComputeNorm(FieldInvertState)"/>, allowing
+    /// the <see cref="Similarity"/> implementation to set a per-document value for the field that will
+    /// be later accessible via <see cref="Index.AtomicReader.GetNormValues(string)"/>.  Lucene makes no assumption
     /// about what is in this norm, but it is most useful for encoding length normalization
     /// information.
-    /// <p>
+    /// <para/>
     /// Implementations should carefully consider how the normalization is encoded: while
-    /// Lucene's classical <seealso cref="TFIDFSimilarity"/> encodes a combination of index-time boost
-    /// and length normalization information with <seealso cref="SmallFloat"/> into a single byte, this
+    /// Lucene's classical <see cref="TFIDFSimilarity"/> encodes a combination of index-time boost
+    /// and length normalization information with <see cref="Util.SmallSingle"/> into a single byte, this
     /// might not be suitable for all purposes.
-    /// <p>
+    /// <para/>
     /// Many formulas require the use of average document length, which can be computed via a
-    /// combination of <seealso cref="CollectionStatistics#sumTotalTermFreq()"/> and
-    /// <seealso cref="CollectionStatistics#maxDoc()"/> or <seealso cref="CollectionStatistics#docCount()"/>,
+    /// combination of <see cref="CollectionStatistics.SumTotalTermFreq"/> and
+    /// <see cref="CollectionStatistics.MaxDoc"/> or <see cref="CollectionStatistics.DocCount"/>,
     /// depending upon whether the average should reflect field sparsity.
-    /// <p>
+    /// <para/>
     /// Additional scoring factors can be stored in named
-    /// <code>NumericDocValuesField</code>s and accessed
-    /// at query-time with <seealso cref="AtomicReader#getNumericDocValues(String)"/>.
-    /// <p>
+    /// <see cref="Documents.NumericDocValuesField"/>s and accessed
+    /// at query-time with <see cref="Index.AtomicReader.GetNumericDocValues(string)"/>.
+    /// <para/>
     /// Finally, using index-time boosts (either via folding into the normalization byte or
-    /// via DocValues), is an inefficient way to boost the scores of different fields if the
+    /// via <see cref="Index.DocValues"/>), is an inefficient way to boost the scores of different fields if the
     /// boost will be the same for every document, instead the Similarity can simply take a constant
-    /// boost parameter <i>C</i>, and <seealso cref="PerFieldSimilarityWrapper"/> can return different
+    /// boost parameter <i>C</i>, and <see cref="PerFieldSimilarityWrapper"/> can return different
     /// instances with different boosts depending upon field name.
-    /// <p>
+    /// <para/>
     /// <a name="querytime"/>
     /// At query-time, Queries interact with the Similarity via these steps:
-    /// <ol>
-    ///   <li>The <seealso cref="#computeWeight(float, CollectionStatistics, TermStatistics...)"/> method is called a single time,
+    /// <list type="number">
+    ///   <item><description>The <see cref="ComputeWeight(float, CollectionStatistics, TermStatistics[])"/> method is called a single time,
     ///       allowing the implementation to compute any statistics (such as IDF, average document length, etc)
-    ///       across <i>the entire collection</i>. The <seealso cref="TermStatistics"/> and <seealso cref="CollectionStatistics"/> passed in
-    ///       already contain all of the raw statistics involved, so a Similarity can freely use any combination
+    ///       across <i>the entire collection</i>. The <see cref="TermStatistics"/> and <see cref="CollectionStatistics"/> passed in
+    ///       already contain all of the raw statistics involved, so a <see cref="Similarity"/> can freely use any combination
     ///       of statistics without causing any additional I/O. Lucene makes no assumption about what is
-    ///       stored in the returned <seealso cref="Similarity.SimWeight"/> object.
-    ///   <li>The query normalization process occurs a single time: <seealso cref="Similarity.SimWeight#getValueForNormalization()"/>
-    ///       is called for each query leaf node, <seealso cref="Similarity#queryNorm(float)"/> is called for the top-level
-    ///       query, and finally <seealso cref="Similarity.SimWeight#normalize(float, float)"/> passes down the normalization value
-    ///       and any top-level boosts (e.g. from enclosing <seealso cref="BooleanQuery"/>s).
-    ///   <li>For each segment in the index, the Query creates a <seealso cref="#simScorer(SimWeight, AtomicReaderContext)"/>
-    ///       The score() method is called for each matching document.
-    /// </ol>
-    /// <p>
+    ///       stored in the returned <see cref="Similarity.SimWeight"/> object.</description></item>
+    ///   <item><description>The query normalization process occurs a single time: <see cref="Similarity.SimWeight.GetValueForNormalization()"/>
+    ///       is called for each query leaf node, <see cref="Similarity.QueryNorm(float)"/> is called for the top-level
+    ///       query, and finally <see cref="Similarity.SimWeight.Normalize(float, float)"/> passes down the normalization value
+    ///       and any top-level boosts (e.g. from enclosing <see cref="BooleanQuery"/>s).</description></item>
+    ///   <item><description>For each segment in the index, the <see cref="Query"/> creates a <see cref="GetSimScorer(SimWeight, AtomicReaderContext)"/>.
+    ///       The GetScore() method is called for each matching document.</description></item>
+    /// </list>
+    /// <para/>
     /// <a name="explaintime"/>
-    /// When <seealso cref="IndexSearcher#explain(Lucene.Net.Search.Query, int)"/> is called, queries consult the Similarity's DocScorer for an
+    /// When <see cref="IndexSearcher.Explain(Lucene.Net.Search.Query, int)"/> is called, queries consult the Similarity's DocScorer for an
     /// explanation of how it computed its score. The query passes in the document id and an explanation of how the frequency
     /// was computed.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// <seealso cref= Lucene.Net.Index.IndexWriterConfig#setSimilarity(Similarity) </seealso>
-    /// <seealso cref= IndexSearcher#setSimilarity(Similarity)
-    /// @lucene.experimental </seealso>
+    /// <seealso cref="Lucene.Net.Index.IndexWriterConfig.Similarity"/>
+    /// <seealso cref="IndexSearcher.Similarity"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -110,10 +111,10 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// Hook to integrate coordinate-level matching.
-        /// <p>
-        /// By default this is disabled (returns <code>1</code>), as with
+        /// <para/>
+        /// By default this is disabled (returns <c>1</c>), as with
         /// most modern models this will only skew performance, but some
-        /// implementations such as <seealso cref="TFIDFSimilarity"/> override this.
+        /// implementations such as <see cref="TFIDFSimilarity"/> override this.
         /// </summary>
         /// <param name="overlap"> the number of query terms matched in the document </param>
         /// <param name="maxOverlap"> the total number of terms in the query </param>
@@ -125,14 +126,14 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// Computes the normalization value for a query given the sum of the
-        /// normalized weights <seealso cref="SimWeight#getValueForNormalization()"/> of
+        /// normalized weights <see cref="SimWeight.GetValueForNormalization()"/> of
         /// each of the query terms.  This value is passed back to the
-        /// weight (<seealso cref="SimWeight#normalize(float, float)"/> of each query
+        /// weight (<see cref="SimWeight.Normalize(float, float)"/> of each query
         /// term, to provide a hook to attempt to make scores from different
         /// queries comparable.
-        /// <p>
-        /// By default this is disabled (returns <code>1</code>), but some
-        /// implementations such as <seealso cref="TFIDFSimilarity"/> override this.
+        /// <para/>
+        /// By default this is disabled (returns <c>1</c>), but some
+        /// implementations such as <see cref="TFIDFSimilarity"/> override this.
         /// </summary>
         /// <param name="valueForNormalization"> the sum of the term normalization values </param>
         /// <returns> a normalization factor for query weights </returns>
@@ -143,12 +144,12 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// Computes the normalization value for a field, given the accumulated
-        /// state of term processing for this field (see <seealso cref="FieldInvertState"/>).
-        ///
-        /// <p>Matches in longer fields are less precise, so implementations of this
-        /// method usually set smaller values when <code>state.getLength()</code> is large,
-        /// and larger values when <code>state.getLength()</code> is small.
+        /// state of term processing for this field (see <see cref="FieldInvertState"/>).
         ///
+        /// <para/>Matches in longer fields are less precise, so implementations of this
+        /// method usually set smaller values when <c>state.Length</c> is large,
+        /// and larger values when <c>state.Length</c> is small.
+        /// <para/>
         /// @lucene.experimental
         /// </summary>
         /// <param name="state"> current processing state for this field </param>
@@ -161,24 +162,24 @@ namespace Lucene.Net.Search.Similarities
         /// <param name="queryBoost"> the query-time boost. </param>
         /// <param name="collectionStats"> collection-level statistics, such as the number of tokens in the collection. </param>
         /// <param name="termStats"> term-level statistics, such as the document frequency of a term across the collection. </param>
-        /// <returns> SimWeight object with the information this Similarity needs to score a query. </returns>
+        /// <returns> <see cref="SimWeight"/> object with the information this <see cref="Similarity"/> needs to score a query. </returns>
         public abstract SimWeight ComputeWeight(float queryBoost, CollectionStatistics collectionStats, params TermStatistics[] termStats);
 
         /// <summary>
-        /// Creates a new <seealso cref="Similarity.SimScorer"/> to score matching documents from a segment of the inverted index. </summary>
-        /// <param name="weight"> collection information from <seealso cref="#computeWeight(float, CollectionStatistics, TermStatistics...)"/> </param>
+        /// Creates a new <see cref="Similarity.SimScorer"/> to score matching documents from a segment of the inverted index. </summary>
+        /// <param name="weight"> collection information from <see cref="ComputeWeight(float, CollectionStatistics, TermStatistics[])"/> </param>
         /// <param name="context"> segment of the inverted index to be scored. </param>
-        /// <returns> SloppySimScorer for scoring documents across <code>context</code> </returns>
-        /// <exception cref="IOException"> if there is a low-level I/O error </exception>
+        /// <returns> Sloppy <see cref="SimScorer"/> for scoring documents across <c>context</c> </returns>
+        /// <exception cref="System.IO.IOException"> if there is a low-level I/O error </exception>
         public abstract SimScorer GetSimScorer(SimWeight weight, AtomicReaderContext context);
 
         /// <summary>
-        /// API for scoring "sloppy" queries such as <seealso cref="TermQuery"/>,
-        /// <seealso cref="SpanQuery"/>, and <seealso cref="PhraseQuery"/>.
-        /// <p>
+        /// API for scoring "sloppy" queries such as <see cref="TermQuery"/>,
+        /// <see cref="Spans.SpanQuery"/>, and <see cref="PhraseQuery"/>.
+        /// <para/>
         /// Frequencies are floating-point values: an approximate
         /// within-document frequency adjusted for "sloppiness" by
-        /// <seealso cref="SimScorer#computeSlopFactor(int)"/>.
+        /// <see cref="SimScorer.ComputeSlopFactor(int)"/>.
         /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
@@ -223,8 +224,8 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// Stores the weight for a query across the indexed collection. this abstract
-        /// implementation is empty; descendants of {@code Similarity} should
-        /// subclass {@code SimWeight} and define the statistics they require in the
+        /// implementation is empty; descendants of <see cref="Similarity"/> should
+        /// subclass <see cref="SimWeight"/> and define the statistics they require in the
         /// subclass. Examples include idf, average field length, etc.
         /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -242,8 +243,8 @@ namespace Lucene.Net.Search.Similarities
 
             /// <summary>
             /// The value for normalization of contained query clauses (e.g. sum of squared weights).
-            /// <p>
-            /// NOTE: a Similarity implementation might not use any query normalization at all,
+            /// <para/>
+            /// NOTE: a <see cref="Similarity"/> implementation might not use any query normalization at all,
             /// it's not required. However, if it wants to participate in query normalization,
             /// it can return a value here.
             /// </summary>
@@ -251,10 +252,10 @@ namespace Lucene.Net.Search.Similarities
 
             /// <summary>
             /// Assigns the query normalization factor and boost from parent queries to this.
-            /// <p>
-            /// NOTE: a Similarity implementation might not use this normalized value at all,
+            /// <para/>
+            /// NOTE: a <see cref="Similarity"/> implementation might not use this normalized value at all,
             /// it's not required. However, it's usually a good idea to at least incorporate
-            /// the topLevelBoost (e.g. from an outer BooleanQuery) into its score.
+            /// the <paramref name="topLevelBoost"/> (e.g. from an outer <see cref="BooleanQuery"/>) into its score.
             /// </summary>
             public abstract void Normalize(float queryNorm, float topLevelBoost);
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/SimilarityBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/SimilarityBase.cs b/src/Lucene.Net/Search/Similarities/SimilarityBase.cs
index 62288a5..744db3f 100644
--- a/src/Lucene.Net/Search/Similarities/SimilarityBase.cs
+++ b/src/Lucene.Net/Search/Similarities/SimilarityBase.cs
@@ -26,18 +26,19 @@ namespace Lucene.Net.Search.Similarities
      */
 
     /// <summary>
-    /// A subclass of {@code Similarity} that provides a simplified API for its
-    /// descendants. Subclasses are only required to implement the <seealso cref="#score"/>
-    /// and <seealso cref="#toString()"/> methods. Implementing
-    /// <seealso cref="#explain(Explanation, BasicStats, int, float, float)"/> is optional,
-    /// inasmuch as SimilarityBase already provides a basic explanation of the score
+    /// A subclass of <see cref="Similarity"/> that provides a simplified API for its
+    /// descendants. Subclasses are only required to implement the <see cref="Score(BasicStats, float, float)"/>
+    /// and <see cref="ToString()"/> methods. Implementing
+    /// <see cref="Explain(Explanation, BasicStats, int, float, float)"/> is optional,
+    /// inasmuch as <see cref="SimilarityBase"/> already provides a basic explanation of the score
     /// and the term frequency. However, implementers of a subclass are encouraged to
     /// include as much detail about the scoring method as possible.
-    /// <p>
+    /// <para/>
     /// Note: multi-word queries such as phrase queries are scored in a different way
     /// than Lucene's default ranking algorithm: whereas it "fakes" an IDF value for
     /// the phrase as a whole (since it does not know it), this class instead scores
     /// phrases as a summation of the individual term scores.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -46,7 +47,7 @@ namespace Lucene.Net.Search.Similarities
     public abstract class SimilarityBase : Similarity
     {
         /// <summary>
-        /// For <seealso cref="#log2(double)"/>. Precomputed for efficiency reasons. </summary>
+        /// For <see cref="Log2(double)"/>. Precomputed for efficiency reasons. </summary>
         private static readonly double LOG_2 = Math.Log(2);
 
         /// <summary>
@@ -65,13 +66,13 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// Determines whether overlap tokens (Tokens with
-        ///  0 position increment) are ignored when computing
-        ///  norm.  By default this is true, meaning overlap
-        ///  tokens do not count when computing norms.
-        ///
-        ///  @lucene.experimental
+        /// 0 position increment) are ignored when computing
+        /// norm.  By default this is <c>true</c>, meaning overlap
+        /// tokens do not count when computing norms.
+        /// <para/>
+        /// @lucene.experimental
         /// </summary>
-        ///  <seealso cref= #computeNorm </seealso>
+        /// <seealso cref="ComputeNorm(FieldInvertState)"/>
         public virtual bool DiscountOverlaps
         {
             set
@@ -103,8 +104,8 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Fills all member fields defined in {@code BasicStats} in {@code stats}.
-        ///  Subclasses can override this method to fill additional stats.
+        /// Fills all member fields defined in <see cref="BasicStats"/> in <paramref name="stats"/>.
+        /// Subclasses can override this method to fill additional stats.
         /// </summary>
         protected internal virtual void FillBasicStats(BasicStats stats, CollectionStatistics collectionStats, TermStatistics termStats)
         {
@@ -150,8 +151,8 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Scores the document {@code doc}.
-        /// <p>Subclasses must apply their scoring formula in this class.</p> </summary>
+        /// Scores the document <c>doc</c>.
+        /// <para>Subclasses must apply their scoring formula in this class.</para> </summary>
         /// <param name="stats"> the corpus level statistics. </param>
         /// <param name="freq"> the term frequency. </param>
         /// <param name="docLen"> the document length. </param>
@@ -159,11 +160,11 @@ namespace Lucene.Net.Search.Similarities
         public abstract float Score(BasicStats stats, float freq, float docLen);
 
         /// <summary>
-        /// Subclasses should implement this method to explain the score. {@code expl}
+        /// Subclasses should implement this method to explain the score. <paramref name="expl"/>
         /// already contains the score, the name of the class and the doc id, as well
         /// as the term frequency and its explanation; subclasses can add additional
         /// clauses to explain details of their scoring formulae.
-        /// <p>The default implementation does nothing.</p>
+        /// <para>The default implementation does nothing.</para>
         /// </summary>
         /// <param name="expl"> the explanation to extend with details. </param>
         /// <param name="stats"> the corpus level statistics. </param>
@@ -176,12 +177,12 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// Explains the score. The implementation here provides a basic explanation
-        /// in the format <em>score(name-of-similarity, doc=doc-id,
+        /// in the format <em>Score(name-of-similarity, doc=doc-id,
         /// freq=term-frequency), computed from:</em>, and
-        /// attaches the score (computed via the <seealso cref="#score(BasicStats, float, float)"/>
+        /// attaches the score (computed via the <see cref="Score(BasicStats, float, float)"/>
         /// method) and the explanation for the term frequency. Subclasses content with
         /// this format may add additional details in
-        /// <seealso cref="#explain(Explanation, BasicStats, int, float, float)"/>.
+        /// <see cref="Explain(Explanation, BasicStats, int, float, float)"/>.
         /// </summary>
         /// <param name="stats"> the corpus level statistics. </param>
         /// <param name="doc"> the document id. </param>
@@ -223,7 +224,7 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Subclasses must override this method to return the name of the Similarity
+        /// Subclasses must override this method to return the name of the <see cref="Similarity"/>
         /// and preferably the values of parameters (if any) as well.
         /// </summary>
         public override abstract string ToString();
@@ -244,7 +245,7 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Encodes the document length in the same way as <seealso cref="TFIDFSimilarity"/>. </summary>
+        /// Encodes the document length in the same way as <see cref="TFIDFSimilarity"/>. </summary>
         public override long ComputeNorm(FieldInvertState state)
         {
             float numTerms;
@@ -261,14 +262,14 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// Decodes a normalization factor (document length) stored in an index. </summary>
-        /// <seealso cref= #encodeNormValue(float,float) </seealso>
+        /// <seealso cref="EncodeNormValue(float, float)"/>
         protected internal virtual float DecodeNormValue(byte norm)
         {
             return NORM_TABLE[norm & 0xFF]; // & 0xFF maps negative bytes to positive above 127
         }
 
         /// <summary>
-        /// Encodes the length to a byte via SmallFloat. </summary>
+        /// Encodes the length to a byte via <see cref="SmallSingle"/>. </summary>
         protected internal virtual byte EncodeNormValue(float boost, float length)
         {
             return SmallSingle.SingleToByte315((boost / (float)Math.Sqrt(length)));
@@ -277,7 +278,7 @@ namespace Lucene.Net.Search.Similarities
         // ----------------------------- Static methods ------------------------------
 
         /// <summary>
-        /// Returns the base two logarithm of {@code x}. </summary>
+        /// Returns the base two logarithm of <c>x</c>. </summary>
         public static double Log2(double x)
         {
             // Put this to a 'util' class if we need more of these.
@@ -287,10 +288,10 @@ namespace Lucene.Net.Search.Similarities
         // --------------------------------- Classes ---------------------------------
 
         /// <summary>
-        /// Delegates the <seealso cref="#score(int, float)"/> and
-        /// <seealso cref="#explain(int, Explanation)"/> methods to
-        /// <seealso cref="SimilarityBase#score(BasicStats, float, float)"/> and
-        /// <seealso cref="SimilarityBase#explain(BasicStats, int, Explanation, float)"/>,
+        /// Delegates the <see cref="Score(int, float)"/> and
+        /// <see cref="Explain(int, Explanation)"/> methods to
+        /// <see cref="SimilarityBase.Score(BasicStats, float, float)"/> and
+        /// <see cref="SimilarityBase.Explain(BasicStats, int, Explanation, float)"/>,
         /// respectively.
         /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/TFIDFSimilarity.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/TFIDFSimilarity.cs b/src/Lucene.Net/Search/Similarities/TFIDFSimilarity.cs
index 2b89112..ec8a0b8 100644
--- a/src/Lucene.Net/Search/Similarities/TFIDFSimilarity.cs
+++ b/src/Lucene.Net/Search/Similarities/TFIDFSimilarity.cs
@@ -26,25 +26,25 @@ namespace Lucene.Net.Search.Similarities
     using NumericDocValues = Lucene.Net.Index.NumericDocValues;
 
     /// <summary>
-    /// Implementation of <seealso cref="Similarity"/> with the Vector Space Model.
-    /// <p>
+    /// Implementation of <see cref="Similarity"/> with the Vector Space Model.
+    /// <para/>
     /// Expert: Scoring API.
-    /// <p>TFIDFSimilarity defines the components of Lucene scoring.
+    /// <para/>TFIDFSimilarity defines the components of Lucene scoring.
     /// Overriding computation of these components is a convenient
     /// way to alter Lucene scoring.
     ///
-    /// <p>Suggested reading:
+    /// <para/>Suggested reading:
     /// <a href="http://nlp.stanford.edu/IR-book/html/htmledition/queries-as-vectors-1.html">
     /// Introduction To Information Retrieval, Chapter 6</a>.
     ///
-    /// <p>The following describes how Lucene scoring evolves from
+    /// <para/>The following describes how Lucene scoring evolves from
     /// underlying information retrieval models to (efficient) implementation.
     /// We first brief on <i>VSM Score</i>,
     /// then derive from it <i>Lucene's Conceptual Scoring Formula</i>,
     /// from which, finally, evolves <i>Lucene's Practical Scoring Function</i>
     /// (the latter is connected directly with Lucene classes and methods).
     ///
-    /// <p>Lucene combines
+    /// <para/>Lucene combines
     /// <a href="http://en.wikipedia.org/wiki/Standard_Boolean_model">
     /// Boolean model (BM) of Information Retrieval</a>
     /// with
@@ -52,13 +52,13 @@ namespace Lucene.Net.Search.Similarities
     /// Vector Space Model (VSM) of Information Retrieval</a> -
     /// documents "approved" by BM are scored by VSM.
     ///
-    /// <p>In VSM, documents and queries are represented as
+    /// <para/>In VSM, documents and queries are represented as
     /// weighted vectors in a multi-dimensional space,
     /// where each distinct index term is a dimension,
     /// and weights are
     /// <a href="http://en.wikipedia.org/wiki/Tfidf">Tf-idf</a> values.
     ///
-    /// <p>VSM does not require weights to be <i>Tf-idf</i> values,
+    /// <para/>VSM does not require weights to be <i>Tf-idf</i> values,
     /// but <i>Tf-idf</i> values are believed to produce search results of high quality,
     /// and so Lucene is using <i>Tf-idf</i>.
     /// <i>Tf</i> and <i>Idf</i> are described in more detail below,
@@ -69,53 +69,48 @@ namespace Lucene.Net.Search.Similarities
     /// <i>idf(t)</i> similarly varies with the inverse of the
     /// number of index documents containing term <i>t</i>.
     ///
-    /// <p><i>VSM score</i> of document <i>d</i> for query <i>q</i> is the
+    /// <para/><i>VSM score</i> of document <i>d</i> for query <i>q</i> is the
     /// <a href="http://en.wikipedia.org/wiki/Cosine_similarity">
     /// Cosine Similarity</a>
     /// of the weighted query vectors <i>V(q)</i> and <i>V(d)</i>:
-    ///
-    ///  <br>&nbsp;<br>
-    ///  <table cellpadding="2" cellspacing="2" border="0" align="center" style="width:auto">
-    ///    <tr><td>
-    ///    <table cellpadding="1" cellspacing="0" border="1" align="center">
-    ///      <tr><td>
-    ///      <table cellpadding="2" cellspacing="2" border="0" align="center">
-    ///        <tr>
-    ///          <td valign="middle" align="right" rowspan="1">
-    ///            cosine-similarity(q,d) &nbsp; = &nbsp;
-    ///          </td>
-    ///          <td valign="middle" align="center">
-    ///            <table>
-    ///               <tr><td align="center" style="text-align: center"><small>V(q)&nbsp;&middot;&nbsp;V(d)</small></td></tr>
-    ///               <tr><td align="center" style="text-align: center">&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;</td></tr>
-    ///               <tr><td align="center" style="text-align: center"><small>|V(q)|&nbsp;|V(d)|</small></td></tr>
-    ///            </table>
-    ///          </td>
-    ///        </tr>
-    ///      </table>
-    ///      </td></tr>
-    ///    </table>
-    ///    </td></tr>
-    ///    <tr><td>
-    ///    <center><font size=-1><u>VSM Score</u></font></center>
-    ///    </td></tr>
-    ///  </table>
-    ///  <br>&nbsp;<br>
-    ///
-    ///
-    /// Where <i>V(q)</i> &middot; <i>V(d)</i> is the
+    /// <para/>
+    /// <list type="table">
+    ///     <item>
+    ///         <term>
+    ///             <list type="table">
+    ///                 <item>
+    ///                     <term>cosine-similarity(q,d) &#160; = &#160;</term>
+    ///                     <term>
+    ///                         <table>
+    ///                             <item><term><small>V(q)&#160;&#183;&#160;V(d)</small></term></item>
+    ///                             <item><term>&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;</term></item>
+    ///                             <item><term><small>|V(q)|&#160;|V(d)|</small></term></item>
+    ///                         </table>
+    ///                     </term>
+    ///                 </item>
+    ///             </list>
+    ///         </term>
+    ///     </item>
+    ///     <item>
+    ///         <term>VSM Score</term>
+    ///     </item>
+    /// </list>
+    /// <para/>
+    /// 
+    ///
+    /// Where <i>V(q)</i> &#183; <i>V(d)</i> is the
     /// <a href="http://en.wikipedia.org/wiki/Dot_product">dot product</a>
     /// of the weighted vectors,
     /// and <i>|V(q)|</i> and <i>|V(d)|</i> are their
     /// <a href="http://en.wikipedia.org/wiki/Euclidean_norm#Euclidean_norm">Euclidean norms</a>.
     ///
-    /// <p>Note: the above equation can be viewed as the dot product of
+    /// <para/>Note: the above equation can be viewed as the dot product of
     /// the normalized weighted vectors, in the sense that dividing
     /// <i>V(q)</i> by its euclidean norm is normalizing it to a unit vector.
     ///
-    /// <p>Lucene refines <i>VSM score</i> for both search quality and usability:
-    /// <ul>
-    ///  <li>Normalizing <i>V(d)</i> to the unit vector is known to be problematic in that
+    /// <para/>Lucene refines <i>VSM score</i> for both search quality and usability:
+    /// <list type="bullet">
+    ///  <item><description>Normalizing <i>V(d)</i> to the unit vector is known to be problematic in that
     ///  it removes all document length information.
     ///  For some documents removing this info is probably ok,
     ///  e.g. a document made by duplicating a certain paragraph <i>10</i> times,
@@ -125,89 +120,88 @@ namespace Lucene.Net.Search.Similarities
     ///  To avoid this problem, a different document length normalization
     ///  factor is used, which normalizes to a vector equal to or larger
     ///  than the unit vector: <i>doc-len-norm(d)</i>.
-    ///  </li>
+    ///  </description></item>
     ///
-    ///  <li>At indexing, users can specify that certain documents are more
+    ///  <item><description>At indexing, users can specify that certain documents are more
     ///  important than others, by assigning a document boost.
     ///  For this, the score of each document is also multiplied by its boost value
     ///  <i>doc-boost(d)</i>.
-    ///  </li>
+    ///  </description></item>
     ///
-    ///  <li>Lucene is field based, hence each query term applies to a single
+    ///  <item><description>Lucene is field based, hence each query term applies to a single
     ///  field, document length normalization is by the length of the certain field,
     ///  and in addition to document boost there are also document fields boosts.
-    ///  </li>
+    ///  </description></item>
     ///
-    ///  <li>The same field can be added to a document during indexing several times,
+    ///  <item><description>The same field can be added to a document during indexing several times,
     ///  and so the boost of that field is the multiplication of the boosts of
     ///  the separate additions (or parts) of that field within the document.
-    ///  </li>
+    ///  </description></item>
     ///
-    ///  <li>At search time users can specify boosts to each query, sub-query, and
+    ///  <item><description>At search time users can specify boosts to each query, sub-query, and
     ///  each query term, hence the contribution of a query term to the score of
     ///  a document is multiplied by the boost of that query term <i>query-boost(q)</i>.
-    ///  </li>
+    ///  </description></item>
     ///
-    ///  <li>A document may match a multi term query without containing all
+    ///  <item><description>A document may match a multi term query without containing all
     ///  the terms of that query (this is correct for some of the queries),
     ///  and users can further reward documents matching more query terms
     ///  through a coordination factor, which is usually larger when
     ///  more terms are matched: <i>coord-factor(q,d)</i>.
-    ///  </li>
-    /// </ul>
+    ///  </description></item>
+    /// </list>
     ///
-    /// <p>Under the simplifying assumption of a single field in the index,
+    /// <para/>Under the simplifying assumption of a single field in the index,
     /// we get <i>Lucene's Conceptual scoring formula</i>:
-    ///
-    ///  <br>&nbsp;<br>
-    ///  <table cellpadding="2" cellspacing="2" border="0" align="center" style="width:auto">
-    ///    <tr><td>
-    ///    <table cellpadding="1" cellspacing="0" border="1" align="center">
-    ///      <tr><td>
-    ///      <table cellpadding="2" cellspacing="2" border="0" align="center">
-    ///        <tr>
-    ///          <td valign="middle" align="right" rowspan="1">
-    ///            score(q,d) &nbsp; = &nbsp;
-    ///            <font color="#FF9933">coord-factor(q,d)</font> &middot; &nbsp;
-    ///            <font color="#CCCC00">query-boost(q)</font> &middot; &nbsp;
-    ///          </td>
-    ///          <td valign="middle" align="center">
-    ///            <table>
-    ///               <tr><td align="center" style="text-align: center"><small><font color="#993399">V(q)&nbsp;&middot;&nbsp;V(d)</font></small></td></tr>
-    ///               <tr><td align="center" style="text-align: center">&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;</td></tr>
-    ///               <tr><td align="center" style="text-align: center"><small><font color="#FF33CC">|V(q)|</font></small></td></tr>
-    ///            </table>
-    ///          </td>
-    ///          <td valign="middle" align="right" rowspan="1">
-    ///            &nbsp; &middot; &nbsp; <font color="#3399FF">doc-len-norm(d)</font>
-    ///            &nbsp; &middot; &nbsp; <font color="#3399FF">doc-boost(d)</font>
-    ///          </td>
-    ///        </tr>
-    ///      </table>
-    ///      </td></tr>
-    ///    </table>
-    ///    </td></tr>
-    ///    <tr><td>
-    ///    <center><font size=-1><u>Lucene Conceptual Scoring Formula</u></font></center>
-    ///    </td></tr>
-    ///  </table>
-    ///  <br>&nbsp;<br>
-    ///
-    /// <p>The conceptual formula is a simplification in the sense that (1) terms and documents
+    /// 
+    /// <para/>
+    /// <list type="table">
+    ///     <item>
+    ///         <term>
+    ///             <list type="table">
+    ///                 <item>
+    ///                     <term>
+    ///                         score(q,d) &#160; = &#160;
+    ///                         <font color="#FF9933">coord-factor(q,d)</font> &#183; &#160;
+    ///                         <font color="#CCCC00">query-boost(q)</font> &#183; &#160;
+    ///                     </term>
+    ///                     <term>
+    ///                         <list type="table">
+    ///                             <item><term><small><font color="#993399">V(q)&#160;&#183;&#160;V(d)</font></small></term></item>
+    ///                             <item><term>&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;</term></item>
+    ///                             <item><term><small><font color="#FF33CC">|V(q)|</font></small></term></item>
+    ///                         </list>
+    ///                     </term>
+    ///                     <term>
+    ///                         &#160; &#183; &#160; <font color="#3399FF">doc-len-norm(d)</font>
+    ///                         &#160; &#183; &#160; <font color="#3399FF">doc-boost(d)</font>
+    ///                     </term>
+    ///                 </item>
+    ///             </list>
+    ///         </term>
+    ///     </item>
+    ///     <item>
+    ///         <term>Lucene Conceptual Scoring Formula</term>
+    ///     </item>
+    /// </list>
+    /// <para/>
+    ///
+    ///
+    /// <para/>The conceptual formula is a simplification in the sense that (1) terms and documents
     /// are fielded and (2) boosts are usually per query term rather than per query.
     ///
-    /// <p>We now describe how Lucene implements this conceptual scoring formula, and
+    /// <para/>We now describe how Lucene implements this conceptual scoring formula, and
     /// derive from it <i>Lucene's Practical Scoring Function</i>.
     ///
-    /// <p>For efficient score computation some scoring components
+    /// <para/>For efficient score computation some scoring components
     /// are computed and aggregated in advance:
     ///
-    /// <ul>
-    ///  <li><i>Query-boost</i> for the query (actually for each query term)
+    /// <list type="bullet">
+    ///  <item><description><i>Query-boost</i> for the query (actually for each query term)
     ///  is known when search starts.
-    ///  </li>
+    ///  </description></item>
     ///
-    ///  <li>Query Euclidean norm <i>|V(q)|</i> can be computed when search starts,
+    ///  <item><description>Query Euclidean norm <i>|V(q)|</i> can be computed when search starts,
     ///  as it is independent of the document being scored.
     ///  From search optimization perspective, it is a valid question
     ///  why bother to normalize the query at all, because all
@@ -215,8 +209,8 @@ namespace Lucene.Net.Search.Similarities
     ///  and hence documents ranks (their order by score) will not
     ///  be affected by this normalization.
     ///  There are two good reasons to keep this normalization:
-    ///  <ul>
-    ///   <li>Recall that
+    ///  <list type="bullet">
+    ///   <item><description>Recall that
     ///   <a href="http://en.wikipedia.org/wiki/Cosine_similarity">
    ///   Cosine Similarity</a> can be used to find how similar
     ///   two documents are. One can use Lucene for e.g.
@@ -229,70 +223,66 @@ namespace Lucene.Net.Search.Similarities
     ///   There are other applications that may require this.
     ///   And this is exactly what normalizing the query vector <i>V(q)</i>
     ///   provides: comparability (to a certain extent) of two or more queries.
-    ///   </li>
+    ///   </description></item>
     ///
-    ///   <li>Applying query normalization on the scores helps to keep the
+    ///   <item><description>Applying query normalization on the scores helps to keep the
     ///   scores around the unit vector, hence preventing loss of score data
     ///   because of floating point precision limitations.
-    ///   </li>
-    ///  </ul>
-    ///  </li>
+    ///   </description></item>
+    ///  </list>
+    ///  </description></item>
     ///
-    ///  <li>Document length norm <i>doc-len-norm(d)</i> and document
+    ///  <item><description>Document length norm <i>doc-len-norm(d)</i> and document
     ///  boost <i>doc-boost(d)</i> are known at indexing time.
     ///  They are computed in advance and their multiplication
     ///  is saved as a single value in the index: <i>norm(d)</i>.
     ///  (In the equations below, <i>norm(t in d)</i> means <i>norm(field(t) in doc d)</i>
     ///  where <i>field(t)</i> is the field associated with term <i>t</i>.)
-    ///  </li>
-    /// </ul>
+    ///  </description></item>
+    /// </list>
     ///
-    /// <p><i>Lucene's Practical Scoring Function</i> is derived from the above.
+    /// <para/><i>Lucene's Practical Scoring Function</i> is derived from the above.
     /// The color codes demonstrate how it relates
     /// to those of the <i>conceptual</i> formula:
     ///
-    /// <P>
-    /// <table cellpadding="2" cellspacing="2" border="0" align="center" style="width:auto">
-    ///  <tr><td>
-    ///  <table cellpadding="" cellspacing="2" border="2" align="center">
-    ///  <tr><td>
-    ///   <table cellpadding="2" cellspacing="2" border="0" align="center">
-    ///   <tr>
-    ///     <td valign="middle" align="right" rowspan="1">
-    ///       score(q,d) &nbsp; = &nbsp;
-    ///       <A HREF="#formula_coord"><font color="#FF9933">coord(q,d)</font></A> &nbsp;&middot;&nbsp;
-    ///       <A HREF="#formula_queryNorm"><font color="#FF33CC">queryNorm(q)</font></A> &nbsp;&middot;&nbsp;
-    ///     </td>
-    ///     <td valign="bottom" align="center" rowspan="1" style="text-align: center">
-    ///       <big><big><big>&sum;</big></big></big>
-    ///     </td>
-    ///     <td valign="middle" align="right" rowspan="1">
-    ///       <big><big>(</big></big>
-    ///       <A HREF="#formula_tf"><font color="#993399">tf(t in d)</font></A> &nbsp;&middot;&nbsp;
-    ///       <A HREF="#formula_idf"><font color="#993399">idf(t)</font></A><sup>2</sup> &nbsp;&middot;&nbsp;
-    ///       <A HREF="#formula_termBoost"><font color="#CCCC00">t.getBoost()</font></A>&nbsp;&middot;&nbsp;
-    ///       <A HREF="#formula_norm"><font color="#3399FF">norm(t,d)</font></A>
-    ///       <big><big>)</big></big>
-    ///     </td>
-    ///   </tr>
-    ///   <tr valigh="top">
-    ///    <td></td>
-    ///    <td align="center" style="text-align: center"><small>t in q</small></td>
-    ///    <td></td>
-    ///   </tr>
-    ///   </table>
-    ///  </td></tr>
-    ///  </table>
-    /// </td></tr>
-    /// <tr><td>
-    ///  <center><font size=-1><u>Lucene Practical Scoring Function</u></font></center>
-    /// </td></tr>
-    /// </table>
-    ///
-    /// <p> where
-    /// <ol>
-    ///    <li>
-    ///      <A NAME="formula_tf"></A>
+    /// <para/>
+    /// <list type="table">
+    ///     <item>
+    ///         <term>
+    ///             <list type="table">
+    ///                 <item>
+    ///                     <term>
+    ///                         score(q,d) &#160; = &#160;
+    ///                         <a href="#formula_coord"><font color="#FF9933">coord(q,d)</font></a> &#160; &#183; &#160;
+    ///                         <a href="#formula_queryNorm"><font color="#FF33CC">queryNorm(q)</font></a> &#160; &#183; &#160;
+    ///                     </term>
+    ///                     <term><big><big><big>&#8721;</big></big></big></term>
+    ///                     <term>
+    ///                         <big><big>(</big></big>
+    ///                         <a href="#formula_tf"><font color="#993399">tf(t in d)</font></a> &#160; &#183; &#160;
+    ///                         <a href="#formula_idf"><font color="#993399">idf(t)</font></a><sup>2</sup> &#160; &#183; &#160;
+    ///                         <a href="#formula_termBoost"><font color="#CCCC00">t.Boost</font></a> &#160; &#183; &#160;
+    ///                         <a href="#formula_norm"><font color="#3399FF">norm(t,d)</font></a>
+    ///                         <big><big>)</big></big>
+    ///                     </term>
+    ///                 </item>
+    ///                 <item>
+    ///                     <term></term>
+    ///                     <term><small>t in q</small></term>
+    ///                     <term></term>
+    ///                 </item>
+    ///             </list>
+    ///         </term>
+    ///     </item>
+    ///     <item>
+    ///         <term>Lucene Practical Scoring Function</term>
+    ///     </item>
+    /// </list>
+    ///
+    /// <para/> where
+    /// <list type="number">
+    ///    <item><description>
+    ///      <a name="formula_tf"></a>
     ///      <b><i>tf(t in d)</i></b>
     ///      correlates to the term's <i>frequency</i>,
     ///      defined as the number of times term <i>t</i> appears in the currently scored document <i>d</i>.
@@ -302,71 +292,67 @@ namespace Lucene.Net.Search.Similarities
     ///      two term-queries with that same term and hence the computation would still be correct (although
     ///      not very efficient).
     ///      The default computation for <i>tf(t in d)</i> in
-    ///      <seealso cref="Lucene.Net.Search.Similarities.DefaultSimilarity#tf(float) DefaultSimilarity"/> is:
-    ///
-    ///      <br>&nbsp;<br>
-    ///      <table cellpadding="2" cellspacing="2" border="0" align="center" style="width:auto">
-    ///        <tr>
-    ///          <td valign="middle" align="right" rowspan="1">
-    ///            <seealso cref="Lucene.Net.Search.Similarities.DefaultSimilarity#tf(float) tf(t in d)"/> &nbsp; = &nbsp;
-    ///          </td>
-    ///          <td valign="top" align="center" rowspan="1">
-    ///               frequency<sup><big>&frac12;</big></sup>
-    ///          </td>
-    ///        </tr>
-    ///      </table>
-    ///      <br>&nbsp;<br>
-    ///    </li>
-    ///
-    ///    <li>
-    ///      <A NAME="formula_idf"></A>
+    ///      DefaultSimilarity (<see cref="Lucene.Net.Search.Similarities.DefaultSimilarity.Tf(float)"/>) is:
+    ///
+    ///         <para/>
+    ///         <list type="table">
+    ///             <item>
+    ///                 <term>
+    ///                     tf(t in d) &#160; = &#160;
+    ///                 </term>
+    ///                 <term>
+    ///                     frequency<sup><big>&#189;</big></sup>
+    ///                 </term>
+    ///             </item>
+    ///         </list>
+    ///         <para/>
+    ///         
+    ///    </description></item>
+    ///
+    ///    <item><description>
+    ///      <a name="formula_idf"></a>
    ///      <b><i>idf(t)</i></b> stands for Inverse Document Frequency. This value
-    ///      correlates to the inverse of <i>docFreq</i>
+    ///      correlates to the inverse of <i>DocFreq</i>
     ///      (the number of documents in which the term <i>t</i> appears).
    ///      This means rarer terms give higher contribution to the total score.
     ///      <i>idf(t)</i> appears for <i>t</i> in both the query and the document,
     ///      hence it is squared in the equation.
     ///      The default computation for <i>idf(t)</i> in
-    ///      <seealso cref="Lucene.Net.Search.Similarities.DefaultSimilarity#idf(long, long) DefaultSimilarity"/> is:
-    ///
-    ///      <br>&nbsp;<br>
-    ///      <table cellpadding="2" cellspacing="2" border="0" align="center" style="width:auto">
-    ///        <tr>
-    ///          <td valign="middle" align="right">
-    ///            <seealso cref="Lucene.Net.Search.Similarities.DefaultSimilarity#idf(long, long) idf(t)"/>&nbsp; = &nbsp;
-    ///          </td>
-    ///          <td valign="middle" align="center">
-    ///            1 + log <big>(</big>
-    ///          </td>
-    ///          <td valign="middle" align="center">
-    ///            <table>
-    ///               <tr><td align="center" style="text-align: center"><small>numDocs</small></td></tr>
-    ///               <tr><td align="center" style="text-align: center">&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;</td></tr>
-    ///               <tr><td align="center" style="text-align: center"><small>docFreq+1</small></td></tr>
-    ///            </table>
-    ///          </td>
-    ///          <td valign="middle" align="center">
-    ///            <big>)</big>
-    ///          </td>
-    ///        </tr>
-    ///      </table>
-    ///      <br>&nbsp;<br>
-    ///    </li>
-    ///
-    ///    <li>
-    ///      <A NAME="formula_coord"></A>
+    ///      DefaultSimilarity (<see cref="Lucene.Net.Search.Similarities.DefaultSimilarity.Idf(long, long)"/>) is:
+    ///
+    ///         <para/>
+    ///         <list type="table">
+    ///             <item>
+    ///                 <term>idf(t) &#160; = &#160;</term>
+    ///                 <term>1 + log <big>(</big></term>
+    ///                 <term>
+    ///                     <list type="table">
+    ///                         <item><term><small>NumDocs</small></term></item>
+    ///                         <item><term>&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;</term></item>
+    ///                         <item><term><small>DocFreq+1</small></term></item>
+    ///                     </list>
+    ///                 </term>
+    ///                 <term><big>)</big></term>
+    ///             </item>
+    ///         </list>
+    ///         <para/>
+    /// 
+    ///    </description></item>
+    ///
+    ///    <item><description>
+    ///      <a name="formula_coord"></a>
     ///      <b><i>coord(q,d)</i></b>
     ///      is a score factor based on how many of the query terms are found in the specified document.
     ///      Typically, a document that contains more of the query's terms will receive a higher score
     ///      than another document with fewer query terms.
    ///      This is a search time factor computed in
-    ///      <seealso cref="#coord(int, int) coord(q,d)"/>
+    ///      coord(q,d) (<see cref="Coord(int, int)"/>)
     ///      by the Similarity in effect at search time.
-    ///      <br>&nbsp;<br>
-    ///    </li>
+    ///      <para/>
+    ///    </description></item>
     ///
-    ///    <li><b>
-    ///      <A NAME="formula_queryNorm"></A>
+    ///    <item><description><b>
+    ///      <a name="formula_queryNorm"></a>
     ///      <i>queryNorm(q)</i>
     ///      </b>
     ///      is a normalizing factor used to make scores between queries comparable.
@@ -375,128 +361,122 @@ namespace Lucene.Net.Search.Similarities
    ///      This is a search time factor computed by the Similarity in effect at search time.
     ///
     ///      The default computation in
-    ///      <seealso cref="Lucene.Net.Search.Similarities.DefaultSimilarity#queryNorm(float) DefaultSimilarity"/>
+    ///      DefaultSimilarity (<see cref="Lucene.Net.Search.Similarities.DefaultSimilarity.QueryNorm(float)"/>)
     ///      produces a <a href="http://en.wikipedia.org/wiki/Euclidean_norm#Euclidean_norm">Euclidean norm</a>:
-    ///      <br>&nbsp;<br>
-    ///      <table cellpadding="1" cellspacing="0" border="0" align="center" style="width:auto">
-    ///        <tr>
-    ///          <td valign="middle" align="right" rowspan="1">
-    ///            queryNorm(q)  &nbsp; = &nbsp;
-    ///            <seealso cref="Lucene.Net.Search.Similarities.DefaultSimilarity#queryNorm(float) queryNorm(sumOfSquaredWeights)"/>
-    ///            &nbsp; = &nbsp;
-    ///          </td>
-    ///          <td valign="middle" align="center" rowspan="1">
-    ///            <table>
-    ///               <tr><td align="center" style="text-align: center"><big>1</big></td></tr>
-    ///               <tr><td align="center" style="text-align: center"><big>
-    ///                  &ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;
-    ///               </big></td></tr>
-    ///               <tr><td align="center" style="text-align: center">sumOfSquaredWeights<sup><big>&frac12;</big></sup></td></tr>
-    ///            </table>
-    ///          </td>
-    ///        </tr>
-    ///      </table>
-    ///      <br>&nbsp;<br>
+    ///      
+    ///      <para/>
+    ///      <list type="table">
+    ///         <item>
+    ///             <term>
+    ///                 queryNorm(q)  &#160; = &#160;
+    ///                 queryNorm(sumOfSquaredWeights)
+    ///                 &#160; = &#160;
+    ///             </term>
+    ///             <term>
+    ///                 <list type="table">
+    ///                     <item><term><big>1</big></term></item>
+    ///                     <item><term><big>&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;</big></term></item>
+    ///                     <item><term>sumOfSquaredWeights<sup><big>&#189;</big></sup></term></item>
+    ///                 </list>
+    ///             </term>
+    ///         </item>
+    ///      </list>
+    ///      <para/>
     ///
     ///      The sum of squared weights (of the query terms) is
-    ///      computed by the query <seealso cref="Lucene.Net.Search.Weight"/> object.
-    ///      For example, a <seealso cref="Lucene.Net.Search.BooleanQuery"/>
+    ///      computed by the query <see cref="Lucene.Net.Search.Weight"/> object.
+    ///      For example, a <see cref="Lucene.Net.Search.BooleanQuery"/>
     ///      computes this value as:
-    ///
-    ///      <br>&nbsp;<br>
-    ///      <table cellpadding="1" cellspacing="0" border="0" align="center" style="width:auto">
-    ///        <tr>
-    ///          <td valign="middle" align="right" rowspan="1">
-    ///            <seealso cref="Lucene.Net.Search.Weight#getValueForNormalization() sumOfSquaredWeights"/> &nbsp; = &nbsp;
-    ///            <seealso cref="Lucene.Net.Search.Query#getBoost() q.getBoost()"/> <sup><big>2</big></sup>
-    ///            &nbsp;&middot;&nbsp;
-    ///          </td>
-    ///          <td valign="bottom" align="center" rowspan="1" style="text-align: center">
-    ///            <big><big><big>&sum;</big></big></big>
-    ///          </td>
-    ///          <td valign="middle" align="right" rowspan="1">
-    ///            <big><big>(</big></big>
-    ///            <A HREF="#formula_idf">idf(t)</A> &nbsp;&middot;&nbsp;
-    ///            <A HREF="#formula_termBoost">t.getBoost()</A>
-    ///            <big><big>) <sup>2</sup> </big></big>
-    ///          </td>
-    ///        </tr>
-    ///        <tr valigh="top">
-    ///          <td></td>
-    ///          <td align="center" style="text-align: center"><small>t in q</small></td>
-    ///          <td></td>
-    ///        </tr>
-    ///      </table>
-    ///      <br>&nbsp;<br>
-    ///
-    ///    </li>
-    ///
-    ///    <li>
-    ///      <A NAME="formula_termBoost"></A>
-    ///      <b><i>t.getBoost()</i></b>
+    ///      
+    ///      <para/>
+    ///      <list type="table">
+    ///         <item>
+    ///             <term>
+    ///                 sumOfSquaredWeights &#160; = &#160;
+    ///                 q.Boost <sup><big>2</big></sup>
+    ///                 &#160;&#183;&#160;
+    ///             </term> 
+    ///             <term><big><big><big>&#8721;</big></big></big></term>
+    ///             <term>
+    ///                 <big><big>(</big></big>
+    ///                 <a href="#formula_idf">idf(t)</a> &#160;&#183;&#160;
+    ///                 <a href="#formula_termBoost">t.Boost</a>
+    ///                 <big><big>) <sup>2</sup> </big></big>
+    ///             </term>
+    ///         </item>
+    ///         <item>
+    ///             <term></term>
+    ///             <term><small>t in q</small></term>
+    ///             <term></term>
+    ///         </item>
+    ///      </list>
+    ///      where sumOfSquaredWeights is <see cref="Weight.GetValueForNormalization()"/> and
+    ///      q.Boost is <see cref="Query.Boost"/>.
+    ///      <para/>
+    ///    </description></item>
+    ///
+    ///    <item><description>
+    ///      <a name="formula_termBoost"></a>
+    ///      <b><i>t.Boost</i></b>
     ///      is a search time boost of term <i>t</i> in the query <i>q</i> as
     ///      specified in the query text
-    ///      (see <A HREF="{@docRoot}/../queryparser/org/apache/lucene/queryparser/classic/package-summary.html#Boosting_a_Term">query syntax</A>),
+    ///      (see <a href="{@docRoot}/../queryparser/org/apache/lucene/queryparser/classic/package-summary.html#Boosting_a_Term">query syntax</a>),
     ///      or as set by application calls to
-    ///      <seealso cref="Lucene.Net.Search.Query#setBoost(float) setBoost()"/>.
+    ///      <see cref="Lucene.Net.Search.Query.Boost"/>.
     ///      Notice that there is really no direct API for accessing a boost of one term in a multi term query,
     ///      but rather multi terms are represented in a query as multi
-    ///      <seealso cref="Lucene.Net.Search.TermQuery TermQuery"/> objects,
+    ///      <see cref="Lucene.Net.Search.TermQuery"/> objects,
     ///      and so the boost of a term in the query is accessible by calling the sub-query
-    ///      <seealso cref="Lucene.Net.Search.Query#getBoost() getBoost()"/>.
-    ///      <br>&nbsp;<br>
-    ///    </li>
+    ///      <see cref="Lucene.Net.Search.Query.Boost"/>.
+    ///      <para/>
+    ///    </description></item>
     ///
-    ///    <li>
-    ///      <A NAME="formula_norm"></A>
+    ///    <item><description>
+    ///      <a name="formula_norm"></a>
     ///      <b><i>norm(t,d)</i></b> encapsulates a few (indexing time) boost and length factors:
     ///
-    ///      <ul>
-    ///        <li><b>Field boost</b> - set by calling
-    ///        <seealso cref="Field#setBoost(float) field.setBoost()"/>
+    ///      <list type="bullet">
+    ///        <item><description><b>Field boost</b> - set by assigning
+    ///        <see cref="Documents.Field.Boost"/>
     ///        before adding the field to a document.
-    ///        </li>
-    ///        <li><b>lengthNorm</b> - computed
+    ///        </description></item>
+    ///        <item><description><b>lengthNorm</b> - computed
     ///        when the document is added to the index in accordance with the number of tokens
     ///        of this field in the document, so that shorter fields contribute more to the score.
-    ///        LengthNorm is computed by the Similarity class in effect at indexing.
-    ///        </li>
-    ///      </ul>
-    ///      The <seealso cref="#computeNorm"/> method is responsible for
-    ///      combining all of these factors into a single float.
+    ///        LengthNorm is computed by the <see cref="Similarity"/> class in effect at indexing.
+    ///        </description></item>
+    ///      </list>
+    ///      The <see cref="ComputeNorm(FieldInvertState)"/> method is responsible for
+    ///      combining all of these factors into a single <see cref="float"/>.
     ///
-    ///      <p>
+    ///      <para/>
     ///      When a document is added to the index, all the above factors are multiplied.
     ///      If the document has multiple fields with the same name, all their boosts are multiplied together:
-    ///
-    ///      <br>&nbsp;<br>
-    ///      <table cellpadding="1" cellspacing="0" border="0" align="center" style="width:auto">
-    ///        <tr>
-    ///          <td valign="middle" align="right" rowspan="1">
-    ///            norm(t,d) &nbsp; = &nbsp;
-    ///            lengthNorm
-    ///            &nbsp;&middot;&nbsp;
-    ///          </td>
-    ///          <td valign="bottom" align="center" rowspan="1" style="text-align: center">
-    ///            <big><big><big>&prod;</big></big></big>
-    ///          </td>
-    ///          <td valign="middle" align="right" rowspan="1">
-    ///            <seealso cref="Lucene.Net.Index.IIndexableField#boost() f.boost"/>()
-    ///          </td>
-    ///        </tr>
-    ///        <tr valigh="top">
-    ///          <td></td>
-    ///          <td align="center" style="text-align: center"><small>field <i><b>f</b></i> in <i>d</i> named as <i><b>t</b></i></small></td>
-    ///          <td></td>
-    ///        </tr>
-    ///      </table>
+    ///      
+    ///      <para/>
+    ///      <list type="table">
+    ///         <item>
+    ///             <term>
+    ///                 norm(t,d) &#160; = &#160;
+    ///                 lengthNorm
+    ///                 &#160;&#183;&#160;
+    ///             </term>
+    ///             <term><big><big><big>&#8719;</big></big></big></term>
+    ///             <term><see cref="Index.IIndexableField.Boost"/></term>
+    ///         </item>
+    ///         <item>
+    ///             <term></term>
+    ///             <term><small>field <i><b>f</b></i> in <i>d</i> named as <i><b>t</b></i></small></term>
+    ///             <term></term>
+    ///         </item>
+    ///      </list>
     ///      Note that search time is too late to modify this <i>norm</i> part of scoring,
-    ///      e.g. by using a different <seealso cref="Similarity"/> for search.
-    ///    </li>
-    /// </ol>
+    ///      e.g. by using a different <see cref="Similarity"/> for search.
+    ///    </description></item>
+    /// </list>
     /// </summary>
-    /// <seealso cref= Lucene.Net.Index.IndexWriterConfig#setSimilarity(Similarity) </seealso>
-    /// <seealso cref= IndexSearcher#setSimilarity(Similarity) </seealso>
+    /// <seealso cref="Lucene.Net.Index.IndexWriterConfig.Similarity"/>
+    /// <seealso cref="IndexSearcher.Similarity"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -514,14 +494,14 @@ namespace Lucene.Net.Search.Similarities
         /// Computes a score factor based on the fraction of all query terms that a
        /// document contains.  This value is multiplied into scores.
         ///
-        /// <p>The presence of a large portion of the query terms indicates a better
+        /// <para/>The presence of a large portion of the query terms indicates a better
         /// match with the query, so implementations of this method usually return
         /// larger values when the ratio between these parameters is large and smaller
         /// values when the ratio between them is small.
         /// </summary>
-        /// <param name="overlap"> the number of query terms matched in the document </param>
-        /// <param name="maxOverlap"> the total number of terms in the query </param>
-        /// <returns> a score factor based on term overlap with the query </returns>
+        /// <param name="overlap"> The number of query terms matched in the document </param>
+        /// <param name="maxOverlap"> The total number of terms in the query </param>
+        /// <returns> A score factor based on term overlap with the query </returns>
         public override abstract float Coord(int overlap, int maxOverlap);
 
         /// <summary>
@@ -531,49 +511,49 @@ namespace Lucene.Net.Search.Similarities
         /// computed as 1/sqrt(sumOfSquaredWeights), other implementations might
         /// completely ignore sumOfSquaredWeights (ie return 1).
         ///
-        /// <p>this does not affect ranking, but the default implementation does make scores
+        /// <para/>This does not affect ranking, but the default implementation does make scores
         /// from different queries more comparable than they would be by eliminating the
-        /// magnitude of the Query vector as a factor in the score.
+        /// magnitude of the <see cref="Query"/> vector as a factor in the score.
         /// </summary>
-        /// <param name="sumOfSquaredWeights"> the sum of the squares of query term weights </param>
-        /// <returns> a normalization factor for query weights </returns>
+        /// <param name="sumOfSquaredWeights"> The sum of the squares of query term weights </param>
+        /// <returns> A normalization factor for query weights </returns>
         public override abstract float QueryNorm(float sumOfSquaredWeights);
 
         /// <summary>
         /// Computes a score factor based on a term or phrase's frequency in a
-        /// document.  this value is multiplied by the <seealso cref="#idf(long, long)"/>
+        /// document.  This value is multiplied by the <see cref="Idf(long, long)"/>
         /// factor for each term in the query and these products are then summed to
         /// form the initial score for a document.
         ///
-        /// <p>Terms and phrases repeated in a document indicate the topic of the
+        /// <para/>Terms and phrases repeated in a document indicate the topic of the
         /// document, so implementations of this method usually return larger values
-        /// when <code>freq</code> is large, and smaller values when <code>freq</code>
+        /// when <paramref name="freq"/> is large, and smaller values when <paramref name="freq"/>
         /// is small.
         /// </summary>
-        /// <param name="freq"> the frequency of a term within a document </param>
-        /// <returns> a score factor based on a term's within-document frequency </returns>
+        /// <param name="freq"> The frequency of a term within a document </param>
+        /// <returns> A score factor based on a term's within-document frequency </returns>
         public abstract float Tf(float freq);
 
         /// <summary>
         /// Computes a score factor for a simple term and returns an explanation
         /// for that score factor.
         ///
-        /// <p>
+        /// <para/>
         /// The default implementation uses:
         ///
-        /// <pre class="prettyprint">
-        /// idf(docFreq, searcher.maxDoc());
-        /// </pre>
+        /// <code>
+        /// Idf(docFreq, searcher.MaxDoc);
+        /// </code>
         ///
-        /// Note that <seealso cref="CollectionStatistics#maxDoc()"/> is used instead of
-        /// <seealso cref="Lucene.Net.Index.IndexReader#numDocs() IndexReader#numDocs()"/> because also
-        /// <seealso cref="TermStatistics#docFreq()"/> is used, and when the latter
-        /// is inaccurate, so is <seealso cref="CollectionStatistics#maxDoc()"/>, and in the same direction.
-        /// In addition, <seealso cref="CollectionStatistics#maxDoc()"/> is more efficient to compute
+        /// Note that <see cref="CollectionStatistics.MaxDoc"/> is used instead of
+        /// <see cref="Lucene.Net.Index.IndexReader.NumDocs"/> because also
+        /// <see cref="TermStatistics.DocFreq"/> is used, and when the latter
+        /// is inaccurate, so is <see cref="CollectionStatistics.MaxDoc"/>, and in the same direction.
+        /// In addition, <see cref="CollectionStatistics.MaxDoc"/> is more efficient to compute
         /// </summary>
-        /// <param name="collectionStats"> collection-level statistics </param>
-        /// <param name="termStats"> term-level statistics for the term </param>
-        /// <returns> an Explain object that includes both an idf score factor
+        /// <param name="collectionStats"> Collection-level statistics </param>
+        /// <param name="termStats"> Term-level statistics for the term </param>
+        /// <returns> An Explain object that includes both an idf score factor
         ///           and an explanation for the term. </returns>
         public virtual Explanation IdfExplain(CollectionStatistics collectionStats, TermStatistics termStats)
         {
@@ -586,13 +566,13 @@ namespace Lucene.Net.Search.Similarities
         /// <summary>
         /// Computes a score factor for a phrase.
         ///
-        /// <p>
+        /// <para/>
         /// The default implementation sums the idf factor for
         /// each term in the phrase.
         /// </summary>
-        /// <param name="collectionStats"> collection-level statistics </param>
-        /// <param name="termStats"> term-level statistics for the terms in the phrase </param>
-        /// <returns> an Explain object that includes both an idf
+        /// <param name="collectionStats"> Collection-level statistics </param>
+        /// <param name="termStats"> Term-level statistics for the terms in the phrase </param>
+        /// <returns> An Explain object that includes both an idf
         ///         score factor for the phrase and an explanation
         ///         for each term. </returns>
         public virtual Explanation IdfExplain(CollectionStatistics collectionStats, TermStatistics[] termStats)
@@ -614,27 +594,27 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// Computes a score factor based on a term's document frequency (the number
-        /// of documents which contain the term).  this value is multiplied by the
-        /// <seealso cref="#tf(float)"/> factor for each term in the query and these products are
+        /// of documents which contain the term).  This value is multiplied by the
+        /// <see cref="Tf(float)"/> factor for each term in the query and these products are
         /// then summed to form the initial score for a document.
         ///
-        /// <p>Terms that occur in fewer documents are better indicators of topic, so
+        /// <para/>Terms that occur in fewer documents are better indicators of topic, so
         /// implementations of this method usually return larger values for rare terms,
         /// and smaller values for common terms.
         /// </summary>
-        /// <param name="docFreq"> the number of documents which contain the term </param>
-        /// <param name="numDocs"> the total number of documents in the collection </param>
-        /// <returns> a score factor based on the term's document frequency </returns>
+        /// <param name="docFreq"> The number of documents which contain the term </param>
+        /// <param name="numDocs"> The total number of documents in the collection </param>
+        /// <returns> A score factor based on the term's document frequency </returns>
         public abstract float Idf(long docFreq, long numDocs);
 
         /// <summary>
         /// Compute an index-time normalization value for this field instance.
-        /// <p>
-        /// this value will be stored in a single byte lossy representation by
-        /// <seealso cref="#encodeNormValue(float)"/>.
+        /// <para/>
+        /// This value will be stored in a single byte lossy representation by
+        /// <see cref="EncodeNormValue(float)"/>.
         /// </summary>
-        /// <param name="state"> statistics of the current field (such as length, boost, etc) </param>
-        /// <returns> an index-time normalization value </returns>
+        /// <param name="state"> Statistics of the current field (such as length, boost, etc) </param>
+        /// <returns> An index-time normalization value </returns>
         public abstract float LengthNorm(FieldInvertState state);
 
         public override sealed long ComputeNorm(FieldInvertState state)
@@ -646,7 +626,7 @@ namespace Lucene.Net.Search.Similarities
         /// <summary>
         /// Decodes a normalization factor stored in an index.
         /// </summary>
-        /// <seealso cref= #encodeNormValue(float) </seealso>
+        /// <see cref="EncodeNormValue(float)"/>
         public abstract float DecodeNormValue(long norm);
 
         /// <summary>
@@ -658,14 +638,14 @@ namespace Lucene.Net.Search.Similarities
         /// this value is summed for each sloppy phrase match in a document to form
         /// the frequency to be used in scoring instead of the exact term count.
         ///
-        /// <p>A phrase match with a small edit distance to a document passage more
+        /// <para/>A phrase match with a small edit distance to a document passage more
         /// closely matches the document, so implementations of this method usually
         /// return larger values when the edit distance is small and smaller values
         /// when it is large.
         /// </summary>
-        /// <seealso cref= PhraseQuery#setSlop(int) </seealso>
-        /// <param name="distance"> the edit distance of this sloppy phrase match </param>
-        /// <returns> the frequency increment for this match </returns>
+        /// <seealso cref="PhraseQuery.Slop"/>
+        /// <param name="distance"> The edit distance of this sloppy phrase match </param>
+        /// <returns> The frequency increment for this match </returns>
         public abstract float SloppyFreq(int distance);
 
         /// <summary>


[03/48] lucenenet git commit: Lucene.Net.Search.Spans: Fixed up documentation comments

Posted by ni...@apache.org.
Lucene.Net.Search.Spans: Fixed up documentation comments


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/396db51b
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/396db51b
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/396db51b

Branch: refs/heads/master
Commit: 396db51b964319e226feb5f519f039807f246c05
Parents: 2a1541c
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Sat Jun 3 03:28:13 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Sat Jun 3 03:28:13 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |  2 +-
 .../Search/Spans/FieldMaskingSpanQuery.cs       | 48 +++++------
 src/Lucene.Net/Search/Spans/NearSpansOrdered.cs | 87 +++++++++++++-------
 .../Search/Spans/NearSpansUnordered.cs          | 18 ++--
 src/Lucene.Net/Search/Spans/SpanFirstQuery.cs   | 11 +--
 .../Search/Spans/SpanMultiTermQueryWrapper.cs   | 61 ++++++++------
 .../Search/Spans/SpanNearPayloadCheckQuery.cs   |  2 -
 src/Lucene.Net/Search/Spans/SpanNearQuery.cs    | 17 ++--
 src/Lucene.Net/Search/Spans/SpanNotQuery.cs     | 26 +++---
 src/Lucene.Net/Search/Spans/SpanOrQuery.cs      |  4 +-
 .../Search/Spans/SpanPayloadCheckQuery.cs       | 17 ++--
 .../Search/Spans/SpanPositionCheckQuery.cs      | 28 +++----
 .../Search/Spans/SpanPositionRangeQuery.cs      |  6 +-
 src/Lucene.Net/Search/Spans/SpanQuery.cs        |  4 +-
 src/Lucene.Net/Search/Spans/SpanScorer.cs       |  3 +-
 src/Lucene.Net/Search/Spans/SpanTermQuery.cs    |  2 +-
 src/Lucene.Net/Search/Spans/Spans.cs            | 57 +++++++------
 17 files changed, 210 insertions(+), 183 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/396db51b/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 68b0f1c..45f77ee 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -52,7 +52,7 @@ helpers to help with that, see for examples see our [Java style methods to avoid
 
 1. Lucene.Net.Core (project)
    1. Codecs (namespace)
-   2. Search (namespace) (Except for Search.Payloads and Search.Similarities)
+   2. Search (namespace) (Except for Search.Payloads, Search.Similarities, and Search.Spans)
    3. Support (namespace)
    4. Util (namespace) (Except for Util.Fst)
 2. Lucene.Net.Codecs (project)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/396db51b/src/Lucene.Net/Search/Spans/FieldMaskingSpanQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Spans/FieldMaskingSpanQuery.cs b/src/Lucene.Net/Search/Spans/FieldMaskingSpanQuery.cs
index 0b73439..feb70ae 100644
--- a/src/Lucene.Net/Search/Spans/FieldMaskingSpanQuery.cs
+++ b/src/Lucene.Net/Search/Spans/FieldMaskingSpanQuery.cs
@@ -30,20 +30,20 @@ namespace Lucene.Net.Search.Spans
     using ToStringUtils = Lucene.Net.Util.ToStringUtils;
 
     /// <summary>
-    /// <p>Wrapper to allow <seealso cref="SpanQuery"/> objects participate in composite
+    /// <para>Wrapper to allow <see cref="SpanQuery"/> objects participate in composite
     /// single-field SpanQueries by 'lying' about their search field. That is,
-    /// the masked SpanQuery will function as normal,
-    /// but <seealso cref="SpanQuery#getField()"/> simply hands back the value supplied
-    /// in this class's constructor.</p>
+    /// the masked <see cref="SpanQuery"/> will function as normal,
+    /// but <see cref="SpanQuery.Field"/> simply hands back the value supplied
+    /// in this class's constructor.</para>
     ///
-    /// <p>this can be used to support Queries like <seealso cref="SpanNearQuery"/> or
-    /// <seealso cref="SpanOrQuery"/> across different fields, which is not ordinarily
-    /// permitted.</p>
+    /// <para>This can be used to support Queries like <see cref="SpanNearQuery"/> or
+    /// <see cref="SpanOrQuery"/> across different fields, which is not ordinarily
+    /// permitted.</para>
     ///
-    /// <p>this can be useful for denormalized relational data: for example, when
-    /// indexing a document with conceptually many 'children': </p>
+    /// <para>This can be useful for denormalized relational data: for example, when
+    /// indexing a document with conceptually many 'children': </para>
     ///
-    /// <pre>
+    /// <code>
     ///  teacherid: 1
     ///  studentfirstname: james
     ///  studentsurname: jones
@@ -53,24 +53,24 @@ namespace Lucene.Net.Search.Spans
     ///  studentsurname: smith
     ///  studentfirstname: sally
     ///  studentsurname: jones
-    /// </pre>
+    /// </code>
     ///
-    /// <p>a SpanNearQuery with a slop of 0 can be applied across two
-    /// <seealso cref="SpanTermQuery"/> objects as follows:
-    /// <pre class="prettyprint">
-    ///    SpanQuery q1  = new SpanTermQuery(new Term("studentfirstname", "james"));
-    ///    SpanQuery q2  = new SpanTermQuery(new Term("studentsurname", "jones"));
-    ///    SpanQuery q2m = new FieldMaskingSpanQuery(q2, "studentfirstname");
-    ///    Query q = new SpanNearQuery(new SpanQuery[]{q1, q2m}, -1, false);
-    /// </pre>
+    /// <para>A <see cref="SpanNearQuery"/> with a slop of 0 can be applied across two
+    /// <see cref="SpanTermQuery"/> objects as follows:
+    /// <code>
+    ///     SpanQuery q1  = new SpanTermQuery(new Term("studentfirstname", "james"));
+    ///     SpanQuery q2  = new SpanTermQuery(new Term("studentsurname", "jones"));
+    ///     SpanQuery q2m = new FieldMaskingSpanQuery(q2, "studentfirstname");
+    ///     Query q = new SpanNearQuery(new SpanQuery[] { q1, q2m }, -1, false);
+    /// </code>
     /// to search for 'studentfirstname:james studentsurname:jones' and find
     /// teacherid 1 without matching teacherid 2 (which has a 'james' in position 0
-    /// and 'jones' in position 1). </p>
+    /// and 'jones' in position 1). </para>
     ///
-    /// <p>Note: as <seealso cref="#getField()"/> returns the masked field, scoring will be
-    /// done using the Similarity and collection statistics of the field name supplied,
-    /// but with the term statistics of the real field. this may lead to exceptions,
-    /// poor performance, and unexpected scoring behaviour.</p>
+    /// <para>Note: as <see cref="Field"/> returns the masked field, scoring will be
+    /// done using the <see cref="Similarities.Similarity"/> and collection statistics of the field name supplied,
+    /// but with the term statistics of the real field. This may lead to exceptions,
+    /// poor performance, and unexpected scoring behavior.</para>
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/396db51b/src/Lucene.Net/Search/Spans/NearSpansOrdered.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Spans/NearSpansOrdered.cs b/src/Lucene.Net/Search/Spans/NearSpansOrdered.cs
index 8af1196..9c606cb 100644
--- a/src/Lucene.Net/Search/Spans/NearSpansOrdered.cs
+++ b/src/Lucene.Net/Search/Spans/NearSpansOrdered.cs
@@ -31,25 +31,27 @@ namespace Lucene.Net.Search.Spans
     using TermContext = Lucene.Net.Index.TermContext;
 
     /// <summary>
-    /// A Spans that is formed from the ordered subspans of a SpanNearQuery
+    /// A <see cref="Spans"/> that is formed from the ordered subspans of a <see cref="SpanNearQuery"/>
     /// where the subspans do not overlap and have a maximum slop between them.
-    /// <p>
-    /// The formed spans only contains minimum slop matches.<br>
+    /// <para/>
+    /// The formed spans only contains minimum slop matches.
+    /// <para/>
     /// The matching slop is computed from the distance(s) between
-    /// the non overlapping matching Spans.<br>
-    /// Successive matches are always formed from the successive Spans
-    /// of the SpanNearQuery.
-    /// <p>
+    /// the non overlapping matching <see cref="Spans"/>.
+    /// <para/>
+    /// Successive matches are always formed from the successive <see cref="Spans"/>
+    /// of the <see cref="SpanNearQuery"/>.
+    /// <para/>
     /// The formed spans may contain overlaps when the slop is at least 1.
     /// For example, when querying using
-    /// <pre>t1 t2 t3</pre>
+    /// <c>t1 t2 t3</c>
     /// with slop at least 1, the fragment:
-    /// <pre>t1 t2 t1 t3 t2 t3</pre>
+    /// <c>t1 t2 t1 t3 t2 t3</c>
     /// matches twice:
-    /// <pre>t1 t2 .. t3      </pre>
-    /// <pre>      t1 .. t2 t3</pre>
-    ///
+    /// <c>t1 t2 .. t3      </c>
+    /// <c>      t1 .. t2 t3</c>
     ///
+    /// <para/>
     /// Expert:
     /// Only public for subclassing.  Most implementations should not need this class
     /// </summary>
@@ -63,11 +65,11 @@ namespace Lucene.Net.Search.Spans
         private bool more = false;
 
         /// <summary>
-        /// The spans in the same order as the SpanNearQuery </summary>
+        /// The spans in the same order as the <see cref="SpanNearQuery"/> </summary>
         private readonly Spans[] subSpans;
 
         /// <summary>
-        /// Indicates that all subSpans have same doc() </summary>
+        /// Indicates that all subSpans have same <see cref="Doc"/> </summary>
         private bool inSameDoc = false;
 
         private int matchDoc = -1;
@@ -133,19 +135,22 @@ namespace Lucene.Net.Search.Spans
             query = spanNearQuery; // kept for toString() only.
         }
 
-        // inherit javadocs
+        /// <summary>
+        /// Returns the document number of the current match.  Initially invalid. </summary>
         public override int Doc
-        // inherit javadocs
         {
             get { return matchDoc; }
         }
 
+        /// <summary>
+        /// Returns the start position of the current match.  Initially invalid. </summary>
         public override int Start
-        // inherit javadocs
         {
             get { return matchStart; }
         }
 
+        /// <summary>
+        /// Returns the end position of the current match.  Initially invalid. </summary>
         public override int End
         {
             get { return matchEnd; }
@@ -184,7 +189,8 @@ namespace Lucene.Net.Search.Spans
             return minCost;
         }
 
-        // inherit javadocs
+        /// <summary>
+        /// Move to the next match, returning true iff any such exists. </summary>
         public override bool Next()
         {
             if (firstTime)
@@ -207,7 +213,28 @@ namespace Lucene.Net.Search.Spans
             return AdvanceAfterOrdered();
         }
 
-        // inherit javadocs
+        /// <summary>
+        /// Skips to the first match beyond the current, whose document number is
+        /// greater than or equal to <i>target</i>.
+        /// <para/>The behavior of this method is <b>undefined</b> when called with
+        /// <c> target &lt;= current</c>, or after the iterator has exhausted.
+        /// Both cases may result in unpredicted behavior.
+        /// <para/>Returns <c>true</c> if there is such
+        /// a match.  
+        /// <para/>Behaves as if written: 
+        /// <code>
+        ///     bool SkipTo(int target) 
+        ///     {
+        ///         do 
+        ///         {
+        ///             if (!Next())
+        ///                 return false;
+        ///         } while (target > Doc);
+        ///         return true;
+        ///     }
+        /// </code>
+        /// Most implementations are considerably more efficient than that.
+        /// </summary>
         public override bool SkipTo(int target)
         {
             if (firstTime)
@@ -243,9 +270,9 @@ namespace Lucene.Net.Search.Spans
         }
 
         /// <summary>
-        /// Advances the subSpans to just after an ordered match with a minimum slop
-        /// that is smaller than the slop allowed by the SpanNearQuery. </summary>
-        /// <returns> true iff there is such a match. </returns>
+        /// Advances the <see cref="SubSpans"/> to just after an ordered match with a minimum slop
+        /// that is smaller than the slop allowed by the <see cref="SpanNearQuery"/>. </summary>
+        /// <returns> <c>true</c> if there is such a match. </returns>
         private bool AdvanceAfterOrdered()
         {
             while (more && (inSameDoc || ToSameDoc()))
@@ -259,7 +286,7 @@ namespace Lucene.Net.Search.Spans
         }
 
         /// <summary>
-        /// Advance the subSpans to the same document </summary>
+        /// Advance the <see cref="SubSpans"/> to the same document </summary>
         private bool ToSameDoc()
         {
             sorter.Sort(0, subSpansByDoc.Length);
@@ -288,10 +315,10 @@ namespace Lucene.Net.Search.Spans
         }
 
         /// <summary>
-        /// Check whether two Spans in the same document are ordered. </summary>
-        /// <returns> true iff spans1 starts before spans2
+        /// Check whether two <see cref="Spans"/> in the same document are ordered. </summary>
+        /// <returns> <c>true</c> if <paramref name="spans1"/> starts before <paramref name="spans2"/>
         ///              or the spans start at the same position,
-        ///              and spans1 ends before spans2. </returns>
+        ///              and <paramref name="spans1"/> ends before <paramref name="spans2"/>. </returns>
         internal static bool DocSpansOrdered(Spans spans1, Spans spans2)
         {
             Debug.Assert(spans1.Doc == spans2.Doc, "doc1 " + spans1.Doc + " != doc2 " + spans2.Doc);
@@ -302,7 +329,7 @@ namespace Lucene.Net.Search.Spans
         }
 
         /// <summary>
-        /// Like <seealso cref="#docSpansOrdered(Spans,Spans)"/>, but use the spans
+        /// Like <see cref="DocSpansOrdered(Spans, Spans)"/>, but use the spans
         /// starts and ends as parameters.
         /// </summary>
         private static bool DocSpansOrdered(int start1, int end1, int start2, int end2)
@@ -311,7 +338,7 @@ namespace Lucene.Net.Search.Spans
         }
 
         /// <summary>
-        /// Order the subSpans within the same document by advancing all later spans
+        /// Order the <see cref="SubSpans"/> within the same document by advancing all later spans
         /// after the previous one.
         /// </summary>
         private bool StretchToOrder()
@@ -338,9 +365,9 @@ namespace Lucene.Net.Search.Spans
         }
 
         /// <summary>
-        /// The subSpans are ordered in the same doc, so there is a possible match.
+        /// The <see cref="SubSpans"/> are ordered in the same doc, so there is a possible match.
         /// Compute the slop while making the match as short as possible by advancing
-        /// all subSpans except the last one in reverse order.
+        /// all <see cref="SubSpans"/> except the last one in reverse order.
         /// </summary>
         private bool ShrinkToAfterShortestMatch()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/396db51b/src/Lucene.Net/Search/Spans/NearSpansUnordered.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Spans/NearSpansUnordered.cs b/src/Lucene.Net/Search/Spans/NearSpansUnordered.cs
index dc5959c..40c4d6c 100644
--- a/src/Lucene.Net/Search/Spans/NearSpansUnordered.cs
+++ b/src/Lucene.Net/Search/Spans/NearSpansUnordered.cs
@@ -28,8 +28,8 @@ namespace Lucene.Net.Search.Spans
     using TermContext = Lucene.Net.Index.TermContext;
 
     /// <summary>
-    /// Similar to <seealso cref="NearSpansOrdered"/>, but for the unordered case.
-    ///
+    /// Similar to <see cref="NearSpansOrdered"/>, but for the unordered case.
+    /// <para/>
     /// Expert:
     /// Only public for subclassing.  Most implementations should not need this class
     /// </summary>
@@ -82,7 +82,7 @@ namespace Lucene.Net.Search.Spans
         }
 
         /// <summary>
-        /// Wraps a Spans, and can be used to form a linked list. </summary>
+        /// Wraps a <see cref="Spans"/>, and can be used to form a linked list. </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -142,8 +142,8 @@ namespace Lucene.Net.Search.Spans
                 get { return spans.Start; }
             }
 
-            public override int End
             // TODO: Remove warning after API has been finalized
+            public override int End
             {
                 get { return spans.End; }
             }
@@ -309,17 +309,17 @@ namespace Lucene.Net.Search.Spans
             get { return Min.Start; }
         }
 
-        // TODO: Remove warning after API has been finalized
-        /// <summary>
-        /// WARNING: The List is not necessarily in order of the the positions </summary>
-        /// <returns> Collection of <code>byte[]</code> payloads </returns>
-        /// <exception cref="IOException"> if there is a low-level I/O error </exception>
         public override int End
         
         {
             get { return max.End; }
         }
 
+        // TODO: Remove warning after API has been finalized
+        /// <summary>
+        /// WARNING: The List is not necessarily in order of the the positions </summary>
+        /// <returns> Collection of <see cref="T:byte[]"/> payloads </returns>
+        /// <exception cref="System.IO.IOException"> if there is a low-level I/O error </exception>
         public override ICollection<byte[]> GetPayload()
         {
             var matchPayload = new HashSet<byte[]>();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/396db51b/src/Lucene.Net/Search/Spans/SpanFirstQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Spans/SpanFirstQuery.cs b/src/Lucene.Net/Search/Spans/SpanFirstQuery.cs
index 38bc9ad..6a7b402 100644
--- a/src/Lucene.Net/Search/Spans/SpanFirstQuery.cs
+++ b/src/Lucene.Net/Search/Spans/SpanFirstQuery.cs
@@ -26,12 +26,9 @@ namespace Lucene.Net.Search.Spans
 
     /// <summary>
     /// Matches spans near the beginning of a field.
-    /// <p/>
-    /// this class is a simple extension of <seealso cref="SpanPositionRangeQuery"/> in that it assumes the
+    /// <para/>
+    /// This class is a simple extension of <see cref="SpanPositionRangeQuery"/> in that it assumes the
     /// start to be zero and only checks the end boundary.
-    ///
-    ///
-    ///
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -39,8 +36,8 @@ namespace Lucene.Net.Search.Spans
     public class SpanFirstQuery : SpanPositionRangeQuery
     {
         /// <summary>
-        /// Construct a SpanFirstQuery matching spans in <code>match</code> whose end
-        /// position is less than or equal to <code>end</code>.
+        /// Construct a <see cref="SpanFirstQuery"/> matching spans in <paramref name="match"/> whose end
+        /// position is less than or equal to <paramref name="end"/>.
         /// </summary>
         public SpanFirstQuery(SpanQuery match, int end)
             : base(match, 0, end)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/396db51b/src/Lucene.Net/Search/Spans/SpanMultiTermQueryWrapper.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Spans/SpanMultiTermQueryWrapper.cs b/src/Lucene.Net/Search/Spans/SpanMultiTermQueryWrapper.cs
index ef4af40..f52ece5 100644
--- a/src/Lucene.Net/Search/Spans/SpanMultiTermQueryWrapper.cs
+++ b/src/Lucene.Net/Search/Spans/SpanMultiTermQueryWrapper.cs
@@ -28,20 +28,18 @@ namespace Lucene.Net.Search.Spans
     using TermContext = Lucene.Net.Index.TermContext;
 
     /// <summary>
-    /// Wraps any <seealso cref="MultiTermQuery"/> as a <seealso cref="SpanQuery"/>,
-    /// so it can be nested within other SpanQuery classes.
-    /// <p>
-    /// The query is rewritten by default to a <seealso cref="SpanOrQuery"/> containing
+    /// Wraps any <see cref="MultiTermQuery"/> as a <see cref="SpanQuery"/>,
+    /// so it can be nested within other <see cref="SpanQuery"/> classes.
+    /// <para/>
+    /// The query is rewritten by default to a <see cref="SpanOrQuery"/> containing
     /// the expanded terms, but this can be customized.
-    /// <p>
+    /// <para/>
     /// Example:
-    /// <blockquote><pre class="prettyprint">
-    /// {@code
+    /// <code>
     /// WildcardQuery wildcard = new WildcardQuery(new Term("field", "bro?n"));
-    /// SpanQuery spanWildcard = new SpanMultiTermQueryWrapper<WildcardQuery>(wildcard);
+    /// SpanQuery spanWildcard = new SpanMultiTermQueryWrapper&lt;WildcardQuery&gt;(wildcard);
     /// // do something with spanWildcard, such as use it in a SpanFirstQuery
-    /// }
-    /// </pre></blockquote>
+    /// </code>
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -51,14 +49,14 @@ namespace Lucene.Net.Search.Spans
         protected readonly Q m_query;
 
         /// <summary>
-        /// Create a new SpanMultiTermQueryWrapper.
+        /// Create a new <see cref="SpanMultiTermQueryWrapper{Q}"/>.
         /// </summary>
         /// <param name="query"> Query to wrap.
-        /// <p>
-        /// NOTE: this will call <seealso cref="MultiTermQuery#setRewriteMethod(MultiTermQuery.RewriteMethod)"/>
-        /// on the wrapped <code>query</code>, changing its rewrite method to a suitable one for spans.
+        /// <para/>
+        /// NOTE: This will set <see cref="MultiTermQuery.MultiTermRewriteMethod"/>
+        /// on the wrapped <paramref name="query"/>, changing its rewrite method to a suitable one for spans.
         /// Be sure to not change the rewrite method on the wrapped query afterwards! Doing so will
-        /// throw <seealso cref="UnsupportedOperationException"/> on rewriting this query! </param>
+        /// throw <see cref="NotSupportedException"/> on rewriting this query! </param>
         public SpanMultiTermQueryWrapper(Q query)
         {
             this.m_query = query;
@@ -76,7 +74,8 @@ namespace Lucene.Net.Search.Spans
         }
 
         /// <summary>
-        /// Expert: returns the rewriteMethod
+        /// Expert: Gets or Sets the rewrite method. This only makes sense
+        /// to be a span rewrite method.
         /// </summary>
         public SpanRewriteMethod MultiTermRewriteMethod
         {
@@ -176,11 +175,11 @@ namespace Lucene.Net.Search.Spans
         // LUCENENET NOTE: Moved SpanRewriteMethod outside of this class
 
         /// <summary>
-        /// A rewrite method that first translates each term into a SpanTermQuery in a
-        /// <seealso cref="Occur#SHOULD"/> clause in a BooleanQuery, and keeps the
+        /// A rewrite method that first translates each term into a <see cref="SpanTermQuery"/> in a
+        /// <see cref="Occur.SHOULD"/> clause in a <see cref="BooleanQuery"/>, and keeps the
         /// scores as computed by the query.
         /// </summary>
-        /// <seealso cref= #setRewriteMethod </seealso>
+        /// <seealso cref="MultiTermRewriteMethod"/>
         public static readonly SpanRewriteMethod SCORING_SPAN_QUERY_REWRITE = new SpanRewriteMethodAnonymousInnerClassHelper();
 
 #if FEATURE_SERIALIZABLE
@@ -231,15 +230,15 @@ namespace Lucene.Net.Search.Spans
         }
 
         /// <summary>
-        /// A rewrite method that first translates each term into a SpanTermQuery in a
-        /// <seealso cref="Occur#SHOULD"/> clause in a BooleanQuery, and keeps the
+        /// A rewrite method that first translates each term into a <see cref="SpanTermQuery"/> in a
+        /// <see cref="Occur.SHOULD"/> clause in a <see cref="BooleanQuery"/>, and keeps the
         /// scores as computed by the query.
         ///
-        /// <p>
-        /// this rewrite method only uses the top scoring terms so it will not overflow
+        /// <para/>
+        /// This rewrite method only uses the top scoring terms so it will not overflow
         /// the boolean max clause count.
         /// </summary>
-        /// <seealso cref= #setRewriteMethod </seealso>
+        /// <seealso cref="MultiTermRewriteMethod"/>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -248,8 +247,8 @@ namespace Lucene.Net.Search.Spans
             private readonly TopTermsRewrite<SpanOrQuery> @delegate;
 
             /// <summary>
-            /// Create a TopTermsSpanBooleanQueryRewrite for
-            /// at most <code>size</code> terms.
+            /// Create a <see cref="TopTermsSpanBooleanQueryRewrite"/> for
+            /// at most <paramref name="size"/> terms.
             /// </summary>
             public TopTermsSpanBooleanQueryRewrite(int size)
             {
@@ -292,6 +291,7 @@ namespace Lucene.Net.Search.Spans
 
             /// <summary>
             /// return the maximum priority queue size.
+            /// <para/>
             /// NOTE: This was size() in Lucene.
             /// </summary>
             public int Count
@@ -344,14 +344,21 @@ namespace Lucene.Net.Search.Spans
     }
 
     /// <summary>
-    /// LUCENENET specific interface for referring to/identifying a SpanMultipTermQueryWrapper without
+    /// LUCENENET specific interface for referring to/identifying a <see cref="Search.Spans.SpanMultiTermQueryWrapper{Q}"/> without
     /// referring to its generic closing type.
     /// </summary>
     public interface ISpanMultiTermQueryWrapper
     {
+        /// <summary>
+        /// Expert: Gets or Sets the rewrite method. This only makes sense
+        /// to be a span rewrite method.
+        /// </summary>
         SpanRewriteMethod MultiTermRewriteMethod { get; }
         Spans GetSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts);
         string Field { get; }
+
+        /// <summary>
+        /// Returns the wrapped query </summary>
         Query WrappedQuery { get; }
         Query Rewrite(IndexReader reader);
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/396db51b/src/Lucene.Net/Search/Spans/SpanNearPayloadCheckQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Spans/SpanNearPayloadCheckQuery.cs b/src/Lucene.Net/Search/Spans/SpanNearPayloadCheckQuery.cs
index d00091f..b86e5d8 100644
--- a/src/Lucene.Net/Search/Spans/SpanNearPayloadCheckQuery.cs
+++ b/src/Lucene.Net/Search/Spans/SpanNearPayloadCheckQuery.cs
@@ -28,8 +28,6 @@ namespace Lucene.Net.Search.Spans
     /// <summary>
     /// Only return those matches that have a specific payload at
     /// the given position.
-    /// <p/>
-    ///
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/396db51b/src/Lucene.Net/Search/Spans/SpanNearQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Spans/SpanNearQuery.cs b/src/Lucene.Net/Search/Spans/SpanNearQuery.cs
index 6c9a9e5..0ad2cb3 100644
--- a/src/Lucene.Net/Search/Spans/SpanNearQuery.cs
+++ b/src/Lucene.Net/Search/Spans/SpanNearQuery.cs
@@ -48,14 +48,13 @@ namespace Lucene.Net.Search.Spans
         private bool collectPayloads;
 
         /// <summary>
-        /// Construct a SpanNearQuery.  Matches spans matching a span from each
-        /// clause, with up to <code>slop</code> total unmatched positions between
-        /// them.  * When <code>inOrder</code> is true, the spans from each clause
-        /// must be * ordered as in <code>clauses</code>. </summary>
-        /// <param name="clauses"> the clauses to find near each other </param>
+        /// Construct a <see cref="SpanNearQuery"/>.  Matches spans matching a span from each
+        /// clause, with up to <paramref name="slop"/> total unmatched positions between
+        /// them.  * When <paramref name="inOrder"/> is <c>true</c>, the spans from each clause
+        /// must be * ordered as in <paramref name="clauses"/>. </summary>
+        /// <param name="clauses"> The clauses to find near each other </param>
         /// <param name="slop"> The slop value </param>
-        /// <param name="inOrder"> true if order is important
-        ///  </param>
+        /// <param name="inOrder"> <c>true</c> if order is important</param>
         public SpanNearQuery(SpanQuery[] clauses, int slop, bool inOrder)
             : this(clauses, slop, inOrder, true)
         {
@@ -101,7 +100,7 @@ namespace Lucene.Net.Search.Spans
         }
 
         /// <summary>
-        /// Return true if matches are required to be in-order. </summary>
+        /// Return <c>true</c> if matches are required to be in-order. </summary>
         public virtual bool IsInOrder
         {
             get
@@ -129,7 +128,7 @@ namespace Lucene.Net.Search.Spans
         public override string ToString(string field)
         {
             StringBuilder buffer = new StringBuilder();
-            buffer.Append("spanNear([");
+            buffer.Append("SpanNear([");
             IEnumerator<SpanQuery> i = m_clauses.GetEnumerator();
             bool isFirst = true;
             while (i.MoveNext())

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/396db51b/src/Lucene.Net/Search/Spans/SpanNotQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Spans/SpanNotQuery.cs b/src/Lucene.Net/Search/Spans/SpanNotQuery.cs
index 3d69c34..bb69f28 100644
--- a/src/Lucene.Net/Search/Spans/SpanNotQuery.cs
+++ b/src/Lucene.Net/Search/Spans/SpanNotQuery.cs
@@ -30,8 +30,8 @@ namespace Lucene.Net.Search.Spans
     using ToStringUtils = Lucene.Net.Util.ToStringUtils;
 
     /// <summary>
-    /// Removes matches which overlap with another SpanQuery or
-    /// within a x tokens before or y tokens after another SpanQuery.
+    /// Removes matches which overlap with another <see cref="SpanQuery"/> or
+    /// within a x tokens before or y tokens after another <see cref="SpanQuery"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -44,8 +44,8 @@ namespace Lucene.Net.Search.Spans
         private readonly int post;
 
         /// <summary>
-        /// Construct a SpanNotQuery matching spans from <code>include</code> which
-        /// have no overlap with spans from <code>exclude</code>.
+        /// Construct a <see cref="SpanNotQuery"/> matching spans from <paramref name="include"/> which
+        /// have no overlap with spans from <paramref name="exclude"/>.
         /// </summary>
         public SpanNotQuery(SpanQuery include, SpanQuery exclude)
             : this(include, exclude, 0, 0)
@@ -53,9 +53,9 @@ namespace Lucene.Net.Search.Spans
         }
 
         /// <summary>
-        /// Construct a SpanNotQuery matching spans from <code>include</code> which
-        /// have no overlap with spans from <code>exclude</code> within
-        /// <code>dist</code> tokens of <code>include</code>.
+        /// Construct a <see cref="SpanNotQuery"/> matching spans from <paramref name="include"/> which
+        /// have no overlap with spans from <paramref name="exclude"/> within
+        /// <paramref name="dist"/> tokens of <paramref name="include"/>.
         /// </summary>
         public SpanNotQuery(SpanQuery include, SpanQuery exclude, int dist)
             : this(include, exclude, dist, dist)
@@ -63,9 +63,9 @@ namespace Lucene.Net.Search.Spans
         }
 
         /// <summary>
-        /// Construct a SpanNotQuery matching spans from <code>include</code> which
-        /// have no overlap with spans from <code>exclude</code> within
-        /// <code>pre</code> tokens before or <code>post</code> tokens of <code>include</code>.
+        /// Construct a <see cref="SpanNotQuery"/> matching spans from <paramref name="include"/> which
+        /// have no overlap with spans from <paramref name="exclude"/> within
+        /// <paramref name="pre"/> tokens before or <paramref name="post"/> tokens of <paramref name="include"/>.
         /// </summary>
         public SpanNotQuery(SpanQuery include, SpanQuery exclude, int pre, int post)
         {
@@ -81,7 +81,7 @@ namespace Lucene.Net.Search.Spans
         }
 
         /// <summary>
-        /// Return the SpanQuery whose matches are filtered. </summary>
+        /// Return the <see cref="SpanQuery"/> whose matches are filtered. </summary>
         public virtual SpanQuery Include
         {
             get
@@ -91,7 +91,7 @@ namespace Lucene.Net.Search.Spans
         }
 
         /// <summary>
-        /// Return the SpanQuery whose matches must not overlap those returned. </summary>
+        /// Return the <see cref="SpanQuery"/> whose matches must not overlap those returned. </summary>
         public virtual SpanQuery Exclude
         {
             get
@@ -306,7 +306,7 @@ namespace Lucene.Net.Search.Spans
         }
 
         /// <summary>
-        /// Returns true iff <code>o</code> is equal to this. </summary>
+        /// Returns <c>true</c> if <paramref name="o"/> is equal to this. </summary>
         public override bool Equals(object o)
         {
             if (!base.Equals(o))

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/396db51b/src/Lucene.Net/Search/Spans/SpanOrQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Spans/SpanOrQuery.cs b/src/Lucene.Net/Search/Spans/SpanOrQuery.cs
index bfaecda..0d18490 100644
--- a/src/Lucene.Net/Search/Spans/SpanOrQuery.cs
+++ b/src/Lucene.Net/Search/Spans/SpanOrQuery.cs
@@ -41,7 +41,7 @@ namespace Lucene.Net.Search.Spans
         private string field;
 
         /// <summary>
-        /// Construct a SpanOrQuery merging the provided clauses. </summary>
+        /// Construct a <see cref="SpanOrQuery"/> merging the provided <paramref name="clauses"/>. </summary>
         public SpanOrQuery(params SpanQuery[] clauses)
         {
             // copy clauses array into an ArrayList
@@ -53,7 +53,7 @@ namespace Lucene.Net.Search.Spans
         }
 
         /// <summary>
-        /// Adds a clause to this query </summary>
+        /// Adds a <paramref name="clause"/> to this query </summary>
         public void AddClause(SpanQuery clause)
         {
             if (field == null)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/396db51b/src/Lucene.Net/Search/Spans/SpanPayloadCheckQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Spans/SpanPayloadCheckQuery.cs b/src/Lucene.Net/Search/Spans/SpanPayloadCheckQuery.cs
index 1dcb40b..0a68745 100644
--- a/src/Lucene.Net/Search/Spans/SpanPayloadCheckQuery.cs
+++ b/src/Lucene.Net/Search/Spans/SpanPayloadCheckQuery.cs
@@ -1,7 +1,6 @@
 using Lucene.Net.Support;
 using System.Collections.Generic;
 using System;
-using System.Linq;
 using System.Text;
 
 namespace Lucene.Net.Search.Spans
@@ -26,14 +25,12 @@ namespace Lucene.Net.Search.Spans
     using ToStringUtils = Lucene.Net.Util.ToStringUtils;
 
     /// <summary>
-    ///   Only return those matches that have a specific payload at
-    ///  the given position.
-    /// <p/>
-    /// Do not use this with an SpanQuery that contains a <seealso cref="Lucene.Net.Search.Spans.SpanNearQuery"/>.  Instead, use
-    /// <seealso cref="SpanNearPayloadCheckQuery"/> since it properly handles the fact that payloads
-    /// aren't ordered by <seealso cref="Lucene.Net.Search.Spans.SpanNearQuery"/>.
-    ///
-    ///
+    /// Only return those matches that have a specific payload at
+    /// the given position.
+    /// <para/>
+    /// Do not use this with a <see cref="SpanQuery"/> that contains a <see cref="Lucene.Net.Search.Spans.SpanNearQuery"/>.  Instead, use
+    /// <see cref="SpanNearPayloadCheckQuery"/> since it properly handles the fact that payloads
+    /// aren't ordered by <see cref="Lucene.Net.Search.Spans.SpanNearQuery"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -44,7 +41,7 @@ namespace Lucene.Net.Search.Spans
 
         ///
         /// <param name="match"> The underlying <see cref="SpanQuery"/> to check </param>
-        /// <param name="payloadToMatch"> The <see cref="T:ICollection<byte[]>"/> of payloads to match. 
+        /// <param name="payloadToMatch"> The <see cref="T:ICollection{byte[]}"/> of payloads to match. 
         /// IMPORTANT: If the type provided does not implement <see cref="IList{T}"/> (including arrays), 
         /// <see cref="ISet{T}"/>, or <see cref="IDictionary{TKey, TValue}"/>, it should provide an 
         /// <see cref="object.Equals(object)"/> and <see cref="object.GetHashCode()"/> implementation 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/396db51b/src/Lucene.Net/Search/Spans/SpanPositionCheckQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Spans/SpanPositionCheckQuery.cs b/src/Lucene.Net/Search/Spans/SpanPositionCheckQuery.cs
index 492cbdb..ec66553 100644
--- a/src/Lucene.Net/Search/Spans/SpanPositionCheckQuery.cs
+++ b/src/Lucene.Net/Search/Spans/SpanPositionCheckQuery.cs
@@ -27,8 +27,7 @@ namespace Lucene.Net.Search.Spans
     using TermContext = Lucene.Net.Index.TermContext;
 
     /// <summary>
-    /// Base class for filtering a SpanQuery based on the position of a match.
-    ///
+    /// Base class for filtering a <see cref="SpanQuery"/> based on the position of a match.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -42,9 +41,9 @@ namespace Lucene.Net.Search.Spans
             this.m_match = match;
         }
 
-        /// <returns> the SpanQuery whose matches are filtered.
-        ///
-        ///  </returns>
+        /// <returns> 
+        /// The <see cref="SpanQuery"/> whose matches are filtered.
+        /// </returns>
         public virtual SpanQuery Match
         {
             get
@@ -67,7 +66,7 @@ namespace Lucene.Net.Search.Spans
         }
 
         /// <summary>
-        /// Return value for <seealso cref="SpanPositionCheckQuery#acceptPosition(Spans)"/>.
+        /// Return value for <see cref="SpanPositionCheckQuery.AcceptPosition(Spans)"/>.
         /// </summary>
         protected internal enum AcceptStatus
         {
@@ -88,17 +87,15 @@ namespace Lucene.Net.Search.Spans
 
         /// <summary>
         /// Implementing classes are required to return whether the current position is a match for the passed in
-        /// "match" <seealso cref="Lucene.Net.Search.Spans.SpanQuery"/>.
-        ///
-        /// this is only called if the underlying <seealso cref="Lucene.Net.Search.Spans.Spans#next()"/> for the
+        /// "match" <see cref="Lucene.Net.Search.Spans.SpanQuery"/>.
+        /// <para/>
+        /// This is only called if the underlying <see cref="Lucene.Net.Search.Spans.Spans.Next()"/> for the
         /// match is successful
-        ///
         /// </summary>
-        /// <param name="spans"> The <seealso cref="Lucene.Net.Search.Spans.Spans"/> instance, positioned at the spot to check </param>
-        /// <returns> whether the match is accepted, rejected, or rejected and should move to the next doc.
+        /// <param name="spans"> The <see cref="Lucene.Net.Search.Spans.Spans"/> instance, positioned at the spot to check </param>
+        /// <returns> Whether the match is accepted, rejected, or rejected and should move to the next doc.
         /// </returns>
-        /// <seealso cref= Lucene.Net.Search.Spans.Spans#next()
-        ///  </seealso>
+        /// <seealso cref="Lucene.Net.Search.Spans.Spans.Next()"/>
         protected abstract AcceptStatus AcceptPosition(Spans spans);
 
         public override Spans GetSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts)
@@ -198,8 +195,8 @@ namespace Lucene.Net.Search.Spans
                 get { return spans.Start; }
             }
 
-            public override int End
             // TODO: Remove warning after API has been finalized
+            public override int End
             {
                 get { return spans.End; }
             }
@@ -215,7 +212,6 @@ namespace Lucene.Net.Search.Spans
             }
 
             // TODO: Remove warning after API has been finalized
-
             public override bool IsPayloadAvailable
             {
                 get

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/396db51b/src/Lucene.Net/Search/Spans/SpanPositionRangeQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Spans/SpanPositionRangeQuery.cs b/src/Lucene.Net/Search/Spans/SpanPositionRangeQuery.cs
index dc54154..4eb3285 100644
--- a/src/Lucene.Net/Search/Spans/SpanPositionRangeQuery.cs
+++ b/src/Lucene.Net/Search/Spans/SpanPositionRangeQuery.cs
@@ -25,9 +25,9 @@ namespace Lucene.Net.Search.Spans
     using ToStringUtils = Lucene.Net.Util.ToStringUtils;
 
     /// <summary>
-    /// Checks to see if the <seealso cref="#getMatch()"/> lies between a start and end position
+    /// Checks to see if the <see cref="SpanPositionCheckQuery.Match"/> lies between a start and end position
     /// </summary>
-    /// <seealso cref= Lucene.Net.Search.Spans.SpanFirstQuery for a derivation that is optimized for the case where start position is 0 </seealso>
+    /// <seealso cref="Lucene.Net.Search.Spans.SpanFirstQuery">for a derivation that is optimized for the case where start position is 0</seealso>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -69,7 +69,7 @@ namespace Lucene.Net.Search.Spans
             }
         }
 
-        /// <returns> the maximum end position permitted in a match. </returns>
+        /// <returns> The maximum end position permitted in a match. </returns>
         public virtual int End
         {
             get

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/396db51b/src/Lucene.Net/Search/Spans/SpanQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Spans/SpanQuery.cs b/src/Lucene.Net/Search/Spans/SpanQuery.cs
index 2ec08fd..976b343 100644
--- a/src/Lucene.Net/Search/Spans/SpanQuery.cs
+++ b/src/Lucene.Net/Search/Spans/SpanQuery.cs
@@ -40,8 +40,8 @@ namespace Lucene.Net.Search.Spans
 
         /// <summary>
         /// Returns the name of the field matched by this query.
-        /// <p>
-        /// Note that this may return null if the query matches no terms.
+        /// <para/>
+        /// Note that this may return <c>null</c> if the query matches no terms.
         /// </summary>
         public abstract string Field { get; }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/396db51b/src/Lucene.Net/Search/Spans/SpanScorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Spans/SpanScorer.cs b/src/Lucene.Net/Search/Spans/SpanScorer.cs
index 7590003..cf13628 100644
--- a/src/Lucene.Net/Search/Spans/SpanScorer.cs
+++ b/src/Lucene.Net/Search/Spans/SpanScorer.cs
@@ -110,7 +110,8 @@ namespace Lucene.Net.Search.Spans
 
         /// <summary>
         /// Returns the intermediate "sloppy freq" adjusted for edit distance
-        ///  @lucene.internal
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
         // only public so .payloads can see it.
         public virtual float SloppyFreq

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/396db51b/src/Lucene.Net/Search/Spans/SpanTermQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Spans/SpanTermQuery.cs b/src/Lucene.Net/Search/Spans/SpanTermQuery.cs
index d053dbd..71bd0c5 100644
--- a/src/Lucene.Net/Search/Spans/SpanTermQuery.cs
+++ b/src/Lucene.Net/Search/Spans/SpanTermQuery.cs
@@ -43,7 +43,7 @@ namespace Lucene.Net.Search.Spans
         protected Term m_term;
 
         /// <summary>
-        /// Construct a SpanTermQuery matching the named term's spans. </summary>
+        /// Construct a <see cref="SpanTermQuery"/> matching the named term's spans. </summary>
         public SpanTermQuery(Term term)
         {
             this.m_term = term;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/396db51b/src/Lucene.Net/Search/Spans/Spans.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Spans/Spans.cs b/src/Lucene.Net/Search/Spans/Spans.cs
index abd9e38..df9ef5d 100644
--- a/src/Lucene.Net/Search/Spans/Spans.cs
+++ b/src/Lucene.Net/Search/Spans/Spans.cs
@@ -32,25 +32,29 @@ namespace Lucene.Net.Search.Spans
     public abstract class Spans
     {
         /// <summary>
-        /// Move to the next match, returning true iff any such exists. </summary>
+        /// Move to the next match, returning true if any such exists. </summary>
         public abstract bool Next();
 
         /// <summary>
         /// Skips to the first match beyond the current, whose document number is
         /// greater than or equal to <i>target</i>.
-        /// <p>The behavior of this method is <b>undefined</b> when called with
-        /// <code> target &lt;= current</code>, or after the iterator has exhausted.
+        /// <para/>The behavior of this method is <b>undefined</b> when called with
+        /// <c> target &lt;= current</c>, or after the iterator has exhausted.
         /// Both cases may result in unpredicted behavior.
-        /// <p>Returns true iff there is such
-        /// a match.  <p>Behaves as if written: <pre class="prettyprint">
-        ///   boolean skipTo(int target) {
-        ///     do {
-        ///       if (!next())
-        ///         return false;
-        ///     } while (target > doc());
-        ///     return true;
-        ///   }
-        /// </pre>
+        /// <para/>Returns <c>true</c> if there is such
+        /// a match.  
+        /// <para/>Behaves as if written: 
+        /// <code>
+        ///     bool SkipTo(int target) 
+        ///     {
+        ///         do 
+        ///         {
+        ///             if (!Next())
+        ///                 return false;
+        ///         } while (target > Doc);
+        ///         return true;
+        ///     }
+        /// </code>
         /// Most implementations are considerably more efficient than that.
         /// </summary>
         public abstract bool SkipTo(int target);
@@ -69,38 +73,39 @@ namespace Lucene.Net.Search.Spans
 
         /// <summary>
         /// Returns the payload data for the current span.
-        /// this is invalid until <seealso cref="#next()"/> is called for
+        /// this is invalid until <see cref="Next()"/> is called for
         /// the first time.
-        /// this method must not be called more than once after each call
-        /// of <seealso cref="#next()"/>. However, most payloads are loaded lazily,
+        /// This method must not be called more than once after each call
+        /// of <see cref="Next()"/>. However, most payloads are loaded lazily,
         /// so if the payload data for the current position is not needed,
         /// this method may not be called at all for performance reasons. An ordered
         /// SpanQuery does not lazy load, so if you have payloads in your index and
         /// you do not want ordered SpanNearQuerys to collect payloads, you can
-        /// disable collection with a constructor option.<br>
-        /// <br>
+        /// disable collection with a constructor option.
+        /// <para/>
         /// Note that the return type is a collection, thus the ordering should not be relied upon.
-        /// <br/>
+        /// <para/>
         /// @lucene.experimental
         /// </summary>
-        /// <returns> a List of byte arrays containing the data of this payload, otherwise null if isPayloadAvailable is false </returns>
-        /// <exception cref="IOException"> if there is a low-level I/O error </exception>
+        /// <returns> A <see cref="T:ICollection{byte[]}"/> of byte arrays containing the data of this payload, 
+        /// otherwise <c>null</c> if <see cref="IsPayloadAvailable"/> is <c>false</c> </returns>
+        /// <exception cref="System.IO.IOException"> if there is a low-level I/O error </exception>
         // TODO: Remove warning after API has been finalized
         public abstract ICollection<byte[]> GetPayload();
 
         /// <summary>
         /// Checks if a payload can be loaded at this position.
-        /// <p/>
+        /// <para/>
         /// Payloads can only be loaded once per call to
-        /// <seealso cref="#next()"/>.
+        /// <see cref="Next()"/>.
         /// </summary>
-        /// <returns> true if there is a payload available at this position that can be loaded </returns>
+        /// <returns> <c>true</c> if there is a payload available at this position that can be loaded </returns>
         public abstract bool IsPayloadAvailable { get; }
 
         /// <summary>
         /// Returns the estimated cost of this spans.
-        /// <p>
-        /// this is generally an upper bound of the number of documents this iterator
+        /// <para/>
+        /// This is generally an upper bound of the number of documents this iterator
         /// might match, but may be a rough heuristic, hardcoded value, or otherwise
         /// completely inaccurate.
         /// </summary>


[43/48] lucenenet git commit: Lucene.Net.Codecs.Compressing: Fixed XML documentation comment warnings

Posted by ni...@apache.org.
Lucene.Net.Codecs.Compressing: Fixed XML documentation comment warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/5dc5193a
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/5dc5193a
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/5dc5193a

Branch: refs/heads/master
Commit: 5dc5193a89e01c703095c653325f4b665aa386b9
Parents: 95b5d4b
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Mon Jun 5 15:08:06 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Tue Jun 6 06:58:42 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |  5 +-
 .../CompressingStoredFieldsFormat.cs            | 61 ++++++++++----------
 .../CompressingStoredFieldsIndexReader.cs       |  3 +-
 .../CompressingStoredFieldsIndexWriter.cs       | 49 ++++++++--------
 .../CompressingStoredFieldsReader.cs            |  9 +--
 .../CompressingStoredFieldsWriter.cs            |  5 +-
 .../Compressing/CompressingTermVectorsFormat.cs | 35 +++++------
 .../Compressing/CompressingTermVectorsReader.cs |  5 +-
 .../Compressing/CompressingTermVectorsWriter.cs |  9 +--
 .../Codecs/Compressing/CompressionMode.cs       |  9 +--
 src/Lucene.Net/Codecs/Compressing/Compressor.cs |  4 +-
 .../Codecs/Compressing/Decompressor.cs          | 22 +++----
 src/Lucene.Net/Codecs/Compressing/LZ4.cs        | 20 +++----
 13 files changed, 123 insertions(+), 113 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5dc5193a/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5c39dc4..36b780b 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -50,9 +50,8 @@ helpers to help with that, see for examples see our [Java style methods to avoid
 
 ### Documentation Comments == up for grabs:
 
-1. Lucene.Net.Core (project)
-   1. Codecs.Compressing (namespace)
-   2. Util.Packed (namespace)
+1. Lucene.Net (project)
+   1. Util.Packed (namespace)
 2. Lucene.Net.Codecs (project)
    1. Appending (namespace)
    2. BlockTerms (namespace)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5dc5193a/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsFormat.cs b/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsFormat.cs
index 2577368..c88d8e7 100644
--- a/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsFormat.cs
+++ b/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsFormat.cs
@@ -23,17 +23,18 @@ namespace Lucene.Net.Codecs.Compressing
     using SegmentInfo = Lucene.Net.Index.SegmentInfo;
 
     /// <summary>
-    /// A <seealso cref="StoredFieldsFormat"/> that is very similar to
-    /// <seealso cref="Lucene40StoredFieldsFormat"/> but compresses documents in chunks in
+    /// A <see cref="StoredFieldsFormat"/> that is very similar to
+    /// <see cref="Lucene40.Lucene40StoredFieldsFormat"/> but compresses documents in chunks in
     /// order to improve the compression ratio.
-    /// <p>
-    /// For a chunk size of <tt>chunkSize</tt> bytes, this <seealso cref="StoredFieldsFormat"/>
-    /// does not support documents larger than (<tt>2<sup>31</sup> - chunkSize</tt>)
+    /// <para/>
+    /// For a chunk size of <c>chunkSize</c> bytes, this <see cref="StoredFieldsFormat"/>
+    /// does not support documents larger than (<c>2<sup>31</sup> - chunkSize</c>)
     /// bytes. In case this is a problem, you should use another format, such as
-    /// <seealso cref="Lucene40StoredFieldsFormat"/>.
-    /// <p>
-    /// For optimal performance, you should use a <seealso cref="MergePolicy"/> that returns
+    /// <see cref="Lucene40.Lucene40StoredFieldsFormat"/>.
+    /// <para/>
+    /// For optimal performance, you should use a <see cref="Index.MergePolicy"/> that returns
     /// segments that have the biggest byte size first.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class CompressingStoredFieldsFormat : StoredFieldsFormat
@@ -44,47 +45,47 @@ namespace Lucene.Net.Codecs.Compressing
         private readonly int chunkSize;
 
         /// <summary>
-        /// Create a new <seealso cref="CompressingStoredFieldsFormat"/> with an empty segment
+        /// Create a new <see cref="CompressingStoredFieldsFormat"/> with an empty segment
         /// suffix.
         /// </summary>
-        /// <seealso cref= CompressingStoredFieldsFormat#CompressingStoredFieldsFormat(String, String, CompressionMode, int) </seealso>
+        /// <seealso cref="CompressingStoredFieldsFormat.CompressingStoredFieldsFormat(string, string, CompressionMode, int)"/>
         public CompressingStoredFieldsFormat(string formatName, CompressionMode compressionMode, int chunkSize)
             : this(formatName, "", compressionMode, chunkSize)
         {
         }
 
         /// <summary>
-        /// Create a new <seealso cref="CompressingStoredFieldsFormat"/>.
-        /// <p>
-        /// <code>formatName</code> is the name of the format. this name will be used
+        /// Create a new <see cref="CompressingStoredFieldsFormat"/>.
+        /// <para/>
+        /// <paramref name="formatName"/> is the name of the format. This name will be used
         /// in the file formats to perform
-        /// <seealso cref="CodecUtil#checkHeader(Lucene.Net.Store.DataInput, String, int, int) codec header checks"/>.
-        /// <p>
-        /// <code>segmentSuffix</code> is the segment suffix. this suffix is added to
+        /// codec header checks (<see cref="CodecUtil.CheckHeader(Lucene.Net.Store.DataInput, string, int, int)"/>).
+        /// <para/>
+        /// <paramref name="segmentSuffix"/> is the segment suffix. This suffix is added to
         /// the result file name only if it's not the empty string.
-        /// <p>
-        /// The <code>compressionMode</code> parameter allows you to choose between
+        /// <para/>
+        /// The <paramref name="compressionMode"/> parameter allows you to choose between
         /// compression algorithms that have various compression and decompression
         /// speeds so that you can pick the one that best fits your indexing and
         /// searching throughput. You should never instantiate two
-        /// <seealso cref="CompressingStoredFieldsFormat"/>s that have the same name but
-        /// different <seealso cref="compressionMode"/>s.
-        /// <p>
-        /// <code>chunkSize</code> is the minimum byte size of a chunk of documents.
-        /// A value of <code>1</code> can make sense if there is redundancy across
+        /// <see cref="CompressingStoredFieldsFormat"/>s that have the same name but
+        /// different <see cref="compressionMode"/>s.
+        /// <para/>
+        /// <paramref name="chunkSize"/> is the minimum byte size of a chunk of documents.
+        /// A value of <c>1</c> can make sense if there is redundancy across
         /// fields. In that case, both performance and compression ratio should be
-        /// better than with <seealso cref="Lucene40StoredFieldsFormat"/> with compressed
+        /// better than with <see cref="Lucene40.Lucene40StoredFieldsFormat"/> with compressed
         /// fields.
-        /// <p>
-        /// Higher values of <code>chunkSize</code> should improve the compression
+        /// <para/>
+        /// Higher values of <paramref name="chunkSize"/> should improve the compression
         /// ratio but will require more memory at indexing time and might make document
         /// loading a little slower (depending on the size of your OS cache compared
         /// to the size of your index).
         /// </summary>
-        /// <param name="formatName"> the name of the <seealso cref="StoredFieldsFormat"/> </param>
-        /// <param name="compressionMode"> the <seealso cref="compressionMode"/> to use </param>
-        /// <param name="chunkSize"> the minimum number of bytes of a single chunk of stored documents </param>
-        /// <seealso cref= compressionMode </seealso>
+        /// <param name="formatName"> The name of the <see cref="StoredFieldsFormat"/>. </param>
+        /// <param name="compressionMode"> The <see cref="CompressionMode"/> to use. </param>
+        /// <param name="chunkSize"> The minimum number of bytes of a single chunk of stored documents. </param>
+        /// <seealso cref="CompressionMode"/>
         public CompressingStoredFieldsFormat(string formatName, string segmentSuffix, CompressionMode compressionMode, int chunkSize)
         {
             this.formatName = formatName;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5dc5193a/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs b/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs
index a27fc40..fb889e6 100644
--- a/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs
+++ b/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs
@@ -27,7 +27,8 @@ namespace Lucene.Net.Codecs.Compressing
     using SegmentInfo = Lucene.Net.Index.SegmentInfo;
 
     /// <summary>
-    /// Random-access reader for <seealso cref="CompressingStoredFieldsIndexWriter"/>.
+    /// Random-access reader for <see cref="CompressingStoredFieldsIndexWriter"/>.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public sealed class CompressingStoredFieldsIndexReader

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5dc5193a/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsIndexWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsIndexWriter.cs b/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsIndexWriter.cs
index 981e476..588cc2b 100644
--- a/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsIndexWriter.cs
+++ b/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsIndexWriter.cs
@@ -24,45 +24,48 @@ namespace Lucene.Net.Codecs.Compressing
     using PackedInt32s = Lucene.Net.Util.Packed.PackedInt32s;
 
     /// <summary>
-    /// Efficient index format for block-based <seealso cref="Codec"/>s.
-    /// <p> this writer generates a file which can be loaded into memory using
+    /// Efficient index format for block-based <see cref="Codec"/>s.
+    /// <para/> This writer generates a file which can be loaded into memory using
     /// memory-efficient data structures to quickly locate the block that contains
     /// any document.
-    /// <p>In order to have a compact in-memory representation, for every block of
+    /// <para>In order to have a compact in-memory representation, for every block of
     /// 1024 chunks, this index computes the average number of bytes per
-    /// chunk and for every chunk, only stores the difference between<ul>
+    /// chunk and for every chunk, only stores the difference between
+    /// <list type="bullet">
     /// <li>${chunk number} * ${average length of a chunk}</li>
-    /// <li>and the actual start offset of the chunk</li></ul></p>
-    /// <p>Data is written as follows:</p>
-    /// <ul>
+    /// <li>and the actual start offset of the chunk</li>
+    /// </list>
+    /// </para>
+    /// <para>Data is written as follows:</para>
+    /// <list type="bullet">
     /// <li>PackedIntsVersion, &lt;Block&gt;<sup>BlockCount</sup>, BlocksEndMarker</li>
-    /// <li>PackedIntsVersion --&gt; <seealso cref="PackedInt32s#VERSION_CURRENT"/> as a <seealso cref="DataOutput#writeVInt VInt"/></li>
-    /// <li>BlocksEndMarker --&gt; <tt>0</tt> as a <seealso cref="DataOutput#writeVInt VInt"/>, this marks the end of blocks since blocks are not allowed to start with <tt>0</tt></li>
+    /// <li>PackedIntsVersion --&gt; <see cref="PackedInt32s.VERSION_CURRENT"/> as a VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </li>
+    /// <li>BlocksEndMarker --&gt; <c>0</c> as a VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>), this marks the end of blocks since blocks are not allowed to start with <c>0</c></li>
     /// <li>Block --&gt; BlockChunks, &lt;DocBases&gt;, &lt;StartPointers&gt;</li>
-    /// <li>BlockChunks --&gt; a <seealso cref="DataOutput#writeVInt VInt"/> which is the number of chunks encoded in the block</li>
+    /// <li>BlockChunks --&gt; a VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>)  which is the number of chunks encoded in the block</li>
     /// <li>DocBases --&gt; DocBase, AvgChunkDocs, BitsPerDocBaseDelta, DocBaseDeltas</li>
-    /// <li>DocBase --&gt; first document ID of the block of chunks, as a <seealso cref="DataOutput#writeVInt VInt"/></li>
-    /// <li>AvgChunkDocs --&gt; average number of documents in a single chunk, as a <seealso cref="DataOutput#writeVInt VInt"/></li>
+    /// <li>DocBase --&gt; first document ID of the block of chunks, as a VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </li>
+    /// <li>AvgChunkDocs --&gt; average number of documents in a single chunk, as a VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </li>
     /// <li>BitsPerDocBaseDelta --&gt; number of bits required to represent a delta from the average using <a href="https://developers.google.com/protocol-buffers/docs/encoding#types">ZigZag encoding</a></li>
-    /// <li>DocBaseDeltas --&gt; <seealso cref="PackedInt32s packed"/> array of BlockChunks elements of BitsPerDocBaseDelta bits each, representing the deltas from the average doc base using <a href="https://developers.google.com/protocol-buffers/docs/encoding#types">ZigZag encoding</a>.</li>
+    /// <li>DocBaseDeltas --&gt; packed (<see cref="PackedInt32s"/>) array of BlockChunks elements of BitsPerDocBaseDelta bits each, representing the deltas from the average doc base using <a href="https://developers.google.com/protocol-buffers/docs/encoding#types">ZigZag encoding</a>.</li>
     /// <li>StartPointers --&gt; StartPointerBase, AvgChunkSize, BitsPerStartPointerDelta, StartPointerDeltas</li>
-    /// <li>StartPointerBase --&gt; the first start pointer of the block, as a <seealso cref="DataOutput#writeVLong VLong"/></li>
-    /// <li>AvgChunkSize --&gt; the average size of a chunk of compressed documents, as a <seealso cref="DataOutput#writeVLong VLong"/></li>
+    /// <li>StartPointerBase --&gt; the first start pointer of the block, as a VLong (<see cref="Store.DataOutput.WriteVInt64(long)"/>) </li>
+    /// <li>AvgChunkSize --&gt; the average size of a chunk of compressed documents, as a VLong (<see cref="Store.DataOutput.WriteVInt64(long)"/>) </li>
     /// <li>BitsPerStartPointerDelta --&gt; number of bits required to represent a delta from the average using <a href="https://developers.google.com/protocol-buffers/docs/encoding#types">ZigZag encoding</a></li>
-    /// <li>StartPointerDeltas --&gt; <seealso cref="PackedInt32s packed"/> array of BlockChunks elements of BitsPerStartPointerDelta bits each, representing the deltas from the average start pointer using <a href="https://developers.google.com/protocol-buffers/docs/encoding#types">ZigZag encoding</a></li>
-    /// <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    /// </ul>
-    /// <p>Notes</p>
-    /// <ul>
+    /// <li>StartPointerDeltas --&gt; packed (<see cref="PackedInt32s"/>) array of BlockChunks elements of BitsPerStartPointerDelta bits each, representing the deltas from the average start pointer using <a href="https://developers.google.com/protocol-buffers/docs/encoding#types">ZigZag encoding</a></li>
+    /// <li>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(IndexOutput)"/>) </li>
+    /// </list>
+    /// <para>Notes</para>
+    /// <list type="bullet">
     /// <li>For any block, the doc base of the n-th chunk can be restored with
-    /// <code>DocBase + AvgChunkDocs * n + DocBaseDeltas[n]</code>.</li>
+    /// <c>DocBase + AvgChunkDocs * n + DocBaseDeltas[n]</c>.</li>
     /// <li>For any block, the start pointer of the n-th chunk can be restored with
-    /// <code>StartPointerBase + AvgChunkSize * n + StartPointerDeltas[n]</code>.</li>
+    /// <c>StartPointerBase + AvgChunkSize * n + StartPointerDeltas[n]</c>.</li>
     /// <li>Once data is loaded into memory, you can lookup the start pointer of any
     /// document by performing two binary searches: a first one based on the values
     /// of DocBase in order to find the right block, and then inside the block based
     /// on DocBaseDeltas (by reconstructing the doc bases for every chunk).</li>
-    /// </ul>
+    /// </list>
     /// @lucene.internal
     /// </summary>
     public sealed class CompressingStoredFieldsIndexWriter : IDisposable

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5dc5193a/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsReader.cs b/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsReader.cs
index 335aa08..b2fbb74 100644
--- a/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsReader.cs
+++ b/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsReader.cs
@@ -43,7 +43,8 @@ namespace Lucene.Net.Codecs.Compressing
     using StoredFieldVisitor = Lucene.Net.Index.StoredFieldVisitor;
 
     /// <summary>
-    /// <seealso cref="StoredFieldsReader"/> impl for <seealso cref="CompressingStoredFieldsFormat"/>.
+    /// <see cref="StoredFieldsReader"/> impl for <see cref="CompressingStoredFieldsFormat"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public sealed class CompressingStoredFieldsReader : StoredFieldsReader
@@ -163,7 +164,7 @@ namespace Lucene.Net.Codecs.Compressing
             }
         }
 
-        /// <exception cref="ObjectDisposedException"> if this FieldsReader is closed </exception>
+        /// <exception cref="ObjectDisposedException"> If this FieldsReader is disposed. </exception>
         private void EnsureOpen()
         {
             if (closed)
@@ -173,7 +174,7 @@ namespace Lucene.Net.Codecs.Compressing
         }
 
         /// <summary>
-        /// Close the underlying <seealso cref="IndexInput"/>s.
+        /// Dispose the underlying <see cref="IndexInput"/>s.
         /// </summary>
         protected override void Dispose(bool disposing)
         {
@@ -501,7 +502,7 @@ namespace Lucene.Net.Codecs.Compressing
             }
 
             /// <summary>
-            /// Go to the chunk containing the provided doc ID.
+            /// Go to the chunk containing the provided <paramref name="doc"/> ID.
             /// </summary>
             internal void Next(int doc)
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5dc5193a/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsWriter.cs b/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsWriter.cs
index 4f8f949..465e1d1 100644
--- a/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsWriter.cs
+++ b/src/Lucene.Net/Codecs/Compressing/CompressingStoredFieldsWriter.cs
@@ -28,7 +28,8 @@ namespace Lucene.Net.Codecs.Compressing
      */
 
     /// <summary>
-    /// <seealso cref="StoredFieldsWriter"/> impl for <seealso cref="CompressingStoredFieldsFormat"/>.
+    /// <see cref="StoredFieldsWriter"/> impl for <see cref="CompressingStoredFieldsFormat"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public sealed class CompressingStoredFieldsWriter : StoredFieldsWriter
@@ -167,7 +168,7 @@ namespace Lucene.Net.Codecs.Compressing
         }
 
         /// <summary>
-        /// NOTE: This was saveInts() in Lucene
+        /// NOTE: This was saveInts() in Lucene.
         /// </summary>
         private static void SaveInt32s(int[] values, int length, DataOutput @out)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5dc5193a/src/Lucene.Net/Codecs/Compressing/CompressingTermVectorsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Compressing/CompressingTermVectorsFormat.cs b/src/Lucene.Net/Codecs/Compressing/CompressingTermVectorsFormat.cs
index 8952cc5..7d4a22e 100644
--- a/src/Lucene.Net/Codecs/Compressing/CompressingTermVectorsFormat.cs
+++ b/src/Lucene.Net/Codecs/Compressing/CompressingTermVectorsFormat.cs
@@ -24,8 +24,9 @@ namespace Lucene.Net.Codecs.Compressing
     using SegmentInfo = Lucene.Net.Index.SegmentInfo;
 
     /// <summary>
-    /// A <seealso cref="TermVectorsFormat"/> that compresses chunks of documents together in
+    /// A <see cref="TermVectorsFormat"/> that compresses chunks of documents together in
     /// order to improve the compression ratio.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class CompressingTermVectorsFormat : TermVectorsFormat
@@ -36,30 +37,30 @@ namespace Lucene.Net.Codecs.Compressing
         private readonly int chunkSize;
 
         /// <summary>
-        /// Create a new <seealso cref="CompressingTermVectorsFormat"/>.
-        /// <p>
-        /// <code>formatName</code> is the name of the format. this name will be used
+        /// Create a new <see cref="CompressingTermVectorsFormat"/>.
+        /// <para/>
+        /// <paramref name="formatName"/> is the name of the format. This name will be used
         /// in the file formats to perform
-        /// <seealso cref="CodecUtil#checkHeader(Lucene.Net.Store.DataInput, String, int, int) codec header checks"/>.
-        /// <p>
-        /// The <code>compressionMode</code> parameter allows you to choose between
+        /// codec header checks (<see cref="CodecUtil.CheckHeader(Lucene.Net.Store.DataInput, string, int, int)"/>).
+        /// <para/>
+        /// The <paramref name="compressionMode"/> parameter allows you to choose between
         /// compression algorithms that have various compression and decompression
         /// speeds so that you can pick the one that best fits your indexing and
         /// searching throughput. You should never instantiate two
-        /// <seealso cref="CompressingTermVectorsFormat"/>s that have the same name but
-        /// different <seealso cref="compressionMode"/>s.
-        /// <p>
-        /// <code>chunkSize</code> is the minimum byte size of a chunk of documents.
-        /// Higher values of <code>chunkSize</code> should improve the compression
+        /// <see cref="CompressingTermVectorsFormat"/>s that have the same name but
+        /// different <see cref="CompressionMode"/>s.
+        /// <para/>
+        /// <paramref name="chunkSize"/> is the minimum byte size of a chunk of documents.
+        /// Higher values of <paramref name="chunkSize"/> should improve the compression
         /// ratio but will require more memory at indexing time and might make document
         /// loading a little slower (depending on the size of your OS cache compared
         /// to the size of your index).
         /// </summary>
-        /// <param name="formatName"> the name of the <seealso cref="StoredFieldsFormat"/> </param>
-        /// <param name="segmentSuffix"> a suffix to append to files created by this format </param>
-        /// <param name="compressionMode"> the <seealso cref="compressionMode"/> to use </param>
-        /// <param name="chunkSize"> the minimum number of bytes of a single chunk of stored documents </param>
-        /// <seealso cref= compressionMode </seealso>
+        /// <param name="formatName"> The name of the <see cref="StoredFieldsFormat"/>. </param>
+        /// <param name="segmentSuffix"> A suffix to append to files created by this format. </param>
+        /// <param name="compressionMode"> The <see cref="CompressionMode"/> to use. </param>
+        /// <param name="chunkSize"> The minimum number of bytes of a single chunk of stored documents. </param>
+        /// <seealso cref="CompressionMode"/>
         public CompressingTermVectorsFormat(string formatName, string segmentSuffix, CompressionMode compressionMode, int chunkSize)
         {
             this.formatName = formatName;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5dc5193a/src/Lucene.Net/Codecs/Compressing/CompressingTermVectorsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Compressing/CompressingTermVectorsReader.cs b/src/Lucene.Net/Codecs/Compressing/CompressingTermVectorsReader.cs
index 2d8ea75..5b69663 100644
--- a/src/Lucene.Net/Codecs/Compressing/CompressingTermVectorsReader.cs
+++ b/src/Lucene.Net/Codecs/Compressing/CompressingTermVectorsReader.cs
@@ -29,7 +29,8 @@ namespace Lucene.Net.Codecs.Compressing
      */
 
     /// <summary>
-    /// <seealso cref="TermVectorsReader"/> for <seealso cref="CompressingTermVectorsFormat"/>.
+    /// <see cref="TermVectorsReader"/> for <see cref="CompressingTermVectorsFormat"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public sealed class CompressingTermVectorsReader : TermVectorsReader, IDisposable
@@ -174,7 +175,7 @@ namespace Lucene.Net.Codecs.Compressing
             }
         }
 
-        /// <exception cref="ObjectDisposedException"> if this TermVectorsReader is closed </exception>
+        /// <exception cref="ObjectDisposedException"> If this <see cref="TermVectorsReader"/> is disposed. </exception>
         private void EnsureOpen()
         {
             if (closed)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5dc5193a/src/Lucene.Net/Codecs/Compressing/CompressingTermVectorsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Compressing/CompressingTermVectorsWriter.cs b/src/Lucene.Net/Codecs/Compressing/CompressingTermVectorsWriter.cs
index 042c319..2d23dbc 100644
--- a/src/Lucene.Net/Codecs/Compressing/CompressingTermVectorsWriter.cs
+++ b/src/Lucene.Net/Codecs/Compressing/CompressingTermVectorsWriter.cs
@@ -48,7 +48,8 @@ namespace Lucene.Net.Codecs.Compressing
     using StringHelper = Lucene.Net.Util.StringHelper;
 
     /// <summary>
-    /// <seealso cref="TermVectorsWriter"/> for <seealso cref="CompressingTermVectorsFormat"/>.
+    /// <see cref="TermVectorsWriter"/> for <see cref="CompressingTermVectorsFormat"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public sealed class CompressingTermVectorsWriter : TermVectorsWriter
@@ -84,7 +85,7 @@ namespace Lucene.Net.Codecs.Compressing
         private readonly int chunkSize;
 
         /// <summary>
-        /// a pending doc </summary>
+        /// A pending doc. </summary>
         private class DocData
         {
             private readonly CompressingTermVectorsWriter outerInstance;
@@ -152,7 +153,7 @@ namespace Lucene.Net.Codecs.Compressing
         }
 
         /// <summary>
-        /// a pending field </summary>
+        /// A pending field. </summary>
         private class FieldData
         {
             private readonly CompressingTermVectorsWriter outerInstance;
@@ -441,7 +442,7 @@ namespace Lucene.Net.Codecs.Compressing
         }
 
         /// <summary>
-        /// Returns a sorted array containing unique field numbers </summary>
+        /// Returns a sorted array containing unique field numbers. </summary>
         private int[] FlushFieldNums()
         {
             SortedSet<int> fieldNums = new SortedSet<int>();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5dc5193a/src/Lucene.Net/Codecs/Compressing/CompressionMode.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Compressing/CompressionMode.cs b/src/Lucene.Net/Codecs/Compressing/CompressionMode.cs
index ce0857c..dda993a 100644
--- a/src/Lucene.Net/Codecs/Compressing/CompressionMode.cs
+++ b/src/Lucene.Net/Codecs/Compressing/CompressionMode.cs
@@ -30,6 +30,7 @@ namespace Lucene.Net.Codecs.Compressing
     /// <summary>
     /// A compression mode. Tells how much effort should be spent on compression and
     /// decompression of stored fields.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public abstract class CompressionMode
@@ -95,8 +96,8 @@ namespace Lucene.Net.Codecs.Compressing
         }
 
         /// <summary>
-        /// this compression mode is similar to <seealso cref="#FAST"/> but it spends more time
-        /// compressing in order to improve the compression ratio. this compression
+        /// This compression mode is similar to <see cref="FAST"/> but it spends more time
+        /// compressing in order to improve the compression ratio. This compression
         /// mode is best used with indices that have a low update rate but should be
         /// able to load documents from disk quickly.
         /// </summary>
@@ -131,12 +132,12 @@ namespace Lucene.Net.Codecs.Compressing
         }
 
         /// <summary>
-        /// Create a new <seealso cref="Compressor"/> instance.
+        /// Create a new <see cref="Compressor"/> instance.
         /// </summary>
         public abstract Compressor NewCompressor();
 
         /// <summary>
-        /// Create a new <seealso cref="Decompressor"/> instance.
+        /// Create a new <see cref="Decompressor"/> instance.
         /// </summary>
         public abstract Decompressor NewDecompressor();
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5dc5193a/src/Lucene.Net/Codecs/Compressing/Compressor.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Compressing/Compressor.cs b/src/Lucene.Net/Codecs/Compressing/Compressor.cs
index 666e90a..0f0e523 100644
--- a/src/Lucene.Net/Codecs/Compressing/Compressor.cs
+++ b/src/Lucene.Net/Codecs/Compressing/Compressor.cs
@@ -31,8 +31,8 @@ namespace Lucene.Net.Codecs.Compressing
         }
 
         /// <summary>
-        /// Compress bytes into <code>out</code>. It it the responsibility of the
-        /// compressor to add all necessary information so that a <seealso cref="Decompressor"/>
+        /// Compress bytes into <paramref name="out"/>. It is the responsibility of the
+        /// compressor to add all necessary information so that a <see cref="Decompressor"/>
         /// will know when to stop decompressing bytes from the stream.
         /// </summary>
         public abstract void Compress(byte[] bytes, int off, int len, DataOutput @out);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5dc5193a/src/Lucene.Net/Codecs/Compressing/Decompressor.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Compressing/Decompressor.cs b/src/Lucene.Net/Codecs/Compressing/Decompressor.cs
index d1e0641..726841d 100644
--- a/src/Lucene.Net/Codecs/Compressing/Decompressor.cs
+++ b/src/Lucene.Net/Codecs/Compressing/Decompressor.cs
@@ -33,18 +33,18 @@ namespace Lucene.Net.Codecs.Compressing
         }
 
         /// <summary>
-        /// Decompress bytes that were stored between offsets <code>offset</code> and
-        /// <code>offset+length</code> in the original stream from the compressed
-        /// stream <code>in</code> to <code>bytes</code>. After returning, the length
-        /// of <code>bytes</code> (<code>bytes.length</code>) must be equal to
-        /// <code>length</code>. Implementations of this method are free to resize
-        /// <code>bytes</code> depending on their needs.
+        /// Decompress bytes that were stored between offsets <paramref name="offset"/> and
+        /// <c>offset+length</c> in the original stream from the compressed
+        /// stream <paramref name="in"/> to <paramref name="bytes"/>. After returning, the length
+        /// of <paramref name="bytes"/> (<c>bytes.Length</c>) must be equal to
+        /// <paramref name="length"/>. Implementations of this method are free to resize
+        /// <paramref name="bytes"/> depending on their needs.
         /// </summary>
-        /// <param name="in"> the input that stores the compressed stream </param>
-        /// <param name="originalLength"> the length of the original data (before compression) </param>
-        /// <param name="offset"> bytes before this offset do not need to be decompressed </param>
-        /// <param name="length"> bytes after <code>offset+length</code> do not need to be decompressed </param>
-        /// <param name="bytes"> a <seealso cref="BytesRef"/> where to store the decompressed data </param>
+        /// <param name="in"> The input that stores the compressed stream. </param>
+        /// <param name="originalLength"> The length of the original data (before compression). </param>
+        /// <param name="offset"> Bytes before this offset do not need to be decompressed. </param>
+        /// <param name="length"> Bytes after <c>offset+length</c> do not need to be decompressed. </param>
+        /// <param name="bytes"> a <see cref="BytesRef"/> where to store the decompressed data. </param>
         public abstract void Decompress(DataInput @in, int originalLength, int offset, int length, BytesRef bytes);
 
         public abstract object Clone();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5dc5193a/src/Lucene.Net/Codecs/Compressing/LZ4.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Compressing/LZ4.cs b/src/Lucene.Net/Codecs/Compressing/LZ4.cs
index cf05994..0c4e38d 100644
--- a/src/Lucene.Net/Codecs/Compressing/LZ4.cs
+++ b/src/Lucene.Net/Codecs/Compressing/LZ4.cs
@@ -27,7 +27,7 @@ namespace Lucene.Net.Codecs.Compressing
 
     /// <summary>
     /// LZ4 compression and decompression routines.
-    ///
+    /// <para/>
     /// http://code.google.com/p/lz4/
     /// http://fastcompression.blogspot.fr/p/lz4.html
     /// </summary>
@@ -56,7 +56,7 @@ namespace Lucene.Net.Codecs.Compressing
         }
 
         /// <summary>
-        /// NOTE: This was readInt() in Lucene
+        /// NOTE: This was readInt() in Lucene.
         /// </summary>
         private static int ReadInt32(byte[] buf, int i)
         {
@@ -65,7 +65,7 @@ namespace Lucene.Net.Codecs.Compressing
         }
 
         /// <summary>
-        /// NOTE: This was readIntEquals() in Lucene
+        /// NOTE: This was readIntEquals() in Lucene.
         /// </summary>
         private static bool ReadInt32Equals(byte[] buf, int i, int j)
         {
@@ -94,8 +94,8 @@ namespace Lucene.Net.Codecs.Compressing
         }
 
         /// <summary>
-        /// Decompress at least <code>decompressedLen</code> bytes into
-        /// <code>dest[dOff:]</code>. Please note that <code>dest</code> must be large
+        /// Decompress at least <paramref name="decompressedLen"/> bytes into
+        /// <c>dest[dOff]</c>. Please note that <paramref name="dest"/> must be large
         /// enough to be able to hold <b>all</b> decompressed data (meaning that you
         /// need to know the total decompressed length).
         /// </summary>
@@ -241,8 +241,8 @@ namespace Lucene.Net.Codecs.Compressing
         }
 
         /// <summary>
-        /// Compress <code>bytes[off:off+len]</code> into <code>out</code> using
-        /// at most 16KB of memory. <code>ht</code> shouldn't be shared across threads
+        /// Compress <c>bytes[off:off+len]</c> into <paramref name="out"/> using
+        /// at most 16KB of memory. <paramref name="ht"/> shouldn't be shared across threads
         /// but can safely be reused.
         /// </summary>
         public static void Compress(byte[] bytes, int off, int len, DataOutput @out, HashTable ht)
@@ -475,12 +475,12 @@ namespace Lucene.Net.Codecs.Compressing
         }
 
         /// <summary>
-        /// Compress <code>bytes[off:off+len]</code> into <code>out</code>. Compared to
-        /// <seealso cref="LZ4#compress(byte[], int, int, DataOutput, HashTable)"/>, this method
+        /// Compress <c>bytes[off:off+len]</c> into <paramref name="out"/>. Compared to
+        /// <see cref="LZ4.Compress(byte[], int, int, DataOutput, HashTable)"/>, this method
         /// is slower and uses more memory (~ 256KB per thread) but should provide
         /// better compression ratios (especially on large inputs) because it chooses
         /// the best match among up to 256 candidates and then performs trade-offs to
-        /// fix overlapping matches. <code>ht</code> shouldn't be shared across threads
+        /// fix overlapping matches. <paramref name="ht"/> shouldn't be shared across threads
         /// but can safely be reused.
         /// </summary>
         public static void CompressHC(byte[] src, int srcOff, int srcLen, DataOutput @out, HCHashTable ht)


[31/48] lucenenet git commit: Lucene.Net.Util.Automaton: Fixed XML documentation comments

Posted by ni...@apache.org.
Lucene.Net.Util.Automaton: Fixed XML documentation comments


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/9bd4dc81
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/9bd4dc81
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/9bd4dc81

Branch: refs/heads/master
Commit: 9bd4dc81e541e325d17e742090b97a5e85693690
Parents: 7303348
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Mon Jun 5 02:26:30 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Mon Jun 5 06:16:27 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |   5 +-
 src/Lucene.Net/Util/Automaton/Automaton.cs      | 137 +++--
 .../Util/Automaton/AutomatonProvider.cs         |  12 +-
 src/Lucene.Net/Util/Automaton/BasicAutomata.cs  |  30 +-
 .../Util/Automaton/BasicOperations.cs           |  77 +--
 .../Util/Automaton/ByteRunAutomaton.cs          |   6 +-
 .../Util/Automaton/CharacterRunAutomaton.cs     |   6 +-
 .../Util/Automaton/CompiledAutomaton.cs         |  35 +-
 .../Automaton/DaciukMihovAutomatonBuilder.cs    |  50 +-
 .../Util/Automaton/LevenshteinAutomata.cs       |  55 +-
 .../Util/Automaton/MinimizationOperations.cs    |   4 +-
 src/Lucene.Net/Util/Automaton/RegExp.cs         | 576 ++++++++++---------
 src/Lucene.Net/Util/Automaton/RunAutomaton.cs   |  13 +-
 src/Lucene.Net/Util/Automaton/SortedIntSet.cs   |   6 +-
 .../Util/Automaton/SpecialOperations.cs         |  20 +-
 src/Lucene.Net/Util/Automaton/State.cs          |  44 +-
 src/Lucene.Net/Util/Automaton/StatePair.cs      |  18 +-
 src/Lucene.Net/Util/Automaton/Transition.cs     |  26 +-
 src/Lucene.Net/Util/Automaton/UTF32ToUTF8.cs    |  11 +-
 19 files changed, 567 insertions(+), 564 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 0f04d03..d387b33 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -52,9 +52,8 @@ helpers to help with that, see for examples see our [Java style methods to avoid
 
 1. Lucene.Net.Core (project)
    1. Codecs (namespace)
-   2. Util.Automaton (namespace)
-   3. Util.Mutable (namespace)
-   4. Util.Packed (namespace)
+   2. Util.Mutable (namespace)
+   3. Util.Packed (namespace)
 2. Lucene.Net.Codecs (project)
 
 See [Documenting Lucene.Net](https://cwiki.apache.org/confluence/display/LUCENENET/Documenting+Lucene.Net) for instructions. 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/src/Lucene.Net/Util/Automaton/Automaton.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Automaton/Automaton.cs b/src/Lucene.Net/Util/Automaton/Automaton.cs
index 5741337..497fd97 100644
--- a/src/Lucene.Net/Util/Automaton/Automaton.cs
+++ b/src/Lucene.Net/Util/Automaton/Automaton.cs
@@ -39,35 +39,35 @@ namespace Lucene.Net.Util.Automaton
 {
     /// <summary>
     /// Finite-state automaton with regular expression operations.
-    /// <p>
+    /// <para/>
     /// Class invariants:
-    /// <ul>
-    /// <li>An automaton is either represented explicitly (with <seealso cref="State"/> and
-    /// <seealso cref="Transition"/> objects) or with a singleton string (see
-    /// <seealso cref="Singleton"/> and <seealso cref="ExpandSingleton()"/>) in case the automaton
-    /// is known to accept exactly one string. (Implicitly, all states and
-    /// transitions of an automaton are reachable from its initial state.)
-    /// <li>Automata are always reduced (see <seealso cref="#reduce()"/>) and have no
-    /// transitions to dead states (see <seealso cref="#removeDeadTransitions()"/>).
-    /// <li>If an automaton is nondeterministic, then <seealso cref="#isDeterministic()"/>
-    /// returns false (but the converse is not required).
-    /// <li>Automata provided as input to operations are generally assumed to be
-    /// disjoint.
-    /// </ul>
-    /// <p>
+    /// <list type="bullet">
+    ///     <item><description>An automaton is either represented explicitly (with <see cref="State"/> and
+    ///         <see cref="Transition"/> objects) or with a singleton string (see
+    ///         <see cref="Singleton"/> and <see cref="ExpandSingleton()"/>) in case the automaton
+    ///         is known to accept exactly one string. (Implicitly, all states and
+    ///         transitions of an automaton are reachable from its initial state.)</description></item>
+    ///     <item><description>Automata are always reduced (see <see cref="Reduce()"/>) and have no
+    ///         transitions to dead states (see <see cref="RemoveDeadTransitions()"/>).</description></item>
+    ///     <item><description>If an automaton is nondeterministic, then <see cref="IsDeterministic"/>
+    ///         returns <c>false</c> (but the converse is not required).</description></item>
+    ///     <item><description>Automata provided as input to operations are generally assumed to be
+    ///         disjoint.</description></item>
+    /// </list>
+    /// <para/>
     /// If the states or transitions are manipulated manually, the
-    /// <seealso cref="#restoreInvariant()"/> and <seealso cref="#setDeterministic(boolean)"/> methods
+    /// <see cref="RestoreInvariant()"/> method and <see cref="IsDeterministic"/> setter
     /// should be used afterwards to restore representation invariants that are
     /// assumed by the built-in automata operations.
     ///
-    /// <p>
-    /// <p>
+    /// <para/>
+    /// <para>
     /// Note: this class has internal mutable state and is not thread safe. It is
     /// the caller's responsibility to ensure any necessary synchronization if you
     /// wish to use the same Automaton from multiple threads. In general it is instead
-    /// recommended to use a <seealso cref="RunAutomaton"/> for multithreaded matching: it is immutable,
+    /// recommended to use a <see cref="RunAutomaton"/> for multithreaded matching: it is immutable,
     /// thread safe, and much faster.
-    /// </p>
+    /// </para>
     /// @lucene.experimental
     /// </summary>
     public class Automaton
@@ -76,11 +76,11 @@ namespace Lucene.Net.Util.Automaton
         /// Minimize using Hopcroft's O(n log n) algorithm. this is regarded as one of
         /// the most generally efficient algorithms that exist.
         /// </summary>
-        /// <seealso cref= #setMinimization(int) </seealso>
+        /// <seealso cref="SetMinimization(int)"/>
         public const int MINIMIZE_HOPCROFT = 2;
 
         /// <summary>
-        /// Selects minimization algorithm (default: <code>MINIMIZE_HOPCROFT</code>). </summary>
+        /// Selects minimization algorithm (default: <c>MINIMIZE_HOPCROFT</c>). </summary>
         internal static int minimization = MINIMIZE_HOPCROFT;
 
         /// <summary>
@@ -88,19 +88,18 @@ namespace Lucene.Net.Util.Automaton
         internal State initial;
 
         /// <summary>
-        /// If true, then this automaton is definitely deterministic (i.e., there are
+        /// If <c>true</c>, then this automaton is definitely deterministic (i.e., there are
         /// no choices for any run, but a run may crash).
         /// </summary>
         internal bool deterministic;
 
         /// <summary>
         /// Extra data associated with this automaton. </summary>
-        
         internal object info;
 
-        /// <summary>
-        /// Hash code. Recomputed by <seealso cref="MinimizationOperations#minimize(Automaton)"/>
-        /// </summary>
+        ///// <summary>
+        ///// Hash code. Recomputed by <see cref="MinimizationOperations#minimize(Automaton)"/>
+        ///// </summary>
         //int hash_code;
 
         /// <summary>
@@ -113,17 +112,17 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Selects whether operations may modify the input automata (default:
-        /// <code>false</code>).
+        /// <c>false</c>).
         /// </summary>
         internal static bool allow_mutation = false;
 
         /// <summary>
         /// Constructs a new automaton that accepts the empty language. Using this
-        /// constructor, automata can be constructed manually from <seealso cref="State"/> and
-        /// <seealso cref="Transition"/> objects.
+        /// constructor, automata can be constructed manually from <see cref="State"/> and
+        /// <see cref="Transition"/> objects.
         /// </summary>
-        /// <seealso cref= State </seealso>
-        /// <seealso cref= Transition </seealso>
+        /// <seealso cref="State"/>
+        /// <seealso cref="Transition"/>
         public Automaton(State initial)
         {
             this.initial = initial;
@@ -137,7 +136,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Selects minimization algorithm (default: <code>MINIMIZE_HOPCROFT</code>).
+        /// Selects minimization algorithm (default: <c>MINIMIZE_HOPCROFT</c>).
         /// </summary>
         /// <param name="algorithm"> minimization algorithm </param>
         public static void SetMinimization(int algorithm)
@@ -147,11 +146,11 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Sets or resets minimize always flag. If this flag is set, then
-        /// <seealso cref="MinimizationOperations#minimize(Automaton)"/> will automatically be
+        /// <see cref="MinimizationOperations.Minimize(Automaton)"/> will automatically be
         /// invoked after all operations that otherwise may produce non-minimal
         /// automata. By default, the flag is not set.
         /// </summary>
-        /// <param name="flag"> if true, the flag is set </param>
+        /// <param name="flag"> if <c>true</c>, the flag is set </param>
         public static void SetMinimizeAlways(bool flag)
         {
             minimize_always = flag;
@@ -163,7 +162,7 @@ namespace Lucene.Net.Util.Automaton
         /// always leave input automata languages unmodified. By default, the flag is
         /// not set.
         /// </summary>
-        /// <param name="flag"> if true, the flag is set </param>
+        /// <param name="flag"> if <c>true</c>, the flag is set </param>
         /// <returns> previous value of the flag </returns>
         public static bool SetAllowMutate(bool flag)
         {
@@ -208,7 +207,7 @@ namespace Lucene.Net.Util.Automaton
         /// exactly one string <i>may</i> be represented in singleton mode. In that
         /// case, this method may be used to obtain the string.
         /// </summary>
-        /// <returns> string, null if this automaton is not in singleton mode. </returns>
+        /// <returns> String, <c>null</c> if this automaton is not in singleton mode. </returns>
         public virtual string Singleton
         {
             get
@@ -217,10 +216,10 @@ namespace Lucene.Net.Util.Automaton
             }
         }
 
-        /// <summary>
-        /// Sets initial state.
-        /// </summary>
-        /// <param name="s"> state </param>
+        ///// <summary>
+        ///// Sets initial state.
+        ///// </summary>
+        ///// <param name="s"> state </param>
         /*
         public void setInitialState(State s) {
           initial = s;
@@ -241,7 +240,7 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Returns deterministic flag for this automaton.
         /// </summary>
-        /// <returns> true if the automaton is definitely deterministic, false if the
+        /// <returns> <c>true</c> if the automaton is definitely deterministic, <c>false</c> if the
         ///         automaton may be nondeterministic </returns>
         public virtual bool IsDeterministic
         {
@@ -258,7 +257,7 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Associates extra information with this automaton.
         /// </summary>
-        /// <param name="info"> extra information </param>
+        /// <param name="value"> extra information </param>
         public virtual object Info
         {
             set
@@ -352,7 +351,7 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Returns the set of reachable accept states.
         /// </summary>
-        /// <returns> set of <seealso cref="State"/> objects </returns>
+        /// <returns> Set of <see cref="State"/> objects. </returns>
         public virtual ISet<State> GetAcceptStates()
         {
             ExpandSingleton();
@@ -413,11 +412,11 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Restores representation invariant. this method must be invoked before any
+        /// Restores representation invariant. This method must be invoked before any
         /// built-in automata operation is performed if automaton states or transitions
         /// are manipulated manually.
         /// </summary>
-        /// <seealso cref= #setDeterministic(boolean) </seealso>
+        /// <seealso cref="IsDeterministic"/>
         public virtual void RestoreInvariant()
         {
             RemoveDeadTransitions();
@@ -473,7 +472,7 @@ namespace Lucene.Net.Util.Automaton
         /// Returns the set of live states. A state is "live" if an accept state is
         /// reachable from it.
         /// </summary>
-        /// <returns> set of <seealso cref="State"/> objects </returns>
+        /// <returns> Set of <see cref="State"/> objects. </returns>
         private State[] GetLiveStates()
         {
             State[] states = GetNumberedStates();
@@ -517,7 +516,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Removes transitions to dead states and calls <seealso cref="#reduce()"/>.
+        /// Removes transitions to dead states and calls <see cref="Reduce()"/>.
         /// (A state is "dead" if no accept state is
         /// reachable from it.)
         /// </summary>
@@ -620,7 +619,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Returns the number of transitions in this automaton. this number is counted
+        /// Returns the number of transitions in this automaton. This number is counted
         /// as the total number of edges, where one edge may be a character interval.
         /// </summary>
         public virtual int GetNumberOfTransitions()
@@ -693,9 +692,9 @@ namespace Lucene.Net.Util.Automaton
             //throw new System.NotSupportedException();
         }
 
-        /// <summary>
-        /// Must be invoked when the stored hash code may no longer be valid.
-        /// </summary>
+        ///// <summary>
+        ///// Must be invoked when the stored hash code may no longer be valid.
+        ///// </summary>
         /*
         void clearHashCode() {
           hash_code = 0;
@@ -780,7 +779,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Returns a clone of this automaton unless <code>allow_mutation</code> is
+        /// Returns a clone of this automaton unless <see cref="allow_mutation"/> is
         /// set, expands if singleton.
         /// </summary>
         internal virtual Automaton CloneExpandedIfRequired()
@@ -830,7 +829,7 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Returns a clone of this automaton, or this automaton itself if
-        /// <code>allow_mutation</code> flag is set.
+        /// <see cref="allow_mutation"/> flag is set.
         /// </summary>
         internal virtual Automaton CloneIfRequired()
         {
@@ -845,7 +844,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// See <seealso cref="BasicOperations#concatenate(Automaton, Automaton)"/>.
+        /// See <see cref="BasicOperations.Concatenate(Automaton, Automaton)"/>.
         /// </summary>
         public virtual Automaton Concatenate(Automaton a)
         {
@@ -853,7 +852,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// See <seealso cref="BasicOperations#concatenate(List)"/>.
+        /// See <see cref="BasicOperations.Concatenate(IList{Automaton})"/>.
         /// </summary>
         public static Automaton Concatenate(IList<Automaton> l)
         {
@@ -861,7 +860,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// See <seealso cref="BasicOperations#optional(Automaton)"/>.
+        /// See <see cref="BasicOperations.Optional(Automaton)"/>.
         /// </summary>
         public virtual Automaton Optional()
         {
@@ -869,7 +868,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// See <seealso cref="BasicOperations#repeat(Automaton)"/>.
+        /// See <see cref="BasicOperations.Repeat(Automaton)"/>.
         /// </summary>
         public virtual Automaton Repeat()
         {
@@ -877,7 +876,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// See <seealso cref="BasicOperations#repeat(Automaton, int)"/>.
+        /// See <see cref="BasicOperations.Repeat(Automaton, int)"/>.
         /// </summary>
         public virtual Automaton Repeat(int min)
         {
@@ -885,7 +884,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// See <seealso cref="BasicOperations#repeat(Automaton, int, int)"/>.
+        /// See <see cref="BasicOperations.Repeat(Automaton, int, int)"/>.
         /// </summary>
         public virtual Automaton Repeat(int min, int max)
         {
@@ -893,7 +892,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// See <seealso cref="BasicOperations#complement(Automaton)"/>.
+        /// See <see cref="BasicOperations.Complement(Automaton)"/>.
         /// </summary>
         public virtual Automaton Complement()
         {
@@ -901,7 +900,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// See <seealso cref="BasicOperations#minus(Automaton, Automaton)"/>.
+        /// See <see cref="BasicOperations.Minus(Automaton, Automaton)"/>.
         /// </summary>
         public virtual Automaton Minus(Automaton a)
         {
@@ -909,7 +908,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// See <seealso cref="BasicOperations#intersection(Automaton, Automaton)"/>.
+        /// See <see cref="BasicOperations.Intersection(Automaton, Automaton)"/>.
         /// </summary>
         public virtual Automaton Intersection(Automaton a)
         {
@@ -917,7 +916,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// See <seealso cref="BasicOperations#subsetOf(Automaton, Automaton)"/>.
+        /// See <see cref="BasicOperations.SubsetOf(Automaton, Automaton)"/>.
         /// </summary>
         public virtual bool SubsetOf(Automaton a)
         {
@@ -925,7 +924,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// See <seealso cref="BasicOperations#union(Automaton, Automaton)"/>.
+        /// See <see cref="BasicOperations.Union(Automaton, Automaton)"/>.
         /// </summary>
         public virtual Automaton Union(Automaton a)
         {
@@ -933,7 +932,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// See <seealso cref="BasicOperations#union(Collection)"/>.
+        /// See <see cref="BasicOperations.Union(ICollection{Automaton})"/>.
         /// </summary>
         public static Automaton Union(ICollection<Automaton> l)
         {
@@ -941,7 +940,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// See <seealso cref="BasicOperations#determinize(Automaton)"/>.
+        /// See <see cref="BasicOperations.Determinize(Automaton)"/>.
         /// </summary>
         public virtual void Determinize()
         {
@@ -949,7 +948,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// See <seealso cref="BasicOperations#isEmptyString(Automaton)"/>.
+        /// See <see cref="BasicOperations.IsEmptyString(Automaton)"/>.
         /// </summary>
         public virtual bool IsEmptyString
         {
@@ -960,7 +959,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// See <seealso cref="MinimizationOperations#minimize(Automaton)"/>. Returns the
+        /// See <see cref="MinimizationOperations.Minimize(Automaton)"/>. Returns the
         /// automaton being given as argument.
         /// </summary>
         public static Automaton Minimize(Automaton a)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/src/Lucene.Net/Util/Automaton/AutomatonProvider.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Automaton/AutomatonProvider.cs b/src/Lucene.Net/Util/Automaton/AutomatonProvider.cs
index bc0a1e5..1bb92ac 100644
--- a/src/Lucene.Net/Util/Automaton/AutomatonProvider.cs
+++ b/src/Lucene.Net/Util/Automaton/AutomatonProvider.cs
@@ -30,19 +30,19 @@
 namespace Lucene.Net.Util.Automaton
 {
     /// <summary>
-    /// Automaton provider for <code>RegExp.</code>
-    /// <seealso cref="RegExp#toAutomaton(AutomatonProvider)"/>
-    ///
+    /// Automaton provider for <see cref="RegExp"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
+    /// <seealso cref="RegExp.ToAutomaton(IAutomatonProvider)"/>
     public interface IAutomatonProvider
     {
         /// <summary>
         /// Returns automaton of the given name.
         /// </summary>
-        /// <param name="name"> automaton name </param>
-        /// <returns> automaton </returns>
-        /// <exception cref="IOException"> if errors occur </exception>
+        /// <param name="name"> Automaton name. </param>
+        /// <returns> Automaton. </returns>
+        /// <exception cref="System.IO.IOException"> If errors occur. </exception>
         Automaton GetAutomaton(string name);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/src/Lucene.Net/Util/Automaton/BasicAutomata.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Automaton/BasicAutomata.cs b/src/Lucene.Net/Util/Automaton/BasicAutomata.cs
index ff0f20f..d7b50e0 100644
--- a/src/Lucene.Net/Util/Automaton/BasicAutomata.cs
+++ b/src/Lucene.Net/Util/Automaton/BasicAutomata.cs
@@ -36,7 +36,7 @@ namespace Lucene.Net.Util.Automaton
 {
     /// <summary>
     /// Construction of basic automata.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     internal sealed class BasicAutomata
@@ -127,7 +127,7 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Constructs sub-automaton corresponding to decimal numbers of length
-        /// x.substring(n).length().
+        /// <c>x.Substring(n).Length</c>.
         /// </summary>
         private static State AnyOfRightLength(string x, int n)
         {
@@ -145,7 +145,7 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Constructs sub-automaton corresponding to decimal numbers of value at least
-        /// x.substring(n) and length x.substring(n).length().
+        /// <c>x.Substring(n)</c> and length <c>x.Substring(n).Length</c>.
         /// </summary>
         private static State AtLeast(string x, int n, ICollection<State> initials, bool zeros)
         {
@@ -172,7 +172,7 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Constructs sub-automaton corresponding to decimal numbers of value at most
-        /// x.substring(n) and length x.substring(n).length().
+        /// <c>x.Substring(n)</c> and length <c>x.Substring(n).Length</c>.
         /// </summary>
         private static State AtMost(string x, int n)
         {
@@ -195,8 +195,8 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Constructs sub-automaton corresponding to decimal numbers of value between
-        /// x.substring(n) and y.substring(n) and of length x.substring(n).length()
-        /// (which must be equal to y.substring(n).length()).
+        /// <c>x.Substring(n)</c> and <c>y.Substring(n)</c> and of length <c>x.Substring(n).Length</c>
+        /// (which must be equal to <c>y.Substring(n).Length</c>).
         /// </summary>
         private static State Between(string x, string y, int n, ICollection<State> initials, bool zeros)
         {
@@ -234,15 +234,15 @@ namespace Lucene.Net.Util.Automaton
         /// Returns a new automaton that accepts strings representing decimal
         /// non-negative integers in the given interval.
         /// </summary>
-        /// <param name="min"> minimal value of interval </param>
-        /// <param name="max"> maximal value of interval (both end points are included in the
-        ///          interval) </param>
-        /// <param name="digits"> if >0, use fixed number of digits (strings must be prefixed
+        /// <param name="min"> Minimal value of interval. </param>
+        /// <param name="max"> Maximal value of interval (both end points are included in the
+        ///          interval). </param>
+        /// <param name="digits"> If &gt; 0, use fixed number of digits (strings must be prefixed
         ///          by 0's to obtain the right length) - otherwise, the number of
-        ///          digits is not fixed </param>
-        /// <exception cref="IllegalArgumentException"> if min>max or if numbers in the
+        ///          digits is not fixed. </param>
+        /// <exception cref="ArgumentException"> If min &gt; max or if numbers in the
         ///              interval cannot be expressed with the given fixed number of
-        ///              digits </exception>
+        ///              digits. </exception>
         public static Automaton MakeInterval(int min, int max, int digits)
         {
             Automaton a = new Automaton();
@@ -329,14 +329,14 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Returns a new (deterministic and minimal) automaton that accepts the union
-        /// of the given collection of <seealso cref="BytesRef"/>s representing UTF-8 encoded
+        /// of the given collection of <see cref="BytesRef"/>s representing UTF-8 encoded
         /// strings.
         /// </summary>
         /// <param name="utf8Strings">
         ///          The input strings, UTF-8 encoded. The collection must be in sorted
         ///          order.
         /// </param>
-        /// <returns> An <seealso cref="Automaton"/> accepting all input strings. The resulting
+        /// <returns> An <see cref="Automaton"/> accepting all input strings. The resulting
         ///         automaton is codepoint based (full unicode codepoints on
         ///         transitions). </returns>
         public static Automaton MakeStringUnion(ICollection<BytesRef> utf8Strings)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/src/Lucene.Net/Util/Automaton/BasicOperations.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Automaton/BasicOperations.cs b/src/Lucene.Net/Util/Automaton/BasicOperations.cs
index a5f53f7..62927dc 100644
--- a/src/Lucene.Net/Util/Automaton/BasicOperations.cs
+++ b/src/Lucene.Net/Util/Automaton/BasicOperations.cs
@@ -38,7 +38,7 @@ namespace Lucene.Net.Util.Automaton
 {
     /// <summary>
     /// Basic automata operations.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     internal sealed class BasicOperations
@@ -50,7 +50,7 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Returns an automaton that accepts the concatenation of the languages of the
         /// given automata.
-        /// <p>
+        /// <para/>
         /// Complexity: linear in number of states.
         /// </summary>
         public static Automaton Concatenate(Automaton a1, Automaton a2)
@@ -92,7 +92,7 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Returns an automaton that accepts the concatenation of the languages of the
         /// given automata.
-        /// <p>
+        /// <para/>
         /// Complexity: linear in total number of states.
         /// </summary>
         public static Automaton Concatenate(IList<Automaton> l)
@@ -190,7 +190,7 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Returns an automaton that accepts the union of the empty string and the
         /// language of the given automaton.
-        /// <p>
+        /// <para/>
         /// Complexity: linear in number of states.
         /// </summary>
         public static Automaton Optional(Automaton a)
@@ -211,7 +211,7 @@ namespace Lucene.Net.Util.Automaton
         /// Returns an automaton that accepts the Kleene star (zero or more
         /// concatenated repetitions) of the language of the given automaton. Never
         /// modifies the input automaton language.
-        /// <p>
+        /// <para/>
         /// Complexity: linear in number of states.
         /// </summary>
         public static Automaton Repeat(Automaton a)
@@ -233,10 +233,10 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Returns an automaton that accepts <code>min</code> or more concatenated
+        /// Returns an automaton that accepts <paramref name="min"/> or more concatenated
         /// repetitions of the language of the given automaton.
-        /// <p>
-        /// Complexity: linear in number of states and in <code>min</code>.
+        /// <para/>
+        /// Complexity: linear in number of states and in <paramref name="min"/>.
         /// </summary>
         public static Automaton Repeat(Automaton a, int min)
         {
@@ -254,12 +254,12 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Returns an automaton that accepts between <code>min</code> and
-        /// <code>max</code> (including both) concatenated repetitions of the language
+        /// Returns an automaton that accepts between <paramref name="min"/> and
+        /// <paramref name="max"/> (including both) concatenated repetitions of the language
         /// of the given automaton.
-        /// <p>
-        /// Complexity: linear in number of states and in <code>min</code> and
-        /// <code>max</code>.
+        /// <para/>
+        /// Complexity: linear in number of states and in <paramref name="min"/> and
+        /// <paramref name="max"/>.
         /// </summary>
         public static Automaton Repeat(Automaton a, int min, int max)
         {
@@ -314,7 +314,7 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Returns a (deterministic) automaton that accepts the complement of the
         /// language of the given automaton.
-        /// <p>
+        /// <para/>
         /// Complexity: linear in number of states (if already deterministic).
         /// </summary>
         public static Automaton Complement(Automaton a)
@@ -332,10 +332,10 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Returns a (deterministic) automaton that accepts the intersection of the
-        /// language of <code>a1</code> and the complement of the language of
-        /// <code>a2</code>. As a side-effect, the automata may be determinized, if not
+        /// language of <paramref name="a1"/> and the complement of the language of
+        /// <paramref name="a2"/>. As a side-effect, the automata may be determinized, if not
         /// already deterministic.
-        /// <p>
+        /// <para/>
         /// Complexity: quadratic in number of states (if already deterministic).
         /// </summary>
         public static Automaton Minus(Automaton a1, Automaton a2)
@@ -365,7 +365,7 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Returns an automaton that accepts the intersection of the languages of the
         /// given automata. Never modifies the input automata languages.
-        /// <p>
+        /// <para/>
         /// Complexity: quadratic in number of states.
         /// </summary>
         public static Automaton Intersection(Automaton a1, Automaton a2)
@@ -445,10 +445,10 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Returns true if these two automata accept exactly the
-        ///  same language.  this is a costly computation!  Note
-        ///  also that a1 and a2 will be determinized as a side
-        ///  effect.
+        /// Returns <c>true</c> if these two automata accept exactly the
+        /// same language.  This is a costly computation!  Note
+        /// also that <paramref name="a1"/> and <paramref name="a2"/> will be determinized as a side
+        /// effect.
         /// </summary>
         public static bool SameLanguage(Automaton a1, Automaton a2)
         {
@@ -472,10 +472,10 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Returns true if the language of <code>a1</code> is a subset of the language
-        /// of <code>a2</code>. As a side-effect, <code>a2</code> is determinized if
+        /// Returns <c>true</c> if the language of <paramref name="a1"/> is a subset of the language
+        /// of <paramref name="a2"/>. As a side-effect, <paramref name="a2"/> is determinized if
         /// not already marked as deterministic.
-        /// <p>
+        /// <para/>
         /// Complexity: quadratic in number of states.
         /// </summary>
         public static bool SubsetOf(Automaton a1, Automaton a2)
@@ -552,7 +552,7 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Returns an automaton that accepts the union of the languages of the given
         /// automata.
-        /// <p>
+        /// <para/>
         /// Complexity: linear in number of states.
         /// </summary>
         public static Automaton Union(Automaton a1, Automaton a2)
@@ -585,7 +585,7 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Returns an automaton that accepts the union of the languages of the given
         /// automata.
-        /// <p>
+        /// <para/>
         /// Complexity: linear in number of states.
         /// </summary>
         public static Automaton Union(ICollection<Automaton> l)
@@ -780,7 +780,7 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Determinizes the given automaton.
-        /// <p>
+        /// <para/>
         /// Worst case complexity: exponential in number of states.
         /// </summary>
         public static void Determinize(Automaton a)
@@ -922,13 +922,14 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Adds epsilon transitions to the given automaton. this method adds extra
+        /// Adds epsilon transitions to the given automaton. This method adds extra
         /// character interval transitions that are equivalent to the given set of
         /// epsilon transitions.
         /// </summary>
-        /// <param name="pairs"> collection of <seealso cref="StatePair"/> objects representing pairs of
+        /// <param name="a"> Automaton. </param>
+        /// <param name="pairs"> Collection of <see cref="StatePair"/> objects representing pairs of
         ///          source/destination states where epsilon transitions should be
-        ///          added </param>
+        ///          added. </param>
         public static void AddEpsilons(Automaton a, ICollection<StatePair> pairs)
         {
             a.ExpandSingleton();
@@ -1001,7 +1002,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Returns true if the given automaton accepts the empty string and nothing
+        /// Returns <c>true</c> if the given automaton accepts the empty string and nothing
         /// else.
         /// </summary>
         public static bool IsEmptyString(Automaton a)
@@ -1017,7 +1018,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Returns true if the given automaton accepts no strings.
+        /// Returns <c>true</c> if the given automaton accepts no strings.
         /// </summary>
         public static bool IsEmpty(Automaton a)
         {
@@ -1029,7 +1030,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Returns true if the given automaton accepts all strings.
+        /// Returns <c>true</c> if the given automaton accepts all strings.
         /// </summary>
         public static bool IsTotal(Automaton a)
         {
@@ -1048,11 +1049,11 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Returns true if the given string is accepted by the automaton.
-        /// <p>
+        /// Returns <c>true</c> if the given string is accepted by the automaton.
+        /// <para/>
         /// Complexity: linear in the length of the string.
-        /// <p>
-        /// <b>Note:</b> for full performance, use the <seealso cref="RunAutomaton"/> class.
+        /// <para/>
+        /// <b>Note:</b> for full performance, use the <see cref="RunAutomaton"/> class.
         /// </summary>
         public static bool Run(Automaton a, string s)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/src/Lucene.Net/Util/Automaton/ByteRunAutomaton.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Automaton/ByteRunAutomaton.cs b/src/Lucene.Net/Util/Automaton/ByteRunAutomaton.cs
index 096df3c..3a12c55 100644
--- a/src/Lucene.Net/Util/Automaton/ByteRunAutomaton.cs
+++ b/src/Lucene.Net/Util/Automaton/ByteRunAutomaton.cs
@@ -18,7 +18,7 @@ namespace Lucene.Net.Util.Automaton
      */
 
     /// <summary>
-    /// Automaton representation for matching UTF-8 byte[].
+    /// Automaton representation for matching UTF-8 <see cref="T:byte[]"/>.
     /// </summary>
     public class ByteRunAutomaton : RunAutomaton
     {
@@ -28,14 +28,14 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// expert: if utf8 is true, the input is already byte-based </summary>
+        /// Expert: if <paramref name="utf8"/> is <c>true</c>, the input is already byte-based. </summary>
         public ByteRunAutomaton(Automaton a, bool utf8)
             : base(utf8 ? a : (new UTF32ToUTF8()).Convert(a), 256, true)
         {
         }
 
         /// <summary>
-        /// Returns true if the given byte array is accepted by this automaton
+        /// Returns <c>true</c> if the given byte array is accepted by this automaton.
         /// </summary>
         public virtual bool Run(byte[] s, int offset, int length)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/src/Lucene.Net/Util/Automaton/CharacterRunAutomaton.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Automaton/CharacterRunAutomaton.cs b/src/Lucene.Net/Util/Automaton/CharacterRunAutomaton.cs
index dc97e70..e9a31b5 100644
--- a/src/Lucene.Net/Util/Automaton/CharacterRunAutomaton.cs
+++ b/src/Lucene.Net/Util/Automaton/CharacterRunAutomaton.cs
@@ -20,7 +20,7 @@ namespace Lucene.Net.Util.Automaton
      */
 
     /// <summary>
-    /// Automaton representation for matching char[].
+    /// Automaton representation for matching <see cref="T:char[]"/>.
     /// </summary>
     public class CharacterRunAutomaton : RunAutomaton
     {
@@ -30,7 +30,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Returns true if the given string is accepted by this automaton.
+        /// Returns <c>true</c> if the given string is accepted by this automaton.
         /// </summary>
         public virtual bool Run(string s)
         {
@@ -45,7 +45,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Returns true if the given string is accepted by this automaton
+        /// Returns <c>true</c> if the given string is accepted by this automaton.
         /// </summary>
         public virtual bool Run(char[] s, int offset, int length)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/src/Lucene.Net/Util/Automaton/CompiledAutomaton.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Automaton/CompiledAutomaton.cs b/src/Lucene.Net/Util/Automaton/CompiledAutomaton.cs
index bd60de4..9f1baa2 100644
--- a/src/Lucene.Net/Util/Automaton/CompiledAutomaton.cs
+++ b/src/Lucene.Net/Util/Automaton/CompiledAutomaton.cs
@@ -31,9 +31,9 @@ namespace Lucene.Net.Util.Automaton
 
     /// <summary>
     /// Immutable class holding compiled details for a given
-    /// Automaton.  The Automaton is deterministic, must not have
+    /// <see cref="Automaton"/>.  The <see cref="Automaton"/> is deterministic, must not have
     /// dead states but is not necessarily minimal.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class CompiledAutomaton
@@ -57,7 +57,7 @@ namespace Lucene.Net.Util.Automaton
             SINGLE,
 
             /// <summary>
-            /// Automaton that matches all Strings with a constant prefix. </summary>
+            /// Automaton that matches all strings with a constant prefix. </summary>
             PREFIX,
 
             /// <summary>
@@ -68,14 +68,14 @@ namespace Lucene.Net.Util.Automaton
         public AUTOMATON_TYPE Type { get; private set; }
 
         /// <summary>
-        /// For <seealso cref="AUTOMATON_TYPE#PREFIX"/>, this is the prefix term;
-        /// for <seealso cref="AUTOMATON_TYPE#SINGLE"/> this is the singleton term.
+        /// For <see cref="AUTOMATON_TYPE.PREFIX"/>, this is the prefix term;
+        /// for <see cref="AUTOMATON_TYPE.SINGLE"/> this is the singleton term.
         /// </summary>
         public BytesRef Term { get; private set; }
 
         /// <summary>
-        /// Matcher for quickly determining if a byte[] is accepted.
-        /// only valid for <seealso cref="AUTOMATON_TYPE#NORMAL"/>.
+        /// Matcher for quickly determining if a <see cref="T:byte[]"/> is accepted.
+        /// Only valid for <see cref="AUTOMATON_TYPE.NORMAL"/>.
         /// </summary>
         public ByteRunAutomaton RunAutomaton { get; private set; }
 
@@ -84,8 +84,9 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Two dimensional array of transitions, indexed by state
         /// number for traversal. The state numbering is consistent with
-        /// <seealso cref="#runAutomaton"/>.
-        /// Only valid for <seealso cref="AUTOMATON_TYPE#NORMAL"/>.
+        /// <see cref="RunAutomaton"/>.
+        /// <para/>
+        /// Only valid for <see cref="AUTOMATON_TYPE.NORMAL"/>.
         /// </summary>
         [WritableArray]
         [SuppressMessage("Microsoft.Performance", "CA1819", Justification = "Lucene's design requires some writable array properties")]
@@ -97,7 +98,7 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Shared common suffix accepted by the automaton. Only valid
-        /// for <seealso cref="AUTOMATON_TYPE#NORMAL"/>, and only when the
+        /// for <see cref="AUTOMATON_TYPE.NORMAL"/>, and only when the
         /// automaton accepts an infinite language.
         /// </summary>
         public BytesRef CommonSuffixRef { get; private set; }
@@ -105,7 +106,7 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Indicates if the automaton accepts a finite set of strings.
         /// Null if this was not computed.
-        /// Only valid for <seealso cref="AUTOMATON_TYPE#NORMAL"/>.
+        /// Only valid for <see cref="AUTOMATON_TYPE.NORMAL"/>.
         /// </summary>
         public bool? Finite { get; private set; }
 
@@ -312,12 +313,12 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Finds largest term accepted by this Automaton, that's
-        ///  <= the provided input term.  The result is placed in
-        ///  output; it's fine for output and input to point to
-        ///  the same BytesRef.  The returned result is either the
-        ///  provided output, or null if there is no floor term
-        ///  (ie, the provided input term is before the first term
-        ///  accepted by this Automaton).
+        /// &lt;= the provided input term.  The result is placed in
+        /// output; it's fine for output and input to point to
+        /// the same <see cref="BytesRef"/>.  The returned result is either the
+        /// provided output, or <c>null</c> if there is no floor term
+        /// (ie, the provided input term is before the first term
+        /// accepted by this <see cref="Automaton"/>).
         /// </summary>
         public virtual BytesRef Floor(BytesRef input, BytesRef output)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/src/Lucene.Net/Util/Automaton/DaciukMihovAutomatonBuilder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Automaton/DaciukMihovAutomatonBuilder.cs b/src/Lucene.Net/Util/Automaton/DaciukMihovAutomatonBuilder.cs
index 233dd62..5602c53 100644
--- a/src/Lucene.Net/Util/Automaton/DaciukMihovAutomatonBuilder.cs
+++ b/src/Lucene.Net/Util/Automaton/DaciukMihovAutomatonBuilder.cs
@@ -23,16 +23,16 @@ namespace Lucene.Net.Util.Automaton
      */
 
     /// <summary>
-    /// Builds a minimal, deterministic <seealso cref="Automaton"/> that accepts a set of
+    /// Builds a minimal, deterministic <see cref="Automaton"/> that accepts a set of
     /// strings. The algorithm requires sorted input data, but is very fast
     /// (nearly linear with the input size).
     /// </summary>
-    /// <seealso cref= #build(Collection) </seealso>
-    /// <seealso cref= BasicAutomata#makeStringUnion(Collection) </seealso>
+    /// <seealso cref="Build(ICollection{BytesRef})"/>
+    /// <seealso cref="BasicAutomata.MakeStringUnion(ICollection{BytesRef})"/>
     internal sealed class DaciukMihovAutomatonBuilder
     {
         /// <summary>
-        /// DFSA state with <code>char</code> labels on transitions.
+        /// DFSA state with <see cref="char"/> labels on transitions.
         /// </summary>
         public sealed class State // LUCENENET NOTE: Made public because it is returned from a public member
         {
@@ -45,27 +45,27 @@ namespace Lucene.Net.Util.Automaton
             private static readonly State[] NO_STATES = new State[0];
 
             /// <summary>
-            /// Labels of outgoing transitions. Indexed identically to <seealso cref="#states"/>.
+            /// Labels of outgoing transitions. Indexed identically to <see cref="states"/>.
             /// Labels must be sorted lexicographically.
             /// </summary>
             internal int[] labels = NO_LABELS;
 
             /// <summary>
             /// States reachable from outgoing transitions. Indexed identically to
-            /// <seealso cref="#labels"/>.
+            /// <see cref="labels"/>.
             /// </summary>
             internal State[] states = NO_STATES;
 
             /// <summary>
-            /// <code>true</code> if this state corresponds to the end of at least one
+            /// <c>true</c> if this state corresponds to the end of at least one
             /// input sequence.
             /// </summary>
             internal bool is_final;
 
             /// <summary>
             /// Returns the target state of a transition leaving this state and labeled
-            /// with <code>label</code>. If no such transition exists, returns
-            /// <code>null</code>.
+            /// with <paramref name="label"/>. If no such transition exists, returns
+            /// <c>null</c>.
             /// </summary>
             internal State GetState(int label)
             {
@@ -75,12 +75,12 @@ namespace Lucene.Net.Util.Automaton
 
             /// <summary>
             /// Two states are equal if:
-            /// <ul>
-            /// <li>they have an identical number of outgoing transitions, labeled with
-            /// the same labels</li>
-            /// <li>corresponding outgoing transitions lead to the same states (to states
-            /// with an identical right-language).
-            /// </ul>
+            /// <list type="bullet">
+            ///     <item><description>They have an identical number of outgoing transitions, labeled with
+            ///         the same labels.</description></item>
+            ///     <item><description>Corresponding outgoing transitions lead to the same states (to states
+            ///         with an identical right-language).</description></item>
+            /// </list>
             /// </summary>
             public override bool Equals(object obj)
             {
@@ -116,7 +116,7 @@ namespace Lucene.Net.Util.Automaton
             }
 
             /// <summary>
-            /// Return <code>true</code> if this state has any children (outgoing
+            /// Return <c>true</c> if this state has any children (outgoing
             /// transitions).
             /// </summary>
             internal bool HasChildren
@@ -125,7 +125,7 @@ namespace Lucene.Net.Util.Automaton
             }
 
             /// <summary>
-            /// Create a new outgoing transition labeled <code>label</code> and return
+            /// Create a new outgoing transition labeled <paramref name="label"/> and return
             /// the newly created target state for this transition.
             /// </summary>
             internal State NewState(int label)
@@ -150,7 +150,7 @@ namespace Lucene.Net.Util.Automaton
 
             /// <summary>
             /// Return the associated state if the most recent transition is labeled with
-            /// <code>label</code>.
+            /// <paramref name="label"/>.
             /// </summary>
             internal State LastChild(int label)
             {
@@ -166,7 +166,7 @@ namespace Lucene.Net.Util.Automaton
 
             /// <summary>
             /// Replace the last added outgoing transition's target state with the given
-            /// state.
+            /// <paramref name="state"/>.
             /// </summary>
             internal void ReplaceLastChild(State state)
             {
@@ -207,7 +207,7 @@ namespace Lucene.Net.Util.Automaton
         private State root = new State();
 
         /// <summary>
-        /// Previous sequence added to the automaton in <seealso cref="#add(CharsRef)"/>.
+        /// Previous sequence added to the automaton in <see cref="Add(CharsRef)"/>.
         /// </summary>
         private CharsRef previous;
 
@@ -295,7 +295,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Build a minimal, deterministic automaton from a sorted list of <seealso cref="BytesRef"/> representing
+        /// Build a minimal, deterministic automaton from a sorted list of <see cref="BytesRef"/> representing
         /// strings in UTF-8. These strings must be binary-sorted.
         /// </summary>
         public static Automaton Build(ICollection<BytesRef> input)
@@ -316,7 +316,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Copy <code>current</code> into an internal buffer.
+        /// Copy <paramref name="current"/> into an internal buffer.
         /// </summary>
         private bool SetPrevious(CharsRef current)
         {
@@ -327,7 +327,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Replace last child of <code>state</code> with an already registered state
+        /// Replace last child of <paramref name="state"/> with an already registered state
         /// or stateRegistry the last child state.
         /// </summary>
         private void ReplaceOrRegister(State state)
@@ -351,8 +351,8 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Add a suffix of <code>current</code> starting at <code>fromIndex</code>
-        /// (inclusive) to state <code>state</code>.
+        /// Add a suffix of <paramref name="current"/> starting at <paramref name="fromIndex"/>
+        /// (inclusive) to state <paramref name="state"/>.
         /// </summary>
         private void AddSuffix(State state, ICharSequence current, int fromIndex)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/src/Lucene.Net/Util/Automaton/LevenshteinAutomata.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Automaton/LevenshteinAutomata.cs b/src/Lucene.Net/Util/Automaton/LevenshteinAutomata.cs
index 9014336..f6e82f7 100644
--- a/src/Lucene.Net/Util/Automaton/LevenshteinAutomata.cs
+++ b/src/Lucene.Net/Util/Automaton/LevenshteinAutomata.cs
@@ -24,10 +24,10 @@ namespace Lucene.Net.Util.Automaton
 
     /// <summary>
     /// Class to construct DFAs that match a word within some edit distance.
-    /// <p>
+    /// <para/>
     /// Implements the algorithm described in:
     /// Schulz and Mihov: Fast String Correction with Levenshtein Automata
-    /// <p>
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class LevenshteinAutomata
@@ -51,7 +51,7 @@ namespace Lucene.Net.Util.Automaton
         internal ParametricDescription[] descriptions;
 
         /// <summary>
-        /// Create a new LevenshteinAutomata for some input String.
+        /// Create a new <see cref="LevenshteinAutomata"/> for some <paramref name="input"/> string.
         /// Optionally count transpositions as a primitive edit.
         /// </summary>
         public LevenshteinAutomata(string input, bool withTranspositions)
@@ -61,7 +61,7 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Expert: specify a custom maximum possible symbol
-        /// (alphaMax); default is Character.MAX_CODE_POINT.
+        /// (alphaMax); default is <see cref="Character.MAX_CODE_POINT"/>.
         /// </summary>
         public LevenshteinAutomata(int[] word, int alphaMax, bool withTranspositions)
         {
@@ -130,15 +130,15 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Compute a DFA that accepts all strings within an edit distance of <code>n</code>.
-        /// <p>
+        /// Compute a DFA that accepts all strings within an edit distance of <paramref name="n"/>.
+        /// <para>
         /// All automata have the following properties:
-        /// <ul>
-        /// <li>They are deterministic (DFA).
-        /// <li>There are no transitions to dead states.
-        /// <li>They are not minimal (some transitions could be combined).
-        /// </ul>
-        /// </p>
+        /// <list type="bullet">
+        ///     <item><description>They are deterministic (DFA).</description></item>
+        ///     <item><description>There are no transitions to dead states.</description></item>
+        ///     <item><description>They are not minimal (some transitions could be combined).</description></item>
+        /// </list>
+        /// </para>
         /// </summary>
         public virtual Automaton ToAutomaton(int n)
         {
@@ -211,8 +211,8 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Get the characteristic vector <code>X(x, V)</code>
-        /// where V is <code>substring(pos, end)</code>
+        /// Get the characteristic vector <c>X(x, V)</c>
+        /// where V is <c>Substring(pos, end - pos)</c>.
         /// </summary>
         internal virtual int GetVector(int x, int pos, int end)
         {
@@ -229,16 +229,16 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// A ParametricDescription describes the structure of a Levenshtein DFA for some degree n.
-        /// <p>
+        /// A <see cref="ParametricDescription"/> describes the structure of a Levenshtein DFA for some degree <c>n</c>.
+        /// <para/>
         /// There are four components of a parametric description, all parameterized on the length
-        /// of the word <code>w</code>:
-        /// <ol>
-        /// <li>The number of states: <seealso cref="#size()"/>
-        /// <li>The set of final states: <seealso cref="#isAccept(int)"/>
-        /// <li>The transition function: <seealso cref="#transition(int, int, int)"/>
-        /// <li>Minimal boundary function: <seealso cref="#getPosition(int)"/>
-        /// </ol>
+        /// of the word <c>w</c>:
+        /// <list type="number">
+        ///     <item><description>The number of states: <see cref="Count"/></description></item>
+        ///     <item><description>The set of final states: <see cref="IsAccept(int)"/></description></item>
+        ///     <item><description>The transition function: <see cref="Transition(int, int, int)"/></description></item>
+        ///     <item><description>Minimal boundary function: <see cref="GetPosition(int)"/></description></item>
+        /// </list>
         /// </summary>
         internal abstract class ParametricDescription
         {
@@ -255,6 +255,7 @@ namespace Lucene.Net.Util.Automaton
 
             /// <summary>
             /// Return the number of states needed to compute a Levenshtein DFA.
+            /// <para/>
             /// NOTE: This was size() in Lucene.
             /// </summary>
             internal virtual int Count
@@ -263,7 +264,7 @@ namespace Lucene.Net.Util.Automaton
             }
 
             /// <summary>
-            /// Returns true if the <code>state</code> in any Levenshtein DFA is an accept state (final state).
+            /// Returns <c>true</c> if the <c>state</c> in any Levenshtein DFA is an accept state (final state).
             /// </summary>
             internal virtual bool IsAccept(int absState)
             {
@@ -275,7 +276,7 @@ namespace Lucene.Net.Util.Automaton
             }
 
             /// <summary>
-            /// Returns the position in the input word for a given <code>state</code>.
+            /// Returns the position in the input word for a given <c>state</c>.
             /// this is the minimal boundary for the state.
             /// </summary>
             internal virtual int GetPosition(int absState)
@@ -284,8 +285,8 @@ namespace Lucene.Net.Util.Automaton
             }
 
             /// <summary>
-            /// Returns the state number for a transition from the given <code>state</code>,
-            /// assuming <code>position</code> and characteristic vector <code>vector</code>
+            /// Returns the state number for a transition from the given <paramref name="state"/>,
+            /// assuming <paramref name="position"/> and characteristic vector <paramref name="vector"/>.
             /// </summary>
             internal abstract int Transition(int state, int position, int vector);
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/src/Lucene.Net/Util/Automaton/MinimizationOperations.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Automaton/MinimizationOperations.cs b/src/Lucene.Net/Util/Automaton/MinimizationOperations.cs
index 4555f70..2d4490b 100644
--- a/src/Lucene.Net/Util/Automaton/MinimizationOperations.cs
+++ b/src/Lucene.Net/Util/Automaton/MinimizationOperations.cs
@@ -35,7 +35,7 @@ namespace Lucene.Net.Util.Automaton
 {
     /// <summary>
     /// Operations for minimizing automata.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     internal sealed class MinimizationOperations
@@ -48,7 +48,7 @@ namespace Lucene.Net.Util.Automaton
         /// Minimizes (and determinizes if not already deterministic) the given
         /// automaton.
         /// </summary>
-        /// <seealso cref= Automaton#setMinimization(int) </seealso>
+        /// <seealso cref="Automaton.SetMinimization(int)"/>
         public static void Minimize(Automaton a)
         {
             if (!a.IsSingleton)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/src/Lucene.Net/Util/Automaton/RegExp.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Automaton/RegExp.cs b/src/Lucene.Net/Util/Automaton/RegExp.cs
index 1bfe383..a6b5242 100644
--- a/src/Lucene.Net/Util/Automaton/RegExp.cs
+++ b/src/Lucene.Net/Util/Automaton/RegExp.cs
@@ -83,281 +83,283 @@ namespace Lucene.Net.Util.Automaton
 
 
     /// <summary>
-    /// Regular Expression extension to <code>Automaton</code>.
-    /// <p>
+    /// Regular Expression extension to <see cref="Util.Automaton.Automaton"/>.
+    /// <para/>
     /// Regular expressions are built from the following abstract syntax:
-    /// <p>
-    /// <table border=0>
-    /// <tr>
-    /// <td><i>regexp</i></td>
-    /// <td>::=</td>
-    /// <td><i>unionexp</i></td>
-    /// <td></td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td></td>
-    /// <td></td>
-    /// <td></td>
-    /// </tr>
-    ///
-    /// <tr>
-    /// <td><i>unionexp</i></td>
-    /// <td>::=</td>
-    /// <td><i>interexp</i>&nbsp;<tt><b>|</b></tt>&nbsp;<i>unionexp</i></td>
-    /// <td>(union)</td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><i>interexp</i></td>
-    /// <td></td>
-    /// <td></td>
-    /// </tr>
-    ///
-    /// <tr>
-    /// <td><i>interexp</i></td>
-    /// <td>::=</td>
-    /// <td><i>concatexp</i>&nbsp;<tt><b>&amp;</b></tt>&nbsp;<i>interexp</i></td>
-    /// <td>(intersection)</td>
-    /// <td><small>[OPTIONAL]</small></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><i>concatexp</i></td>
-    /// <td></td>
-    /// <td></td>
-    /// </tr>
-    ///
-    /// <tr>
-    /// <td><i>concatexp</i></td>
-    /// <td>::=</td>
-    /// <td><i>repeatexp</i>&nbsp;<i>concatexp</i></td>
-    /// <td>(concatenation)</td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><i>repeatexp</i></td>
-    /// <td></td>
-    /// <td></td>
-    /// </tr>
-    ///
-    /// <tr>
-    /// <td><i>repeatexp</i></td>
-    /// <td>::=</td>
-    /// <td><i>repeatexp</i>&nbsp;<tt><b>?</b></tt></td>
-    /// <td>(zero or one occurrence)</td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><i>repeatexp</i>&nbsp;<tt><b>*</b></tt></td>
-    /// <td>(zero or more occurrences)</td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><i>repeatexp</i>&nbsp;<tt><b>+</b></tt></td>
-    /// <td>(one or more occurrences)</td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><i>repeatexp</i>&nbsp;<tt><b>{</b><i>n</i><b>}</b></tt></td>
-    /// <td>(<tt><i>n</i></tt> occurrences)</td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><i>repeatexp</i>&nbsp;<tt><b>{</b><i>n</i><b>,}</b></tt></td>
-    /// <td>(<tt><i>n</i></tt> or more occurrences)</td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><i>repeatexp</i>&nbsp;<tt><b>{</b><i>n</i><b>,</b><i>m</i><b>}</b></tt></td>
-    /// <td>(<tt><i>n</i></tt> to <tt><i>m</i></tt> occurrences, including both)</td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><i>complexp</i></td>
-    /// <td></td>
-    /// <td></td>
-    /// </tr>
-    ///
-    /// <tr>
-    /// <td><i>complexp</i></td>
-    /// <td>::=</td>
-    /// <td><tt><b>~</b></tt>&nbsp;<i>complexp</i></td>
-    /// <td>(complement)</td>
-    /// <td><small>[OPTIONAL]</small></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><i>charclassexp</i></td>
-    /// <td></td>
-    /// <td></td>
-    /// </tr>
-    ///
-    /// <tr>
-    /// <td><i>charclassexp</i></td>
-    /// <td>::=</td>
-    /// <td><tt><b>[</b></tt>&nbsp;<i>charclasses</i>&nbsp;<tt><b>]</b></tt></td>
-    /// <td>(character class)</td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><tt><b>[^</b></tt>&nbsp;<i>charclasses</i>&nbsp;<tt><b>]</b></tt></td>
-    /// <td>(negated character class)</td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><i>simpleexp</i></td>
-    /// <td></td>
-    /// <td></td>
-    /// </tr>
-    ///
-    /// <tr>
-    /// <td><i>charclasses</i></td>
-    /// <td>::=</td>
-    /// <td><i>charclass</i>&nbsp;<i>charclasses</i></td>
-    /// <td></td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><i>charclass</i></td>
-    /// <td></td>
-    /// <td></td>
-    /// </tr>
-    ///
-    /// <tr>
-    /// <td><i>charclass</i></td>
-    /// <td>::=</td>
-    /// <td><i>charexp</i>&nbsp;<tt><b>-</b></tt>&nbsp;<i>charexp</i></td>
-    /// <td>(character range, including end-points)</td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><i>charexp</i></td>
-    /// <td></td>
-    /// <td></td>
-    /// </tr>
-    ///
-    /// <tr>
-    /// <td><i>simpleexp</i></td>
-    /// <td>::=</td>
-    /// <td><i>charexp</i></td>
-    /// <td></td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><tt><b>.</b></tt></td>
-    /// <td>(any single character)</td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><tt><b>#</b></tt></td>
-    /// <td>(the empty language)</td>
-    /// <td><small>[OPTIONAL]</small></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><tt><b>@</b></tt></td>
-    /// <td>(any string)</td>
-    /// <td><small>[OPTIONAL]</small></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><tt><b>"</b></tt>&nbsp;&lt;Unicode string without double-quotes&gt;&nbsp; <tt><b>"</b></tt></td>
-    /// <td>(a string)</td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><tt><b>(</b></tt>&nbsp;<tt><b>)</b></tt></td>
-    /// <td>(the empty string)</td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><tt><b>(</b></tt>&nbsp;<i>unionexp</i>&nbsp;<tt><b>)</b></tt></td>
-    /// <td>(precedence override)</td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><tt><b>&lt;</b></tt>&nbsp;&lt;identifier&gt;&nbsp;<tt><b>&gt;</b></tt></td>
-    /// <td>(named automaton)</td>
-    /// <td><small>[OPTIONAL]</small></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><tt><b>&lt;</b><i>n</i>-<i>m</i><b>&gt;</b></tt></td>
-    /// <td>(numerical interval)</td>
-    /// <td><small>[OPTIONAL]</small></td>
-    /// </tr>
-    ///
-    /// <tr>
-    /// <td><i>charexp</i></td>
-    /// <td>::=</td>
-    /// <td>&lt;Unicode character&gt;</td>
-    /// <td>(a single non-reserved character)</td>
-    /// <td></td>
-    /// </tr>
-    /// <tr>
-    /// <td></td>
-    /// <td>|</td>
-    /// <td><tt><b>\</b></tt>&nbsp;&lt;Unicode character&gt;&nbsp;</td>
-    /// <td>(a single character)</td>
-    /// <td></td>
-    /// </tr>
-    /// </table>
-    /// <p>
+    /// <para/>
+    /// <list type="table">
+    ///     <item>
+    ///         <term><i>regexp</i></term>
+    ///         <term>::=</term>
+    ///         <term><i>unionexp</i></term>
+    ///         <term></term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term></term>
+    ///         <term></term>
+    ///         <term></term>
+    ///     </item>
+    ///     
+    ///     <item>
+    ///         <term><i>unionexp</i></term>
+    ///         <term>::=</term>
+    ///         <term><i>interexp</i>&#160;<tt><b>|</b></tt>&#160;<i>unionexp</i></term>
+    ///         <term>(union)</term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><i>interexp</i></term>
+    ///         <term></term>
+    ///         <term></term>
+    ///     </item>
+    ///     
+    ///     <item>
+    ///         <term><i>interexp</i></term>
+    ///         <term>::=</term>
+    ///         <term><i>concatexp</i>&#160;<tt><b>&amp;</b></tt>&#160;<i>interexp</i></term>
+    ///         <term>(intersection)</term>
+    ///         <term><small>[OPTIONAL]</small></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><i>concatexp</i></term>
+    ///         <term></term>
+    ///         <term></term>
+    ///     </item>
+    ///     
+    ///     <item>
+    ///         <term><i>concatexp</i></term>
+    ///         <term>::=</term>
+    ///         <term><i>repeatexp</i>&#160;<i>concatexp</i></term>
+    ///         <term>(concatenation)</term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><i>repeatexp</i></term>
+    ///         <term></term>
+    ///         <term></term>
+    ///     </item>
+    ///     
+    ///     <item>
+    ///         <term><i>repeatexp</i></term>
+    ///         <term>::=</term>
+    ///         <term><i>repeatexp</i>&#160;<tt><b>?</b></tt></term>
+    ///         <term>(zero or one occurrence)</term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><i>repeatexp</i>&#160;<tt><b>*</b></tt></term>
+    ///         <term>(zero or more occurrences)</term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><i>repeatexp</i>&#160;<tt><b>+</b></tt></term>
+    ///         <term>(one or more occurrences)</term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><i>repeatexp</i>&#160;<tt><b>{</b><i>n</i><b>}</b></tt></term>
+    ///         <term>(<tt><i>n</i></tt> occurrences)</term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><i>repeatexp</i>&#160;<tt><b>{</b><i>n</i><b>,}</b></tt></term>
+    ///         <term>(<tt><i>n</i></tt> or more occurrences)</term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><i>repeatexp</i>&#160;<tt><b>{</b><i>n</i><b>,</b><i>m</i><b>}</b></tt></term>
+    ///         <term>(<tt><i>n</i></tt> to <tt><i>m</i></tt> occurrences, including both)</term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><i>complexp</i></term>
+    ///         <term></term>
+    ///         <term></term>
+    ///     </item>
+    ///     
+    ///     <item>
+    ///         <term><i>complexp</i></term>
+    ///         <term>::=</term>
+    ///         <term><tt><b>~</b></tt>&#160;<i>complexp</i></term>
+    ///         <term>(complement)</term>
+    ///         <term><small>[OPTIONAL]</small></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><i>charclassexp</i></term>
+    ///         <term></term>
+    ///         <term></term>
+    ///     </item>
+    ///     
+    ///     <item>
+    ///         <term><i>charclassexp</i></term>
+    ///         <term>::=</term>
+    ///         <term><tt><b>[</b></tt>&#160;<i>charclasses</i>&#160;<tt><b>]</b></tt></term>
+    ///         <term>(character class)</term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><tt><b>[^</b></tt>&#160;<i>charclasses</i>&#160;<tt><b>]</b></tt></term>
+    ///         <term>(negated character class)</term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><i>simpleexp</i></term>
+    ///         <term></term>
+    ///         <term></term>
+    ///     </item>
+    ///     
+    ///     <item>
+    ///         <term><i>charclasses</i></term>
+    ///         <term>::=</term>
+    ///         <term><i>charclass</i>&#160;<i>charclasses</i></term>
+    ///         <term></term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><i>charclass</i></term>
+    ///         <term></term>
+    ///         <term></term>
+    ///     </item>
+    ///     
+    ///     <item>
+    ///         <term><i>charclass</i></term>
+    ///         <term>::=</term>
+    ///         <term><i>charexp</i>&#160;<tt><b>-</b></tt>&#160;<i>charexp</i></term>
+    ///         <term>(character range, including end-points)</term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><i>charexp</i></term>
+    ///         <term></term>
+    ///         <term></term>
+    ///     </item>
+    ///     
+    ///     <item>
+    ///         <term><i>simpleexp</i></term>
+    ///         <term>::=</term>
+    ///         <term><i>charexp</i></term>
+    ///         <term></term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><tt><b>.</b></tt></term>
+    ///         <term>(any single character)</term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><tt><b>#</b></tt></term>
+    ///         <term>(the empty language)</term>
+    ///         <term><small>[OPTIONAL]</small></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><tt><b>@</b></tt></term>
+    ///         <term>(any string)</term>
+    ///         <term><small>[OPTIONAL]</small></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><tt><b>"</b></tt>&#160;&lt;Unicode string without double-quotes&gt;&#160; <tt><b>"</b></tt></term>
+    ///         <term>(a string)</term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><tt><b>(</b></tt>&#160;<tt><b>)</b></tt></term>
+    ///         <term>(the empty string)</term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><tt><b>(</b></tt>&#160;<i>unionexp</i>&#160;<tt><b>)</b></tt></term>
+    ///         <term>(precedence override)</term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><tt><b>&lt;</b></tt>&#160;&lt;identifier&gt;&#160;<tt><b>&gt;</b></tt></term>
+    ///         <term>(named automaton)</term>
+    ///         <term><small>[OPTIONAL]</small></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><tt><b>&lt;</b><i>n</i>-<i>m</i><b>&gt;</b></tt></term>
+    ///         <term>(numerical interval)</term>
+    ///         <term><small>[OPTIONAL]</small></term>
+    ///     </item>
+    ///     
+    ///     <item>
+    ///         <term><i>charexp</i></term>
+    ///         <term>::=</term>
+    ///         <term>&lt;Unicode character&gt;</term>
+    ///         <term>(a single non-reserved character)</term>
+    ///         <term></term>
+    ///     </item>
+    ///     <item>
+    ///         <term></term>
+    ///         <term>|</term>
+    ///         <term><tt><b>\</b></tt>&#160;&lt;Unicode character&gt;&#160;</term>
+    ///         <term>(a single character)</term>
+    ///         <term></term>
+    ///     </item>
+    ///     
+    /// </list>
+    /// 
+    /// <para/>
     /// The productions marked <small>[OPTIONAL]</small> are only allowed if
-    /// specified by the syntax flags passed to the <code>RegExp</code> constructor.
+    /// specified by the syntax flags passed to the <see cref="RegExp"/> constructor.
     /// The reserved characters used in the (enabled) syntax must be escaped with
-    /// backslash (<tt><b>\</b></tt>) or double-quotes (<tt><b>"..."</b></tt>). (In
+    /// backslash (<c>\</c>) or double-quotes (<c>"..."</c>). (In
     /// contrast to other regexp syntaxes, this is required also in character
-    /// classes.) Be aware that dash (<tt><b>-</b></tt>) has a special meaning in
+    /// classes.) Be aware that dash (<c>-</c>) has a special meaning in
     /// <i>charclass</i> expressions. An identifier is a string not containing right
-    /// angle bracket (<tt><b>&gt;</b></tt>) or dash (<tt><b>-</b></tt>). Numerical
+    /// angle bracket (<c>&gt;</c>) or dash (<c>-</c>). Numerical
     /// intervals are specified by non-negative decimal integers and include both end
-    /// points, and if <tt><i>n</i></tt> and <tt><i>m</i></tt> have the same number
+    /// points, and if <c>n</c> and <c>m</c> have the same number
     /// of digits, then the conforming strings must have that length (i.e. prefixed
     /// by 0's).
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class RegExp
@@ -406,9 +408,9 @@ namespace Lucene.Net.Util.Automaton
         /// Constructs new <see cref="RegExp"/> from a string. Same as
         /// <c>RegExp(s, RegExpSyntax.ALL)</c>.
         /// </summary>
-        /// <param name="s"> regexp string </param>
-        /// <exception cref="ArgumentException"> if an error occured while parsing the
-        ///              regular expression </exception>
+        /// <param name="s"> Regexp string. </param>
+        /// <exception cref="ArgumentException"> If an error occurred while parsing the
+        ///              regular expression. </exception>
         public RegExp(string s)
             : this(s, RegExpSyntax.ALL)
         {
@@ -417,10 +419,10 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Constructs new <see cref="RegExp"/> from a string.
         /// </summary>
-        /// <param name="s"> regexp string </param>
-        /// <param name="syntax_flags"> boolean 'or' of optional <see cref="RegExpSyntax"/> constructs to be
-        ///          enabled </param>
-        /// <exception cref="ArgumentException"> if an error occured while parsing the
+        /// <param name="s"> Regexp string. </param>
+        /// <param name="syntax_flags"> Boolean 'or' of optional <see cref="RegExpSyntax"/> constructs to be
+        ///          enabled. </param>
+        /// <exception cref="ArgumentException"> If an error occurred while parsing the
         ///              regular expression </exception>
         public RegExp(string s, RegExpSyntax syntax_flags)
         {
@@ -453,8 +455,8 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Constructs new <code>Automaton</code> from this <code>RegExp</code>. Same
-        /// as <code>toAutomaton(null)</code> (empty automaton map).
+        /// Constructs new <see cref="Automaton"/> from this <see cref="RegExp"/>. Same
+        /// as <c>ToAutomaton(null)</c> (empty automaton map).
         /// </summary>
         public virtual Automaton ToAutomaton()
         {
@@ -462,27 +464,27 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Constructs new <code>Automaton</code> from this <code>RegExp</code>. The
+        /// Constructs new <see cref="Automaton"/> from this <see cref="RegExp"/>. The
         /// constructed automaton is minimal and deterministic and has no transitions
         /// to dead states.
         /// </summary>
-        /// <param name="automaton_provider"> provider of automata for named identifiers </param>
-        /// <exception cref="IllegalArgumentException"> if this regular expression uses a named
-        ///              identifier that is not available from the automaton provider </exception>
+        /// <param name="automaton_provider"> Provider of automata for named identifiers. </param>
+        /// <exception cref="ArgumentException"> If this regular expression uses a named
+        ///              identifier that is not available from the automaton provider. </exception>
         public virtual Automaton ToAutomaton(IAutomatonProvider automaton_provider)
         {
             return ToAutomatonAllowMutate(null, automaton_provider);
         }
 
         /// <summary>
-        /// Constructs new <code>Automaton</code> from this <code>RegExp</code>. The
+        /// Constructs new <see cref="Automaton"/> from this <see cref="RegExp"/>. The
         /// constructed automaton is minimal and deterministic and has no transitions
         /// to dead states.
         /// </summary>
-        /// <param name="automata"> a map from automaton identifiers to automata (of type
-        ///          <code>Automaton</code>). </param>
-        /// <exception cref="IllegalArgumentException"> if this regular expression uses a named
-        ///              identifier that does not occur in the automaton map </exception>
+        /// <param name="automata"> A map from automaton identifiers to automata (of type
+        ///          <see cref="Automaton"/>). </param>
+        /// <exception cref="ArgumentException"> If this regular expression uses a named
+        ///              identifier that does not occur in the automaton map. </exception>
         public virtual Automaton ToAutomaton(IDictionary<string, Automaton> automata)
         {
             return ToAutomatonAllowMutate(automata, null);
@@ -493,8 +495,8 @@ namespace Lucene.Net.Util.Automaton
         /// construction uses mutable automata, which is slightly faster but not thread
         /// safe. By default, the flag is not set.
         /// </summary>
-        /// <param name="flag"> if true, the flag is set </param>
-        /// <returns> previous value of the flag </returns>
+        /// <param name="flag"> If <c>true</c>, the flag is set. </param>
+        /// <returns> Previous value of the flag. </returns>
         public virtual bool SetAllowMutate(bool flag)
         {
             bool b = allow_mutation;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/src/Lucene.Net/Util/Automaton/RunAutomaton.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Automaton/RunAutomaton.cs b/src/Lucene.Net/Util/Automaton/RunAutomaton.cs
index 7359c1f..509b526 100644
--- a/src/Lucene.Net/Util/Automaton/RunAutomaton.cs
+++ b/src/Lucene.Net/Util/Automaton/RunAutomaton.cs
@@ -35,7 +35,7 @@ namespace Lucene.Net.Util.Automaton
 {
     /// <summary>
     /// Finite-state automaton with fast run operation.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public abstract class RunAutomaton
@@ -100,6 +100,7 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Returns number of states in automaton.
+        /// <para/>
         /// NOTE: This was size() in Lucene.
         /// </summary>
         public int Count
@@ -139,7 +140,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Gets character class of given codepoint
+        /// Gets character class of given codepoint.
         /// </summary>
         internal int GetCharClass(int c)
         {
@@ -147,10 +148,10 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Constructs a new <code>RunAutomaton</code> from a deterministic
-        /// <code>Automaton</code>.
+        /// Constructs a new <see cref="RunAutomaton"/> from a deterministic
+        /// <see cref="Automaton"/>.
         /// </summary>
-        /// <param name="a"> an automaton </param>
+        /// <param name="a"> An automaton. </param>
         /// <param name="maxInterval"></param>
         /// <param name="tableize"></param>
         public RunAutomaton(Automaton a, int maxInterval, bool tableize)
@@ -205,7 +206,7 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Returns the state obtained by reading the given char from the given state.
         /// Returns -1 if not obtaining any such state. (If the original
-        /// <code>Automaton</code> had no dead states, -1 is returned here if and only
+        /// <see cref="Automaton"/> had no dead states, -1 is returned here if and only
         /// if a dead state is entered in an equivalent automaton with a total
         /// transition function.)
         /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/src/Lucene.Net/Util/Automaton/SortedIntSet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Automaton/SortedIntSet.cs b/src/Lucene.Net/Util/Automaton/SortedIntSet.cs
index 7412746..97258f5 100644
--- a/src/Lucene.Net/Util/Automaton/SortedIntSet.cs
+++ b/src/Lucene.Net/Util/Automaton/SortedIntSet.cs
@@ -23,9 +23,9 @@ namespace Lucene.Net.Util.Automaton
      */
 
     /// <summary>
-    /// Just holds a set of int[] states, plus a corresponding
-    /// int[] count per state.  Used by
-    /// BasicOperations.determinize
+    /// Just holds a set of <see cref="T:int[]"/> states, plus a corresponding
+    /// <see cref="T:int[]"/> count per state.  Used by
+    /// <see cref="BasicOperations.Determinize(Automaton)"/>.
     /// <para/>
     /// NOTE: This was SortedIntSet in Lucene
     /// </summary>


[28/48] lucenenet git commit: Lucene.Net.Util: Fixed XML Documentation comments, types beginning with H-Z

Posted by ni...@apache.org.
Lucene.Net.Util: Fixed XML Documentation comments, types beginning with H-Z


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/268e78d4
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/268e78d4
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/268e78d4

Branch: refs/heads/master
Commit: 268e78d421ba86810646029fddf90c966bf24be5
Parents: dd55920
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Sun Jun 4 23:12:03 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Mon Jun 5 06:16:23 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |   4 +-
 src/Lucene.Net/Util/IOUtils.cs                  | 145 ++++++------
 src/Lucene.Net/Util/InPlaceMergeSorter.cs       |  15 +-
 .../Util/IndexableBinaryStringTools.cs          | 118 +++++-----
 src/Lucene.Net/Util/InfoStream.cs               |  26 ++-
 src/Lucene.Net/Util/IntBlockPool.cs             |  83 +++----
 src/Lucene.Net/Util/IntroSorter.cs              |  19 +-
 src/Lucene.Net/Util/IntsRef.cs                  |  39 ++--
 src/Lucene.Net/Util/LongBitSet.cs               |  52 ++---
 src/Lucene.Net/Util/LongValues.cs               |  16 +-
 src/Lucene.Net/Util/LongsRef.cs                 |  37 +--
 src/Lucene.Net/Util/MapOfSets.cs                |  30 +--
 src/Lucene.Net/Util/MathUtil.cs                 |  68 +++---
 src/Lucene.Net/Util/MergedIterator.cs           | 135 ++---------
 src/Lucene.Net/Util/MutableBits.cs              |   6 +-
 src/Lucene.Net/Util/NumericUtils.cs             | 199 ++++++++--------
 src/Lucene.Net/Util/OfflineSorter.cs            | 100 ++++----
 src/Lucene.Net/Util/OpenBitSet.cs               | 229 ++++++++++---------
 src/Lucene.Net/Util/OpenBitSetDISI.cs           |  22 +-
 src/Lucene.Net/Util/OpenBitSetIterator.cs       |  48 ++--
 src/Lucene.Net/Util/PForDeltaDocIdSet.cs        |  20 +-
 src/Lucene.Net/Util/PagedBytes.cs               |  68 +++---
 src/Lucene.Net/Util/PrintStreamInfoStream.cs    |   8 +-
 src/Lucene.Net/Util/PriorityQueue.cs            | 117 +++++-----
 src/Lucene.Net/Util/QueryBuilder.cs             | 152 ++++++------
 src/Lucene.Net/Util/RamUsageEstimator.cs        | 103 +++++----
 .../Util/RecyclingByteBlockAllocator.cs         |  41 ++--
 .../Util/RecyclingIntBlockAllocator.cs          |  31 ++-
 src/Lucene.Net/Util/RollingBuffer.cs            |  23 +-
 src/Lucene.Net/Util/SPIClassIterator.cs         |   5 +-
 src/Lucene.Net/Util/SentinelIntSet.cs           |  32 +--
 src/Lucene.Net/Util/SetOnce.cs                  |   3 +
 src/Lucene.Net/Util/SloppyMath.cs               |  30 +--
 src/Lucene.Net/Util/SmallFloat.cs               |  24 +-
 src/Lucene.Net/Util/Sorter.cs                   |  13 +-
 src/Lucene.Net/Util/StringHelper.cs             |  46 ++--
 src/Lucene.Net/Util/TimSorter.cs                |  50 ++--
 src/Lucene.Net/Util/ToStringUtils.cs            |   4 +-
 src/Lucene.Net/Util/UnicodeUtil.cs              |  84 ++++---
 src/Lucene.Net/Util/Version.cs                  |  12 +-
 src/Lucene.Net/Util/WAH8DocIdSet.cs             |  94 ++++----
 src/Lucene.Net/Util/WeakIdentityMap.cs          |  92 ++++----
 42 files changed, 1208 insertions(+), 1235 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index cec7578..9fb8f3c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -53,7 +53,9 @@ helpers to help with that, see for examples see our [Java style methods to avoid
 1. Lucene.Net.Core (project)
    1. Codecs (namespace)
    2. Support (namespace)
-   3. Util (namespace) Types starting with I-Z, Util.Automaton, 			Util.Mutable, and Util.Packed.
+   3. Util.Automaton (namespace)
+   4. Util.Mutable (namespace)
+   5. Util.Packed (namespace)
 2. Lucene.Net.Codecs (project)
 
 See [Documenting Lucene.Net](https://cwiki.apache.org/confluence/display/LUCENENET/Documenting+Lucene.Net) for instructions. 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/IOUtils.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/IOUtils.cs b/src/Lucene.Net/Util/IOUtils.cs
index 380f54c..87049da 100644
--- a/src/Lucene.Net/Util/IOUtils.cs
+++ b/src/Lucene.Net/Util/IOUtils.cs
@@ -28,24 +28,24 @@ namespace Lucene.Net.Util
     using Directory = Lucene.Net.Store.Directory;
 
     /// <summary>
-    /// this class emulates the new Java 7 "Try-With-Resources" statement.
+    /// This class emulates the new Java 7 "Try-With-Resources" statement.
     /// Remove once Lucene is on Java 7.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     [ExceptionToClassNameConvention]
     public sealed class IOUtils
     {
         /// <summary>
-        /// UTF-8 <seealso cref="Charset"/> instance to prevent repeated
-        /// <seealso cref="Charset#forName(String)"/> lookups </summary>
-        /// @deprecated Use <seealso cref="StandardCharsets#UTF_8"/> instead.
+        /// UTF-8 <see cref="Encoding"/> instance to prevent repeated
+        /// <see cref="Encoding.UTF8"/> lookups </summary>
         [Obsolete("Use Encoding.UTF8 instead.")]
         public static readonly Encoding CHARSET_UTF_8 = Encoding.UTF8;
 
         /// <summary>
         /// UTF-8 charset string.
         /// <para/>Where possible, use <see cref="Encoding.UTF8"/> instead,
-        /// as using the String constant may slow things down. </summary>
+        /// as using the <see cref="string"/> constant may slow things down. </summary>
         /// <seealso cref="Encoding.UTF8"/>
         public static readonly string UTF_8 = "UTF-8";
 
@@ -54,26 +54,32 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// <p>Closes all given <tt>IDisposable</tt>s, suppressing all thrown exceptions. Some of the <tt>IDisposable</tt>s
-        /// may be null, they are ignored. After everything is closed, method either throws <tt>priorException</tt>,
-        /// if one is supplied, or the first of suppressed exceptions, or completes normally.</p>
-        /// <p>Sample usage:<br/>
-        /// <pre class="prettyprint">
+        /// <para>Disposes all given <c>IDisposable</c>s, suppressing all thrown exceptions. Some of the <c>IDisposable</c>s
+        /// may be <c>null</c>, they are ignored. After everything is disposed, method either throws <paramref name="priorException"/>,
+        /// if one is supplied, or the first of suppressed exceptions, or completes normally.</para>
+        /// <para>Sample usage:
+        /// <code>
         /// IDisposable resource1 = null, resource2 = null, resource3 = null;
         /// ExpectedException priorE = null;
-        /// try {
-        ///   resource1 = ...; resource2 = ...; resource3 = ...; // Acquisition may throw ExpectedException
-        ///   ..do..stuff.. // May throw ExpectedException
-        /// } catch (ExpectedException e) {
-        ///   priorE = e;
-        /// } finally {
-        ///   closeWhileHandlingException(priorE, resource1, resource2, resource3);
+        /// try 
+        /// {
+        ///     resource1 = ...; resource2 = ...; resource3 = ...; // Acquisition may throw ExpectedException
+        ///     ..do..stuff.. // May throw ExpectedException
+        /// } 
+        /// catch (ExpectedException e) 
+        /// {
+        ///     priorE = e;
+        /// } 
+        /// finally 
+        /// {
+        ///     CloseWhileHandlingException(priorE, resource1, resource2, resource3);
         /// }
-        /// </pre>
-        /// </p> </summary>
-        /// <param name="priorException">  <tt>null</tt> or an exception that will be rethrown after method completion </param>
-        /// <param name="objects">         objects to call <tt>close()</tt> on </param>
-        public static void CloseWhileHandlingException(Exception priorException, params IDisposable[] objects)
+        /// </code>
+        /// </para> 
+        /// </summary>
+        /// <param name="priorException">  <c>null</c> or an exception that will be rethrown after method completion. </param>
+        /// <param name="objects">         Objects to call <see cref="IDisposable.Dispose()"/> on. </param>
+        public static void CloseWhileHandlingException(Exception priorException, params IDisposable[] objects) // LUCENENET TODO: API rename DisposeWhileHandlingException()
         {
             Exception th = null;
 
@@ -107,9 +113,9 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Closes all given <tt>IDisposable</tt>s, suppressing all thrown exceptions. </summary>
-        /// <seealso> cref= #closeWhileHandlingException(Exception, IDisposable...)  </seealso>
-        public static void CloseWhileHandlingException(Exception priorException, IEnumerable<IDisposable> objects)
+        /// Disposes all given <see cref="IDisposable"/>s, suppressing all thrown exceptions. </summary>
+        /// <seealso cref="CloseWhileHandlingException(Exception, IDisposable[])"/>
+        public static void CloseWhileHandlingException(Exception priorException, IEnumerable<IDisposable> objects) // LUCENENET TODO: API rename DisposeWhileHandlingException()
         {
             Exception th = null;
 
@@ -143,15 +149,15 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Closes all given <tt>IDisposable</tt>s.  Some of the
-        /// <tt>IDisposable</tt>s may be null; they are
+        /// Disposes all given <see cref="IDisposable"/>s.  Some of the
+        /// <see cref="IDisposable"/>s may be <c>null</c>; they are
         /// ignored.  After everything is disposed, the method either
         /// throws the first exception it hit while disposing, or
         /// completes normally if there were no exceptions.
         /// </summary>
         /// <param name="objects">
-        ///          objects to call <tt>close()</tt> on </param>
-        public static void Close(params IDisposable[] objects)
+        ///          Objects to call <see cref="IDisposable.Dispose()"/> on </param>
+        public static void Close(params IDisposable[] objects) // LUCENENET TODO: API rename Dispose()
         {
             Exception th = null;
 
@@ -178,9 +184,9 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Closes all given <tt>IDisposable</tt>s. </summary>
-        /// <seealso cref= #close(IDisposable...) </seealso>
-        public static void Close(IEnumerable<IDisposable> objects)
+        /// Disposes all given <see cref="IDisposable"/>s. </summary>
+        /// <seealso cref="Close(IDisposable[])"/>
+        public static void Close(IEnumerable<IDisposable> objects) // LUCENENET TODO: API rename Dispose()
         {
             Exception th = null;
 
@@ -207,12 +213,12 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Closes all given <tt>IDisposable</tt>s, suppressing all thrown exceptions.
-        /// Some of the <tt>IDisposable</tt>s may be null, they are ignored.
+        /// Disposes all given <see cref="IDisposable"/>s, suppressing all thrown exceptions.
+        /// Some of the <see cref="IDisposable"/>s may be <c>null</c>, they are ignored.
         /// </summary>
         /// <param name="objects">
-        ///          objects to call <tt>close()</tt> on </param>
-        public static void CloseWhileHandlingException(params IDisposable[] objects)
+        ///          Objects to call <see cref="IDisposable.Dispose()"/> on </param>
+        public static void CloseWhileHandlingException(params IDisposable[] objects) // LUCENENET TODO: API rename DisposeWhileHandlingException()
         {
             foreach (var o in objects)
             {
@@ -231,9 +237,9 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Closes all given <tt>IDisposable</tt>s, suppressing all thrown exceptions. </summary>
-        /// <seealso cref= #closeWhileHandlingException(IDisposable...) </seealso>
-        public static void CloseWhileHandlingException(IEnumerable<IDisposable> objects)
+        /// Disposes all given <see cref="IDisposable"/>s, suppressing all thrown exceptions. </summary>
+        /// <seealso cref="CloseWhileHandlingException(IDisposable[])"/>
+        public static void CloseWhileHandlingException(IEnumerable<IDisposable> objects) // LUCENENET TODO: API rename DisposeWhileHandlingException()
         {
             foreach (IDisposable @object in objects)
             {
@@ -270,33 +276,33 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Wrapping the given <seealso cref="InputStream"/> in a reader using a <seealso cref="CharsetDecoder"/>.
+        /// Wrapping the given <see cref="Stream"/> in a reader using a <see cref="Encoding"/>.
         /// Unlike Java's defaults this reader will throw an exception if it detects
-        /// the read charset doesn't match the expected <seealso cref="Charset"/>.
-        /// <p>
+        /// the read charset doesn't match the expected <see cref="Encoding"/>.
+        /// <para/>
         /// Decoding readers are useful to load configuration files, stopword lists or synonym files
         /// to detect character set problems. However, it's not recommended to use as a common purpose
         /// reader.
         /// </summary>
-        /// <param name="stream"> the stream to wrap in a reader </param>
-        /// <param name="charSet"> the expected charset </param>
-        /// <returns> a wrapping reader </returns>
+        /// <param name="stream"> The stream to wrap in a reader </param>
+        /// <param name="charSet"> The expected charset </param>
+        /// <returns> A wrapping reader </returns>
         public static TextReader GetDecodingReader(Stream stream, Encoding charSet)
         {
             return new StreamReader(stream, charSet);
         }
 
         /// <summary>
-        /// Opens a TextReader for the given <seealso cref="File"/> using a <seealso cref="CharsetDecoder"/>.
+        /// Opens a <see cref="TextReader"/> for the given <see cref="FileInfo"/> using a <see cref="Encoding"/>.
         /// Unlike Java's defaults this reader will throw an exception if it detects
-        /// the read charset doesn't match the expected <seealso cref="Charset"/>.
-        /// <p>
+        /// the read charset doesn't match the expected <see cref="Encoding"/>.
+        /// <para/>
         /// Decoding readers are useful to load configuration files, stopword lists or synonym files
         /// to detect character set problems. However, it's not recommended to use as a common purpose
         /// reader. </summary>
-        /// <param name="file"> the file to open a reader on </param>
-        /// <param name="charSet"> the expected charset </param>
-        /// <returns> a reader to read the given file </returns>
+        /// <param name="file"> The file to open a reader on </param>
+        /// <param name="charSet"> The expected charset </param>
+        /// <returns> A reader to read the given file </returns>
         public static TextReader GetDecodingReader(FileInfo file, Encoding charSet)
         {
             FileStream stream = null;
@@ -318,18 +324,17 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Opens a TextReader for the given resource using a <seealso cref="CharsetDecoder"/>.
+        /// Opens a <see cref="TextReader"/> for the given resource using a <see cref="Encoding"/>.
         /// Unlike Java's defaults this reader will throw an exception if it detects
-        /// the read charset doesn't match the expected <seealso cref="Charset"/>.
-        /// <p>
+        /// the read charset doesn't match the expected <see cref="Encoding"/>.
+        /// <para/>
         /// Decoding readers are useful to load configuration files, stopword lists or synonym files
         /// to detect character set problems. However, it's not recommended to use as a common purpose
         /// reader. </summary>
-        /// <param name="clazz"> the class used to locate the resource </param>
-        /// <param name="resource"> the resource name to load </param>
-        /// <param name="charSet"> the expected charset </param>
-        /// <returns> a reader to read the given file
-        ///  </returns>
+        /// <param name="clazz"> The class used to locate the resource </param>
+        /// <param name="resource"> The resource name to load </param>
+        /// <param name="charSet"> The expected charset </param>
+        /// <returns> A reader to read the given file </returns>
         public static TextReader GetDecodingReader(Type clazz, string resource, Encoding charSet)
         {
             Stream stream = null;
@@ -351,9 +356,9 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Deletes all given files, suppressing all thrown IOExceptions.
-        /// <p>
-        /// Note that the files should not be null.
+        /// Deletes all given files, suppressing all thrown <see cref="Exception"/>s.
+        /// <para/>
+        /// Note that the files should not be <c>null</c>.
         /// </summary>
         public static void DeleteFilesIgnoringExceptions(Directory dir, params string[] files)
         {
@@ -398,9 +403,9 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Simple utility method that takes a previously caught
-        /// {@code Throwable} and rethrows either {@code
-        /// IOException} or an unchecked exception.  If the
-        /// argument is null then this method does nothing.
+        /// <see cref="Exception"/> and rethrows either 
+        /// <see cref="IOException"/> or an unchecked exception.  If the
+        /// argument is <c>null</c> then this method does nothing.
         /// </summary>
         public static void ReThrow(Exception th)
         {
@@ -416,12 +421,12 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Simple utility method that takes a previously caught
-        /// {@code Throwable} and rethrows it as an unchecked exception.
-        /// If the argument is null then this method does nothing.
+        /// <see cref="Exception"/> and rethrows it as an unchecked exception.
+        /// If the argument is <c>null</c> then this method does nothing.
         /// </summary>
         public static void ReThrowUnchecked(Exception th)
         {
-            if (th != null)
+            if (th != null) // LUCENENET TODO: BUG - In Lucene we throw a new exception type in some cases which may not be caught in a different layer
             {
                 throw th;
             }
@@ -429,8 +434,8 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Ensure that any writes to the given file is written to the storage device that contains it. </summary>
-        /// <param name="fileToSync"> the file to fsync </param>
-        /// <param name="isDir"> if true, the given file is a directory (we open for read and ignore IOExceptions,
+        /// <param name="fileToSync"> The file to fsync </param>
+        /// <param name="isDir"> If <c>true</c>, the given file is a directory (we open for read and ignore <see cref="IOException"/>s,
         ///  because not all file systems and operating systems allow to fsync on a directory) </param>
         public static void Fsync(string fileToSync, bool isDir)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/InPlaceMergeSorter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/InPlaceMergeSorter.cs b/src/Lucene.Net/Util/InPlaceMergeSorter.cs
index 14c578b..31bda95 100644
--- a/src/Lucene.Net/Util/InPlaceMergeSorter.cs
+++ b/src/Lucene.Net/Util/InPlaceMergeSorter.cs
@@ -18,19 +18,24 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// <seealso cref="Sorter"/> implementation based on the merge-sort algorithm that merges
-    ///  in place (no extra memory will be allocated). Small arrays are sorted with
-    ///  insertion sort.
-    ///  @lucene.internal
+    /// <see cref="Sorter"/> implementation based on the merge-sort algorithm that merges
+    /// in place (no extra memory will be allocated). Small arrays are sorted with
+    /// insertion sort.
+    /// <para/>
+    /// @lucene.internal
     /// </summary>
     public abstract class InPlaceMergeSorter : Sorter
     {
         /// <summary>
-        /// Create a new <seealso cref="InPlaceMergeSorter"/> </summary>
+        /// Create a new <see cref="InPlaceMergeSorter"/> </summary>
         public InPlaceMergeSorter()
         {
         }
 
+        /// <summary>
+        /// Sort the slice which starts at <paramref name="from"/> (inclusive) and ends at
+        /// <paramref name="to"/> (exclusive).
+        /// </summary>
         public override sealed void Sort(int from, int to)
         {
             CheckRange(from, to);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/IndexableBinaryStringTools.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/IndexableBinaryStringTools.cs b/src/Lucene.Net/Util/IndexableBinaryStringTools.cs
index aad4d03..efb1182 100644
--- a/src/Lucene.Net/Util/IndexableBinaryStringTools.cs
+++ b/src/Lucene.Net/Util/IndexableBinaryStringTools.cs
@@ -21,27 +21,25 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// Provides support for converting byte sequences to Strings and back again.
-    /// The resulting Strings preserve the original byte sequences' sort order.
-    /// <p/>
-    /// The Strings are constructed using a Base 8000h encoding of the original
-    /// binary data - each char of an encoded String represents a 15-bit chunk
+    /// Provides support for converting byte sequences to <see cref="string"/>s and back again.
+    /// The resulting <see cref="string"/>s preserve the original byte sequences' sort order.
+    /// <para/>
+    /// The <see cref="string"/>s are constructed using a Base 8000h encoding of the original
+    /// binary data - each char of an encoded <see cref="string"/> represents a 15-bit chunk
     /// from the byte sequence.  Base 8000h was chosen because it allows for all
     /// lower 15 bits of char to be used without restriction; the surrogate range
     /// [U+D8000-U+DFFF] does not represent valid chars, and would require
     /// complicated handling to avoid them and allow use of char's high bit.
-    /// <p/>
+    /// <para/>
     /// Although unset bits are used as padding in the final char, the original
     /// byte sequence could contain trailing bytes with no set bits (null bytes):
     /// padding is indistinguishable from valid information.  To overcome this
     /// problem, a char is appended, indicating the number of encoded bytes in the
     /// final content char.
-    /// <p/>
-    ///
-    /// @lucene.experimental </summary>
-    /// @deprecated Implement <seealso cref="ITermToBytesRefAttribute"/> and store bytes directly
-    /// instead. this class will be removed in Lucene 5.0
-    [Obsolete("Implement ITermToBytesRefAttribute and store bytes directly")]
+    /// <para/>
+    /// @lucene.experimental 
+    /// </summary>
+    [Obsolete("Implement Analysis.TokenAttributes.ITermToBytesRefAttribute and store bytes directly instead. this class will be removed in Lucene 5.0")]
     public sealed class IndexableBinaryStringTools
     {
         private static readonly CodingCase[] CODING_CASES = new CodingCase[] {
@@ -63,12 +61,12 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the number of chars required to encode the given bytes.
+        /// Returns the number of chars required to encode the given <see cref="byte"/>s.
         /// </summary>
-        /// <param name="inputArray"> byte sequence to be encoded </param>
-        /// <param name="inputOffset"> initial offset into inputArray </param>
-        /// <param name="inputLength"> number of bytes in inputArray </param>
-        /// <returns> The number of chars required to encode the number of bytes. </returns>
+        /// <param name="inputArray"> Byte sequence to be encoded </param>
+        /// <param name="inputOffset"> Initial offset into <paramref name="inputArray"/> </param>
+        /// <param name="inputLength"> Number of bytes in <paramref name="inputArray"/> </param>
+        /// <returns> The number of chars required to encode the number of <see cref="byte"/>s. </returns>
         // LUCENENET specific overload for CLS compliance
         public static int GetEncodedLength(byte[] inputArray, int inputOffset, int inputLength)
         {
@@ -77,12 +75,12 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the number of chars required to encode the given sbytes.
+        /// Returns the number of chars required to encode the given <see cref="sbyte"/>s.
         /// </summary>
-        /// <param name="inputArray"> sbyte sequence to be encoded </param>
-        /// <param name="inputOffset"> initial offset into inputArray </param>
-        /// <param name="inputLength"> number of sbytes in inputArray </param>
-        /// <returns> The number of chars required to encode the number of sbytes. </returns>
+        /// <param name="inputArray"> <see cref="sbyte"/> sequence to be encoded </param>
+        /// <param name="inputOffset"> Initial offset into <paramref name="inputArray"/> </param>
+        /// <param name="inputLength"> Number of sbytes in <paramref name="inputArray"/> </param>
+        /// <returns> The number of chars required to encode the number of <see cref="sbyte"/>s. </returns>
         [CLSCompliant(false)]
         public static int GetEncodedLength(sbyte[] inputArray, int inputOffset, int inputLength)
         {
@@ -91,12 +89,12 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the number of bytes required to decode the given char sequence.
+        /// Returns the number of <see cref="byte"/>s required to decode the given char sequence.
         /// </summary>
-        /// <param name="encoded"> char sequence to be decoded </param>
-        /// <param name="offset"> initial offset </param>
-        /// <param name="length"> number of characters </param>
-        /// <returns> The number of bytes required to decode the given char sequence </returns>
+        /// <param name="encoded"> Char sequence to be decoded </param>
+        /// <param name="offset"> Initial offset </param>
+        /// <param name="length"> Number of characters </param>
+        /// <returns> The number of <see cref="byte"/>s required to decode the given char sequence </returns>
         public static int GetDecodedLength(char[] encoded, int offset, int length)
         {
             int numChars = length - 1;
@@ -114,16 +112,16 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Encodes the input sbyte sequence into the output char sequence.  Before
+        /// Encodes the input <see cref="byte"/> sequence into the output char sequence.  Before
         /// calling this method, ensure that the output array has sufficient
-        /// capacity by calling <seealso cref="#getEncodedLength(byte[], int, int)"/>.
+        /// capacity by calling <see cref="GetEncodedLength(byte[], int, int)"/>.
         /// </summary>
-        /// <param name="inputArray"> sbyte sequence to be encoded </param>
-        /// <param name="inputOffset"> initial offset into inputArray </param>
-        /// <param name="inputLength"> number of bytes in inputArray </param>
-        /// <param name="outputArray"> char sequence to store encoded result </param>
-        /// <param name="outputOffset"> initial offset into outputArray </param>
-        /// <param name="outputLength"> length of output, must be getEncodedLength </param>
+        /// <param name="inputArray"> <see cref="byte"/> sequence to be encoded </param>
+        /// <param name="inputOffset"> Initial offset into <paramref name="inputArray"/> </param>
+        /// <param name="inputLength"> Number of bytes in <paramref name="inputArray"/> </param>
+        /// <param name="outputArray"> <see cref="char"/> sequence to store encoded result </param>
+        /// <param name="outputOffset"> Initial offset into outputArray </param>
+        /// <param name="outputLength"> Length of output, must be GetEncodedLength(inputArray, inputOffset, inputLength) </param>
         // LUCENENET specific overload for CLS compliance
         public static void Encode(byte[] inputArray, int inputOffset, int inputLength, char[] outputArray, int outputOffset, int outputLength)
         {
@@ -131,16 +129,16 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Encodes the input sbyte sequence into the output char sequence.  Before
+        /// Encodes the input <see cref="sbyte"/> sequence into the output char sequence.  Before
         /// calling this method, ensure that the output array has sufficient
-        /// capacity by calling <seealso cref="#getEncodedLength(byte[], int, int)"/>.
+        /// capacity by calling <see cref="GetEncodedLength(sbyte[], int, int)"/>.
         /// </summary>
-        /// <param name="inputArray"> sbyte sequence to be encoded </param>
-        /// <param name="inputOffset"> initial offset into inputArray </param>
-        /// <param name="inputLength"> number of bytes in inputArray </param>
-        /// <param name="outputArray"> char sequence to store encoded result </param>
-        /// <param name="outputOffset"> initial offset into outputArray </param>
-        /// <param name="outputLength"> length of output, must be getEncodedLength </param>
+        /// <param name="inputArray"> <see cref="sbyte"/> sequence to be encoded </param>
+        /// <param name="inputOffset"> Initial offset into <paramref name="inputArray"/> </param>
+        /// <param name="inputLength"> Number of bytes in <paramref name="inputArray"/> </param>
+        /// <param name="outputArray"> <see cref="char"/> sequence to store encoded result </param>
+        /// <param name="outputOffset"> Initial offset into outputArray </param>
+        /// <param name="outputLength"> Length of output, must be GetEncodedLength(inputArray, inputOffset, inputLength) </param>
         [CLSCompliant(false)]
         public static void Encode(sbyte[] inputArray, int inputOffset, int inputLength, char[] outputArray, int outputOffset, int outputLength)
         {
@@ -192,17 +190,17 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Decodes the input char sequence into the output byte sequence. Before
+        /// Decodes the input <see cref="char"/> sequence into the output <see cref="byte"/> sequence. Before
         /// calling this method, ensure that the output array has sufficient capacity
-        /// by calling <seealso cref="#getDecodedLength(char[], int, int)"/>.
+        /// by calling <see cref="GetDecodedLength(char[], int, int)"/>.
         /// </summary>
-        /// <param name="inputArray"> char sequence to be decoded </param>
-        /// <param name="inputOffset"> initial offset into inputArray </param>
-        /// <param name="inputLength"> number of chars in inputArray </param>
-        /// <param name="outputArray"> byte sequence to store encoded result </param>
-        /// <param name="outputOffset"> initial offset into outputArray </param>
-        /// <param name="outputLength"> length of output, must be
-        ///        getDecodedLength(inputArray, inputOffset, inputLength) </param>
+        /// <param name="inputArray"> <see cref="char"/> sequence to be decoded </param>
+        /// <param name="inputOffset"> Initial offset into <paramref name="inputArray"/> </param>
+        /// <param name="inputLength"> Number of chars in <paramref name="inputArray"/> </param>
+        /// <param name="outputArray"> <see cref="byte"/> sequence to store encoded result </param>
+        /// <param name="outputOffset"> Initial offset into outputArray </param>
+        /// <param name="outputLength"> Length of output, must be
+        ///        GetDecodedLength(inputArray, inputOffset, inputLength) </param>
         // LUCENENET specific overload for CLS compliance
         public static void Decode(char[] inputArray, int inputOffset, int inputLength, byte[] outputArray, int outputOffset, int outputLength)
         {
@@ -212,15 +210,15 @@ namespace Lucene.Net.Util
         /// <summary>
         /// Decodes the input char sequence into the output sbyte sequence. Before
         /// calling this method, ensure that the output array has sufficient capacity
-        /// by calling <seealso cref="#getDecodedLength(char[], int, int)"/>.
+        /// by calling <see cref="GetDecodedLength(char[], int, int)"/>.
         /// </summary>
-        /// <param name="inputArray"> char sequence to be decoded </param>
-        /// <param name="inputOffset"> initial offset into inputArray </param>
-        /// <param name="inputLength"> number of chars in inputArray </param>
-        /// <param name="outputArray"> byte sequence to store encoded result </param>
-        /// <param name="outputOffset"> initial offset into outputArray </param>
-        /// <param name="outputLength"> length of output, must be
-        ///        getDecodedLength(inputArray, inputOffset, inputLength) </param>
+        /// <param name="inputArray"> <see cref="char"/> sequence to be decoded </param>
+        /// <param name="inputOffset"> Initial offset into <paramref name="inputArray"/> </param>
+        /// <param name="inputLength"> Number of chars in <paramref name="inputArray"/> </param>
+        /// <param name="outputArray"> <see cref="byte"/> sequence to store encoded result </param>
+        /// <param name="outputOffset"> Initial offset into outputArray </param>
+        /// <param name="outputLength"> Length of output, must be
+        ///        GetDecodedLength(inputArray, inputOffset, inputLength) </param>
         [CLSCompliant(false)]
         public static void Decode(char[] inputArray, int inputOffset, int inputLength, sbyte[] outputArray, int outputOffset, int outputLength)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/InfoStream.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/InfoStream.cs b/src/Lucene.Net/Util/InfoStream.cs
index 8d97037..da90d24 100644
--- a/src/Lucene.Net/Util/InfoStream.cs
+++ b/src/Lucene.Net/Util/InfoStream.cs
@@ -21,18 +21,18 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// Debugging API for Lucene classes such as <seealso cref="IndexWriter"/>
-    /// and <seealso cref="SegmentInfos"/>.
-    /// <p>
+    /// Debugging API for Lucene classes such as <see cref="Index.IndexWriter"/>
+    /// and <see cref="Index.SegmentInfos"/>.
+    /// <para>
     /// NOTE: Enabling infostreams may cause performance degradation
     /// in some components.
-    ///
+    /// </para>
     /// @lucene.internal
     /// </summary>
     public abstract class InfoStream : IDisposable
     {
         /// <summary>
-        /// Instance of InfoStream that does no logging at all. </summary>
+        /// Instance of <see cref="InfoStream"/> that does no logging at all. </summary>
         public static readonly InfoStream NO_OUTPUT = new NoOutput();
 
         private sealed class NoOutput : InfoStream
@@ -53,18 +53,17 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// prints a message </summary>
+        /// Prints a message </summary>
         public abstract void Message(string component, string message);
 
         /// <summary>
-        /// returns true if messages are enabled and should be posted to <seealso cref="#message"/>. </summary>
+        /// Returns <c>true</c> if messages are enabled and should be posted to <see cref="Message(string, string)"/>. </summary>
         public abstract bool IsEnabled(string component);
 
         private static InfoStream defaultInfoStream = NO_OUTPUT;
 
         /// <summary>
-        /// The default {@code InfoStream} used by a newly instantiated classes. </summary>
-        /// <seealso cref= #setDefault  </seealso>
+        /// Gets or Sets the default <see cref="InfoStream"/> used by a newly instantiated classes. </summary>
         public static InfoStream Default
         {
             get
@@ -88,16 +87,25 @@ namespace Lucene.Net.Util
         }
 
         // LUCENENET specific - implementing proper dispose pattern
+        /// <summary>
+        /// Disposes this <see cref="InfoStream"/>
+        /// </summary>
         public void Dispose()
         {
             Dispose(true);
             GC.SuppressFinalize(this);
         }
 
+        /// <summary>
+        /// Disposes this <see cref="InfoStream"/>
+        /// </summary>
         protected virtual void Dispose(bool disposing)
         {
         }
 
+        /// <summary>
+        /// Clones this <see cref="InfoStream"/>
+        /// </summary>
         public virtual object Clone()
         {
             try

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/IntBlockPool.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/IntBlockPool.cs b/src/Lucene.Net/Util/IntBlockPool.cs
index 18180e9..2201258 100644
--- a/src/Lucene.Net/Util/IntBlockPool.cs
+++ b/src/Lucene.Net/Util/IntBlockPool.cs
@@ -23,9 +23,10 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// A pool for int blocks similar to <seealso cref="ByteBlockPool"/>.
+    /// A pool for <see cref="int"/> blocks similar to <see cref="ByteBlockPool"/>.
     /// <para/>
     /// NOTE: This was IntBlockPool in Lucene
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public sealed class Int32BlockPool
@@ -46,7 +47,7 @@ namespace Lucene.Net.Util
         public static readonly int INT32_BLOCK_MASK = INT32_BLOCK_SIZE - 1;
 
         /// <summary>
-        /// Abstract class for allocating and freeing int
+        /// Abstract class for allocating and freeing <see cref="int"/>
         /// blocks.
         /// </summary>
         public abstract class Allocator
@@ -73,11 +74,11 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// A simple <seealso cref="Allocator"/> that never recycles. </summary>
+        /// A simple <see cref="Allocator"/> that never recycles. </summary>
         public sealed class DirectAllocator : Allocator
         {
             /// <summary>
-            /// Creates a new <seealso cref="DirectAllocator"/> with a default block size
+            /// Creates a new <see cref="DirectAllocator"/> with a default block size
             /// </summary>
             public DirectAllocator()
                 : base(INT32_BLOCK_SIZE)
@@ -93,7 +94,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// array of buffers currently used in the pool. Buffers are allocated if needed don't modify this outside of this class </summary>
+        /// Array of buffers currently used in the pool. Buffers are allocated if needed; don't modify this outside of this class. </summary>
         [WritableArray]
         [SuppressMessage("Microsoft.Performance", "CA1819", Justification = "Lucene's design requires some writable array properties")]
         public int[][] Buffers
@@ -104,7 +105,7 @@ namespace Lucene.Net.Util
         private int[][] buffers = new int[10][];
 
         /// <summary>
-        /// index into the buffers array pointing to the current buffer used as the head </summary>
+        /// Index into the buffers array pointing to the current buffer used as the head. </summary>
         private int bufferUpto = -1;
 
         /// <summary>
@@ -115,7 +116,7 @@ namespace Lucene.Net.Util
         public int Int32Upto { get; set; }
 
         /// <summary>
-        /// Current head buffer </summary>
+        /// Current head buffer. </summary>
         [WritableArray]
         [SuppressMessage("Microsoft.Performance", "CA1819", Justification = "Lucene's design requires some writable array properties")]
         public int[] Buffer
@@ -126,7 +127,7 @@ namespace Lucene.Net.Util
         private int[] buffer;
 
         /// <summary>
-        /// Current head offset 
+        /// Current head offset. 
         /// <para/>
         /// NOTE: This was intOffset in Lucene
         /// </summary>
@@ -135,16 +136,16 @@ namespace Lucene.Net.Util
         private readonly Allocator allocator;
 
         /// <summary>
-        /// Creates a new <seealso cref="Int32BlockPool"/> with a default <seealso cref="Allocator"/>. </summary>
-        /// <seealso cref= Int32BlockPool#nextBuffer() </seealso>
+        /// Creates a new <see cref="Int32BlockPool"/> with a default <see cref="Allocator"/>. </summary>
+        /// <seealso cref="Int32BlockPool.NextBuffer()"/>
         public Int32BlockPool()
             : this(new DirectAllocator())
         {
         }
 
         /// <summary>
-        /// Creates a new <seealso cref="Int32BlockPool"/> with the given <seealso cref="Allocator"/>. </summary>
-        /// <seealso cref= Int32BlockPool#nextBuffer() </seealso>
+        /// Creates a new <see cref="Int32BlockPool"/> with the given <see cref="Allocator"/>. </summary>
+        /// <seealso cref="Int32BlockPool.NextBuffer()"/>
         public Int32BlockPool(Allocator allocator)
         {
             // set defaults
@@ -156,7 +157,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Resets the pool to its initial state reusing the first buffer. Calling
-        /// <seealso cref="Int32BlockPool#nextBuffer()"/> is not needed after reset.
+        /// <see cref="Int32BlockPool.NextBuffer()"/> is not needed after reset.
         /// </summary>
         public void Reset()
         {
@@ -165,12 +166,12 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Expert: Resets the pool to its initial state reusing the first buffer. </summary>
-        /// <param name="zeroFillBuffers"> if <code>true</code> the buffers are filled with <tt>0</tt>.
-        ///        this should be set to <code>true</code> if this pool is used with
-        ///        <seealso cref="SliceWriter"/>. </param>
-        /// <param name="reuseFirst"> if <code>true</code> the first buffer will be reused and calling
-        ///        <seealso cref="Int32BlockPool#nextBuffer()"/> is not needed after reset iff the
-        ///        block pool was used before ie. <seealso cref="Int32BlockPool#nextBuffer()"/> was called before. </param>
+        /// <param name="zeroFillBuffers"> If <c>true</c> the buffers are filled with <c>0</c>.
+        ///        This should be set to <c>true</c> if this pool is used with
+        ///        <see cref="SliceWriter"/>. </param>
+        /// <param name="reuseFirst"> If <c>true</c> the first buffer will be reused and calling
+        ///        <see cref="Int32BlockPool.NextBuffer()"/> is not needed after reset if the
+        ///        block pool was used before, i.e. <see cref="Int32BlockPool.NextBuffer()"/> was called before. </param>
         public void Reset(bool zeroFillBuffers, bool reuseFirst)
         {
             if (bufferUpto != -1)
@@ -214,9 +215,9 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Advances the pool to its next buffer. this method should be called once
+        /// Advances the pool to its next buffer. This method should be called once
         /// after the constructor to initialize the pool. In contrast to the
-        /// constructor a <seealso cref="Int32BlockPool#reset()"/> call will advance the pool to
+        /// constructor a <see cref="Int32BlockPool.Reset()"/> call will advance the pool to
         /// its first buffer immediately.
         /// </summary>
         public void NextBuffer()
@@ -235,8 +236,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a new int slice with the given starting size and returns the slices offset in the pool. </summary>
-        /// <seealso cref= SliceReader </seealso>
+        /// Creates a new <see cref="int"/> slice with the given starting size and returns the slices offset in the pool. </summary>
+        /// <seealso cref="SliceReader"/>
         private int NewSlice(int size)
         {
             if (Int32Upto > INT32_BLOCK_SIZE - size)
@@ -264,23 +265,23 @@ namespace Lucene.Net.Util
         // no need to make this public unless we support different sizes
         // TODO make the levels and the sizes configurable
         /// <summary>
-        /// An array holding the offset into the <seealso cref="Int32BlockPool#LEVEL_SIZE_ARRAY"/>
+        /// An array holding the offset into the <see cref="Int32BlockPool.LEVEL_SIZE_ARRAY"/>
         /// to quickly navigate to the next slice level.
         /// </summary>
         private static readonly int[] NEXT_LEVEL_ARRAY = new int[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 9 };
 
         /// <summary>
-        /// An array holding the level sizes for int slices.
+        /// An array holding the level sizes for <see cref="int"/> slices.
         /// </summary>
         private static readonly int[] LEVEL_SIZE_ARRAY = new int[] { 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024 };
 
         /// <summary>
-        /// The first level size for new slices
+        /// The first level size for new slices.
         /// </summary>
         private static readonly int FIRST_LEVEL_SIZE = LEVEL_SIZE_ARRAY[0];
 
         /// <summary>
-        /// Allocates a new slice from the given offset
+        /// Allocates a new slice from the given offset.
         /// </summary>
         private int AllocSlice(int[] slice, int sliceOffset)
         {
@@ -307,10 +308,11 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// A <seealso cref="SliceWriter"/> that allows to write multiple integer slices into a given <seealso cref="Int32BlockPool"/>.
+        /// A <see cref="SliceWriter"/> that allows to write multiple integer slices into a given <see cref="Int32BlockPool"/>.
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
-        ///  <seealso cref= SliceReader
-        ///  @lucene.internal </seealso>
+        /// <seealso cref="SliceReader"/> 
         public class SliceWriter
         {
             private int offset;
@@ -349,8 +351,8 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// starts a new slice and returns the start offset. The returned value
-            /// should be used as the start offset to initialize a <seealso cref="SliceReader"/>.
+            /// Starts a new slice and returns the start offset. The returned value
+            /// should be used as the start offset to initialize a <see cref="SliceReader"/>.
             /// </summary>
             public virtual int StartNewSlice()
             {
@@ -359,7 +361,7 @@ namespace Lucene.Net.Util
 
             /// <summary>
             /// Returns the offset of the currently written slice. The returned value
-            /// should be used as the end offset to initialize a <seealso cref="SliceReader"/> once
+            /// should be used as the end offset to initialize a <see cref="SliceReader"/> once
             /// this slice is fully written or to reset the this writer if another slice
             /// needs to be written.
             /// </summary>
@@ -373,7 +375,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// A <seealso cref="SliceReader"/> that can read int slices written by a <seealso cref="SliceWriter"/>
+        /// A <see cref="SliceReader"/> that can read <see cref="int"/> slices written by a <see cref="SliceWriter"/>.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public sealed class SliceReader
@@ -388,7 +391,7 @@ namespace Lucene.Net.Util
             private int end;
 
             /// <summary>
-            /// Creates a new <seealso cref="SliceReader"/> on the given pool
+            /// Creates a new <see cref="SliceReader"/> on the given pool.
             /// </summary>
             public SliceReader(Int32BlockPool pool)
             {
@@ -396,7 +399,7 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Resets the reader to a slice give the slices absolute start and end offset in the pool
+            /// Resets the reader to a slice given the slice's absolute start and end offsets in the pool.
             /// </summary>
             public void Reset(int startOffset, int endOffset)
             {
@@ -422,8 +425,8 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Returns <code>true</code> iff the current slice is fully read. If this
-            /// method returns <code>true</code> <seealso cref="SliceReader#readInt()"/> should not
+            /// Returns <c>true</c> if the current slice is fully read. If this
+            /// method returns <c>true</c>, <see cref="SliceReader.ReadInt32()"/> should not
             /// be called again on this slice.
             /// </summary>
             public bool IsEndOfSlice
@@ -436,11 +439,11 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Reads the next int from the current slice and returns it. 
+            /// Reads the next <see cref="int"/> from the current slice and returns it. 
             /// <para/>
             /// NOTE: This was readInt() in Lucene
             /// </summary>
-            /// <seealso cref= SliceReader#endOfSlice() </seealso>
+            /// <seealso cref="SliceReader.IsEndOfSlice"/>
             public int ReadInt32()
             {
                 Debug.Assert(!IsEndOfSlice);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/IntroSorter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/IntroSorter.cs b/src/Lucene.Net/Util/IntroSorter.cs
index ea8df37..488443d 100644
--- a/src/Lucene.Net/Util/IntroSorter.cs
+++ b/src/Lucene.Net/Util/IntroSorter.cs
@@ -20,12 +20,13 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// <seealso cref="Sorter"/> implementation based on a variant of the quicksort algorithm
+    /// <see cref="Sorter"/> implementation based on a variant of the quicksort algorithm
     /// called <a href="http://en.wikipedia.org/wiki/Introsort">introsort</a>: when
     /// the recursion level exceeds the log of the length of the array to sort, it
-    /// falls back to heapsort. this prevents quicksort from running into its
+    /// falls back to heapsort. This prevents quicksort from running into its
     /// worst-case quadratic runtime. Small arrays are sorted with
     /// insertion sort.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public abstract class IntroSorter : Sorter
@@ -37,11 +38,15 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Create a new <seealso cref="IntroSorter"/>. </summary>
+        /// Create a new <see cref="IntroSorter"/>. </summary>
         public IntroSorter()
         {
         }
 
+        /// <summary>
+        /// Sort the slice which starts at <paramref name="from"/> (inclusive) and ends at
+        /// <paramref name="to"/> (exclusive).
+        /// </summary>
         public override sealed void Sort(int from, int to)
         {
             CheckRange(from, to);
@@ -109,14 +114,14 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Save the value at slot <code>i</code> so that it can later be used as a
-        /// pivot, see <seealso cref="#comparePivot(int)"/>.
+        /// Save the value at slot <paramref name="i"/> so that it can later be used as a
+        /// pivot, see <see cref="ComparePivot(int)"/>.
         /// </summary>
         protected abstract void SetPivot(int i);
 
         /// <summary>
-        /// Compare the pivot with the slot at <code>j</code>, similarly to
-        ///  <seealso cref="#compare(int, int) compare(i, j)"/>.
+        /// Compare the pivot with the slot at <paramref name="j"/>, similarly to
+        /// Compare(i, j) (<see cref="Sorter.Compare(int, int)"/>).
         /// </summary>
         protected abstract int ComparePivot(int j);
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/IntsRef.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/IntsRef.cs b/src/Lucene.Net/Util/IntsRef.cs
index d98fc62..9a37b34 100644
--- a/src/Lucene.Net/Util/IntsRef.cs
+++ b/src/Lucene.Net/Util/IntsRef.cs
@@ -25,12 +25,12 @@ namespace Lucene.Net.Util
 
     /// <summary>
     /// Represents <see cref="T:int[]"/>, as a slice (offset + length) into an
-    /// existing <see cref="T:int[]"/>.  The <see cref="Int32s"/> member should never be null; use
+    /// existing <see cref="T:int[]"/>.  The <see cref="Int32s"/> member should never be <c>null</c>; use
     /// <see cref="EMPTY_INT32S"/> if necessary.
     /// <para/>
     /// NOTE: This was IntsRef in Lucene
-    /// 
-    ///  @lucene.internal
+    /// <para/>
+    /// @lucene.internal
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -38,14 +38,14 @@ namespace Lucene.Net.Util
     public sealed class Int32sRef : IComparable<Int32sRef>
     {
         /// <summary>
-        /// An empty integer array for convenience
+        /// An empty integer array for convenience.
         /// <para/>
         /// NOTE: This was EMPTY_INTS in Lucene
         /// </summary>
         public static readonly int[] EMPTY_INT32S = new int[0];
 
         /// <summary>
-        /// The contents of the IntsRef. Should never be {@code null}. 
+        /// The contents of the <see cref="Int32sRef"/>. Should never be <c>null</c>. 
         /// <para/>
         /// NOTE: This was ints (field) in Lucene
         /// </summary>
@@ -70,18 +70,18 @@ namespace Lucene.Net.Util
         public int Offset { get; set; }
 
         /// <summary>
-        /// Length of used ints. </summary>
+        /// Length of used <see cref="int"/>s. </summary>
         public int Length { get; set; }
 
         /// <summary>
-        /// Create a IntsRef with <see cref="EMPTY_INT32S"/> </summary>
+        /// Create a <see cref="Int32sRef"/> with <see cref="EMPTY_INT32S"/>. </summary>
         public Int32sRef()
         {
             ints = EMPTY_INT32S;
         }
 
         /// <summary>
-        /// Create a IntsRef pointing to a new array of size <code>capacity</code>.
+        /// Create a <see cref="Int32sRef"/> pointing to a new array of size <paramref name="capacity"/>.
         /// Offset and length will both be zero.
         /// </summary>
         public Int32sRef(int capacity)
@@ -90,8 +90,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// this instance will directly reference ints w/o making a copy.
-        /// ints should not be null.
+        /// This instance will directly reference <paramref name="ints"/> w/o making a copy.
+        /// <paramref name="ints"/> should not be <c>null</c>.
         /// </summary>
         public Int32sRef(int[] ints, int offset, int length)
         {
@@ -102,11 +102,11 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns a shallow clone of this instance (the underlying ints are
+        /// Returns a shallow clone of this instance (the underlying <see cref="int"/>s are
         /// <b>not</b> copied and will be shared by both the returned object and this
         /// object.
         /// </summary>
-        /// <seealso cref= #deepCopyOf </seealso>
+        /// <seealso cref="DeepCopyOf(Int32sRef)"/>
         public object Clone()
         {
             return new Int32sRef(ints, Offset, Length);
@@ -163,7 +163,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Signed int order comparison </summary>
+        /// Signed <see cref="int"/> order comparison. </summary>
         public int CompareTo(Int32sRef other)
         {
             if (this == other)
@@ -212,8 +212,9 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Used to grow the reference array.
-        ///
+        /// <para/>
         /// In general this should not be used as it does not take the offset into account.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public void Grow(int newLength)
@@ -243,10 +244,10 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a new IntsRef that points to a copy of the ints from
-        /// <code>other</code>
-        /// <p>
-        /// The returned IntsRef will have a length of other.length
+        /// Creates a new <see cref="Int32sRef"/> that points to a copy of the <see cref="int"/>s from
+        /// <paramref name="other"/>
+        /// <para/>
+        /// The returned <see cref="Int32sRef"/> will have a length of <c>other.Length</c>
         /// and an offset of zero.
         /// </summary>
         public static Int32sRef DeepCopyOf(Int32sRef other)
@@ -258,7 +259,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Performs internal consistency checks.
-        /// Always returns true (or throws InvalidOperationException)
+        /// Always returns true (or throws <see cref="InvalidOperationException"/>)
         /// </summary>
         public bool IsValid()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/LongBitSet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/LongBitSet.cs b/src/Lucene.Net/Util/LongBitSet.cs
index 6bbde98..f1d6644 100644
--- a/src/Lucene.Net/Util/LongBitSet.cs
+++ b/src/Lucene.Net/Util/LongBitSet.cs
@@ -22,12 +22,12 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// BitSet of fixed length (numBits), backed by accessible (<seealso cref="#getBits"/>)
-    /// long[], accessed with a long index. Use it only if you intend to store more
-    /// than 2.1B bits, otherwise you should use <seealso cref="FixedBitSet"/>.
+    /// BitSet of fixed length (<see cref="numBits"/>), backed by accessible (<see cref="GetBits()"/>)
+    /// <see cref="T:long[]"/>, accessed with a <see cref="long"/> index. Use it only if you intend to store more
+    /// than 2.1B bits, otherwise you should use <see cref="FixedBitSet"/>.
     /// <para/>
     /// NOTE: This was LongBitSet in Lucene
-    ///
+    /// <para/>
     /// @lucene.internal
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -40,14 +40,14 @@ namespace Lucene.Net.Util
         private readonly int numWords;
 
         /// <summary>
-        /// If the given <seealso cref="Int64BitSet"/> is large enough to hold
-        /// {@code numBits}, returns the given bits, otherwise returns a new
-        /// <seealso cref="Int64BitSet"/> which can hold the requested number of bits.
+        /// If the given <see cref="Int64BitSet"/> is large enough to hold
+        /// <paramref name="numBits"/>, returns the given <paramref name="bits"/>, otherwise returns a new
+        /// <see cref="Int64BitSet"/> which can hold the requested number of bits.
         ///
-        /// <p>
-        /// <b>NOTE:</b> the returned bitset reuses the underlying {@code long[]} of
-        /// the given {@code bits} if possible. Also, calling <seealso cref="#length()"/> on the
-        /// returned bits may return a value greater than {@code numBits}.
+        /// <para/>
+        /// <b>NOTE:</b> the returned bitset reuses the underlying <see cref="T:long[]"/> of
+        /// the given <paramref name="bits"/> if possible. Also, reading <see cref="Length"/> on the
+        /// returned bits may return a value greater than <paramref name="numBits"/>.
         /// </summary>
         public static Int64BitSet EnsureCapacity(Int64BitSet bits, long numBits)
         {
@@ -68,7 +68,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// returns the number of 64 bit words it would take to hold numBits </summary>
+        /// Returns the number of 64 bit words it would take to hold <paramref name="numBits"/>. </summary>
         public static int Bits2words(long numBits)
         {
             int numLong = (int)((long)((ulong)numBits >> 6));
@@ -114,8 +114,8 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Returns number of set bits.  NOTE: this visits every
-        ///  long in the backing bits array, and the result is not
-        ///  internally cached!
+        /// long in the backing bits array, and the result is not
+        /// internally cached!
         /// </summary>
         public long Cardinality()
         {
@@ -174,8 +174,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the index of the first set bit starting at the index specified.
-        ///  -1 is returned if there are no more set bits.
+        /// Returns the index of the first set bit starting at the <paramref name="index"/> specified.
+        /// -1 is returned if there are no more set bits.
         /// </summary>
         public long NextSetBit(long index)
         {
@@ -202,8 +202,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the index of the last set bit before or on the index specified.
-        ///  -1 is returned if there are no more set bits.
+        /// Returns the index of the last set bit before or on the <paramref name="index"/> specified.
+        /// -1 is returned if there are no more set bits.
         /// </summary>
         public long PrevSetBit(long index)
         {
@@ -254,7 +254,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// returns true if the sets have any elements in common </summary>
+        /// Returns <c>true</c> if the sets have any elements in common </summary>
         public bool Intersects(Int64BitSet other)
         {
             int pos = Math.Min(numWords, other.numWords);
@@ -301,8 +301,8 @@ namespace Lucene.Net.Util
         /// <summary>
         /// Flips a range of bits
         /// </summary>
-        /// <param name="startIndex"> lower index </param>
-        /// <param name="endIndex"> one-past the last bit to flip </param>
+        /// <param name="startIndex"> Lower index </param>
+        /// <param name="endIndex"> One-past the last bit to flip </param>
         public void Flip(long startIndex, long endIndex)
         {
             Debug.Assert(startIndex >= 0 && startIndex < numBits);
@@ -346,8 +346,8 @@ namespace Lucene.Net.Util
         /// <summary>
         /// Sets a range of bits
         /// </summary>
-        /// <param name="startIndex"> lower index </param>
-        /// <param name="endIndex"> one-past the last bit to set </param>
+        /// <param name="startIndex"> Lower index </param>
+        /// <param name="endIndex"> One-past the last bit to set </param>
         public void Set(long startIndex, long endIndex)
         {
             Debug.Assert(startIndex >= 0 && startIndex < numBits);
@@ -377,8 +377,8 @@ namespace Lucene.Net.Util
         /// <summary>
         /// Clears a range of bits.
         /// </summary>
-        /// <param name="startIndex"> lower index </param>
-        /// <param name="endIndex"> one-past the last bit to clear </param>
+        /// <param name="startIndex"> Lower index </param>
+        /// <param name="endIndex"> One-past the last bit to clear </param>
         public void Clear(long startIndex, long endIndex)
         {
             Debug.Assert(startIndex >= 0 && startIndex < numBits);
@@ -420,7 +420,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// returns true if both sets have the same bits set </summary>
+        /// Returns <c>true</c> if both sets have the same bits set </summary>
         public override bool Equals(object o)
         {
             if (this == o)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/LongValues.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/LongValues.cs b/src/Lucene.Net/Util/LongValues.cs
index ce1335f..93e5873 100644
--- a/src/Lucene.Net/Util/LongValues.cs
+++ b/src/Lucene.Net/Util/LongValues.cs
@@ -21,21 +21,23 @@ namespace Lucene.Net.Util
     using PackedInt32s = Lucene.Net.Util.Packed.PackedInt32s;
 
     /// <summary>
-    /// Abstraction over an array of longs.
-    ///  this class extends NumericDocValues so that we don't need to add another
-    ///  level of abstraction every time we want eg. to use the <seealso cref="PackedInt32s"/>
-    ///  utility classes to represent a <seealso cref="NumericDocValues"/> instance.
+    /// Abstraction over an array of <see cref="long"/>s.
+    /// This class extends <see cref="NumericDocValues"/> so that we don't need to add another
+    /// level of abstraction every time we want eg. to use the <see cref="PackedInt32s"/>
+    /// utility classes to represent a <see cref="NumericDocValues"/> instance.
     /// <para/>
     /// NOTE: This was LongValues in Lucene
-    /// 
-    ///  @lucene.internal
+    /// <para/>
+    /// @lucene.internal
     /// </summary>
     public abstract class Int64Values : NumericDocValues
     {
         /// <summary>
-        /// Get value at <code>index</code>. </summary>
+        /// Get value at <paramref name="index"/>. </summary>
         public abstract long Get(long index);
 
+        /// <summary>
+        /// Get value at <paramref name="idx"/>. </summary>
         public override long Get(int idx)
         {
             return Get((long)idx);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/LongsRef.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/LongsRef.cs b/src/Lucene.Net/Util/LongsRef.cs
index 9fb7679..f5a0c4f 100644
--- a/src/Lucene.Net/Util/LongsRef.cs
+++ b/src/Lucene.Net/Util/LongsRef.cs
@@ -25,12 +25,12 @@ namespace Lucene.Net.Util
 
     /// <summary>
     /// Represents <see cref="T:long[]"/>, as a slice (offset + length) into an
-    /// existing <see cref="T:long[]"/>.  The <see cref="Int64s"/> member should never be null; use
+    /// existing <see cref="T:long[]"/>.  The <see cref="Int64s"/> member should never be <c>null</c>; use
     /// <see cref="EMPTY_INT64S"/> if necessary.
     /// <para/>
     /// NOTE: This was LongsRef in Lucene
-    ///
-    ///  @lucene.internal
+    /// <para/>
+    /// @lucene.internal
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -38,14 +38,14 @@ namespace Lucene.Net.Util
     public sealed class Int64sRef : IComparable<Int64sRef>
     {
         /// <summary>
-        /// An empty long array for convenience
+        /// An empty <see cref="long"/> array for convenience
         /// <para/>
         /// NOTE: This was EMPTY_LONGS in Lucene
         /// </summary>
         public static readonly long[] EMPTY_INT64S = new long[0];
 
         /// <summary>
-        /// The contents of the LongsRef. Should never be {@code null}. 
+        /// The contents of the <see cref="Int64sRef"/>. Should never be <c>null</c>. 
         /// <para/>
         /// NOTE: This was longs (field) in Lucene
         /// </summary>
@@ -74,14 +74,14 @@ namespace Lucene.Net.Util
         public int Length { get; set; }
 
         /// <summary>
-        /// Create a LongsRef with <see cref="EMPTY_INT64S"/> </summary>
+        /// Create a <see cref="Int64sRef"/> with <see cref="EMPTY_INT64S"/> </summary>
         public Int64sRef()
         {
             longs = EMPTY_INT64S;
         }
 
         /// <summary>
-        /// Create a LongsRef pointing to a new array of size <code>capacity</code>.
+        /// Create a <see cref="Int64sRef"/> pointing to a new array of size <paramref name="capacity"/>.
         /// Offset and length will both be zero.
         /// </summary>
         public Int64sRef(int capacity)
@@ -90,8 +90,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// this instance will directly reference longs w/o making a copy.
-        /// longs should not be null
+        /// This instance will directly reference <paramref name="longs"/> w/o making a copy.
+        /// <paramref name="longs"/> should not be <c>null</c>.
         /// </summary>
         public Int64sRef(long[] longs, int offset, int length)
         {
@@ -102,11 +102,11 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns a shallow clone of this instance (the underlying longs are
+        /// Returns a shallow clone of this instance (the underlying <see cref="long"/>s are
         /// <b>not</b> copied and will be shared by both the returned object and this
         /// object.
         /// </summary>
-        /// <seealso cref= #deepCopyOf </seealso>
+        /// <seealso cref="DeepCopyOf(Int64sRef)"/>
         public object Clone()
         {
             return new Int64sRef(longs, Offset, Length);
@@ -163,7 +163,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Signed int order comparison </summary>
+        /// Signed <see cref="int"/> order comparison </summary>
         public int CompareTo(Int64sRef other)
         {
             if (this == other)
@@ -212,8 +212,9 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Used to grow the reference array.
-        ///
+        /// <para/>
         /// In general this should not be used as it does not take the offset into account.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public void Grow(int newLength)
@@ -243,10 +244,10 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a new IntsRef that points to a copy of the longs from
-        /// <code>other</code>
-        /// <p>
-        /// The returned IntsRef will have a length of other.length
+        /// Creates a new <see cref="Int64sRef"/> that points to a copy of the <see cref="long"/>s from
+        /// <paramref name="other"/>.
+        /// <para/>
+        /// The returned <see cref="Int64sRef"/> will have a length of <c>other.Length</c>
         /// and an offset of zero.
         /// </summary>
         public static Int64sRef DeepCopyOf(Int64sRef other)
@@ -258,7 +259,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Performs internal consistency checks.
-        /// Always returns true (or throws InvalidOperationException)
+        /// Always returns <c>true</c> (or throws <see cref="InvalidOperationException"/>)
         /// </summary>
         public bool IsValid()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/MapOfSets.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/MapOfSets.cs b/src/Lucene.Net/Util/MapOfSets.cs
index 0cf1549..df4ce9d 100644
--- a/src/Lucene.Net/Util/MapOfSets.cs
+++ b/src/Lucene.Net/Util/MapOfSets.cs
@@ -21,20 +21,21 @@ namespace Lucene.Net.Util
 
     /// <summary>
     /// Helper class for keeping Lists of Objects associated with keys. <b>WARNING: this CLASS IS NOT THREAD SAFE</b>
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public class MapOfSets<TKey, TValue>
     {
-        private readonly IDictionary<TKey, HashSet<TValue>> theMap;
+        private readonly IDictionary<TKey, HashSet<TValue>> theMap; // LUCENENET TODO: API Change to ISet
 
-        /// <param name="m"> the backing store for this object </param>
-        public MapOfSets(IDictionary<TKey, HashSet<TValue>> m)
+        /// <param name="m"> The backing store for this object. </param>
+        public MapOfSets(IDictionary<TKey, HashSet<TValue>> m) // LUCENENET TODO: API Change to ISet
         {
             theMap = m;
         }
 
-        /// <returns> direct access to the map backing this object. </returns>
-        public virtual IDictionary<TKey, HashSet<TValue>> Map
+        /// <returns> Direct access to the map backing this object. </returns>
+        public virtual IDictionary<TKey, HashSet<TValue>> Map // LUCENENET TODO: API Change to ISet
         {
             get
             {
@@ -43,12 +44,13 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Adds val to the Set associated with key in the Map.  If key is not
-        /// already in the map, a new Set will first be created. </summary>
-        /// <returns> the size of the Set associated with key once val is added to it. </returns>
+        /// Adds <paramref name="val"/> to the <see cref="ISet{T}"/> associated with <paramref name="key"/> in the <see cref="IDictionary{TKey, TValue}"/>.  
+        /// If <paramref name="key"/> is not
+        /// already in the map, a new <see cref="ISet{T}"/> will first be created. </summary>
+        /// <returns> The size of the <see cref="ISet{T}"/> associated with <paramref name="key"/> once <paramref name="val"/> is added to it. </returns>
         public virtual int Put(TKey key, TValue val)
         {
-            HashSet<TValue> theSet;
+            HashSet<TValue> theSet; // LUCENENET TODO: API Change to ISet
             if (theMap.ContainsKey(key))
             {
                 theSet = theMap[key];
@@ -63,13 +65,13 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Adds multiple vals to the Set associated with key in the Map.
-        /// If key is not
-        /// already in the map, a new Set will first be created. </summary>
-        /// <returns> the size of the Set associated with key once val is added to it. </returns>
+        /// Adds multiple <paramref name="vals"/> to the <see cref="ISet{T}"/> associated with <paramref name="key"/> in the <see cref="IDictionary{TKey, TValue}"/>.
+        /// If <paramref name="key"/> is not
+        /// already in the map, a new <see cref="ISet{T}"/> will first be created. </summary>
+        /// <returns> The size of the <see cref="ISet{T}"/> associated with <paramref name="key"/> once the <paramref name="vals"/> are added to it. </returns>
         public virtual int PutAll(TKey key, IEnumerable<TValue> vals)
         {
-            HashSet<TValue> theSet;
+            HashSet<TValue> theSet; // LUCENENET TODO: API Change to ISet
             if (theMap.ContainsKey(key))
             {
                 theSet = theMap[key];

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/MathUtil.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/MathUtil.cs b/src/Lucene.Net/Util/MathUtil.cs
index 4ea0a6d..12aa41d 100644
--- a/src/Lucene.Net/Util/MathUtil.cs
+++ b/src/Lucene.Net/Util/MathUtil.cs
@@ -31,8 +31,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns {@code x <= 0 ? 0 : Math.floor(Math.log(x) / Math.log(base))} </summary>
-        /// <param name="base"> must be {@code > 1} </param>
+        /// Returns <c>x &lt;= 0 ? 0 : Math.Floor(Math.Log(x) / Math.Log(base))</c>. </summary>
+        /// <param name="base"> Must be <c>&gt; 1</c>.</param>
         public static int Log(long x, int @base)
         {
             if (@base <= 1)
@@ -49,7 +49,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Calculates logarithm in a given base with doubles.
+        /// Calculates logarithm in a given <paramref name="base"/> with doubles.
         /// </summary>
         public static double Log(double @base, double x)
         {
@@ -57,13 +57,13 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Return the greatest common divisor of <code>a</code> and <code>b</code>,
-        ///  consistently with <seealso cref="BigInteger#gcd(BigInteger)"/>.
-        ///  <p><b>NOTE</b>: A greatest common divisor must be positive, but
-        ///  <code>2^64</code> cannot be expressed as a long although it
-        ///  is the GCD of <seealso cref="Long#MIN_VALUE"/> and <code>0</code> and the GCD of
-        ///  <seealso cref="Long#MIN_VALUE"/> and <seealso cref="Long#MIN_VALUE"/>. So in these 2 cases,
-        ///  and only them, this method will return <seealso cref="Long#MIN_VALUE"/>.
+        /// Return the greatest common divisor of <paramref name="a"/> and <paramref name="b"/>,
+        /// consistently with <c>System.Numerics.BigInteger.GreatestCommonDivisor(System.Numerics.BigInteger, System.Numerics.BigInteger)</c>.
+        /// <para/><b>NOTE</b>: A greatest common divisor must be positive, but
+        /// <c>2^64</c> cannot be expressed as a <see cref="long"/> although it
+        /// is the GCD of <see cref="long.MinValue"/> and <c>0</c> and the GCD of
+        /// <see cref="long.MinValue"/> and <see cref="long.MinValue"/>. So in these 2 cases,
+        /// and only them, this method will return <see cref="long.MinValue"/>.
         /// </summary>
         // see http://en.wikipedia.org/wiki/Binary_GCD_algorithm#Iterative_version_in_C.2B.2B_using_ctz_.28count_trailing_zeros.29
         public static long Gcd(long a, long b)
@@ -104,14 +104,14 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Calculates inverse hyperbolic sine of a {@code double} value.
-        /// <p>
+        /// Calculates inverse hyperbolic sine of a <see cref="double"/> value.
+        /// <para/>
         /// Special cases:
-        /// <ul>
-        ///    <li>If the argument is NaN, then the result is NaN.
-        ///    <li>If the argument is zero, then the result is a zero with the same sign as the argument.
-        ///    <li>If the argument is infinite, then the result is infinity with the same sign as the argument.
-        /// </ul>
+        /// <list type="bullet">
+        ///    <item><description>If the argument is NaN, then the result is NaN.</description></item>
+        ///    <item><description>If the argument is zero, then the result is a zero with the same sign as the argument.</description></item>
+        ///    <item><description>If the argument is infinite, then the result is infinity with the same sign as the argument.</description></item>
+        /// </list>
         /// </summary>
         public static double Asinh(double a)
         {
@@ -131,15 +131,15 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Calculates inverse hyperbolic cosine of a {@code double} value.
-        /// <p>
+        /// Calculates inverse hyperbolic cosine of a <see cref="double"/> value.
+        /// <para/>
         /// Special cases:
-        /// <ul>
-        ///    <li>If the argument is NaN, then the result is NaN.
-        ///    <li>If the argument is +1, then the result is a zero.
-        ///    <li>If the argument is positive infinity, then the result is positive infinity.
-        ///    <li>If the argument is less than 1, then the result is NaN.
-        /// </ul>
+        /// <list type="bullet">
+        ///    <item><description>If the argument is NaN, then the result is NaN.</description></item>
+        ///    <item><description>If the argument is +1, then the result is a zero.</description></item>
+        ///    <item><description>If the argument is positive infinity, then the result is positive infinity.</description></item>
+        ///    <item><description>If the argument is less than 1, then the result is NaN.</description></item>
+        /// </list>
         /// </summary>
         public static double Acosh(double a)
         {
@@ -147,16 +147,16 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Calculates inverse hyperbolic tangent of a {@code double} value.
-        /// <p>
+        /// Calculates inverse hyperbolic tangent of a <see cref="double"/> value.
+        /// <para/>
         /// Special cases:
-        /// <ul>
-        ///    <li>If the argument is NaN, then the result is NaN.
-        ///    <li>If the argument is zero, then the result is a zero with the same sign as the argument.
-        ///    <li>If the argument is +1, then the result is positive infinity.
-        ///    <li>If the argument is -1, then the result is negative infinity.
-        ///    <li>If the argument's absolute value is greater than 1, then the result is NaN.
-        /// </ul>
+        /// <list type="bullet">
+        ///    <item><description>If the argument is NaN, then the result is NaN.</description></item>
+        ///    <item><description>If the argument is zero, then the result is a zero with the same sign as the argument.</description></item>
+        ///    <item><description>If the argument is +1, then the result is positive infinity.</description></item>
+        ///    <item><description>If the argument is -1, then the result is negative infinity.</description></item>
+        ///    <item><description>If the argument's absolute value is greater than 1, then the result is NaN.</description></item>
+        /// </list>
         /// </summary>
         public static double Atanh(double a)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/MergedIterator.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/MergedIterator.cs b/src/Lucene.Net/Util/MergedIterator.cs
index 82f7951..668bfdf 100644
--- a/src/Lucene.Net/Util/MergedIterator.cs
+++ b/src/Lucene.Net/Util/MergedIterator.cs
@@ -24,24 +24,25 @@ namespace Lucene.Net.Util
 
     /// <summary>
     /// Provides a merged sorted view from several sorted iterators.
-    /// <p>
-    /// If built with <code>removeDuplicates</code> set to true and an element
+    /// <para/>
+    /// If built with <c>removeDuplicates</c> set to <c>true</c> and an element
     /// appears in multiple iterators then it is deduplicated, that is this iterator
     /// returns the sorted union of elements.
-    /// <p>
-    /// If built with <code>removeDuplicates</code> set to false then all elements
+    /// <para/>
+    /// If built with <c>removeDuplicates</c> set to <c>false</c> then all elements
     /// in all iterators are returned.
-    /// <p>
+    /// <para/>
     /// Caveats:
-    /// <ul>
-    ///   <li>The behavior is undefined if the iterators are not actually sorted.
-    ///   <li>Null elements are unsupported.
-    ///   <li>If removeDuplicates is set to true and if a single iterator contains
-    ///       duplicates then they will not be deduplicated.
-    ///   <li>When elements are deduplicated it is not defined which one is returned.
-    ///   <li>If removeDuplicates is set to false then the order in which duplicates
-    ///       are returned isn't defined.
-    /// </ul>
+    /// <list type="bullet">
+    ///   <item><description>The behavior is undefined if the iterators are not actually sorted.</description></item>
+    ///   <item><description>Null elements are unsupported.</description></item>
+    ///   <item><description>If <c>removeDuplicates</c> is set to <c>true</c> and if a single iterator contains
+    ///       duplicates then they will not be deduplicated.</description></item>
+    ///   <item><description>When elements are deduplicated it is not defined which one is returned.</description></item>
+    ///   <item><description>If <c>removeDuplicates</c> is set to <c>false</c> then the order in which duplicates
+    ///       are returned isn't defined.</description></item>
+    /// </list>
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public sealed class MergedIterator<T> : IEnumerator<T>
@@ -151,112 +152,6 @@ namespace Lucene.Net.Util
             numTop = 0;
         }
 
-        /*private T Current;
-        private readonly TermMergeQueue<T> Queue;
-        private readonly SubIterator<T>[] Top;
-        private readonly bool RemoveDuplicates;
-        private int NumTop;
-
-        public MergedIterator(params IEnumerator<T>[] iterators) : this(true, iterators)
-        {
-        }
-
-        public MergedIterator(bool removeDuplicates, params IEnumerator<T>[] iterators)
-        {
-          this.RemoveDuplicates = removeDuplicates;
-          Queue = new TermMergeQueue<T>(iterators.Length);
-          Top = new SubIterator[iterators.Length];
-          int index = 0;
-          foreach (IEnumerator<T> iterator in iterators)
-          {
-            if (iterator.HasNext())
-            {
-              SubIterator<T> sub = new SubIterator<T>();
-              sub.Current = iterator.next();
-              sub.Iterator = iterator;
-              sub.Index = index++;
-              Queue.Add(sub);
-            }
-          }
-        }
-
-        public override bool HasNext()
-        {
-          if (Queue.Size() > 0)
-          {
-            return true;
-          }
-
-          for (int i = 0; i < NumTop; i++)
-          {
-            if (Top[i].Iterator.hasNext())
-            {
-              return true;
-            }
-          }
-          return false;
-        }
-
-        public override T Next()
-        {
-          // restore queue
-          PushTop();
-
-          // gather equal top elements
-          if (Queue.Size() > 0)
-          {
-            PullTop();
-          }
-          else
-          {
-            Current = default(T);
-          }
-          if (Current == null)
-          {
-            throw new NoSuchElementException();
-          }
-          return Current;
-        }
-
-        public override void Remove()
-        {
-          throw new System.NotSupportedException();
-        }
-
-        private void PullTop()
-        {
-          Debug.Assert(NumTop == 0);
-          Top[NumTop++] = Queue.Pop();
-          if (RemoveDuplicates)
-          {
-            // extract all subs from the queue that have the same top element
-            while (Queue.Size() != 0 && Queue.Top().Current.Equals(Top[0].Current))
-            {
-              Top[NumTop++] = Queue.Pop();
-            }
-          }
-          Current = Top[0].Current;
-        }
-
-        private void PushTop()
-        {
-          // call next() on each top, and put back into queue
-          for (int i = 0; i < NumTop; i++)
-          {
-            if (Top[i].Iterator.hasNext())
-            {
-              Top[i].Current = Top[i].Iterator.next();
-              Queue.Add(Top[i]);
-            }
-            else
-            {
-              // no more elements
-              Top[i].Current = default(T);
-            }
-          }
-          NumTop = 0;
-        }*/
-
         private class SubIterator<I>
             where I : IComparable<I>
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/MutableBits.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/MutableBits.cs b/src/Lucene.Net/Util/MutableBits.cs
index a03aff7..117a707 100644
--- a/src/Lucene.Net/Util/MutableBits.cs
+++ b/src/Lucene.Net/Util/MutableBits.cs
@@ -18,13 +18,13 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// Extension of Bits for live documents.
+    /// Extension of <see cref="IBits"/> for live documents.
     /// </summary>
     public interface IMutableBits : IBits
     {
         /// <summary>
-        /// Sets the bit specified by <code>index</code> to false. </summary>
-        /// <param name="index"> index, should be non-negative and &lt; <seealso cref="#length()"/>.
+        /// Sets the bit specified by <paramref name="index"/> to <c>false</c>. </summary>
+        /// <param name="index"> index, should be non-negative and &lt; <see cref="IBits.Length"/>.
         ///        The result of passing negative or out of bounds values is undefined
         ///        by this interface, <b>just don't do it!</b> </param>
         void Clear(int index);


[33/48] lucenenet git commit: Lucene.Net.Codecs.PerField: Fixed XML documentation warnings

Posted by ni...@apache.org.
Lucene.Net.Codecs.PerField: Fixed XML documentation warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/82141057
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/82141057
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/82141057

Branch: refs/heads/master
Commit: 8214105762280724576084f3177425cc86754fd3
Parents: a08ae94
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Mon Jun 5 05:31:16 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Tue Jun 6 06:58:39 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |  3 +-
 .../Codecs/PerField/PerFieldDocValuesFormat.cs  | 35 +++++++++++---------
 .../Codecs/PerField/PerFieldPostingsFormat.cs   | 35 +++++++++++---------
 3 files changed, 39 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/82141057/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index dafe5a8..21694de 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -58,8 +58,7 @@ helpers to help with that, see for examples see our [Java style methods to avoid
    5. Codecs.Lucene42 (namespace)
    6. Codecs.Lucene45 (namespace)
    7. Codecs.Lucene46 (namespace)
-   8. Codecs.PerField (namespace)
-   9. Util.Packed (namespace)
+   8. Util.Packed (namespace)
 2. Lucene.Net.Codecs (project)
    1. Appending (namespace)
    2. BlockTerms (namespace)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/82141057/src/Lucene.Net/Codecs/PerField/PerFieldDocValuesFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/PerField/PerFieldDocValuesFormat.cs b/src/Lucene.Net/Codecs/PerField/PerFieldDocValuesFormat.cs
index daf7f84..45a0517 100644
--- a/src/Lucene.Net/Codecs/PerField/PerFieldDocValuesFormat.cs
+++ b/src/Lucene.Net/Codecs/PerField/PerFieldDocValuesFormat.cs
@@ -37,18 +37,21 @@ namespace Lucene.Net.Codecs.PerField
 
     /// <summary>
     /// Enables per field docvalues support.
-    /// <p>
-    /// Note, when extending this class, the name (<seealso cref="#getName"/>) is
+    /// <para/>
+    /// Note, when extending this class, the name (<see cref="DocValuesFormat.Name"/>) is
     /// written into the index. In order for the field to be read, the
-    /// name must resolve to your implementation via <seealso cref="#forName(String)"/>.
-    /// this method uses Java's
-    /// <seealso cref="ServiceLoader Service Provider Interface"/> to resolve format names.
-    /// <p>
+    /// name must resolve to your implementation via <see cref="DocValuesFormat.ForName(string)"/>.
+    /// This method uses <see cref="IDocValuesFormatFactory.GetDocValuesFormat(string)"/> to resolve format names.
+    /// See <see cref="DefaultDocValuesFormatFactory"/> for information about how to implement your own <see cref="DocValuesFormat"/>.
+    /// <para/>
     /// Files written by each docvalues format have an additional suffix containing the
-    /// format name. For example, in a per-field configuration instead of <tt>_1.dat</tt>
-    /// filenames would look like <tt>_1_Lucene40_0.dat</tt>. </summary>
-    /// <seealso cref= ServiceLoader
-    /// @lucene.experimental </seealso>
+    /// format name. For example, in a per-field configuration instead of <c>_1.dat</c>
+    /// filenames would look like <c>_1_Lucene40_0.dat</c>. 
+    /// <para/>
+    /// @lucene.experimental
+    /// </summary>
+    /// <seealso cref="IDocValuesFormatFactory"/>
+    /// <seealso cref="DefaultDocValuesFormatFactory"/>
     [DocValuesFormatName("PerFieldDV40")]
     public abstract class PerFieldDocValuesFormat : DocValuesFormat
     {
@@ -58,14 +61,14 @@ namespace Lucene.Net.Codecs.PerField
         //public static readonly string PER_FIELD_NAME = "PerFieldDV40";
 
         /// <summary>
-        /// <seealso cref="FieldInfo"/> attribute name used to store the
-        ///  format name for each field.
+        /// <see cref="FieldInfo"/> attribute name used to store the
+        /// format name for each field.
         /// </summary>
         public static readonly string PER_FIELD_FORMAT_KEY = typeof(PerFieldDocValuesFormat).Name + ".format";
 
         /// <summary>
-        /// <seealso cref="FieldInfo"/> attribute name used to store the
-        ///  segment suffix name for each field.
+        /// <see cref="FieldInfo"/> attribute name used to store the
+        /// segment suffix name for each field.
         /// </summary>
         public static readonly string PER_FIELD_SUFFIX_KEY = typeof(PerFieldDocValuesFormat).Name + ".suffix";
 
@@ -395,8 +398,8 @@ namespace Lucene.Net.Codecs.PerField
 
         /// <summary>
         /// Returns the doc values format that should be used for writing
-        /// new segments of <code>field</code>.
-        /// <p>
+        /// new segments of <paramref name="field"/>.
+        /// <para/>
         /// The field to format mapping is written to the index, so
         /// this method is only invoked when writing, not when reading.
         /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/82141057/src/Lucene.Net/Codecs/PerField/PerFieldPostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/PerField/PerFieldPostingsFormat.cs b/src/Lucene.Net/Codecs/PerField/PerFieldPostingsFormat.cs
index c214df8..66a0734 100644
--- a/src/Lucene.Net/Codecs/PerField/PerFieldPostingsFormat.cs
+++ b/src/Lucene.Net/Codecs/PerField/PerFieldPostingsFormat.cs
@@ -31,18 +31,21 @@ namespace Lucene.Net.Codecs.PerField
 
     /// <summary>
     /// Enables per field postings support.
-    /// <p>
-    /// Note, when extending this class, the name (<seealso cref="#getName"/>) is
+    /// <para/>
+    /// Note, when extending this class, the name (<see cref="PostingsFormat.Name"/>) is
     /// written into the index. In order for the field to be read, the
-    /// name must resolve to your implementation via <seealso cref="#forName(String)"/>.
-    /// this method uses Java's
-    /// <seealso cref="ServiceLoader Service Provider Interface"/> to resolve format names.
-    /// <p>
+    /// name must resolve to your implementation via <see cref="PostingsFormat.ForName(string)"/>.
+    /// This method uses <see cref="IPostingsFormatFactory.GetPostingsFormat(string)"/> to resolve format names.
+    /// See <see cref="DefaultPostingsFormatFactory"/> for information about how to implement your own <see cref="PostingsFormat"/>.
+    /// <para/>
     /// Files written by each posting format have an additional suffix containing the
-    /// format name. For example, in a per-field configuration instead of <tt>_1.prx</tt>
-    /// filenames would look like <tt>_1_Lucene40_0.prx</tt>. </summary>
-    /// <seealso cref= ServiceLoader
-    /// @lucene.experimental </seealso>
+    /// format name. For example, in a per-field configuration instead of <c>_1.prx</c>
+    /// filenames would look like <c>_1_Lucene40_0.prx</c>. 
+    /// <para/>
+    /// @lucene.experimental 
+    /// </summary>
+    /// <seealso cref="IPostingsFormatFactory"/>
+    /// <seealso cref="DefaultPostingsFormatFactory"/>
     [PostingsFormatName("PerField40")] // LUCENENET specific - using PostingsFormatName attribute to ensure the default name passed from subclasses is the same as this class name
     public abstract class PerFieldPostingsFormat : PostingsFormat
     {
@@ -52,14 +55,14 @@ namespace Lucene.Net.Codecs.PerField
         //public static readonly string PER_FIELD_NAME = "PerField40";
 
         /// <summary>
-        /// <seealso cref="FieldInfo"/> attribute name used to store the
-        ///  format name for each field.
+        /// <see cref="FieldInfo"/> attribute name used to store the
+        /// format name for each field.
         /// </summary>
         public static readonly string PER_FIELD_FORMAT_KEY = typeof(PerFieldPostingsFormat).Name + ".format";
 
         /// <summary>
-        /// <seealso cref="FieldInfo"/> attribute name used to store the
-        ///  segment suffix name for each field.
+        /// <see cref="FieldInfo"/> attribute name used to store the
+        /// segment suffix name for each field.
         /// </summary>
         public static readonly string PER_FIELD_SUFFIX_KEY = typeof(PerFieldPostingsFormat).Name + ".suffix";
 
@@ -292,8 +295,8 @@ namespace Lucene.Net.Codecs.PerField
 
         /// <summary>
         /// Returns the postings format that should be used for writing
-        /// new segments of <code>field</code>.
-        /// <p>
+        /// new segments of <paramref name="field"/>.
+        /// <para/>
         /// The field to format mapping is written to the index, so
         /// this method is only invoked when writing, not when reading.
         /// </summary>


[27/48] lucenenet git commit: Lucene.Net.Util: Fixed XML Documentation comments, types beginning with H-Z

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/NumericUtils.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/NumericUtils.cs b/src/Lucene.Net/Util/NumericUtils.cs
index 5cc67fd..6c96e16 100644
--- a/src/Lucene.Net/Util/NumericUtils.cs
+++ b/src/Lucene.Net/Util/NumericUtils.cs
@@ -24,38 +24,39 @@ namespace Lucene.Net.Util
     using TermsEnum = Lucene.Net.Index.TermsEnum;
 
     /// <summary>
-    /// this is a helper class to generate prefix-encoded representations for numerical values
+    /// This is a helper class to generate prefix-encoded representations for numerical values
     /// and supplies converters to represent float/double values as sortable integers/longs.
     ///
-    /// <p>To quickly execute range queries in Apache Lucene, a range is divided recursively
+    /// <para/>To quickly execute range queries in Apache Lucene, a range is divided recursively
     /// into multiple intervals for searching: The center of the range is searched only with
     /// the lowest possible precision in the trie, while the boundaries are matched
     /// more exactly. this reduces the number of terms dramatically.
     ///
-    /// <p>this class generates terms to achieve this: First the numerical integer values need to
+    /// <para/>This class generates terms to achieve this: First the numerical integer values need to
     /// be converted to bytes. For that integer values (32 bit or 64 bit) are made unsigned
     /// and the bits are converted to ASCII chars with each 7 bit. The resulting byte[] is
     /// sortable like the original integer value (even using UTF-8 sort order). Each value is also
-    /// prefixed (in the first char) by the <code>shift</code> value (number of bits removed) used
+    /// prefixed (in the first char) by the <c>shift</c> value (number of bits removed) used
     /// during encoding.
     ///
-    /// <p>To also index floating point numbers, this class supplies two methods to convert them
-    /// to integer values by changing their bit layout: <seealso cref="#doubleToSortableLong"/>,
-    /// <seealso cref="#floatToSortableInt"/>. You will have no precision loss by
+    /// <para/>To also index floating point numbers, this class supplies two methods to convert them
+    /// to integer values by changing their bit layout: <see cref="DoubleToSortableInt64(double)"/>,
+    /// <see cref="SingleToSortableInt32(float)"/>. You will have no precision loss by
     /// converting floating point numbers to integers and back (only that the integer form
-    /// is not usable). Other data types like dates can easily converted to longs or ints (e.g.
-    /// date to long: <seealso cref="java.util.Date#getTime"/>).
+    /// is not usable). Other data types like dates can easily be converted to <see cref="long"/>s or <see cref="int"/>s (e.g.
+    /// date to long: <see cref="DateTime.Ticks"/>).
     ///
-    /// <p>For easy usage, the trie algorithm is implemented for indexing inside
-    /// <seealso cref="NumericTokenStream"/> that can index <code>int</code>, <code>long</code>,
-    /// <code>float</code>, and <code>double</code>. For querying,
-    /// <seealso cref="NumericRangeQuery"/> and <seealso cref="NumericRangeFilter"/> implement the query part
+    /// <para/>For easy usage, the trie algorithm is implemented for indexing inside
+    /// <see cref="Analysis.NumericTokenStream"/> that can index <see cref="int"/>, <see cref="long"/>,
+    /// <see cref="float"/>, and <see cref="double"/>. For querying,
+    /// <see cref="Search.NumericRangeQuery"/> and <see cref="Search.NumericRangeFilter"/> implement the query part
     /// for the same data types.
     ///
-    /// <p>this class can also be used, to generate lexicographically sortable (according to
-    /// <seealso cref="BytesRef#getUTF8SortedAsUTF16Comparer()"/>) representations of numeric data
+    /// <para/>This class can also be used to generate lexicographically sortable (according to
+    /// <see cref="BytesRef.UTF8SortedAsUTF16Comparer"/>) representations of numeric data
     /// types for other usages (e.g. sorting).
     ///
+    /// <para/>
     /// @lucene.internal
     /// @since 2.9, API changed non backwards-compliant in 4.0
     /// </summary>
@@ -66,10 +67,10 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// The default precision step used by <seealso cref="IntField"/>,
-        /// <seealso cref="FloatField"/>, <seealso cref="LongField"/>, {@link
-        /// DoubleField}, <seealso cref="NumericTokenStream"/>, {@link
-        /// NumericRangeQuery}, and <seealso cref="NumericRangeFilter"/>.
+        /// The default precision step used by <see cref="Documents.Int32Field"/>,
+        /// <see cref="Documents.SingleField"/>, <see cref="Documents.Int64Field"/>, 
+        /// <see cref="Documents.DoubleField"/>, <see cref="Analysis.NumericTokenStream"/>,
+        /// <see cref="Search.NumericRangeQuery"/>, and <see cref="Search.NumericRangeFilter"/>.
         /// </summary>
         public const int PRECISION_STEP_DEFAULT = 4;
 
@@ -82,7 +83,7 @@ namespace Lucene.Net.Util
         public const char SHIFT_START_INT64 = (char)0x20;
 
         /// <summary>
-        /// The maximum term length (used for <code>byte[]</code> buffer size)
+        /// The maximum term length (used for <see cref="T:byte[]"/> buffer size)
         /// for encoding <see cref="long"/> values.
         /// <para/>
         /// NOTE: This was BUF_SIZE_LONG in Lucene
@@ -92,7 +93,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Integers are stored at lower precision by shifting off lower bits. The shift count is
-        /// stored as <code>SHIFT_START_INT32+shift</code> in the first byte
+        /// stored as <c>SHIFT_START_INT32+shift</c> in the first byte
         /// <para/>
         /// NOTE: This was SHIFT_START_INT in Lucene
         /// </summary>
@@ -108,45 +109,45 @@ namespace Lucene.Net.Util
         public const int BUF_SIZE_INT32 = 31 / 7 + 2;
 
         /// <summary>
-        /// Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
-        /// this is method is used by <seealso cref="NumericTokenStream"/>.
-        /// After encoding, {@code bytes.offset} will always be 0. 
+        /// Returns prefix coded bits after reducing the precision by <paramref name="shift"/> bits.
+        /// This method is used by <see cref="Analysis.NumericTokenStream"/>.
+        /// After encoding, <c>bytes.Offset</c> will always be 0. 
         /// <para/>
         /// NOTE: This was longToPrefixCoded() in Lucene
         /// </summary>
-        /// <param name="val"> the numeric value </param>
-        /// <param name="shift"> how many bits to strip from the right </param>
-        /// <param name="bytes"> will contain the encoded value </param>
+        /// <param name="val"> The numeric value </param>
+        /// <param name="shift"> How many bits to strip from the right </param>
+        /// <param name="bytes"> Will contain the encoded value </param>
         public static void Int64ToPrefixCoded(long val, int shift, BytesRef bytes)
         {
             Int64ToPrefixCodedBytes(val, shift, bytes);
         }
 
         /// <summary>
-        /// Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
-        /// this is method is used by <seealso cref="NumericTokenStream"/>.
-        /// After encoding, {@code bytes.offset} will always be 0. 
+        /// Returns prefix coded bits after reducing the precision by <paramref name="shift"/> bits.
+        /// This method is used by <see cref="Analysis.NumericTokenStream"/>.
+        /// After encoding, <c>bytes.Offset</c> will always be 0. 
         /// <para/>
         /// NOTE: This was intToPrefixCoded() in Lucene
         /// </summary>
-        /// <param name="val"> the numeric value </param>
-        /// <param name="shift"> how many bits to strip from the right </param>
-        /// <param name="bytes"> will contain the encoded value </param>
+        /// <param name="val"> The numeric value </param>
+        /// <param name="shift"> How many bits to strip from the right </param>
+        /// <param name="bytes"> Will contain the encoded value </param>
         public static void Int32ToPrefixCoded(int val, int shift, BytesRef bytes)
         {
             Int32ToPrefixCodedBytes(val, shift, bytes);
         }
 
         /// <summary>
-        /// Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
-        /// this is method is used by <seealso cref="NumericTokenStream"/>.
-        /// After encoding, {@code bytes.offset} will always be 0. 
+        /// Returns prefix coded bits after reducing the precision by <paramref name="shift"/> bits.
+        /// This method is used by <see cref="Analysis.NumericTokenStream"/>.
+        /// After encoding, <c>bytes.Offset</c> will always be 0. 
         /// <para/>
         /// NOTE: This was longToPrefixCodedBytes() in Lucene
         /// </summary>
-        /// <param name="val"> the numeric value </param>
-        /// <param name="shift"> how many bits to strip from the right </param>
-        /// <param name="bytes"> will contain the encoded value </param>
+        /// <param name="val"> The numeric value </param>
+        /// <param name="shift"> How many bits to strip from the right </param>
+        /// <param name="bytes"> Will contain the encoded value </param>
         public static void Int64ToPrefixCodedBytes(long val, int shift, BytesRef bytes)
         {
             if ((shift & ~0x3f) != 0) // ensure shift is 0..63
@@ -173,15 +174,15 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
-        /// this is method is used by <seealso cref="NumericTokenStream"/>.
-        /// After encoding, {@code bytes.offset} will always be 0. 
+        /// Returns prefix coded bits after reducing the precision by <paramref name="shift"/> bits.
+        /// This method is used by <see cref="Analysis.NumericTokenStream"/>.
+        /// After encoding, <c>bytes.Offset</c> will always be 0. 
         /// <para/>
         /// NOTE: This was intToPrefixCodedBytes() in Lucene
         /// </summary>
-        /// <param name="val"> the numeric value </param>
-        /// <param name="shift"> how many bits to strip from the right </param>
-        /// <param name="bytes"> will contain the encoded value </param>
+        /// <param name="val"> The numeric value </param>
+        /// <param name="shift"> How many bits to strip from the right </param>
+        /// <param name="bytes"> Will contain the encoded value </param>
         public static void Int32ToPrefixCodedBytes(int val, int shift, BytesRef bytes)
         {
             if ((shift & ~0x1f) != 0) // ensure shift is 0..31
@@ -208,11 +209,11 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the shift value from a prefix encoded {@code long}. 
+        /// Returns the shift value from a prefix encoded <see cref="long"/>. 
         /// <para/>
         /// NOTE: This was getPrefixCodedLongShift() in Lucene
         /// </summary>
-        /// <exception cref="NumberFormatException"> if the supplied <seealso cref="BytesRef"/> is
+        /// <exception cref="FormatException"> if the supplied <see cref="BytesRef"/> is
         /// not correctly prefix encoded. </exception>
         public static int GetPrefixCodedInt64Shift(BytesRef val)
         {
@@ -225,11 +226,11 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the shift value from a prefix encoded {@code int}. 
+        /// Returns the shift value from a prefix encoded <see cref="int"/>. 
         /// <para/>
         /// NOTE: This was getPrefixCodedIntShift() in Lucene
         /// </summary>
-        /// <exception cref="NumberFormatException"> if the supplied <seealso cref="BytesRef"/> is
+        /// <exception cref="FormatException"> if the supplied <see cref="BytesRef"/> is
         /// not correctly prefix encoded. </exception>
         public static int GetPrefixCodedInt32Shift(BytesRef val)
         {
@@ -242,15 +243,15 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns a long from prefixCoded bytes.
+        /// Returns a <see cref="long"/> from prefixCoded bytes.
         /// Rightmost bits will be zero for lower precision codes.
-        /// this method can be used to decode a term's value. 
+        /// This method can be used to decode a term's value. 
         /// <para/>
         /// NOTE: This was prefixCodedToLong() in Lucene
         /// </summary>
-        /// <exception cref="NumberFormatException"> if the supplied <seealso cref="BytesRef"/> is
+        /// <exception cref="FormatException"> if the supplied <see cref="BytesRef"/> is
         /// not correctly prefix encoded. </exception>
-        /// <seealso cref= #longToPrefixCodedBytes </seealso>
+        /// <seealso cref="Int64ToPrefixCodedBytes(long, int, BytesRef)"/>
         public static long PrefixCodedToInt64(BytesRef val)
         {
             long sortableBits = 0L;
@@ -268,15 +269,15 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns an int from prefixCoded bytes.
+        /// Returns an <see cref="int"/> from prefixCoded bytes.
         /// Rightmost bits will be zero for lower precision codes.
-        /// this method can be used to decode a term's value. 
+        /// This method can be used to decode a term's value. 
         /// <para/>
         /// NOTE: This was prefixCodedToInt() in Lucene
         /// </summary>
-        /// <exception cref="NumberFormatException"> if the supplied <seealso cref="BytesRef"/> is
+        /// <exception cref="FormatException"> if the supplied <see cref="BytesRef"/> is
         /// not correctly prefix encoded. </exception>
-        /// <seealso cref= #intToPrefixCodedBytes </seealso>
+        /// <seealso cref="Int32ToPrefixCodedBytes(int, int, BytesRef)"/>
         public static int PrefixCodedToInt32(BytesRef val)
         {
             long sortableBits = 0;
@@ -294,16 +295,16 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Converts a <code>double</code> value to a sortable signed <code>long</code>.
+        /// Converts a <see cref="double"/> value to a sortable signed <see cref="long"/>.
         /// The value is converted by getting their IEEE 754 floating-point &quot;double format&quot;
-        /// bit layout and then some bits are swapped, to be able to compare the result as long.
-        /// By this the precision is not reduced, but the value can easily used as a long.
-        /// The sort order (including <seealso cref="Double#NaN"/>) is defined by
-        /// <seealso cref="Double#compareTo"/>; {@code NaN} is greater than positive infinity. 
+        /// bit layout and then some bits are swapped, to be able to compare the result as <see cref="long"/>.
+        /// By this the precision is not reduced, but the value can easily be used as a <see cref="long"/>.
+        /// The sort order (including <see cref="double.NaN"/>) is defined by
+        /// <see cref="double.CompareTo(double)"/>; <c>NaN</c> is greater than positive infinity. 
         /// <para/>
         /// NOTE: This was doubleToSortableLong() in Lucene
         /// </summary>
-        /// <seealso cref= #sortableLongToDouble </seealso>
+        /// <seealso cref="SortableInt64ToDouble(long)"/>
         public static long DoubleToSortableInt64(double val)
         {
             long f = Number.DoubleToInt64Bits(val);
@@ -315,11 +316,11 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Converts a sortable <code>long</code> back to a <code>double</code>. 
+        /// Converts a sortable <see cref="long"/> back to a <see cref="double"/>. 
         /// <para/>
         /// NOTE: This was sortableLongToDouble() in Lucene
         /// </summary>
-        /// <seealso cref= #doubleToSortableLong </seealso>
+        /// <seealso cref="DoubleToSortableInt64(double)"/>
         public static double SortableInt64ToDouble(long val)
         {
             if (val < 0)
@@ -330,16 +331,16 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Converts a <code>float</code> value to a sortable signed <code>int</code>.
+        /// Converts a <see cref="float"/> value to a sortable signed <see cref="int"/>.
         /// The value is converted by getting their IEEE 754 floating-point &quot;float format&quot;
-        /// bit layout and then some bits are swapped, to be able to compare the result as int.
-        /// By this the precision is not reduced, but the value can easily used as an int.
-        /// The sort order (including <seealso cref="Float#NaN"/>) is defined by
-        /// <seealso cref="Float#compareTo"/>; {@code NaN} is greater than positive infinity. 
+        /// bit layout and then some bits are swapped, to be able to compare the result as <see cref="int"/>.
+        /// By this the precision is not reduced, but the value can easily be used as an <see cref="int"/>.
+        /// The sort order (including <see cref="float.NaN"/>) is defined by
+        /// <see cref="float.CompareTo(float)"/>; <c>NaN</c> is greater than positive infinity. 
         /// <para/>
         /// NOTE: This was floatToSortableInt() in Lucene
         /// </summary>
-        /// <seealso cref= #sortableIntToFloat </seealso>
+        /// <seealso cref="SortableInt32ToSingle(int)"/>
         public static int SingleToSortableInt32(float val)
         {
             int f = Number.SingleToInt32Bits(val);
@@ -368,11 +369,11 @@ namespace Lucene.Net.Util
         /// <summary>
         /// Splits a long range recursively.
         /// You may implement a builder that adds clauses to a
-        /// <seealso cref="Lucene.Net.Search.BooleanQuery"/> for each call to its
-        /// <seealso cref="Int64RangeBuilder#addRange(BytesRef,BytesRef)"/>
+        /// <see cref="Lucene.Net.Search.BooleanQuery"/> for each call to its
+        /// <see cref="Int64RangeBuilder.AddRange(BytesRef, BytesRef)"/>
         /// method.
         /// <para/>
-        /// this method is used by <seealso cref="NumericRangeQuery"/>.
+        /// This method is used by <see cref="Search.NumericRangeQuery"/>.
         /// <para/>
         /// NOTE: This was splitLongRange() in Lucene
         /// </summary>
@@ -382,13 +383,13 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Splits an int range recursively.
+        /// Splits an <see cref="int"/> range recursively.
         /// You may implement a builder that adds clauses to a
-        /// <seealso cref="Lucene.Net.Search.BooleanQuery"/> for each call to its
-        /// <seealso cref="Int32RangeBuilder#addRange(BytesRef,BytesRef)"/>
+        /// <see cref="Lucene.Net.Search.BooleanQuery"/> for each call to its
+        /// <see cref="Int32RangeBuilder.AddRange(BytesRef, BytesRef)"/>
         /// method.
         /// <para/>
-        /// this method is used by <seealso cref="NumericRangeQuery"/>.
+        /// This method is used by <see cref="Search.NumericRangeQuery"/>.
         /// <para/>
         /// NOTE: This was splitIntRange() in Lucene
         /// </summary>
@@ -398,7 +399,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// this helper does the splitting for both 32 and 64 bit. </summary>
+        /// This helper does the splitting for both 32 and 64 bit. </summary>
         private static void SplitRange(object builder, int valSize, int precisionStep, long minBound, long maxBound)
         {
             if (precisionStep < 1)
@@ -441,7 +442,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Helper that delegates to correct range builder </summary>
+        /// Helper that delegates to correct range builder. </summary>
         private static void AddRange(object builder, int valSize, long minBound, long maxBound, int shift)
         {
             // for the max bound set all lower bits (that were shifted away):
@@ -467,18 +468,18 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Callback for <seealso cref="#splitLongRange"/>.
-        /// You need to overwrite only one of the methods.
+        /// Callback for <see cref="SplitInt64Range(Int64RangeBuilder, int, long, long)"/>.
+        /// You need to override only one of the methods.
         /// <para/>
         /// NOTE: This was LongRangeBuilder in Lucene
-        /// 
+        /// <para/>
         /// @lucene.internal
         /// @since 2.9, API changed non backwards-compliant in 4.0
         /// </summary>
         public abstract class Int64RangeBuilder
         {
             /// <summary>
-            /// Overwrite this method, if you like to receive the already prefix encoded range bounds.
+            /// Override this method if you want to receive the already prefix encoded range bounds.
             /// You can directly build classical (inclusive) range queries from them.
             /// </summary>
             public virtual void AddRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded)
@@ -487,7 +488,7 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Overwrite this method, if you like to receive the raw long range bounds.
+            /// Override this method if you want to receive the raw long range bounds.
             /// You can use this for e.g. debugging purposes (print out range bounds).
             /// </summary>
             public virtual void AddRange(long min, long max, int shift)
@@ -500,8 +501,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Callback for <seealso cref="#splitIntRange"/>.
-        /// You need to overwrite only one of the methods.
+        /// Callback for <see cref="SplitInt32Range(Int32RangeBuilder, int, int, int)"/>.
+        /// You need to override only one of the methods.
         /// <para/>
         /// NOTE: This was IntRangeBuilder in Lucene
         /// 
@@ -511,7 +512,7 @@ namespace Lucene.Net.Util
         public abstract class Int32RangeBuilder
         {
             /// <summary>
-            /// Overwrite this method, if you like to receive the already prefix encoded range bounds.
+            /// Override this method if you want to receive the already prefix encoded range bounds.
             /// You can directly build classical range (inclusive) queries from them.
             /// </summary>
             public virtual void AddRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded)
@@ -520,7 +521,7 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Overwrite this method, if you like to receive the raw int range bounds.
+            /// Override this method if you want to receive the raw int range bounds.
             /// You can use this for e.g. debugging purposes (print out range bounds).
             /// </summary>
             public virtual void AddRange(int min, int max, int shift)
@@ -533,15 +534,15 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Filters the given <seealso cref="TermsEnum"/> by accepting only prefix coded 64 bit
-        /// terms with a shift value of <tt>0</tt>.
+        /// Filters the given <see cref="TermsEnum"/> by accepting only prefix coded 64 bit
+        /// terms with a shift value of <c>0</c>.
         /// <para/>
         /// NOTE: This was filterPrefixCodedLongs() in Lucene
         /// </summary>
         /// <param name="termsEnum">
-        ///          the terms enum to filter </param>
-        /// <returns> a filtered <seealso cref="TermsEnum"/> that only returns prefix coded 64 bit
-        ///         terms with a shift value of <tt>0</tt>. </returns>
+        ///          The terms enum to filter </param>
+        /// <returns> A filtered <see cref="TermsEnum"/> that only returns prefix coded 64 bit
+        ///         terms with a shift value of <c>0</c>. </returns>
         public static TermsEnum FilterPrefixCodedInt64s(TermsEnum termsEnum)
         {
             return new FilteredTermsEnumAnonymousInnerClassHelper(termsEnum);
@@ -561,15 +562,15 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Filters the given <seealso cref="TermsEnum"/> by accepting only prefix coded 32 bit
-        /// terms with a shift value of <tt>0</tt>.
+        /// Filters the given <see cref="TermsEnum"/> by accepting only prefix coded 32 bit
+        /// terms with a shift value of <c>0</c>.
         /// <para/>
         /// NOTE: This was filterPrefixCodedInts() in Lucene
         /// </summary>
         /// <param name="termsEnum">
-        ///          the terms enum to filter </param>
-        /// <returns> a filtered <seealso cref="TermsEnum"/> that only returns prefix coded 32 bit
-        ///         terms with a shift value of <tt>0</tt>. </returns>
+        ///          The terms enum to filter </param>
+        /// <returns> A filtered <see cref="TermsEnum"/> that only returns prefix coded 32 bit
+        ///         terms with a shift value of <c>0</c>. </returns>
         public static TermsEnum FilterPrefixCodedInt32s(TermsEnum termsEnum)
         {
             return new FilteredTermsEnumAnonymousInnerClassHelper2(termsEnum);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/OfflineSorter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/OfflineSorter.cs b/src/Lucene.Net/Util/OfflineSorter.cs
index 9eca503..db34023 100644
--- a/src/Lucene.Net/Util/OfflineSorter.cs
+++ b/src/Lucene.Net/Util/OfflineSorter.cs
@@ -31,10 +31,10 @@ namespace Lucene.Net.Util
     /// <summary>
     /// On-disk sorting of byte arrays. Each byte array (entry) is a composed of the following
     /// fields:
-    /// <ul>
-    ///   <li>(two bytes) length of the following byte array,
-    ///   <li>exactly the above count of bytes for the sequence to be sorted.
-    /// </ul>
+    /// <list type="bullet">
+    ///   <item><description>(two bytes) length of the following byte array,</description></item>
+    ///   <item><description>exactly the above count of bytes for the sequence to be sorted.</description></item>
+    /// </list>
     /// </summary>
     public sealed class OfflineSorter
     {
@@ -69,8 +69,8 @@ namespace Lucene.Net.Util
         /// <summary>
         /// A bit more descriptive unit for constructors.
         /// </summary>
-        /// <seealso cref= #automatic() </seealso>
-        /// <seealso cref= #megabytes(long) </seealso>
+        /// <seealso cref="Automatic()"/>
+        /// <seealso cref="Megabytes(long)"/>
         public sealed class BufferSize
         {
             internal readonly int bytes;
@@ -91,7 +91,7 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Creates a <seealso cref="BufferSize"/> in MB. The given
+            /// Creates a <see cref="BufferSize"/> in MB. The given
             /// values must be &gt; 0 and &lt; 2048.
             /// </summary>
             public static BufferSize Megabytes(long mb)
@@ -101,7 +101,7 @@ namespace Lucene.Net.Util
 
             /// <summary>
             /// Approximately half of the currently available free heap, but no less
-            /// than <seealso cref="#ABSOLUTE_MIN_SORT_BUFFER_SIZE"/>. However if current heap allocation
+            /// than <see cref="ABSOLUTE_MIN_SORT_BUFFER_SIZE"/>. However if current heap allocation
             /// is insufficient or if there is a large portion of unallocated heap-space available
             /// for sorting consult with max allowed heap size.
             /// </summary>
@@ -149,32 +149,32 @@ namespace Lucene.Net.Util
             private readonly OfflineSorter outerInstance;
 
             /// <summary>
-            /// number of temporary files created when merging partitions </summary>
+            /// Number of temporary files created when merging partitions </summary>
             public int TempMergeFiles { get; set; }
             /// <summary>
-            /// number of partition merges </summary>
+            /// Number of partition merges </summary>
             public int MergeRounds { get; set; }
             /// <summary>
-            /// number of lines of data read </summary>
+            /// Number of lines of data read </summary>
             public int Lines { get; set; }
             /// <summary>
-            /// time spent merging sorted partitions (in milliseconds) </summary>
+            /// Time spent merging sorted partitions (in milliseconds) </summary>
             public long MergeTime { get; set; }
             /// <summary>
-            /// time spent sorting data (in milliseconds) </summary>
+            /// Time spent sorting data (in milliseconds) </summary>
             public long SortTime { get; set; }
             /// <summary>
-            /// total time spent (in milliseconds) </summary>
+            /// Total time spent (in milliseconds) </summary>
             public long TotalTime { get; set; }
             /// <summary>
-            /// time spent in i/o read (in milliseconds) </summary>
+            /// Time spent in i/o read (in milliseconds) </summary>
             public long ReadTime { get; set; }
             /// <summary>
-            /// read buffer size (in bytes) </summary>
-            public long BufferSize { get; set; }
+            /// Read buffer size (in bytes) </summary>
+            public long BufferSize { get; set; } // LUCENENET TODO: API - make setter private
 
             /// <summary>
-            /// create a new SortInfo (with empty statistics) for debugging </summary>
+            /// Create a new <see cref="SortInfo"/> (with empty statistics) for debugging. </summary>
             public SortInfo(OfflineSorter outerInstance)
             {
                 this.outerInstance = outerInstance;
@@ -182,6 +182,9 @@ namespace Lucene.Net.Util
                 InitializeInstanceFields();
             }
 
+            /// <summary>
+            /// Returns a string representation of this object.
+            /// </summary>
             public override string ToString()
             {
                 return string.Format(CultureInfo.InvariantCulture, 
@@ -207,8 +210,8 @@ namespace Lucene.Net.Util
         /// <summary>
         /// Defaults constructor.
         /// </summary>
-        /// <seealso cref= #defaultTempDir() </seealso>
-        /// <seealso cref= BufferSize#automatic() </seealso>
+        /// <seealso cref="DefaultTempDir()"/>
+        /// <seealso cref="BufferSize.Automatic()"/>
         public OfflineSorter()
             : this(DEFAULT_COMPARER, BufferSize.Automatic(), DefaultTempDir(), MAX_TEMPFILES)
         {
@@ -217,8 +220,8 @@ namespace Lucene.Net.Util
         /// <summary>
         /// Defaults constructor with a custom comparer.
         /// </summary>
-        /// <seealso cref= #defaultTempDir() </seealso>
-        /// <seealso cref= BufferSize#automatic() </seealso>
+        /// <seealso cref="DefaultTempDir()"/>
+        /// <seealso cref="BufferSize.Automatic()"/>
         public OfflineSorter(IComparer<BytesRef> comparer)
             : this(comparer, BufferSize.Automatic(), DefaultTempDir(), MAX_TEMPFILES)
         {
@@ -255,7 +258,7 @@ namespace Lucene.Net.Util
 
             // LUCENENET NOTE: Can't do this because another thread could recreate the file before we are done here.
             // and cause this to bomb. We use the existence of the file as an indicator that we are done using it.
-            //output.Delete(); 
+            //output.Delete(); // LUCENENET TODO: BUG: Put this back in (we now have thread-safe file creation, so this should be like the original).
 
             var merges = new List<FileInfo>();
             bool success2 = false;
@@ -345,7 +348,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Returns the default temporary directory. By default, the System's temp folder. If not accessible
-        /// or not available, an IOException is thrown
+        /// or not available, an <see cref="IOException"/> is thrown.
         /// </summary>
         public static DirectoryInfo DefaultTempDir()
         {
@@ -394,7 +397,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Merge a list of sorted temporary files (partitions) into an output file </summary>
+        /// Merge a list of sorted temporary files (partitions) into an output file. </summary>
         internal void MergePartitions(IEnumerable<FileInfo> merges, FileInfo outputFile)
         {
             long start = Environment.TickCount;
@@ -470,7 +473,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Read in a single partition of data </summary>
+        /// Read in a single partition of data. </summary>
         internal int ReadPartition(ByteSequencesReader reader)
         {
             long start = Environment.TickCount;
@@ -504,22 +507,22 @@ namespace Lucene.Net.Util
 
 
         /// <summary>
-        /// Utility class to emit length-prefixed byte[] entries to an output stream for sorting.
-        /// Complementary to <seealso cref="ByteSequencesReader"/>.
+        /// Utility class to emit length-prefixed <see cref="T:byte[]"/> entries to an output stream for sorting.
+        /// Complementary to <see cref="ByteSequencesReader"/>.
         /// </summary>
         public class ByteSequencesWriter : IDisposable
         {
             private readonly DataOutput os;
 
             /// <summary>
-            /// Constructs a ByteSequencesWriter to the provided File </summary>
+            /// Constructs a <see cref="ByteSequencesWriter"/> to the provided <see cref="FileInfo"/>. </summary>
             public ByteSequencesWriter(FileInfo file)
                 : this(NewBinaryWriterDataOutput(file))
             {
             }
 
             /// <summary>
-            /// Constructs a ByteSequencesWriter to the provided DataOutput </summary>
+            /// Constructs a <see cref="ByteSequencesWriter"/> to the provided <see cref="DataOutput"/>. </summary>
             public ByteSequencesWriter(DataOutput os)
             {
                 this.os = os;
@@ -544,8 +547,8 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Writes a BytesRef. </summary>
-            /// <seealso cref= #write(byte[], int, int) </seealso>
+            /// Writes a <see cref="BytesRef"/>. </summary>
+            /// <seealso cref="Write(byte[], int, int)"/>
             public virtual void Write(BytesRef @ref)
             {
                 Debug.Assert(@ref != null);
@@ -554,7 +557,7 @@ namespace Lucene.Net.Util
 
             /// <summary>
             /// Writes a byte array. </summary>
-            /// <seealso cref= #write(byte[], int, int) </seealso>
+            /// <seealso cref="Write(byte[], int, int)"/>
             public virtual void Write(byte[] bytes)
             {
                 Write(bytes, 0, bytes.Length);
@@ -562,8 +565,8 @@ namespace Lucene.Net.Util
 
             /// <summary>
             /// Writes a byte array.
-            /// <p>
-            /// The length is written as a <code>short</code>, followed
+            /// <para/>
+            /// The length is written as a <see cref="short"/>, followed
             /// by the bytes.
             /// </summary>
             public virtual void Write(byte[] bytes, int off, int len)
@@ -576,7 +579,7 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Closes the provided <seealso cref="DataOutput"/> if it is <seealso cref="IDisposable"/>.
+            /// Disposes the provided <see cref="DataOutput"/> if it is <see cref="IDisposable"/>.
             /// </summary>
             public void Dispose()
             {
@@ -589,34 +592,34 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Utility class to read length-prefixed byte[] entries from an input.
-        /// Complementary to <seealso cref="ByteSequencesWriter"/>.
+        /// Utility class to read length-prefixed <see cref="T:byte[]"/> entries from an input.
+        /// Complementary to <see cref="ByteSequencesWriter"/>.
         /// </summary>
         public class ByteSequencesReader : IDisposable
         {
             private readonly DataInput inputStream;
 
             /// <summary>
-            /// Constructs a ByteSequencesReader from the provided File </summary>
+            /// Constructs a <see cref="ByteSequencesReader"/> from the provided <see cref="FileInfo"/>. </summary>
             public ByteSequencesReader(FileInfo file)
                 : this(new BinaryReaderDataInput(new BinaryReader(new FileStream(file.FullName, FileMode.Open, FileAccess.Read, FileShare.Read))))
             {
             }
 
             /// <summary>
-            /// Constructs a ByteSequencesReader from the provided DataInput </summary>
+            /// Constructs a <see cref="ByteSequencesReader"/> from the provided <see cref="DataInput"/>. </summary>
             public ByteSequencesReader(DataInput inputStream)
             {
                 this.inputStream = inputStream;
             }
 
             /// <summary>
-            /// Reads the next entry into the provided <seealso cref="BytesRef"/>. The internal
+            /// Reads the next entry into the provided <see cref="BytesRef"/>. The internal
             /// storage is resized if needed.
             /// </summary>
-            /// <returns> Returns <code>false</code> if EOF occurred when trying to read
-            /// the header of the next sequence. Returns <code>true</code> otherwise. </returns>
-            /// <exception cref="EOFException"> if the file ends before the full sequence is read. </exception>
+            /// <returns> Returns <c>false</c> if EOF occurred when trying to read
+            /// the header of the next sequence. Returns <c>true</c> otherwise. </returns>
+            /// <exception cref="EndOfStreamException"> If the file ends before the full sequence is read. </exception>
             public virtual bool Read(BytesRef @ref)
             {
                 ushort length;
@@ -639,11 +642,10 @@ namespace Lucene.Net.Util
             /// <summary>
             /// Reads the next entry and returns it if successful.
             /// </summary>
-            /// <seealso cref= #read(BytesRef)
-            /// </seealso>
-            /// <returns> Returns <code>null</code> if EOF occurred before the next entry
+            /// <seealso cref="Read(BytesRef)"/>
+            /// <returns> Returns <c>null</c> if EOF occurred before the next entry
             /// could be read. </returns>
-            /// <exception cref="EOFException"> if the file ends before the full sequence is read. </exception>
+            /// <exception cref="EndOfStreamException"> If the file ends before the full sequence is read. </exception>
             public virtual byte[] Read()
             {
                 ushort length;
@@ -663,7 +665,7 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Closes the provided <seealso cref="DataInput"/> if it is <seealso cref="IDisposable"/>.
+            /// Disposes the provided <see cref="DataInput"/> if it is <see cref="IDisposable"/>.
             /// </summary>
             public void Dispose()
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/OpenBitSet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/OpenBitSet.cs b/src/Lucene.Net/Util/OpenBitSet.cs
index c15adbe..5d7704f 100644
--- a/src/Lucene.Net/Util/OpenBitSet.cs
+++ b/src/Lucene.Net/Util/OpenBitSet.cs
@@ -27,54 +27,55 @@ namespace Lucene.Net.Util
     /// <summary>
     /// An "open" BitSet implementation that allows direct access to the array of words
     /// storing the bits.
-    /// <p/>
-    /// Unlike java.util.bitset, the fact that bits are packed into an array of longs
-    /// is part of the interface.  this allows efficient implementation of other algorithms
+    /// <para/>
+    /// NOTE: This can be used in .NET any place where a <c>java.util.BitSet</c> is used in Java.
+    /// <para/>
+    /// Unlike <c>java.util.BitSet</c>, the fact that bits are packed into an array of longs
+    /// is part of the interface.  This allows efficient implementation of other algorithms
     /// by someone other than the author.  It also allows one to efficiently implement
     /// alternate serialization or interchange formats.
-    /// <p/>
-    /// <code>OpenBitSet</code> is faster than <code>java.util.BitSet</code> in most operations
+    /// <para/>
+    /// <see cref="OpenBitSet"/> is faster than <c>java.util.BitSet</c> in most operations
     /// and *much* faster at calculating cardinality of sets and results of set operations.
     /// It can also handle sets of larger cardinality (up to 64 * 2**32-1)
-    /// <p/>
-    /// The goals of <code>OpenBitSet</code> are the fastest implementation possible, and
+    /// <para/>
+    /// The goals of <see cref="OpenBitSet"/> are the fastest implementation possible, and
     /// maximum code reuse.  Extra safety and encapsulation
     /// may always be built on top, but if that's built in, the cost can never be removed (and
     /// hence people re-implement their own version in order to get better performance).
-    /// If you want a "safe", totally encapsulated (and slower and limited) BitSet
-    /// class, use <code>java.util.BitSet</code>.
-    /// <p/>
+    /// <para/>
     /// <h3>Performance Results</h3>
     ///
     /// Test system: Pentium 4, Sun Java 1.5_06 -server -Xbatch -Xmx64M
-    /// <br/>BitSet size = 1,000,000
-    /// <br/>Results are java.util.BitSet time divided by OpenBitSet time.
-    /// <table border="1">
-    /// <tr>
-    ///  <th></th> <th>cardinality</th> <th>intersect_count</th> <th>union</th> <th>nextSetBit</th> <th>get</th> <th>iterator</th>
-    /// </tr>
-    /// <tr>
-    ///  <th>50% full</th> <td>3.36</td> <td>3.96</td> <td>1.44</td> <td>1.46</td> <td>1.99</td> <td>1.58</td>
-    /// </tr>
-    /// <tr>
-    ///   <th>1% full</th> <td>3.31</td> <td>3.90</td> <td>&nbsp;</td> <td>1.04</td> <td>&nbsp;</td> <td>0.99</td>
-    /// </tr>
-    /// </table>
-    /// <br/>
+    /// <para/>BitSet size = 1,000,000
+    /// <para/>Results are java.util.BitSet time divided by OpenBitSet time.
+    /// <list type="table">
+    ///     <listheader>
+    ///         <term></term> <term>cardinality</term> <term>IntersectionCount</term> <term>Union</term> <term>NextSetBit</term> <term>Get</term> <term>GetIterator</term>
+    ///     </listheader>
+    ///     <item>
+    ///         <term>50% full</term> <description>3.36</description> <description>3.96</description> <description>1.44</description> <description>1.46</description> <description>1.99</description> <description>1.58</description>
+    ///     </item>
+    ///     <item>
+    ///         <term>1% full</term> <description>3.31</description> <description>3.90</description> <description>&#160;</description> <description>1.04</description> <description>&#160;</description> <description>0.99</description>
+    ///     </item>
+    /// </list>
+    /// <para/>
+    /// <para/>
     /// Test system: AMD Opteron, 64 bit linux, Sun Java 1.5_06 -server -Xbatch -Xmx64M
-    /// <br/>BitSet size = 1,000,000
-    /// <br/>Results are java.util.BitSet time divided by OpenBitSet time.
-    /// <table border="1">
-    /// <tr>
-    ///  <th></th> <th>cardinality</th> <th>intersect_count</th> <th>union</th> <th>nextSetBit</th> <th>get</th> <th>iterator</th>
-    /// </tr>
-    /// <tr>
-    ///  <th>50% full</th> <td>2.50</td> <td>3.50</td> <td>1.00</td> <td>1.03</td> <td>1.12</td> <td>1.25</td>
-    /// </tr>
-    /// <tr>
-    ///   <th>1% full</th> <td>2.51</td> <td>3.49</td> <td>&nbsp;</td> <td>1.00</td> <td>&nbsp;</td> <td>1.02</td>
-    /// </tr>
-    /// </table>
+    /// <para/>BitSet size = 1,000,000
+    /// <para/>Results are java.util.BitSet time divided by OpenBitSet time.
+    /// <list type="table">
+    ///     <listheader>
+    ///         <term></term> <term>cardinality</term> <term>IntersectionCount</term> <term>Union</term> <term>NextSetBit</term> <term>Get</term> <term>GetIterator</term>
+    ///     </listheader>
+    ///     <item>
+    ///         <term>50% full</term> <description>2.50</description> <description>3.50</description> <description>1.00</description> <description>1.03</description> <description>1.12</description> <description>1.25</description>
+    ///     </item>
+    ///     <item>
+    ///         <term>1% full</term> <description>2.51</description> <description>3.49</description> <description>&#160;</description> <description>1.00</description> <description>&#160;</description> <description>1.02</description>
+    ///     </item>
+    /// </list>
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -88,7 +89,7 @@ namespace Lucene.Net.Util
         private long numBits;
 
         /// <summary>
-        /// Constructs an OpenBitSet large enough to hold {@code numBits}. </summary>
+        /// Constructs an <see cref="OpenBitSet"/> large enough to hold <paramref name="numBits"/>. </summary>
         public OpenBitSet(long numBits)
         {
             this.numBits = numBits;
@@ -104,17 +105,16 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Constructs an OpenBitSet from an existing long[].
-        /// <p>
+        /// Constructs an <see cref="OpenBitSet"/> from an existing <see cref="T:long[]"/>.
+        /// <para/>
         /// The first 64 bits are in long[0], with bit index 0 at the least significant
         /// bit, and bit index 63 at the most significant. Given a bit index, the word
         /// containing it is long[index/64], and it is at bit number index%64 within
         /// that word.
-        /// <p>
-        /// numWords are the number of elements in the array that contain set bits
-        /// (non-zero longs). numWords should be &lt= bits.length, and any existing
-        /// words in the array at position &gt= numWords should be zero.
-        ///
+        /// <para/>
+        /// <paramref name="numWords"/> are the number of elements in the array that contain set bits
+        /// (non-zero longs). <paramref name="numWords"/> should be &lt;= bits.Length, and any existing
+        /// words in the array at position &gt;= numWords should be zero.
         /// </summary>
         public OpenBitSet(long[] bits, int numWords)
         {
@@ -138,7 +138,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// this DocIdSet implementation is cacheable. </summary>
+        /// This DocIdSet implementation is cacheable. </summary>
         public override bool IsCacheable
         {
             get
@@ -148,7 +148,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the current capacity in bits (1 greater than the index of the last bit) </summary>
+        /// Returns the current capacity in bits (1 greater than the index of the last bit). </summary>
         public virtual long Capacity
         {
             get { return m_bits.Length << 6; }
@@ -166,7 +166,8 @@ namespace Lucene.Net.Util
         //}
 
         /// <summary>
-        /// Returns the current capacity of this set. This is *not* equal to <seealso cref="#cardinality"/>.
+        /// Returns the current capacity of this set. This is *not* equal to <see cref="Cardinality()"/>.
+        /// <para/>
         /// NOTE: This is equivalent to size() or length() in Lucene.
         /// </summary>
         public virtual int Length
@@ -175,7 +176,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns true if there are no set bits </summary>
+        /// Returns <c>true</c> if there are no set bits. </summary>
         public virtual bool IsEmpty
         {
             get
@@ -185,7 +186,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Expert: returns the long[] storing the bits </summary>
+        /// Expert: returns the <see cref="T:long[]"/> storing the bits. </summary>
         [WritableArray]
         public virtual long[] GetBits()
         {
@@ -193,7 +194,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Expert: gets the number of longs in the array that are in use </summary>
+        /// Expert: gets the number of <see cref="long"/>s in the array that are in use. </summary>
         public virtual int NumWords
         {
             get
@@ -203,7 +204,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns true or false for the specified bit index. </summary>
+        /// Returns <c>true</c> or <c>false</c> for the specified bit <paramref name="index"/>. </summary>
         public virtual bool Get(int index)
         {
             int i = index >> 6; // div 64
@@ -220,8 +221,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns true or false for the specified bit index.
-        /// The index should be less than the OpenBitSet size
+        /// Returns <c>true</c> or <c>false</c> for the specified bit <paramref name="index"/>.
+        /// The index should be less than the <see cref="Length"/>.
         /// </summary>
         public virtual bool FastGet(int index)
         {
@@ -235,7 +236,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns true or false for the specified bit index
+        /// Returns <c>true</c> or <c>false</c> for the specified bit <paramref name="index"/>.
         /// </summary>
         public virtual bool Get(long index)
         {
@@ -250,8 +251,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns true or false for the specified bit index.
-        /// The index should be less than the OpenBitSet size.
+        /// Returns <c>true</c> or <c>false</c> for the specified bit <paramref name="index"/>.
+        /// The index should be less than the <see cref="Length"/>.
         /// </summary>
         public virtual bool FastGet(long index)
         {
@@ -275,8 +276,8 @@ namespace Lucene.Net.Util
         */
 
         /// <summary>
-        /// returns 1 if the bit is set, 0 if not.
-        /// The index should be less than the OpenBitSet size
+        /// Returns 1 if the bit is set, 0 if not.
+        /// The <paramref name="index"/> should be less than the <see cref="Length"/>.
         /// </summary>
         public virtual int GetBit(int index)
         {
@@ -296,7 +297,7 @@ namespace Lucene.Net.Util
         */
 
         /// <summary>
-        /// sets a bit, expanding the set size if necessary </summary>
+        /// Sets a bit, expanding the set size if necessary. </summary>
         public virtual void Set(long index)
         {
             int wordNum = ExpandingWordNum(index);
@@ -306,8 +307,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Sets the bit at the specified index.
-        /// The index should be less than the OpenBitSet size.
+        /// Sets the bit at the specified <paramref name="index"/>.
+        /// The <paramref name="index"/> should be less than the <see cref="Length"/>.
         /// </summary>
         public virtual void FastSet(int index)
         {
@@ -319,8 +320,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Sets the bit at the specified index.
-        /// The index should be less than the OpenBitSet size.
+        /// Sets the bit at the specified <paramref name="index"/>.
+        /// The <paramref name="index"/> should be less than the <see cref="Length"/>.
         /// </summary>
         public virtual void FastSet(long index)
         {
@@ -332,10 +333,10 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Sets a range of bits, expanding the set size if necessary
+        /// Sets a range of bits, expanding the set size if necessary.
         /// </summary>
-        /// <param name="startIndex"> lower index </param>
-        /// <param name="endIndex"> one-past the last bit to set </param>
+        /// <param name="startIndex"> Lower index </param>
+        /// <param name="endIndex"> One-past the last bit to set </param>
         public virtual void Set(long startIndex, long endIndex)
         {
             if (endIndex <= startIndex)
@@ -374,8 +375,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// clears a bit.
-        /// The index should be less than the OpenBitSet size.
+        /// Clears a bit.
+        /// The <paramref name="index"/> should be less than the <see cref="Length"/>.
         /// </summary>
         public virtual void FastClear(int index)
         {
@@ -394,8 +395,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// clears a bit.
-        /// The index should be less than the OpenBitSet size.
+        /// Clears a bit.
+        /// The <paramref name="index"/> should be less than the <see cref="Length"/>.
         /// </summary>
         public virtual void FastClear(long index)
         {
@@ -407,7 +408,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// clears a bit, allowing access beyond the current set size without changing the size. </summary>
+        /// Clears a bit, allowing access beyond the current set size without changing the size. </summary>
         public virtual void Clear(long index)
         {
             int wordNum = (int)(index >> 6); // div 64
@@ -423,8 +424,8 @@ namespace Lucene.Net.Util
         /// <summary>
         /// Clears a range of bits.  Clearing past the end does not change the size of the set.
         /// </summary>
-        /// <param name="startIndex"> lower index </param>
-        /// <param name="endIndex"> one-past the last bit to clear </param>
+        /// <param name="startIndex"> Lower index </param>
+        /// <param name="endIndex"> One-past the last bit to clear </param>
         public virtual void Clear(int startIndex, int endIndex)
         {
             if (endIndex <= startIndex)
@@ -470,8 +471,8 @@ namespace Lucene.Net.Util
         /// <summary>
         /// Clears a range of bits.  Clearing past the end does not change the size of the set.
         /// </summary>
-        /// <param name="startIndex"> lower index </param>
-        /// <param name="endIndex"> one-past the last bit to clear </param>
+        /// <param name="startIndex"> Lower index </param>
+        /// <param name="endIndex"> One-past the last bit to clear </param>
         public virtual void Clear(long startIndex, long endIndex)
         {
             if (endIndex <= startIndex)
@@ -514,7 +515,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Sets a bit and returns the previous value.
-        /// The index should be less than the OpenBitSet size.
+        /// The <paramref name="index"/> should be less than the <see cref="Length"/>.
         /// </summary>
         public virtual bool GetAndSet(int index)
         {
@@ -529,7 +530,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Sets a bit and returns the previous value.
-        /// The index should be less than the OpenBitSet size.
+        /// The <paramref name="index"/> should be less than the <see cref="Length"/>.
         /// </summary>
         public virtual bool GetAndSet(long index)
         {
@@ -543,8 +544,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// flips a bit.
-        /// The index should be less than the OpenBitSet size.
+        /// Flips a bit.
+        /// The <paramref name="index"/> should be less than the <see cref="Length"/>.
         /// </summary>
         public virtual void FastFlip(int index)
         {
@@ -556,8 +557,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// flips a bit.
-        /// The index should be less than the OpenBitSet size.
+        /// Flips a bit.
+        /// The <paramref name="index"/> should be less than the <see cref="Length"/>.
         /// </summary>
         public virtual void FastFlip(long index)
         {
@@ -569,7 +570,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// flips a bit, expanding the set size if necessary </summary>
+        /// Flips a bit, expanding the set size if necessary. </summary>
         public virtual void Flip(long index)
         {
             int wordNum = ExpandingWordNum(index);
@@ -579,8 +580,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// flips a bit and returns the resulting bit value.
-        /// The index should be less than the OpenBitSet size.
+        /// Flips a bit and returns the resulting bit value.
+        /// The <paramref name="index"/> should be less than the <see cref="Length"/>.
         /// </summary>
         public virtual bool FlipAndGet(int index)
         {
@@ -593,8 +594,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// flips a bit and returns the resulting bit value.
-        /// The index should be less than the OpenBitSet size.
+        /// Flips a bit and returns the resulting bit value.
+        /// The <paramref name="index"/> should be less than the <see cref="Length"/>.
         /// </summary>
         public virtual bool FlipAndGet(long index)
         {
@@ -607,10 +608,10 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Flips a range of bits, expanding the set size if necessary
+        /// Flips a range of bits, expanding the set size if necessary.
         /// </summary>
-        /// <param name="startIndex"> lower index </param>
-        /// <param name="endIndex"> one-past the last bit to flip </param>
+        /// <param name="startIndex"> Lower index </param>
+        /// <param name="endIndex"> One-past the last bit to flip </param>
         public virtual void Flip(long startIndex, long endIndex)
         {
             if (endIndex <= startIndex)
@@ -624,14 +625,13 @@ namespace Lucene.Net.Util
             // word to be changed.
             int endWord = ExpandingWordNum(endIndex - 1);
 
-            /// <summary>
-            ///* Grrr, java shifting wraps around so -1L>>>64 == -1
-            /// for that reason, make sure not to use endmask if the bits to flip will
-            /// be zero in the last word (redefine endWord to be the last changed...)
-            /// long startmask = -1L << (startIndex & 0x3f);     // example: 11111...111000
-            /// long endmask = -1L >>> (64-(endIndex & 0x3f));   // example: 00111...111111
-            /// **
-            /// </summary>
+
+            //* Grrr, java shifting wraps around so -1L>>>64 == -1
+            // for that reason, make sure not to use endmask if the bits to flip will
+            // be zero in the last word (redefine endWord to be the last changed...)
+            // long startmask = -1L << (startIndex & 0x3f);     // example: 11111...111000
+            // long endmask = -1L >>> (64-(endIndex & 0x3f));   // example: 00111...111111
+            // **
 
             long startmask = -1L << (int)startIndex;
             long endmask = (long)(0xffffffffffffffffUL >> (int)-endIndex); // 64-(endIndex&0x3f) is the same as -endIndex due to wrap
@@ -673,7 +673,10 @@ namespace Lucene.Net.Util
         }
         */
 
-        /// <returns> the number of set bits </returns>
+        /// <summary>
+        /// Get the number of set bits.
+        /// </summary>
+        /// <returns> The number of set bits. </returns>
         public virtual long Cardinality()
         {
             return BitUtil.Pop_Array(m_bits, 0, m_wlen);
@@ -740,8 +743,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the index of the first set bit starting at the index specified.
-        ///  -1 is returned if there are no more set bits.
+        /// Returns the index of the first set bit starting at the <paramref name="index"/> specified.
+        /// -1 is returned if there are no more set bits.
         /// </summary>
         public virtual int NextSetBit(int index)
         {
@@ -771,8 +774,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the index of the first set bit starting at the index specified.
-        ///  -1 is returned if there are no more set bits.
+        /// Returns the index of the first set bit starting at the <paramref name="index"/> specified.
+        /// -1 is returned if there are no more set bits.
         /// </summary>
         public virtual long NextSetBit(long index)
         {
@@ -803,8 +806,8 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Returns the index of the first set bit starting downwards at
-        ///  the index specified.
-        ///  -1 is returned if there are no more set bits.
+        /// the <paramref name="index"/> specified.
+        /// -1 is returned if there are no more set bits.
         /// </summary>
         public virtual int PrevSetBit(int index)
         {
@@ -850,8 +853,8 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Returns the index of the first set bit starting downwards at
-        ///  the index specified.
-        ///  -1 is returned if there are no more set bits.
+        /// the <paramref name="index"/> specified.
+        /// -1 is returned if there are no more set bits.
         /// </summary>
         public virtual long PrevSetBit(long index)
         {
@@ -947,7 +950,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Remove all elements set in other. this = this AND_NOT other </summary>
+        /// Remove all elements set in other. this = this AND_NOT other. </summary>
         public virtual void Remove(OpenBitSet other)
         {
             int idx = Math.Min(m_wlen, other.m_wlen);
@@ -1002,7 +1005,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// returns true if the sets have any elements in common </summary>
+        /// Returns <c>true</c> if the sets have any elements in common. </summary>
         public virtual bool Intersects(OpenBitSet other)
         {
             int pos = Math.Min(this.m_wlen, other.m_wlen);
@@ -1019,7 +1022,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Expand the long[] with the size given as a number of words (64 bit longs). </summary>
+        /// Expand the <see cref="T:long[]"/> with the size given as a number of words (64 bit longs). </summary>
         public virtual void EnsureCapacityWords(int numWords)
         {
             m_bits = ArrayUtil.Grow(m_bits, numWords);
@@ -1028,7 +1031,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Ensure that the long[] is big enough to hold numBits, expanding it if
+        /// Ensure that the <see cref="T:long[]"/> is big enough to hold numBits, expanding it if
         /// necessary.
         /// </summary>
         public virtual void EnsureCapacity(long numBits)
@@ -1054,14 +1057,14 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// returns the number of 64 bit words it would take to hold numBits </summary>
+        /// Returns the number of 64 bit words it would take to hold <paramref name="numBits"/>. </summary>
         public static int Bits2words(long numBits)
         {
             return (int)(((numBits - 1) >> 6) + 1);
         }
 
         /// <summary>
-        /// returns true if both sets have the same bits set </summary>
+        /// Returns <c>true</c> if both sets have the same bits set. </summary>
         public override bool Equals(object o)
         {
             if (this == o)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/OpenBitSetDISI.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/OpenBitSetDISI.cs b/src/Lucene.Net/Util/OpenBitSetDISI.cs
index 5749598..d7bb489 100644
--- a/src/Lucene.Net/Util/OpenBitSetDISI.cs
+++ b/src/Lucene.Net/Util/OpenBitSetDISI.cs
@@ -22,8 +22,8 @@ namespace Lucene.Net.Util
     using DocIdSetIterator = Lucene.Net.Search.DocIdSetIterator;
 
     /// <summary>
-    /// OpenBitSet with added methods to bulk-update the bits
-    ///  from a <seealso cref="DocIdSetIterator"/>.
+    /// <see cref="OpenBitSet"/> with added methods to bulk-update the bits
+    /// from a <see cref="DocIdSetIterator"/>. (DISI stands for <see cref="DocIdSetIterator"/>).
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -31,10 +31,10 @@ namespace Lucene.Net.Util
     public class OpenBitSetDISI : OpenBitSet
     {
         /// <summary>
-        /// Construct an OpenBitSetDISI with its bits set
-        /// from the doc ids of the given DocIdSetIterator.
+        /// Construct an <see cref="OpenBitSetDISI"/> with its bits set
+        /// from the doc ids of the given <see cref="DocIdSetIterator"/>.
         /// Also give a maximum size one larger than the largest doc id for which a
-        /// bit may ever be set on this OpenBitSetDISI.
+        /// bit may ever be set on this <see cref="OpenBitSetDISI"/>.
         /// </summary>
         public OpenBitSetDISI(DocIdSetIterator disi, int maxSize)
             : base(maxSize)
@@ -43,9 +43,9 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Construct an OpenBitSetDISI with no bits set, and a given maximum size
+        /// Construct an <see cref="OpenBitSetDISI"/> with no bits set, and a given maximum size
         /// one larger than the largest doc id for which a bit may ever be set
-        /// on this OpenBitSetDISI.
+        /// on this <see cref="OpenBitSetDISI"/>.
         /// </summary>
         public OpenBitSetDISI(int maxSize)
             : base(maxSize)
@@ -53,7 +53,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Perform an inplace OR with the doc ids from a given DocIdSetIterator,
+        /// Perform an inplace OR with the doc ids from a given <see cref="DocIdSetIterator"/>,
         /// setting the bit for each such doc id.
         /// These doc ids should be smaller than the maximum size passed to the
         /// constructor.
@@ -69,7 +69,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Perform an inplace AND with the doc ids from a given DocIdSetIterator,
+        /// Perform an inplace AND with the doc ids from a given <see cref="DocIdSetIterator"/>,
         /// leaving only the bits set for which the doc ids are in common.
         /// These doc ids should be smaller than the maximum size passed to the
         /// constructor.
@@ -90,7 +90,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Perform an inplace NOT with the doc ids from a given DocIdSetIterator,
+        /// Perform an inplace NOT with the doc ids from a given <see cref="DocIdSetIterator"/>,
         /// clearing all the bits for each such doc id.
         /// These doc ids should be smaller than the maximum size passed to the
         /// constructor.
@@ -106,7 +106,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Perform an inplace XOR with the doc ids from a given DocIdSetIterator,
+        /// Perform an inplace XOR with the doc ids from a given <see cref="DocIdSetIterator"/>,
         /// flipping all the bits for each such doc id.
         /// These doc ids should be smaller than the maximum size passed to the
         /// constructor.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/OpenBitSetIterator.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/OpenBitSetIterator.cs b/src/Lucene.Net/Util/OpenBitSetIterator.cs
index e7dbd45..769aeba 100644
--- a/src/Lucene.Net/Util/OpenBitSetIterator.cs
+++ b/src/Lucene.Net/Util/OpenBitSetIterator.cs
@@ -20,8 +20,8 @@ namespace Lucene.Net.Util
     using DocIdSetIterator = Lucene.Net.Search.DocIdSetIterator;
 
     /// <summary>
-    /// An iterator to iterate over set bits in an OpenBitSet.
-    /// this is faster than nextSetBit() for iterating over the complete set of bits,
+    /// An iterator to iterate over set bits in an <see cref="OpenBitSet"/>.
+    /// This is faster than <see cref="OpenBitSet.NextSetBit(long)"/> for iterating over the complete set of bits,
     /// especially when the density of the bits set is high.
     /// </summary>
     public class OpenBitSetIterator : DocIdSetIterator
@@ -71,29 +71,27 @@ namespace Lucene.Net.Util
             indexArray = BitUtil.BitList((byte)word);
         }
 
-        /// <summary>
-        ///*** alternate shift implementations
-        /// // 32 bit shifts, but a long shift needed at the end
-        /// private void shift2() {
-        ///  int y = (int)word;
-        ///  if (y==0) {wordShift +=32; y = (int)(word >>>32); }
-        ///  if ((y & 0x0000FFFF) == 0) { wordShift +=16; y>>>=16; }
-        ///  if ((y & 0x000000FF) == 0) { wordShift +=8; y>>>=8; }
-        ///  indexArray = bitlist[y & 0xff];
-        ///  word >>>= (wordShift +1);
-        /// }
-        ///
-        /// private void shift3() {
-        ///  int lower = (int)word;
-        ///  int lowByte = lower & 0xff;
-        ///  if (lowByte != 0) {
-        ///    indexArray=bitlist[lowByte];
-        ///    return;
-        ///  }
-        ///  shift();
-        /// }
-        /// *****
-        /// </summary>
+        //*** alternate shift implementations
+        // // 32 bit shifts, but a long shift needed at the end
+        // private void shift2() {
+        //  int y = (int)word;
+        //  if (y==0) {wordShift +=32; y = (int)(word >>>32); }
+        //  if ((y & 0x0000FFFF) == 0) { wordShift +=16; y>>>=16; }
+        //  if ((y & 0x000000FF) == 0) { wordShift +=8; y>>>=8; }
+        //  indexArray = bitlist[y & 0xff];
+        //  word >>>= (wordShift +1);
+        // }
+        //
+        // private void shift3() {
+        //  int lower = (int)word;
+        //  int lowByte = lower & 0xff;
+        //  if (lowByte != 0) {
+        //    indexArray=bitlist[lowByte];
+        //    return;
+        //  }
+        //  shift();
+        // }
+        // *****
 
         public override int NextDoc()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/PForDeltaDocIdSet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/PForDeltaDocIdSet.cs b/src/Lucene.Net/Util/PForDeltaDocIdSet.cs
index 0920ca3..968502e 100644
--- a/src/Lucene.Net/Util/PForDeltaDocIdSet.cs
+++ b/src/Lucene.Net/Util/PForDeltaDocIdSet.cs
@@ -27,12 +27,12 @@ namespace Lucene.Net.Util
     using PackedInt32s = Lucene.Net.Util.Packed.PackedInt32s;
 
     /// <summary>
-    /// <seealso cref="DocIdSet"/> implementation based on pfor-delta encoding.
-    /// <p>this implementation is inspired from LinkedIn's Kamikaze
+    /// <see cref="DocIdSet"/> implementation based on pfor-delta encoding.
+    /// <para>This implementation is inspired by LinkedIn's Kamikaze
     /// (http://data.linkedin.com/opensource/kamikaze) and Daniel Lemire's JavaFastPFOR
-    /// (https://github.com/lemire/JavaFastPFOR).</p>
-    /// <p>On the contrary to the original PFOR paper, exceptions are encoded with
-    /// FOR instead of Simple16.</p>
+    /// (https://github.com/lemire/JavaFastPFOR).</para>
+    /// <para>On the contrary to the original PFOR paper, exceptions are encoded with
+    /// FOR instead of Simple16.</para>
     /// </summary>
     public sealed class PForDeltaDocIdSet : DocIdSet
     {
@@ -65,7 +65,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// A builder for <seealso cref="PForDeltaDocIdSet"/>. </summary>
+        /// A builder for <see cref="PForDeltaDocIdSet"/>. </summary>
         public class Builder
         {
             internal readonly GrowableByteArrayDataOutput data;
@@ -98,7 +98,7 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Set the index interval. Every <code>indexInterval</code>-th block will
+            /// Set the index interval. Every <paramref name="indexInterval"/>-th block will
             /// be stored in the index. Set to <see cref="int.MaxValue"/> to disable indexing.
             /// </summary>
             public virtual Builder SetIndexInterval(int indexInterval)
@@ -131,7 +131,7 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Convenience method to add the content of a <seealso cref="DocIdSetIterator"/> to this builder. </summary>
+            /// Convenience method to add the content of a <see cref="DocIdSetIterator"/> to this builder. </summary>
             public virtual Builder Add(DocIdSetIterator it)
             {
                 for (int doc = it.NextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.NextDoc())
@@ -313,7 +313,7 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Build the <seealso cref="PForDeltaDocIdSet"/> instance. </summary>
+            /// Build the <see cref="PForDeltaDocIdSet"/> instance. </summary>
             public virtual PForDeltaDocIdSet Build()
             {
                 Debug.Assert(bufferSize < BLOCK_SIZE);
@@ -599,7 +599,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Return the number of documents in this <seealso cref="DocIdSet"/> in constant time. </summary>
+        /// Return the number of documents in this <see cref="DocIdSet"/> in constant time. </summary>
         public int Cardinality()
         {
             return cardinality;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/PagedBytes.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/PagedBytes.cs b/src/Lucene.Net/Util/PagedBytes.cs
index a40c3b3..4a6e4bd 100644
--- a/src/Lucene.Net/Util/PagedBytes.cs
+++ b/src/Lucene.Net/Util/PagedBytes.cs
@@ -26,13 +26,12 @@ namespace Lucene.Net.Util
     using IndexInput = Lucene.Net.Store.IndexInput;
 
     /// <summary>
-    /// Represents a logical byte[] as a series of pages.  You
-    ///  can write-once into the logical byte[] (append only),
-    ///  using copy, and then retrieve slices (BytesRef) into it
-    ///  using fill.
-    ///
+    /// Represents a logical <see cref="T:byte[]"/> as a series of pages.  You
+    /// can write-once into the logical <see cref="T:byte[]"/> (append only),
+    /// using copy, and then retrieve slices (<see cref="BytesRef"/>) into it
+    /// using fill.
+    /// <para/>
     /// @lucene.internal
-    ///
     /// </summary>
     // TODO: refactor this, byteblockpool, fst.bytestore, and any
     // other "shift/mask big arrays". there are too many of these classes!
@@ -55,10 +54,10 @@ namespace Lucene.Net.Util
         private static readonly byte[] EMPTY_BYTES = new byte[0];
 
         /// <summary>
-        /// Provides methods to read BytesRefs from a frozen
-        ///  PagedBytes.
+        /// Provides methods to read <see cref="BytesRef"/>s from a frozen
+        /// <see cref="PagedBytes"/>.
         /// </summary>
-        /// <seealso cref= #freeze  </seealso>
+        /// <seealso cref="Freeze(bool)"/>
         public sealed class Reader
         {
             private readonly byte[][] blocks;
@@ -85,14 +84,13 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Gets a slice out of <seealso cref="PagedBytes"/> starting at <i>start</i> with a
-            /// given length. Iff the slice spans across a block border this method will
+            /// Gets a slice out of <see cref="PagedBytes"/> starting at <paramref name="start"/> with a
+            /// given length. If the slice spans across a block border this method will
             /// allocate sufficient resources and copy the paged data.
-            /// <p>
+            /// <para>
             /// Slices spanning more than two blocks are not supported.
-            /// </p>
+            /// </para>
             /// @lucene.internal
-            ///
             /// </summary>
             public void FillSlice(BytesRef b, long start, int length)
             {
@@ -122,14 +120,12 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Reads length as 1 or 2 byte vInt prefix, starting at <i>start</i>.
-            /// <p>
+            /// Reads length as 1 or 2 byte vInt prefix, starting at <paramref name="start"/>.
+            /// <para>
             /// <b>Note:</b> this method does not support slices spanning across block
             /// borders.
-            /// </p>
-            ///
+            /// </para>
             /// @lucene.internal
-            ///
             /// </summary>
             // TODO: this really needs to be refactored into fieldcacheimpl
             public void Fill(BytesRef b, long start)
@@ -152,7 +148,7 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Returns approximate RAM bytes used </summary>
+            /// Returns approximate RAM bytes used. </summary>
             public long RamBytesUsed()
             {
                 return ((blocks != null) ? (blockSize * blocks.Length) : 0);
@@ -161,7 +157,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// 1&lt;&lt;blockBits must be bigger than biggest single
-        ///  BytesRef slice that will be pulled
+        /// <see cref="BytesRef"/> slice that will be pulled.
         /// </summary>
         public PagedBytes(int blockBits)
         {
@@ -174,7 +170,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Read this many bytes from in </summary>
+        /// Read this many bytes from <paramref name="in"/>. </summary>
         public void Copy(IndexInput @in, long byteCount)
         {
             while (byteCount > 0)
@@ -207,9 +203,9 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Copy BytesRef in, setting BytesRef out to the result.
-        /// Do not use this if you will use freeze(true).
-        /// this only supports bytes.length <= blockSize
+        /// Copy <see cref="BytesRef"/> in, setting <see cref="BytesRef"/> out to the result.
+        /// Do not use this if you will use <c>Freeze(true)</c>.
+        /// This only supports <c>bytes.Length &lt;= blockSize</c>.
         /// </summary>
         public void Copy(BytesRef bytes, BytesRef @out)
         {
@@ -238,7 +234,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Commits final byte[], trimming it if necessary and if trim=true </summary>
+        /// Commits final <see cref="T:byte[]"/>, trimming it if necessary and if <paramref name="trim"/>=true. </summary>
         public Reader Freeze(bool trim)
         {
             if (frozen)
@@ -266,7 +262,7 @@ namespace Lucene.Net.Util
             return new PagedBytes.Reader(this);
         }
 
-        public long Pointer
+        public long Pointer // LUCENENET TODO: API - Change to GetPointer() (makes conversion)
         {
             get
             {
@@ -290,7 +286,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Copy bytes in, writing the length as a 1 or 2 byte
-        ///  vInt prefix.
+        /// vInt prefix.
         /// </summary>
         // TODO: this really needs to be refactored into fieldcacheimpl
         public long CopyUsingLengthPrefix(BytesRef bytes)
@@ -355,7 +351,7 @@ namespace Lucene.Net.Util
 
             /// <summary>
             /// Returns the current byte position. </summary>
-            public long Position
+            public long Position // LUCENENET TODO: API - Change to GetPosition() (makes conversion)
             {
                 get
                 {
@@ -484,7 +480,7 @@ namespace Lucene.Net.Util
 
             /// <summary>
             /// Return the current byte position. </summary>
-            public long Position
+            public long Position // LUCENENET TODO: API - Change to GetPosition() (makes conversion)
             {
                 get
                 {
@@ -494,8 +490,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns a DataInput to read values from this
-        ///  PagedBytes instance.
+        /// Returns a <see cref="DataInput"/> to read values from this
+        /// <see cref="PagedBytes"/> instance.
         /// </summary>
         public PagedBytesDataInput GetDataInput()
         {
@@ -507,10 +503,10 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns a DataOutput that you may use to write into
-        ///  this PagedBytes instance.  If you do this, you should
-        ///  not call the other writing methods (eg, copy);
-        ///  results are undefined.
+        /// Returns a <see cref="DataOutput"/> that you may use to write into
+        /// this <see cref="PagedBytes"/> instance.  If you do this, you should
+        /// not call the other writing methods (eg, copy);
+        /// results are undefined.
         /// </summary>
         public PagedBytesDataOutput GetDataOutput()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/PrintStreamInfoStream.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/PrintStreamInfoStream.cs b/src/Lucene.Net/Util/PrintStreamInfoStream.cs
index 63d0f95..455e273 100644
--- a/src/Lucene.Net/Util/PrintStreamInfoStream.cs
+++ b/src/Lucene.Net/Util/PrintStreamInfoStream.cs
@@ -23,12 +23,12 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// InfoStream implementation over a <seealso cref="PrintStream"/>
-    /// such as <code>System.out</code>.
-    ///
+    /// InfoStream implementation over a <see cref="TextWriter"/>
+    /// such as <see cref="Console.Out"/>.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
-    public class PrintStreamInfoStream : InfoStream
+    public class PrintStreamInfoStream : InfoStream // LUCENENET TODO: API - Rename to TextWriterInfoStream
     {
         // Used for printing messages
         private static readonly AtomicInt32 MESSAGE_ID = new AtomicInt32();


[32/48] lucenenet git commit: Lucene.Net.Util.Mutable: Fixed XML documentation comments

Posted by ni...@apache.org.
Lucene.Net.Util.Mutable: Fixed XML documentation comments


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/d4e44981
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/d4e44981
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/d4e44981

Branch: refs/heads/master
Commit: d4e4498102bf93f517d8acc5b0c0491152b3376c
Parents: 9bd4dc8
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Mon Jun 5 02:34:19 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Mon Jun 5 06:16:28 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                   | 3 +--
 src/Lucene.Net/Util/Mutable/MutableValue.cs       | 2 +-
 src/Lucene.Net/Util/Mutable/MutableValueBool.cs   | 4 ++--
 src/Lucene.Net/Util/Mutable/MutableValueDate.cs   | 4 ++--
 src/Lucene.Net/Util/Mutable/MutableValueDouble.cs | 4 ++--
 src/Lucene.Net/Util/Mutable/MutableValueStr.cs    | 4 ++--
 6 files changed, 10 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4e44981/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index d387b33..ccfa22a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -52,8 +52,7 @@ helpers to help with that, see for examples see our [Java style methods to avoid
 
 1. Lucene.Net.Core (project)
    1. Codecs (namespace)
-   2. Util.Mutable (namespace)
-   3. Util.Packed (namespace)
+   2. Util.Packed (namespace)
 2. Lucene.Net.Codecs (project)
 
 See [Documenting Lucene.Net](https://cwiki.apache.org/confluence/display/LUCENENET/Documenting+Lucene.Net) for instructions. 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4e44981/src/Lucene.Net/Util/Mutable/MutableValue.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Mutable/MutableValue.cs b/src/Lucene.Net/Util/Mutable/MutableValue.cs
index d8505e4..9cf2ebd 100644
--- a/src/Lucene.Net/Util/Mutable/MutableValue.cs
+++ b/src/Lucene.Net/Util/Mutable/MutableValue.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Util.Mutable
 
     /// <summary>
     /// Base class for all mutable values.
-    ///
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public abstract class MutableValue : IComparable<MutableValue>, IComparable

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4e44981/src/Lucene.Net/Util/Mutable/MutableValueBool.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Mutable/MutableValueBool.cs b/src/Lucene.Net/Util/Mutable/MutableValueBool.cs
index 589165a..4d09c08 100644
--- a/src/Lucene.Net/Util/Mutable/MutableValueBool.cs
+++ b/src/Lucene.Net/Util/Mutable/MutableValueBool.cs
@@ -18,8 +18,8 @@ namespace Lucene.Net.Util.Mutable
      */
 
     /// <summary>
-    /// <seealso cref="MutableValue"/> implementation of type
-    /// <code>boolean</code>.
+    /// <see cref="MutableValue"/> implementation of type
+    /// <see cref="bool"/>.
     /// </summary>
     public class MutableValueBool : MutableValue
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4e44981/src/Lucene.Net/Util/Mutable/MutableValueDate.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Mutable/MutableValueDate.cs b/src/Lucene.Net/Util/Mutable/MutableValueDate.cs
index bddab21..258fd6a 100644
--- a/src/Lucene.Net/Util/Mutable/MutableValueDate.cs
+++ b/src/Lucene.Net/Util/Mutable/MutableValueDate.cs
@@ -20,8 +20,8 @@ namespace Lucene.Net.Util.Mutable
      */
 
     /// <summary>
-    /// <seealso cref="MutableValue"/> implementation of type
-    /// <seealso cref="Date"/>.
+    /// <see cref="MutableValue"/> implementation of type
+    /// <see cref="DateTime"/>.
     /// </summary>
     public class MutableValueDate : MutableValueInt64
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4e44981/src/Lucene.Net/Util/Mutable/MutableValueDouble.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Mutable/MutableValueDouble.cs b/src/Lucene.Net/Util/Mutable/MutableValueDouble.cs
index cd18edc..26770a9 100644
--- a/src/Lucene.Net/Util/Mutable/MutableValueDouble.cs
+++ b/src/Lucene.Net/Util/Mutable/MutableValueDouble.cs
@@ -20,8 +20,8 @@ namespace Lucene.Net.Util.Mutable
      */
 
     /// <summary>
-    /// <seealso cref="MutableValue"/> implementation of type
-    /// <code>double</code>.
+    /// <see cref="MutableValue"/> implementation of type
+    /// <see cref="double"/>.
     /// </summary>
     public class MutableValueDouble : MutableValue
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d4e44981/src/Lucene.Net/Util/Mutable/MutableValueStr.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Mutable/MutableValueStr.cs b/src/Lucene.Net/Util/Mutable/MutableValueStr.cs
index 33665b8..7ededd1 100644
--- a/src/Lucene.Net/Util/Mutable/MutableValueStr.cs
+++ b/src/Lucene.Net/Util/Mutable/MutableValueStr.cs
@@ -18,8 +18,8 @@ namespace Lucene.Net.Util.Mutable
      */
 
     /// <summary>
-    /// <seealso cref="MutableValue"/> implementation of type
-    /// <seealso cref="String"/>.
+    /// <see cref="MutableValue"/> implementation of type
+    /// <see cref="string"/>.
     /// </summary>
     public class MutableValueStr : MutableValue
     {


[13/48] lucenenet git commit: Lucene.Net.Analysis: Fixed XML documentation warnings

Posted by ni...@apache.org.
Lucene.Net.Analysis: Fixed XML documentation warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/ef2d090d
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/ef2d090d
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/ef2d090d

Branch: refs/heads/master
Commit: ef2d090dd7a743ca0c3232c574d46d19ea8052a5
Parents: b2db531
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Sat Jun 3 23:07:38 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Sat Jun 3 23:07:38 2017 +0700

----------------------------------------------------------------------
 src/Lucene.Net/Analysis/Analyzer.cs               | 14 ++++++++------
 src/Lucene.Net/Analysis/AnalyzerWrapper.cs        | 14 +++++++-------
 src/Lucene.Net/Analysis/NumericTokenStream.cs     |  2 +-
 src/Lucene.Net/Analysis/Token.cs                  | 12 ++++++------
 .../TokenAttributes/ICharTermAttribute.cs         | 18 +++++++++---------
 src/Lucene.Net/Analysis/TokenFilter.cs            |  2 +-
 src/Lucene.Net/Analysis/Tokenizer.cs              |  2 +-
 7 files changed, 33 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ef2d090d/src/Lucene.Net/Analysis/Analyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Analysis/Analyzer.cs b/src/Lucene.Net/Analysis/Analyzer.cs
index 80e5ffb..a768d05 100644
--- a/src/Lucene.Net/Analysis/Analyzer.cs
+++ b/src/Lucene.Net/Analysis/Analyzer.cs
@@ -281,7 +281,7 @@ namespace Lucene.Net.Analysis
         /// </summary>
         /// <param name="fieldName">the name of the field the created <see cref="Analysis.TokenStream"/> is used for</param>
         /// <param name="text">the <see cref="string"/> the streams source reads from </param>
-        /// <returns><see cref="Analysis.TokenStream"/> for iterating the analyzed content of <paramref name="reader"/></returns>
+        /// <returns><see cref="Analysis.TokenStream"/> for iterating the analyzed content of <c>reader</c></returns>
         /// <exception cref="ObjectDisposedException"> if the Analyzer is disposed. </exception>
         /// <exception cref="IOException"> if an i/o error occurs (may rarely happen for strings). </exception>
         /// <seealso cref="GetTokenStream(string, TextReader)"/>
@@ -425,11 +425,10 @@ namespace Lucene.Net.Analysis
         /// </summary>
         [Obsolete("this implementation class will be hidden in Lucene 5.0. Use Analyzer.PER_FIELD_REUSE_STRATEGY instead!")]
         public class PerFieldReuseStrategy : ReuseStrategy
-
-        /// <summary>
-        /// Sole constructor. (For invocation by subclass constructors, typically implicit.)
-        /// </summary>
         {
+            /// <summary>
+            /// Sole constructor. (For invocation by subclass constructors, typically implicit.)
+            /// </summary>
             [Obsolete("Don't create instances of this class, use Analyzer.PER_FIELD_REUSE_STRATEGY")]
             public PerFieldReuseStrategy()
             {
@@ -463,7 +462,8 @@ namespace Lucene.Net.Analysis
         /// LUCENENET specific helper class to mimick Java's ability to create anonymous classes.
         /// Clearly, the design of <see cref="Analyzer"/> took this feature of Java into consideration.
         /// Since it doesn't exist in .NET, we can use a delegate method to call the constructor of
-        /// this concrete instance to fake it (by calling <see cref="Analyzer.NewAnonymous"/>).
+        /// this concrete instance to fake it (by calling an overload of 
+        /// <see cref="Analyzer.NewAnonymous(Func{string, TextReader, TokenStreamComponents})"/>).
         /// </summary>
         private class AnonymousAnalyzer : Analyzer
         {
@@ -603,6 +603,7 @@ namespace Lucene.Net.Analysis
         /// Stores the given <see cref="TokenStreamComponents"/> as the reusable components for the
         /// field with the give name.
         /// </summary>
+        /// <param name="analyzer"> Analyzer </param>
         /// <param name="fieldName"> Name of the field whose <see cref="TokenStreamComponents"/> are being set </param>
         /// <param name="components"> <see cref="TokenStreamComponents"/> which are to be reused for the field </param>
         public abstract void SetReusableComponents(Analyzer analyzer, string fieldName, TokenStreamComponents components);
@@ -624,6 +625,7 @@ namespace Lucene.Net.Analysis
         /// <summary>
         /// Sets the stored value.
         /// </summary>
+        /// <param name="analyzer"> Analyzer </param>
         /// <param name="storedValue"> Value to store </param>
         /// <exception cref="ObjectDisposedException"> if the <see cref="Analyzer"/> is closed. </exception>
         protected internal void SetStoredValue(Analyzer analyzer, object storedValue)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ef2d090d/src/Lucene.Net/Analysis/AnalyzerWrapper.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Analysis/AnalyzerWrapper.cs b/src/Lucene.Net/Analysis/AnalyzerWrapper.cs
index de40185..5b41ed2 100644
--- a/src/Lucene.Net/Analysis/AnalyzerWrapper.cs
+++ b/src/Lucene.Net/Analysis/AnalyzerWrapper.cs
@@ -27,14 +27,14 @@ namespace Lucene.Net.Analysis
     /// <see cref="GetWrappedAnalyzer(string)"/> allows the <see cref="Analyzer"/>
     /// to wrap multiple <see cref="Analyzer"/>s which are selected on a per field basis.
     /// <para/>
-    /// <see cref="WrapComponents(string, Analyzer.TokenStreamComponents)"/> allows the
-    /// <see cref="Analyzer.TokenStreamComponents"/> of the wrapped <see cref="Analyzer"/> to then be wrapped
-    /// (such as adding a new <see cref="TokenFilter"/> to form new <see cref="Analyzer.TokenStreamComponents"/>).
+    /// <see cref="WrapComponents(string, TokenStreamComponents)"/> allows the
+    /// <see cref="TokenStreamComponents"/> of the wrapped <see cref="Analyzer"/> to then be wrapped
+    /// (such as adding a new <see cref="TokenFilter"/> to form new <see cref="TokenStreamComponents"/>).
     /// </summary>
     public abstract class AnalyzerWrapper : Analyzer
     {
         /// <summary>
-        /// Creates a new <see cref="AnalyzerWrapper"/>.  Since the <see cref="Analyzer.ReuseStrategy"/> of
+        /// Creates a new <see cref="AnalyzerWrapper"/>.  Since the <see cref="ReuseStrategy"/> of
         /// the wrapped <see cref="Analyzer"/>s are unknown, <see cref="Analyzer.PER_FIELD_REUSE_STRATEGY"/> is assumed.
         /// </summary>
         [Obsolete("Use AnalyzerWrapper(Analyzer.ReuseStrategy) and specify a valid Analyzer.ReuseStrategy, probably retrieved from the wrapped analyzer using Analyzer.Strategy.")]
@@ -66,7 +66,7 @@ namespace Lucene.Net.Analysis
         protected abstract Analyzer GetWrappedAnalyzer(string fieldName);
 
         /// <summary>
-        /// Wraps / alters the given <see cref="Analyzer.TokenStreamComponents"/>, taken from the wrapped
+        /// Wraps / alters the given <see cref="TokenStreamComponents"/>, taken from the wrapped
         /// <see cref="Analyzer"/>, to form new components. It is through this method that new
         /// <see cref="TokenFilter"/>s can be added by <see cref="AnalyzerWrapper"/>s. By default, the given
         /// components are returned.
@@ -74,8 +74,8 @@ namespace Lucene.Net.Analysis
         /// <param name="fieldName">
         ///          Name of the field which is to be analyzed </param>
         /// <param name="components">
-        ///          <see cref="Analyzer.TokenStreamComponents"/> taken from the wrapped <see cref="Analyzer"/> </param>
-        /// <returns> Wrapped / altered <see cref="Analyzer.TokenStreamComponents"/>. </returns>
+        ///          <see cref="TokenStreamComponents"/> taken from the wrapped <see cref="Analyzer"/> </param>
+        /// <returns> Wrapped / altered <see cref="TokenStreamComponents"/>. </returns>
         protected virtual TokenStreamComponents WrapComponents(string fieldName, TokenStreamComponents components)
         {
             return components;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ef2d090d/src/Lucene.Net/Analysis/NumericTokenStream.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Analysis/NumericTokenStream.cs b/src/Lucene.Net/Analysis/NumericTokenStream.cs
index 8615738..e616ada 100644
--- a/src/Lucene.Net/Analysis/NumericTokenStream.cs
+++ b/src/Lucene.Net/Analysis/NumericTokenStream.cs
@@ -287,7 +287,7 @@ namespace Lucene.Net.Analysis
         /// <para/>
         /// NOTE: This was setLongValue() in Lucene
         /// </summary>
-        /// <param name="value"> the value, for which this <see cref=""TokenStream/> should enumerate tokens. </param>
+        /// <param name="value"> the value, for which this <see cref="TokenStream"/> should enumerate tokens. </param>
         /// <returns> this instance, because of this you can use it the following way:
         /// <code>new Field(name, new NumericTokenStream(precisionStep).SetInt64Value(value))</code> </returns>
         public NumericTokenStream SetInt64Value(long value)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ef2d090d/src/Lucene.Net/Analysis/Token.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Analysis/Token.cs b/src/Lucene.Net/Analysis/Token.cs
index be1938e..5f5a843 100644
--- a/src/Lucene.Net/Analysis/Token.cs
+++ b/src/Lucene.Net/Analysis/Token.cs
@@ -477,7 +477,7 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Shorthand for calling <see cref="Clear"/>,
-        /// <see cref="CopyBuffer(char[], int, int)"/>,
+        /// <see cref="ICharTermAttribute.CopyBuffer(char[], int, int)"/>,
         /// <see cref="SetOffset"/>,
         /// <see cref="Type"/> (set) </summary>
         /// <returns> this <see cref="Token"/> instance  </returns>
@@ -496,7 +496,7 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Shorthand for calling <see cref="Clear"/>,
-        /// <see cref="CopyBuffer(char[], int, int)"/>,
+        /// <see cref="ICharTermAttribute.CopyBuffer(char[], int, int)"/>,
         /// <see cref="SetOffset"/>,
         /// <see cref="Type"/> (set) on <see cref="TypeAttribute.DEFAULT_TYPE"/> </summary>
         /// <returns> this <see cref="Token"/> instance  </returns>
@@ -513,7 +513,7 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Shorthand for calling <see cref="Clear"/>,
-        /// <see cref="Append(string)"/>,
+        /// <see cref="ICharTermAttribute.Append(string)"/>,
         /// <see cref="SetOffset"/>,
         /// <see cref="Type"/> (set) </summary>
         /// <returns> this <see cref="Token"/> instance  </returns>
@@ -530,7 +530,7 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Shorthand for calling <see cref="Clear"/>,
-        /// <see cref="Append(string, int, int)"/>,
+        /// <see cref="ICharTermAttribute.Append(string, int, int)"/>,
         /// <see cref="SetOffset"/>,
         /// <see cref="Type"/> (set) </summary>
         /// <returns> this <see cref="Token"/> instance  </returns>
@@ -547,7 +547,7 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Shorthand for calling <see cref="Clear"/>,
-        /// <see cref="Append(string)"/>,
+        /// <see cref="ICharTermAttribute.Append(string)"/>,
         /// <see cref="SetOffset"/>,
         /// <see cref="Type"/> (set) on <see cref="TypeAttribute.DEFAULT_TYPE"/> </summary>
         /// <returns> this <see cref="Token"/> instance  </returns>
@@ -564,7 +564,7 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// Shorthand for calling <see cref="Clear"/>,
-        /// <see cref="Append(string, int, int)"/>,
+        /// <see cref="ICharTermAttribute.Append(string, int, int)"/>,
         /// <see cref="SetOffset"/>,
         /// <see cref="Type"/> (set) on <see cref="TypeAttribute.DEFAULT_TYPE"/> </summary>
         /// <returns> this <see cref="Token"/> instance  </returns>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ef2d090d/src/Lucene.Net/Analysis/TokenAttributes/ICharTermAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Analysis/TokenAttributes/ICharTermAttribute.cs b/src/Lucene.Net/Analysis/TokenAttributes/ICharTermAttribute.cs
index 7985e55..e41404b 100644
--- a/src/Lucene.Net/Analysis/TokenAttributes/ICharTermAttribute.cs
+++ b/src/Lucene.Net/Analysis/TokenAttributes/ICharTermAttribute.cs
@@ -124,30 +124,30 @@ namespace Lucene.Net.Analysis.TokenAttributes
         ICharTermAttribute Append(char c);
 
         /// <summary>
-        /// Appends the contents of the <see cref="char[]"/> array to this character sequence.
+        /// Appends the contents of the <see cref="T:char[]"/> array to this character sequence.
         /// <para>
-        /// The characters of the <see cref="char[]"/> argument are appended, in order, increasing the length of
+        /// The characters of the <see cref="T:char[]"/> argument are appended, in order, increasing the length of
         /// this sequence by the length of the argument. If argument is <c>null</c>, then the four
         /// characters <c>"null"</c> are appended.
         /// </para>
         /// </summary>
-        /// <param name="csq">The <see cref="char[]"/> array to append.</param>
+        /// <param name="csq">The <see cref="T:char[]"/> array to append.</param>
         /// <remarks>
         /// LUCENENET specific method, added to simulate using the CharBuffer class in Java.
         /// </remarks>
         ICharTermAttribute Append(char[] csq);
 
         /// <summary>
-        /// Appends the contents of the <see cref="char[]"/> array to this character sequence, beginning and ending
+        /// Appends the contents of the <see cref="T:char[]"/> array to this character sequence, beginning and ending
         /// at the specified indices.
         /// <para>
-        /// The characters of the <see cref="char[]"/> argument are appended, in order, increasing the length of
+        /// The characters of the <see cref="T:char[]"/> argument are appended, in order, increasing the length of
         /// this sequence by the length of the argument. If argument is <c>null</c>, then the four
         /// characters <c>"null"</c> are appended.
         /// </para>
         /// </summary>
-        /// <param name="csq">The <see cref="char[]"/> array to append.</param>
-        /// <param name="start">The start index of the <see cref="char[]"/> to begin copying characters.</param>
+        /// <param name="csq">The <see cref="T:char[]"/> array to append.</param>
+        /// <param name="start">The start index of the <see cref="T:char[]"/> to begin copying characters.</param>
         /// <param name="end">The index of the character following the last character in the subsequence.</param>
         /// <remarks>
         /// LUCENENET specific method, added to simulate using the CharBuffer class in Java. Note that
@@ -201,7 +201,7 @@ namespace Lucene.Net.Analysis.TokenAttributes
 
         /// <summary>
         /// Appends the specified <see cref="StringBuilder"/> to this character sequence.
-        /// <p>The characters of the <see cref="StringBuilder"/> argument are appended, in order, increasing the length of
+        /// <para/>The characters of the <see cref="StringBuilder"/> argument are appended, in order, increasing the length of
         /// this sequence by the length of the argument. If argument is <c>null</c>, then the four
         /// characters <c>"null"</c> are appended.
         /// </summary>
@@ -213,7 +213,7 @@ namespace Lucene.Net.Analysis.TokenAttributes
 
         /// <summary>
         /// Appends the contents of the other <see cref="ICharTermAttribute"/> to this character sequence.
-        /// <p>The characters of the <see cref="ICharTermAttribute"/> argument are appended, in order, increasing the length of
+        /// <para/>The characters of the <see cref="ICharTermAttribute"/> argument are appended, in order, increasing the length of
         /// this sequence by the length of the argument. If argument is <c>null</c>, then the four
         /// characters <c>"null"</c> are appended.
         /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ef2d090d/src/Lucene.Net/Analysis/TokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Analysis/TokenFilter.cs b/src/Lucene.Net/Analysis/TokenFilter.cs
index bd7e0b2..f795d33 100644
--- a/src/Lucene.Net/Analysis/TokenFilter.cs
+++ b/src/Lucene.Net/Analysis/TokenFilter.cs
@@ -39,7 +39,7 @@ namespace Lucene.Net.Analysis
 
         /// <summary>
         /// This method is called by the consumer after the last token has been
-        /// consumed, after <see cref="IncrementToken()"/> returned <c>false</c>
+        /// consumed, after <see cref="TokenStream.IncrementToken()"/> returned <c>false</c>
         /// (using the new <see cref="TokenStream"/> API). Streams implementing the old API
         /// should upgrade to use this feature.
         /// <para/>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ef2d090d/src/Lucene.Net/Analysis/Tokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Analysis/Tokenizer.cs b/src/Lucene.Net/Analysis/Tokenizer.cs
index 0d2bd8c..9d9d67b 100644
--- a/src/Lucene.Net/Analysis/Tokenizer.cs
+++ b/src/Lucene.Net/Analysis/Tokenizer.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis
     /// <summary>
     /// A <see cref="Tokenizer"/> is a <see cref="TokenStream"/> whose input is a <see cref="TextReader"/>.
     /// <para/>
-    /// This is an abstract class; subclasses must override <seealso cref="#IncrementToken()"/>
+    /// This is an abstract class; subclasses must override <see cref="TokenStream.IncrementToken()"/>
     /// <para/>
     /// NOTE: Subclasses overriding <see cref="TokenStream.IncrementToken()"/> must
     /// call <see cref="Util.AttributeSource.ClearAttributes()"/> before


[09/48] lucenenet git commit: Lucene.Net.Search: Fixed up documentation comments

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/FieldCacheRangeFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/FieldCacheRangeFilter.cs b/src/Lucene.Net/Search/FieldCacheRangeFilter.cs
index 091a60f..405b0a4 100644
--- a/src/Lucene.Net/Search/FieldCacheRangeFilter.cs
+++ b/src/Lucene.Net/Search/FieldCacheRangeFilter.cs
@@ -28,34 +28,34 @@ namespace Lucene.Net.Search
     using SortedDocValues = Lucene.Net.Index.SortedDocValues;
 
     /// <summary>
-    /// A range filter built on top of a cached single term field (in <seealso cref="IFieldCache"/>).
+    /// A range filter built on top of a cached single term field (in <see cref="IFieldCache"/>).
     ///
-    /// <p>{@code FieldCacheRangeFilter} builds a single cache for the field the first time it is used.
-    /// Each subsequent {@code FieldCacheRangeFilter} on the same field then reuses this cache,
+    /// <para/><see cref="FieldCacheRangeFilter"/> builds a single cache for the field the first time it is used.
+    /// Each subsequent <see cref="FieldCacheRangeFilter"/> on the same field then reuses this cache,
     /// even if the range itself changes.
     ///
-    /// <p>this means that {@code FieldCacheRangeFilter} is much faster (sometimes more than 100x as fast)
-    /// as building a <seealso cref="TermRangeFilter"/>, if using a <seealso cref="#newStringRange"/>.
+    /// <para/>This means that <see cref="FieldCacheRangeFilter"/> is much faster (sometimes more than 100x as fast)
+    /// than building a <see cref="TermRangeFilter"/>, if using a <see cref="NewStringRange(string, string, string, bool, bool)"/>.
     /// However, if the range never changes it is slower (around 2x as slow) than building
-    /// a CachingWrapperFilter on top of a single <seealso cref="TermRangeFilter"/>.
+    /// a <see cref="CachingWrapperFilter"/> on top of a single <see cref="TermRangeFilter"/>.
     ///
-    /// For numeric data types, this filter may be significantly faster than <seealso cref="NumericRangeFilter"/>.
+    /// <para/>For numeric data types, this filter may be significantly faster than <see cref="NumericRangeFilter"/>.
     /// Furthermore, it does not need the numeric values encoded
-    /// by <seealso cref="IntField"/>, <seealso cref="FloatField"/>, {@link
-    /// LongField} or <seealso cref="DoubleField"/>. But
+    /// by <see cref="Documents.Int32Field"/>, <see cref="Documents.SingleField"/>,
+    /// <see cref="Documents.Int64Field"/> or <see cref="Documents.DoubleField"/>. But
     /// it has the problem that it only works with exact one value/document (see below).
     ///
-    /// <p>As with all <seealso cref="IFieldCache"/> based functionality, {@code FieldCacheRangeFilter} is only valid for
-    /// fields which exact one term for each document (except for <seealso cref="#newStringRange"/>
-    /// where 0 terms are also allowed). Due to a restriction of <seealso cref="IFieldCache"/>, for numeric ranges
+    /// <para/>As with all <see cref="IFieldCache"/> based functionality, <see cref="FieldCacheRangeFilter"/> is only valid for
+    /// fields which have exactly one term for each document (except for <see cref="NewStringRange(string, string, string, bool, bool)"/>
+    /// where 0 terms are also allowed). Due to a restriction of <see cref="IFieldCache"/>, for numeric ranges
     /// all terms that do not have a numeric value, 0 is assumed.
     ///
-    /// <p>Thus it works on dates, prices and other single value fields but will not work on
-    /// regular text fields. It is preferable to use a <code>NOT_ANALYZED</code> field to ensure that
+    /// <para/>Thus it works on dates, prices and other single value fields but will not work on
+    /// regular text fields. It is preferable to use a <see cref="Documents.Field.Index.NOT_ANALYZED"/> field to ensure that
     /// there is only a single term.
     ///
-    /// <p>this class does not have an constructor, use one of the static factory methods available,
-    /// that create a correct instance for different data types supported by <seealso cref="IFieldCache"/>.
+    /// <para/>This class does not have a constructor, use one of the static factory methods available,
+    /// that create a correct instance for different data types supported by <see cref="IFieldCache"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -683,12 +683,12 @@ namespace Lucene.Net.Search
             }
         }
 
-        //The functions
+        //The functions (Starting on line 84 in Lucene)
 
         /// <summary>
-        /// Creates a string range filter using <seealso cref="IFieldCache#getTermsIndex"/>. this works with all
+        /// Creates a string range filter using <see cref="IFieldCache.GetTermsIndex(Index.AtomicReader, string, float)"/>. This works with all
         /// fields containing zero or one term in the field. The range can be half-open by setting one
-        /// of the values to <code>null</code>.
+        /// of the values to <c>null</c>.
         /// </summary>
         public static FieldCacheRangeFilter<string> NewStringRange(string field, string lowerVal, string upperVal, bool includeLower, bool includeUpper)
         {
@@ -696,9 +696,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a BytesRef range filter using <seealso cref="IFieldCache#getTermsIndex"/>. this works with all
+        /// Creates a <see cref="BytesRef"/> range filter using <see cref="IFieldCache.GetTermsIndex(Index.AtomicReader, string, float)"/>. This works with all
         /// fields containing zero or one term in the field. The range can be half-open by setting one
-        /// of the values to <code>null</code>.
+        /// of the values to <c>null</c>.
         /// </summary>
         // TODO: bogus that newStringRange doesnt share this code... generics hell
         public static FieldCacheRangeFilter<BytesRef> NewBytesRefRange(string field, BytesRef lowerVal, BytesRef upperVal, bool includeLower, bool includeUpper)
@@ -707,9 +707,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a numeric range filter using <seealso cref="IFieldCache#getBytes(AtomicReader,String,boolean)"/>. this works with all
-        /// byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
-        /// of the values to <code>null</code>.
+        /// Creates a numeric range filter using <see cref="IFieldCache.GetBytes(Index.AtomicReader,string,bool)"/>. This works with all
+        /// <see cref="byte"/> fields containing exactly one numeric term in the field. The range can be half-open by setting one
+        /// of the values to <c>null</c>.
         /// </summary>
         [Obsolete, CLSCompliant(false)] // LUCENENET NOTE: marking non-CLS compliant because it is sbyte, but obsolete anyway
         public static FieldCacheRangeFilter<sbyte?> NewByteRange(string field, sbyte? lowerVal, sbyte? upperVal, bool includeLower, bool includeUpper)
@@ -718,9 +718,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a numeric range filter using <seealso cref="IFieldCache#getBytes(AtomicReader,String,FieldCache.ByteParser,boolean)"/>. this works with all
-        /// byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
-        /// of the values to <code>null</code>.
+        /// Creates a numeric range filter using <see cref="IFieldCache.GetBytes(Index.AtomicReader,string,FieldCache.IByteParser,bool)"/>. This works with all
+        /// <see cref="byte"/> fields containing exactly one numeric term in the field. The range can be half-open by setting one
+        /// of the values to <c>null</c>.
         /// </summary>
         [Obsolete, CLSCompliant(false)]  // LUCENENET NOTE: marking non-CLS compliant because it is sbyte, but obsolete anyway
         public static FieldCacheRangeFilter<sbyte?> NewByteRange(string field, FieldCache.IByteParser parser, sbyte? lowerVal, sbyte? upperVal, bool includeLower, bool includeUpper)
@@ -729,9 +729,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a numeric range filter using <seealso cref="IFieldCache#getShorts(AtomicReader,String,boolean)"/>. this works with all
-        /// short fields containing exactly one numeric term in the field. The range can be half-open by setting one
-        /// of the values to <code>null</code>.
+        /// Creates a numeric range filter using <see cref="IFieldCache.GetInt16s(Index.AtomicReader,string,bool)"/>. This works with all
+        /// <see cref="short"/> fields containing exactly one numeric term in the field. The range can be half-open by setting one
+        /// of the values to <c>null</c>.
         /// <para/>
         /// NOTE: this was newShortRange() in Lucene
         /// </summary>
@@ -742,9 +742,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a numeric range filter using <seealso cref="IFieldCache#getShorts(AtomicReader,String,FieldCache.ShortParser,boolean)"/>. this works with all
-        /// short fields containing exactly one numeric term in the field. The range can be half-open by setting one
-        /// of the values to <code>null</code>.
+        /// Creates a numeric range filter using <see cref="IFieldCache.GetInt16s(Index.AtomicReader, string, FieldCache.IInt16Parser, bool)"/>. This works with all
+        /// <see cref="short"/> fields containing exactly one numeric term in the field. The range can be half-open by setting one
+        /// of the values to <c>null</c>.
         /// <para/>
         /// NOTE: this was newShortRange() in Lucene
         /// </summary>
@@ -755,9 +755,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a numeric range filter using <seealso cref="IFieldCache#getInts(AtomicReader,String,boolean)"/>. this works with all
-        /// int fields containing exactly one numeric term in the field. The range can be half-open by setting one
-        /// of the values to <code>null</code>.
+        /// Creates a numeric range filter using <see cref="IFieldCache.GetInt32s(Index.AtomicReader,string,bool)"/>. This works with all
+        /// <see cref="int"/> fields containing exactly one numeric term in the field. The range can be half-open by setting one
+        /// of the values to <c>null</c>.
         /// <para/>
         /// NOTE: this was newIntRange() in Lucene
         /// </summary>
@@ -767,9 +767,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a numeric range filter using <seealso cref="IFieldCache#getInts(AtomicReader,String,FieldCache.IntParser,boolean)"/>. this works with all
-        /// int fields containing exactly one numeric term in the field. The range can be half-open by setting one
-        /// of the values to <code>null</code>.
+        /// Creates a numeric range filter using <see cref="IFieldCache.GetInt32s(Index.AtomicReader,string,FieldCache.IInt32Parser,bool)"/>. This works with all
+        /// <see cref="int"/> fields containing exactly one numeric term in the field. The range can be half-open by setting one
+        /// of the values to <c>null</c>.
         /// <para/>
         /// NOTE: this was newIntRange() in Lucene
         /// </summary>
@@ -779,9 +779,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a numeric range filter using <seealso cref="IFieldCache#getLongs(AtomicReader,String,boolean)"/>. this works with all
-        /// long fields containing exactly one numeric term in the field. The range can be half-open by setting one
-        /// of the values to <code>null</code>.
+        /// Creates a numeric range filter using <see cref="IFieldCache.GetInt64s(Index.AtomicReader,string,bool)"/>. This works with all
+        /// <see cref="long"/> fields containing exactly one numeric term in the field. The range can be half-open by setting one
+        /// of the values to <c>null</c>.
         /// </summary>
         public static FieldCacheRangeFilter<long?> NewInt64Range(string field, long? lowerVal, long? upperVal, bool includeLower, bool includeUpper)
         {
@@ -789,9 +789,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a numeric range filter using <seealso cref="IFieldCache#getLongs(AtomicReader,String,FieldCache.LongParser,boolean)"/>. this works with all
-        /// long fields containing exactly one numeric term in the field. The range can be half-open by setting one
-        /// of the values to <code>null</code>.
+        /// Creates a numeric range filter using <see cref="IFieldCache.GetInt64s(Index.AtomicReader,string,FieldCache.IInt64Parser,bool)"/>. This works with all
+        /// <see cref="long"/> fields containing exactly one numeric term in the field. The range can be half-open by setting one
+        /// of the values to <c>null</c>.
         /// <para/>
         /// NOTE: this was newLongRange() in Lucene
         /// </summary>
@@ -801,9 +801,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a numeric range filter using <seealso cref="IFieldCache#getFloats(AtomicReader,String,boolean)"/>. this works with all
-        /// float fields containing exactly one numeric term in the field. The range can be half-open by setting one
-        /// of the values to <code>null</code>.
+        /// Creates a numeric range filter using <see cref="IFieldCache.GetSingles(Index.AtomicReader,string,bool)"/>. This works with all
+        /// <see cref="float"/> fields containing exactly one numeric term in the field. The range can be half-open by setting one
+        /// of the values to <c>null</c>.
         /// <para/>
         /// NOTE: this was newFloatRange() in Lucene
         /// </summary>
@@ -813,9 +813,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a numeric range filter using <seealso cref="IFieldCache#getFloats(AtomicReader,String,FieldCache.FloatParser,boolean)"/>. this works with all
-        /// float fields containing exactly one numeric term in the field. The range can be half-open by setting one
-        /// of the values to <code>null</code>.
+        /// Creates a numeric range filter using <see cref="IFieldCache.GetSingles(Index.AtomicReader,string,FieldCache.ISingleParser,bool)"/>. This works with all
+        /// <see cref="float"/> fields containing exactly one numeric term in the field. The range can be half-open by setting one
+        /// of the values to <c>null</c>.
         /// <para/>
         /// NOTE: this was newFloatRange() in Lucene
         /// </summary>
@@ -825,9 +825,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a numeric range filter using <seealso cref="IFieldCache#getDoubles(AtomicReader,String,boolean)"/>. this works with all
-        /// double fields containing exactly one numeric term in the field. The range can be half-open by setting one
-        /// of the values to <code>null</code>.
+        /// Creates a numeric range filter using <see cref="IFieldCache.GetDoubles(Index.AtomicReader,string,bool)"/>. This works with all
+        /// <see cref="double"/> fields containing exactly one numeric term in the field. The range can be half-open by setting one
+        /// of the values to <c>null</c>.
         /// </summary>
         public static FieldCacheRangeFilter<double?> NewDoubleRange(string field, double? lowerVal, double? upperVal, bool includeLower, bool includeUpper)
         {
@@ -835,9 +835,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a numeric range filter using <seealso cref="IFieldCache#getDoubles(AtomicReader,String,FieldCache.DoubleParser,boolean)"/>. this works with all
-        /// double fields containing exactly one numeric term in the field. The range can be half-open by setting one
-        /// of the values to <code>null</code>.
+        /// Creates a numeric range filter using <see cref="IFieldCache.GetDoubles(Index.AtomicReader,string,FieldCache.IDoubleParser,bool)"/>. This works with all
+        /// <see cref="double"/> fields containing exactly one numeric term in the field. The range can be half-open by setting one
+        /// of the values to <c>null</c>.
         /// </summary>
         public static FieldCacheRangeFilter<double?> NewDoubleRange(string field, FieldCache.IDoubleParser parser, double? lowerVal, double? upperVal, bool includeLower, bool includeUpper)
         {
@@ -868,862 +868,12 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// this method is implemented for each data type </summary>
+        /// This method is implemented for each data type </summary>
         public override abstract DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs);
 
-        /*
-	  /// <summary>
-	  /// Creates a string range filter using <seealso cref="FieldCache#getTermsIndex"/>. this works with all
-	  /// fields containing zero or one term in the field. The range can be half-open by setting one
-	  /// of the values to <code>null</code>.
-	  /// </summary>
-	  public static FieldCacheRangeFilter<string> NewStringRange(string field, string lowerVal, string upperVal, bool includeLower, bool includeUpper)
-	  {
-		return new FieldCacheRangeFilterAnonymousInnerClassHelper(field, lowerVal, upperVal, includeLower, includeUpper);
-	  }
-
-	  private class FieldCacheRangeFilterAnonymousInnerClassHelper : FieldCacheRangeFilter<string>
-	  {
-		  private string Field;
-		  private string LowerVal;
-		  private string UpperVal;
-		  private bool IncludeLower;
-		  private bool IncludeUpper;
-
-		  public FieldCacheRangeFilterAnonymousInnerClassHelper(string field, string lowerVal, string upperVal, bool includeLower, bool includeUpper) : base(field, null, lowerVal, upperVal, includeLower, includeUpper)
-		  {
-			  this.Field = field;
-			  this.LowerVal = lowerVal;
-			  this.UpperVal = upperVal;
-			  this.IncludeLower = includeLower;
-			  this.IncludeUpper = includeUpper;
-		  }
-
-		  public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs)
-		  {
-			SortedDocValues fcsi = FieldCache_Fields.DEFAULT.GetTermsIndex((context.AtomicReader), Field);
-			int lowerPoint = LowerVal == null ? - 1 : fcsi.LookupTerm(new BytesRef(LowerVal));
-			int upperPoint = UpperVal == null ? - 1 : fcsi.LookupTerm(new BytesRef(UpperVal));
-
-			int inclusiveLowerPoint, inclusiveUpperPoint;
-
-			// Hints:
-			// * binarySearchLookup returns -1, if value was null.
-			// * the value is <0 if no exact hit was found, the returned value
-			//   is (-(insertion point) - 1)
-			if (lowerPoint == -1 && LowerVal == null)
-			{
-			  inclusiveLowerPoint = 0;
-			}
-			else if (IncludeLower && lowerPoint >= 0)
-			{
-			  inclusiveLowerPoint = lowerPoint;
-			}
-			else if (lowerPoint >= 0)
-			{
-			  inclusiveLowerPoint = lowerPoint + 1;
-			}
-			else
-			{
-			  inclusiveLowerPoint = Math.Max(0, -lowerPoint - 1);
-			}
-
-			if (upperPoint == -1 && UpperVal == null)
-			{
-			  inclusiveUpperPoint = int.MaxValue;
-			}
-			else if (IncludeUpper && upperPoint >= 0)
-			{
-			  inclusiveUpperPoint = upperPoint;
-			}
-			else if (upperPoint >= 0)
-			{
-			  inclusiveUpperPoint = upperPoint - 1;
-			}
-			else
-			{
-			  inclusiveUpperPoint = -upperPoint - 2;
-			}
-
-			if (inclusiveUpperPoint < 0 || inclusiveLowerPoint > inclusiveUpperPoint)
-			{
-			  return null;
-			}
-
-			Debug.Assert(inclusiveLowerPoint >= 0 && inclusiveUpperPoint >= 0);
-
-			return new FieldCacheDocIdSetAnonymousInnerClassHelper(this, context.Reader.MaxDoc, acceptDocs, fcsi, inclusiveLowerPoint, inclusiveUpperPoint);
-		  }
-
-		  private class FieldCacheDocIdSetAnonymousInnerClassHelper : FieldCacheDocIdSet
-		  {
-			  private readonly FieldCacheRangeFilterAnonymousInnerClassHelper OuterInstance;
-
-			  private SortedDocValues Fcsi;
-			  private int InclusiveLowerPoint;
-			  private int InclusiveUpperPoint;
-
-			  public FieldCacheDocIdSetAnonymousInnerClassHelper(FieldCacheRangeFilterAnonymousInnerClassHelper outerInstance, int maxDoc, Bits acceptDocs, SortedDocValues fcsi, int inclusiveLowerPoint, int inclusiveUpperPoint) : base(maxDoc, acceptDocs)
-			  {
-                  this.OuterInstance = outerInstance;
-				  this.Fcsi = fcsi;
-				  this.InclusiveLowerPoint = inclusiveLowerPoint;
-				  this.InclusiveUpperPoint = inclusiveUpperPoint;
-			  }
-
-			  protected internal override sealed bool MatchDoc(int doc)
-			  {
-				int docOrd = Fcsi.GetOrd(doc);
-				return docOrd >= InclusiveLowerPoint && docOrd <= InclusiveUpperPoint;
-			  }
-		  }
-	  }
-
-	  /// <summary>
-	  /// Creates a BytesRef range filter using <seealso cref="FieldCache#getTermsIndex"/>. this works with all
-	  /// fields containing zero or one term in the field. The range can be half-open by setting one
-	  /// of the values to <code>null</code>.
-	  /// </summary>
-	  // TODO: bogus that newStringRange doesnt share this code... generics hell
-	  public static FieldCacheRangeFilter<BytesRef> NewBytesRefRange(string field, BytesRef lowerVal, BytesRef upperVal, bool includeLower, bool includeUpper)
-	  {
-		return new FieldCacheRangeFilterAnonymousInnerClassHelper2(field, lowerVal, upperVal, includeLower, includeUpper);
-	  }
-
-	  private class FieldCacheRangeFilterAnonymousInnerClassHelper2 : FieldCacheRangeFilter<BytesRef>
-	  {
-		  private string Field;
-		  private BytesRef LowerVal;
-		  private BytesRef UpperVal;
-		  private bool IncludeLower;
-		  private bool IncludeUpper;
-
-		  public FieldCacheRangeFilterAnonymousInnerClassHelper2(string field, BytesRef lowerVal, BytesRef upperVal, bool includeLower, bool includeUpper) : base(field, null, lowerVal, upperVal, includeLower, includeUpper)
-		  {
-			  this.Field = field;
-			  this.LowerVal = lowerVal;
-			  this.UpperVal = upperVal;
-			  this.IncludeLower = includeLower;
-			  this.IncludeUpper = includeUpper;
-		  }
-
-		  public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs)
-		  {
-			SortedDocValues fcsi = FieldCache_Fields.DEFAULT.GetTermsIndex((context.AtomicReader), Field);
-			int lowerPoint = LowerVal == null ? - 1 : fcsi.LookupTerm(LowerVal);
-			int upperPoint = UpperVal == null ? - 1 : fcsi.LookupTerm(UpperVal);
-
-			int inclusiveLowerPoint, inclusiveUpperPoint;
-
-			// Hints:
-			// * binarySearchLookup returns -1, if value was null.
-			// * the value is <0 if no exact hit was found, the returned value
-			//   is (-(insertion point) - 1)
-			if (lowerPoint == -1 && LowerVal == null)
-			{
-			  inclusiveLowerPoint = 0;
-			}
-			else if (IncludeLower && lowerPoint >= 0)
-			{
-			  inclusiveLowerPoint = lowerPoint;
-			}
-			else if (lowerPoint >= 0)
-			{
-			  inclusiveLowerPoint = lowerPoint + 1;
-			}
-			else
-			{
-			  inclusiveLowerPoint = Math.Max(0, -lowerPoint - 1);
-			}
-
-			if (upperPoint == -1 && UpperVal == null)
-			{
-			  inclusiveUpperPoint = int.MaxValue;
-			}
-			else if (IncludeUpper && upperPoint >= 0)
-			{
-			  inclusiveUpperPoint = upperPoint;
-			}
-			else if (upperPoint >= 0)
-			{
-			  inclusiveUpperPoint = upperPoint - 1;
-			}
-			else
-			{
-			  inclusiveUpperPoint = -upperPoint - 2;
-			}
-
-			if (inclusiveUpperPoint < 0 || inclusiveLowerPoint > inclusiveUpperPoint)
-			{
-			  return null;
-			}
-
-			Debug.Assert(inclusiveLowerPoint >= 0 && inclusiveUpperPoint >= 0);
-
-			return new FieldCacheDocIdSetAnonymousInnerClassHelper2(this, context.Reader.MaxDoc, acceptDocs, fcsi, inclusiveLowerPoint, inclusiveUpperPoint);
-		  }
-
-		  private class FieldCacheDocIdSetAnonymousInnerClassHelper2 : FieldCacheDocIdSet
-		  {
-			  private readonly FieldCacheRangeFilterAnonymousInnerClassHelper2 OuterInstance;
-
-			  private SortedDocValues Fcsi;
-			  private int InclusiveLowerPoint;
-			  private int InclusiveUpperPoint;
-
-			  public FieldCacheDocIdSetAnonymousInnerClassHelper2(FieldCacheRangeFilterAnonymousInnerClassHelper2 outerInstance, int maxDoc, Bits acceptDocs, SortedDocValues fcsi, int inclusiveLowerPoint, int inclusiveUpperPoint) : base(maxDoc, acceptDocs)
-			  {
-                  this.OuterInstance = outerInstance;
-				  this.Fcsi = fcsi;
-				  this.InclusiveLowerPoint = inclusiveLowerPoint;
-				  this.InclusiveUpperPoint = inclusiveUpperPoint;
-			  }
-
-			  protected internal override sealed bool MatchDoc(int doc)
-			  {
-				int docOrd = Fcsi.GetOrd(doc);
-				return docOrd >= InclusiveLowerPoint && docOrd <= InclusiveUpperPoint;
-			  }
-		  }
-	  }
-
-	  /// <summary>
-	  /// Creates a numeric range filter using <seealso cref="FieldCache#getBytes(AtomicReader,String,boolean)"/>. this works with all
-	  /// byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
-	  /// of the values to <code>null</code>.
-	  /// </summary>
-	  [Obsolete]
-	  public static FieldCacheRangeFilter<sbyte?> NewByteRange(string field, sbyte? lowerVal, sbyte? upperVal, bool includeLower, bool includeUpper)
-	  {
-		return NewByteRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
-	  }
-
-	  /// <summary>
-	  /// Creates a numeric range filter using <seealso cref="FieldCache#getBytes(AtomicReader,String,FieldCache.ByteParser,boolean)"/>. this works with all
-	  /// byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
-	  /// of the values to <code>null</code>.
-	  /// </summary>
-	  [Obsolete]
-      public static FieldCacheRangeFilter<sbyte?> NewByteRange(string field, FieldCache_Fields.IByteParser parser, sbyte? lowerVal, sbyte? upperVal, bool includeLower, bool includeUpper)
-	  {
-		return new FieldCacheRangeFilterAnonymousInnerClassHelper3(field, parser, lowerVal, upperVal, includeLower, includeUpper);
-	  }
-
-	  private class FieldCacheRangeFilterAnonymousInnerClassHelper3 : FieldCacheRangeFilter<sbyte?>
-	  {
-		  private string Field;
-          private FieldCache_Fields.IByteParser Parser;
-		  private sbyte? LowerVal;
-		  private sbyte? UpperVal;
-		  private bool IncludeLower;
-		  private bool IncludeUpper;
-
-          public FieldCacheRangeFilterAnonymousInnerClassHelper3(string field, FieldCache_Fields.IByteParser parser, sbyte? lowerVal, sbyte? upperVal, bool includeLower, bool includeUpper)
-              : base(field, parser, lowerVal, upperVal, includeLower, includeUpper)
-		  {
-			  this.Field = field;
-			  this.Parser = parser;
-			  this.LowerVal = lowerVal;
-			  this.UpperVal = upperVal;
-			  this.IncludeLower = includeLower;
-			  this.IncludeUpper = includeUpper;
-		  }
-
-		  public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs)
-		  {
-			sbyte inclusiveLowerPoint, inclusiveUpperPoint;
-			if (LowerVal != null)
-			{
-			  sbyte i = (sbyte)LowerVal;
-			  if (!IncludeLower && i == sbyte.MaxValue)
-			  {
-				return null;
-			  }
-			  inclusiveLowerPoint = (sbyte)(IncludeLower ? i : (i + 1));
-			}
-			else
-			{
-			  inclusiveLowerPoint = sbyte.MinValue;
-			}
-			if (UpperVal != null)
-			{
-			  sbyte i = (sbyte)UpperVal;
-			  if (!IncludeUpper && i == sbyte.MinValue)
-			  {
-				return null;
-			  }
-			  inclusiveUpperPoint = (sbyte)(IncludeUpper ? i : (i - 1));
-			}
-			else
-			{
-			  inclusiveUpperPoint = sbyte.MaxValue;
-			}
-
-			if (inclusiveLowerPoint > inclusiveUpperPoint)
-			{
-			  return null;
-			}
-
-            FieldCache_Fields.Bytes values = FieldCache_Fields.DEFAULT.GetBytes((context.AtomicReader), Field, (FieldCache_Fields.IByteParser)Parser, false);
-			return new FieldCacheDocIdSetAnonymousInnerClassHelper3(this, context.Reader.MaxDoc, acceptDocs, inclusiveLowerPoint, inclusiveUpperPoint, values);
-		  }
-
-		  private class FieldCacheDocIdSetAnonymousInnerClassHelper3 : FieldCacheDocIdSet
-		  {
-			  private readonly FieldCacheRangeFilterAnonymousInnerClassHelper3 OuterInstance;
-
-			  private sbyte InclusiveLowerPoint;
-			  private sbyte InclusiveUpperPoint;
-              private FieldCache_Fields.Bytes Values;
-
-              public FieldCacheDocIdSetAnonymousInnerClassHelper3(FieldCacheRangeFilterAnonymousInnerClassHelper3 outerInstance, int maxDoc, Bits acceptDocs, sbyte inclusiveLowerPoint, sbyte inclusiveUpperPoint, FieldCache_Fields.Bytes values)
-                  : base(maxDoc, acceptDocs)
-			  {
-                  this.OuterInstance = outerInstance;
-				  this.InclusiveLowerPoint = inclusiveLowerPoint;
-				  this.InclusiveUpperPoint = inclusiveUpperPoint;
-				  this.Values = values;
-			  }
-
-			  protected internal override bool MatchDoc(int doc)
-			  {
-				sbyte value = Values.Get(doc);
-				return value >= InclusiveLowerPoint && value <= InclusiveUpperPoint;
-			  }
-		  }
-	  }
-
-	  /// <summary>
-	  /// Creates a numeric range filter using <seealso cref="FieldCache#getShorts(AtomicReader,String,boolean)"/>. this works with all
-	  /// short fields containing exactly one numeric term in the field. The range can be half-open by setting one
-	  /// of the values to <code>null</code>.
-	  /// </summary>
-	  [Obsolete]
-	  public static FieldCacheRangeFilter<short?> NewShortRange(string field, short? lowerVal, short? upperVal, bool includeLower, bool includeUpper)
-	  {
-		return NewShortRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
-	  }
-
-	  /// <summary>
-	  /// Creates a numeric range filter using <seealso cref="FieldCache#getShorts(AtomicReader,String,FieldCache.ShortParser,boolean)"/>. this works with all
-	  /// short fields containing exactly one numeric term in the field. The range can be half-open by setting one
-	  /// of the values to <code>null</code>.
-	  /// </summary>
-	  [Obsolete]
-      public static FieldCacheRangeFilter<short?> NewShortRange(string field, FieldCache_Fields.IShortParser parser, short? lowerVal, short? upperVal, bool includeLower, bool includeUpper)
-	  {
-		return new FieldCacheRangeFilterAnonymousInnerClassHelper4(field, parser, lowerVal, upperVal, includeLower, includeUpper);
-	  }
-
-	  private class FieldCacheRangeFilterAnonymousInnerClassHelper4 : FieldCacheRangeFilter<short?>
-	  {
-		  private string Field;
-          private FieldCache_Fields.IShortParser Parser;
-		  private short? LowerVal;
-		  private short? UpperVal;
-		  private bool IncludeLower;
-		  private bool IncludeUpper;
-
-          public FieldCacheRangeFilterAnonymousInnerClassHelper4(string field, FieldCache_Fields.IShortParser parser, short? lowerVal, short? upperVal, bool includeLower, bool includeUpper)
-              : base(field, parser, lowerVal, upperVal, includeLower, includeUpper)
-		  {
-			  this.Field = field;
-			  this.Parser = parser;
-			  this.LowerVal = lowerVal;
-			  this.UpperVal = upperVal;
-			  this.IncludeLower = includeLower;
-			  this.IncludeUpper = includeUpper;
-		  }
-
-		  public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs)
-		  {
-			short inclusiveLowerPoint, inclusiveUpperPoint;
-			if (LowerVal != null)
-			{
-			  short i = (short)LowerVal;
-			  if (!IncludeLower && i == short.MaxValue)
-			  {
-				return null;
-			  }
-			  inclusiveLowerPoint = (short)(IncludeLower ? i : (i + 1));
-			}
-			else
-			{
-			  inclusiveLowerPoint = short.MinValue;
-			}
-			if (UpperVal != null)
-			{
-			  short i = (short)UpperVal;
-			  if (!IncludeUpper && i == short.MinValue)
-			  {
-				return null;
-			  }
-			  inclusiveUpperPoint = (short)(IncludeUpper ? i : (i - 1));
-			}
-			else
-			{
-			  inclusiveUpperPoint = short.MaxValue;
-			}
-
-			if (inclusiveLowerPoint > inclusiveUpperPoint)
-			{
-			  return null;
-			}
-
-            FieldCache_Fields.Shorts values = FieldCache_Fields.DEFAULT.GetShorts((context.AtomicReader), Field, (FieldCache_Fields.IShortParser)Parser, false);
-			return new FieldCacheDocIdSetAnonymousInnerClassHelper4(this, context.Reader.MaxDoc, acceptDocs, inclusiveLowerPoint, inclusiveUpperPoint, values);
-		  }
-
-		  private class FieldCacheDocIdSetAnonymousInnerClassHelper4 : FieldCacheDocIdSet
-		  {
-			  private readonly FieldCacheRangeFilterAnonymousInnerClassHelper4 OuterInstance;
-
-			  private short InclusiveLowerPoint;
-			  private short InclusiveUpperPoint;
-              private FieldCache_Fields.Shorts Values;
-
-              public FieldCacheDocIdSetAnonymousInnerClassHelper4(FieldCacheRangeFilterAnonymousInnerClassHelper4 outerInstance, int maxDoc, Bits acceptDocs, short inclusiveLowerPoint, short inclusiveUpperPoint, FieldCache_Fields.Shorts values)
-                  : base(maxDoc, acceptDocs)
-			  {
-                  this.OuterInstance = outerInstance;
-				  this.InclusiveLowerPoint = inclusiveLowerPoint;
-				  this.InclusiveUpperPoint = inclusiveUpperPoint;
-				  this.Values = values;
-			  }
-
-			  protected internal override bool MatchDoc(int doc)
-			  {
-				short value = Values.Get(doc);
-				return value >= InclusiveLowerPoint && value <= InclusiveUpperPoint;
-			  }
-		  }
-	  }
-
-	  /// <summary>
-	  /// Creates a numeric range filter using <seealso cref="FieldCache#getInts(AtomicReader,String,boolean)"/>. this works with all
-	  /// int fields containing exactly one numeric term in the field. The range can be half-open by setting one
-	  /// of the values to <code>null</code>.
-	  /// </summary>
-	  public static FieldCacheRangeFilter<int?> NewIntRange(string field, int? lowerVal, int? upperVal, bool includeLower, bool includeUpper)
-	  {
-		return NewIntRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
-	  }
-
-	  /// <summary>
-	  /// Creates a numeric range filter using <seealso cref="FieldCache#getInts(AtomicReader,String,FieldCache.IntParser,boolean)"/>. this works with all
-	  /// int fields containing exactly one numeric term in the field. The range can be half-open by setting one
-	  /// of the values to <code>null</code>.
-	  /// </summary>
-      public static FieldCacheRangeFilter<int?> NewIntRange(string field, FieldCache_Fields.IIntParser parser, int? lowerVal, int? upperVal, bool includeLower, bool includeUpper)
-	  {
-		return new FieldCacheRangeFilterAnonymousInnerClassHelper5(field, parser, lowerVal, upperVal, includeLower, includeUpper);
-	  }
-
-	  private class FieldCacheRangeFilterAnonymousInnerClassHelper5 : FieldCacheRangeFilter<int?>
-	  {
-		  private string Field;
-          private FieldCache_Fields.IIntParser Parser;
-		  private int? LowerVal;
-		  private int? UpperVal;
-		  private bool IncludeLower;
-		  private bool IncludeUpper;
-
-          public FieldCacheRangeFilterAnonymousInnerClassHelper5(string field, FieldCache_Fields.IIntParser parser, int? lowerVal, int? upperVal, bool includeLower, bool includeUpper)
-              : base(field, parser, lowerVal, upperVal, includeLower, includeUpper)
-		  {
-			  this.Field = field;
-			  this.Parser = parser;
-			  this.LowerVal = lowerVal;
-			  this.UpperVal = upperVal;
-			  this.IncludeLower = includeLower;
-			  this.IncludeUpper = includeUpper;
-		  }
-
-		  public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs)
-		  {
-			int inclusiveLowerPoint, inclusiveUpperPoint;
-			if (LowerVal != null)
-			{
-			  int i = (int)LowerVal;
-			  if (!IncludeLower && i == int.MaxValue)
-			  {
-				return null;
-			  }
-			  inclusiveLowerPoint = IncludeLower ? i : (i + 1);
-			}
-			else
-			{
-			  inclusiveLowerPoint = int.MinValue;
-			}
-			if (UpperVal != null)
-			{
-			  int i = (int)UpperVal;
-			  if (!IncludeUpper && i == int.MinValue)
-			  {
-				return null;
-			  }
-			  inclusiveUpperPoint = IncludeUpper ? i : (i - 1);
-			}
-			else
-			{
-			  inclusiveUpperPoint = int.MaxValue;
-			}
-
-			if (inclusiveLowerPoint > inclusiveUpperPoint)
-			{
-			  return null;
-			}
-
-            FieldCache_Fields.Ints values = FieldCache_Fields.DEFAULT.GetInts((context.AtomicReader), Field, (FieldCache_Fields.IIntParser)Parser, false);
-			return new FieldCacheDocIdSetAnonymousInnerClassHelper5(this, context.Reader.MaxDoc, acceptDocs, inclusiveLowerPoint, inclusiveUpperPoint, values);
-		  }
-
-		  private class FieldCacheDocIdSetAnonymousInnerClassHelper5 : FieldCacheDocIdSet
-		  {
-			  private readonly FieldCacheRangeFilterAnonymousInnerClassHelper5 OuterInstance;
-
-			  private int InclusiveLowerPoint;
-			  private int InclusiveUpperPoint;
-              private FieldCache_Fields.Ints Values;
-
-              public FieldCacheDocIdSetAnonymousInnerClassHelper5(FieldCacheRangeFilterAnonymousInnerClassHelper5 outerInstance, int maxDoc, Bits acceptDocs, int inclusiveLowerPoint, int inclusiveUpperPoint, FieldCache_Fields.Ints values)
-                  : base(maxDoc, acceptDocs)
-			  {
-                  this.OuterInstance = outerInstance;
-				  this.InclusiveLowerPoint = inclusiveLowerPoint;
-				  this.InclusiveUpperPoint = inclusiveUpperPoint;
-				  this.Values = values;
-			  }
-
-			  protected internal override bool MatchDoc(int doc)
-			  {
-				int value = Values.Get(doc);
-				return value >= InclusiveLowerPoint && value <= InclusiveUpperPoint;
-			  }
-		  }
-	  }
-
-	  /// <summary>
-	  /// Creates a numeric range filter using <seealso cref="FieldCache#getLongs(AtomicReader,String,boolean)"/>. this works with all
-	  /// long fields containing exactly one numeric term in the field. The range can be half-open by setting one
-	  /// of the values to <code>null</code>.
-	  /// </summary>
-	  public static FieldCacheRangeFilter<long?> NewLongRange(string field, long? lowerVal, long? upperVal, bool includeLower, bool includeUpper)
-	  {
-		return NewLongRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
-	  }
-
-	  /// <summary>
-	  /// Creates a numeric range filter using <seealso cref="FieldCache#getLongs(AtomicReader,String,FieldCache.LongParser,boolean)"/>. this works with all
-	  /// long fields containing exactly one numeric term in the field. The range can be half-open by setting one
-	  /// of the values to <code>null</code>.
-	  /// </summary>
-      public static FieldCacheRangeFilter<long?> NewLongRange(string field, FieldCache_Fields.ILongParser parser, long? lowerVal, long? upperVal, bool includeLower, bool includeUpper)
-	  {
-		return new FieldCacheRangeFilterAnonymousInnerClassHelper6(field, parser, lowerVal, upperVal, includeLower, includeUpper);
-	  }
-
-	  private class FieldCacheRangeFilterAnonymousInnerClassHelper6 : FieldCacheRangeFilter<long?>
-	  {
-		  private string Field;
-          private FieldCache_Fields.ILongParser Parser;
-		  private long? LowerVal;
-		  private long? UpperVal;
-		  private bool IncludeLower;
-		  private bool IncludeUpper;
-
-          public FieldCacheRangeFilterAnonymousInnerClassHelper6(string field, FieldCache_Fields.ILongParser parser, long? lowerVal, long? upperVal, bool includeLower, bool includeUpper)
-              : base(field, parser, lowerVal, upperVal, includeLower, includeUpper)
-		  {
-			  this.Field = field;
-			  this.Parser = parser;
-			  this.LowerVal = lowerVal;
-			  this.UpperVal = upperVal;
-			  this.IncludeLower = includeLower;
-			  this.IncludeUpper = includeUpper;
-		  }
-
-		  public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs)
-		  {
-			long inclusiveLowerPoint, inclusiveUpperPoint;
-			if (LowerVal != null)
-			{
-			  long i = (long)LowerVal;
-			  if (!IncludeLower && i == long.MaxValue)
-			  {
-				return null;
-			  }
-			  inclusiveLowerPoint = IncludeLower ? i : (i + 1L);
-			}
-			else
-			{
-			  inclusiveLowerPoint = long.MinValue;
-			}
-			if (UpperVal != null)
-			{
-			  long i = (long)UpperVal;
-			  if (!IncludeUpper && i == long.MinValue)
-			  {
-				return null;
-			  }
-			  inclusiveUpperPoint = IncludeUpper ? i : (i - 1L);
-			}
-			else
-			{
-			  inclusiveUpperPoint = long.MaxValue;
-			}
-
-			if (inclusiveLowerPoint > inclusiveUpperPoint)
-			{
-			  return null;
-			}
-
-            FieldCache_Fields.Longs values = FieldCache_Fields.DEFAULT.GetLongs((context.AtomicReader), Field, (FieldCache_Fields.ILongParser)Parser, false);
-			return new FieldCacheDocIdSetAnonymousInnerClassHelper6(this, context.Reader.MaxDoc, acceptDocs, inclusiveLowerPoint, inclusiveUpperPoint, values);
-		  }
-
-		  private class FieldCacheDocIdSetAnonymousInnerClassHelper6 : FieldCacheDocIdSet
-		  {
-			  private readonly FieldCacheRangeFilterAnonymousInnerClassHelper6 OuterInstance;
-
-			  private long InclusiveLowerPoint;
-			  private long InclusiveUpperPoint;
-              private FieldCache_Fields.Longs Values;
-
-              public FieldCacheDocIdSetAnonymousInnerClassHelper6(FieldCacheRangeFilterAnonymousInnerClassHelper6 outerInstance, int maxDoc, Bits acceptDocs, long inclusiveLowerPoint, long inclusiveUpperPoint, FieldCache_Fields.Longs values)
-                  : base(maxDoc, acceptDocs)
-			  {
-                  this.OuterInstance = outerInstance;
-				  this.InclusiveLowerPoint = inclusiveLowerPoint;
-				  this.InclusiveUpperPoint = inclusiveUpperPoint;
-				  this.Values = values;
-			  }
-
-			  protected internal override bool MatchDoc(int doc)
-			  {
-				long value = Values.Get(doc);
-				return value >= InclusiveLowerPoint && value <= InclusiveUpperPoint;
-			  }
-		  }
-	  }
-
-	  /// <summary>
-	  /// Creates a numeric range filter using <seealso cref="FieldCache#getFloats(AtomicReader,String,boolean)"/>. this works with all
-	  /// float fields containing exactly one numeric term in the field. The range can be half-open by setting one
-	  /// of the values to <code>null</code>.
-	  /// </summary>
-	  public static FieldCacheRangeFilter<float?> NewFloatRange(string field, float? lowerVal, float? upperVal, bool includeLower, bool includeUpper)
-	  {
-		return NewFloatRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
-	  }
-
-	  /// <summary>
-	  /// Creates a numeric range filter using <seealso cref="FieldCache#getFloats(AtomicReader,String,FieldCache.FloatParser,boolean)"/>. this works with all
-	  /// float fields containing exactly one numeric term in the field. The range can be half-open by setting one
-	  /// of the values to <code>null</code>.
-	  /// </summary>
-      public static FieldCacheRangeFilter<float?> NewFloatRange(string field, FieldCache_Fields.IFloatParser parser, float? lowerVal, float? upperVal, bool includeLower, bool includeUpper)
-	  {
-		return new FieldCacheRangeFilterAnonymousInnerClassHelper7(field, parser, lowerVal, upperVal, includeLower, includeUpper);
-	  }
-
-	  private class FieldCacheRangeFilterAnonymousInnerClassHelper7 : FieldCacheRangeFilter<float?>
-	  {
-		  private string Field;
-          private FieldCache_Fields.IFloatParser Parser;
-		  private float? LowerVal;
-		  private float? UpperVal;
-		  private bool IncludeLower;
-		  private bool IncludeUpper;
-
-          public FieldCacheRangeFilterAnonymousInnerClassHelper7(string field, FieldCache_Fields.IFloatParser parser, float? lowerVal, float? upperVal, bool includeLower, bool includeUpper)
-              : base(field, parser, lowerVal, upperVal, includeLower, includeUpper)
-		  {
-			  this.Field = field;
-			  this.Parser = parser;
-			  this.LowerVal = lowerVal;
-			  this.UpperVal = upperVal;
-			  this.IncludeLower = includeLower;
-			  this.IncludeUpper = includeUpper;
-		  }
-
-		  public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs)
-		  {
-			// we transform the floating point numbers to sortable integers
-			// using NumericUtils to easier find the next bigger/lower value
-			float inclusiveLowerPoint, inclusiveUpperPoint;
-			if (LowerVal != null)
-			{
-			  float f = (float)LowerVal;
-			  if (!IncludeUpper && f > 0.0f && float.IsInfinity(f))
-			  {
-				return null;
-			  }
-			  int i = NumericUtils.FloatToSortableInt(f);
-			  inclusiveLowerPoint = NumericUtils.SortableIntToFloat(IncludeLower ? i : (i + 1));
-			}
-			else
-			{
-			  inclusiveLowerPoint = float.NegativeInfinity;
-			}
-			if (UpperVal != null)
-			{
-			  float f = (float)UpperVal;
-			  if (!IncludeUpper && f < 0.0f && float.IsInfinity(f))
-			  {
-				return null;
-			  }
-			  int i = NumericUtils.FloatToSortableInt(f);
-			  inclusiveUpperPoint = NumericUtils.SortableIntToFloat(IncludeUpper ? i : (i - 1));
-			}
-			else
-			{
-			  inclusiveUpperPoint = float.PositiveInfinity;
-			}
-
-			if (inclusiveLowerPoint > inclusiveUpperPoint)
-			{
-			  return null;
-			}
-
-            FieldCache_Fields.Floats values = FieldCache_Fields.DEFAULT.GetFloats((context.AtomicReader), Field, (FieldCache_Fields.IFloatParser)Parser, false);
-			return new FieldCacheDocIdSetAnonymousInnerClassHelper7(this, context.Reader.MaxDoc, acceptDocs, inclusiveLowerPoint, inclusiveUpperPoint, values);
-		  }
-
-		  private class FieldCacheDocIdSetAnonymousInnerClassHelper7 : FieldCacheDocIdSet
-		  {
-			  private readonly FieldCacheRangeFilterAnonymousInnerClassHelper7 OuterInstance;
-
-			  private float InclusiveLowerPoint;
-			  private float InclusiveUpperPoint;
-              private FieldCache_Fields.Floats Values;
-
-              public FieldCacheDocIdSetAnonymousInnerClassHelper7(FieldCacheRangeFilterAnonymousInnerClassHelper7 outerInstance, int maxDoc, Bits acceptDocs, float inclusiveLowerPoint, float inclusiveUpperPoint, FieldCache_Fields.Floats values)
-                  : base(maxDoc, acceptDocs)
-			  {
-                  this.OuterInstance = outerInstance;
-				  this.InclusiveLowerPoint = inclusiveLowerPoint;
-				  this.InclusiveUpperPoint = inclusiveUpperPoint;
-				  this.Values = values;
-			  }
-
-			  protected internal override bool MatchDoc(int doc)
-			  {
-				float value = Values.Get(doc);
-				return value >= InclusiveLowerPoint && value <= InclusiveUpperPoint;
-			  }
-		  }
-	  }
-
-	  /// <summary>
-	  /// Creates a numeric range filter using <seealso cref="FieldCache#getDoubles(AtomicReader,String,boolean)"/>. this works with all
-	  /// double fields containing exactly one numeric term in the field. The range can be half-open by setting one
-	  /// of the values to <code>null</code>.
-	  /// </summary>
-	  public static FieldCacheRangeFilter<double?> NewDoubleRange(string field, double? lowerVal, double? upperVal, bool includeLower, bool includeUpper)
-	  {
-		return NewDoubleRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
-	  }
-
-	  /// <summary>
-	  /// Creates a numeric range filter using <seealso cref="FieldCache#getDoubles(AtomicReader,String,FieldCache.DoubleParser,boolean)"/>. this works with all
-	  /// double fields containing exactly one numeric term in the field. The range can be half-open by setting one
-	  /// of the values to <code>null</code>.
-	  /// </summary>
-      public static FieldCacheRangeFilter<double?> NewDoubleRange(string field, FieldCache_Fields.IDoubleParser parser, double? lowerVal, double? upperVal, bool includeLower, bool includeUpper)
-	  {
-		return new FieldCacheRangeFilterAnonymousInnerClassHelper8(field, parser, lowerVal, upperVal, includeLower, includeUpper);
-	  }
-
-	  private class FieldCacheRangeFilterAnonymousInnerClassHelper8 : FieldCacheRangeFilter<double?>
-	  {
-		  private string Field;
-          private FieldCache_Fields.IDoubleParser Parser;
-		  private double? LowerVal;
-		  private double? UpperVal;
-		  private bool IncludeLower;
-		  private bool IncludeUpper;
-
-          public FieldCacheRangeFilterAnonymousInnerClassHelper8(string field, FieldCache_Fields.IDoubleParser parser, double? lowerVal, double? upperVal, bool includeLower, bool includeUpper)
-              : base(field, parser, lowerVal, upperVal, includeLower, includeUpper)
-		  {
-			  this.Field = field;
-			  this.Parser = parser;
-			  this.LowerVal = lowerVal;
-			  this.UpperVal = upperVal;
-			  this.IncludeLower = includeLower;
-			  this.IncludeUpper = includeUpper;
-		  }
-
-		  public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs)
-		  {
-			// we transform the floating point numbers to sortable integers
-			// using NumericUtils to easier find the next bigger/lower value
-			double inclusiveLowerPoint, inclusiveUpperPoint;
-			if (LowerVal != null)
-			{
-			  double f = (double)LowerVal;
-			  if (!IncludeUpper && f > 0.0 && double.IsInfinity(f))
-			  {
-				return null;
-			  }
-			  long i = NumericUtils.DoubleToSortableLong(f);
-			  inclusiveLowerPoint = NumericUtils.SortableLongToDouble(IncludeLower ? i : (i + 1L));
-			}
-			else
-			{
-			  inclusiveLowerPoint = double.NegativeInfinity;
-			}
-			if (UpperVal != null)
-			{
-			  double f = (double)UpperVal;
-			  if (!IncludeUpper && f < 0.0 && double.IsInfinity(f))
-			  {
-				return null;
-			  }
-			  long i = NumericUtils.DoubleToSortableLong(f);
-			  inclusiveUpperPoint = NumericUtils.SortableLongToDouble(IncludeUpper ? i : (i - 1L));
-			}
-			else
-			{
-			  inclusiveUpperPoint = double.PositiveInfinity;
-			}
-
-			if (inclusiveLowerPoint > inclusiveUpperPoint)
-			{
-			  return null;
-			}
-
-            FieldCache_Fields.Doubles values = FieldCache_Fields.DEFAULT.GetDoubles((context.AtomicReader), Field, (FieldCache_Fields.IDoubleParser)Parser, false);
-			// ignore deleted docs if range doesn't contain 0
-			return new FieldCacheDocIdSetAnonymousInnerClassHelper8(this, context.Reader.MaxDoc, acceptDocs, inclusiveLowerPoint, inclusiveUpperPoint, values);
-		  }
-
-		  private class FieldCacheDocIdSetAnonymousInnerClassHelper8 : FieldCacheDocIdSet
-		  {
-			  private readonly FieldCacheRangeFilterAnonymousInnerClassHelper8 OuterInstance;
-
-			  private double InclusiveLowerPoint;
-			  private double InclusiveUpperPoint;
-              private FieldCache_Fields.Doubles Values;
-
-              public FieldCacheDocIdSetAnonymousInnerClassHelper8(FieldCacheRangeFilterAnonymousInnerClassHelper8 outerInstance, int maxDoc, Bits acceptDocs, double inclusiveLowerPoint, double inclusiveUpperPoint, FieldCache_Fields.Doubles values)
-                  : base(maxDoc, acceptDocs)
-			  {
-                  this.OuterInstance = outerInstance;
-				  this.InclusiveLowerPoint = inclusiveLowerPoint;
-				  this.InclusiveUpperPoint = inclusiveUpperPoint;
-				  this.Values = values;
-			  }
-
-			  protected internal override bool MatchDoc(int doc)
-			  {
-				double value = Values.Get(doc);
-				return value >= InclusiveLowerPoint && value <= InclusiveUpperPoint;
-			  }
-		  }
-	  }*/
 
+
+        // From line 516 in Lucene
         public override sealed string ToString()
         {
             StringBuilder sb = (new StringBuilder(field)).Append(":");
@@ -1783,14 +933,14 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns <code>true</code> if the lower endpoint is inclusive </summary>
+        /// Returns <c>true</c> if the lower endpoint is inclusive </summary>
         public virtual bool IncludesLower
         {
             get { return includeLower; }
         }
 
         /// <summary>
-        /// Returns <code>true</code> if the upper endpoint is inclusive </summary>
+        /// Returns <c>true</c> if the upper endpoint is inclusive </summary>
         public virtual bool IncludesUpper
         {
             get { return includeUpper; }
@@ -1817,7 +967,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns the current numeric parser ({@code null} for {@code T} is {@code String}} </summary>
+        /// Returns the current numeric parser (<c>null</c> for <typeparamref name="T"/> is <see cref="string"/>) </summary>
         public virtual FieldCache.IParser Parser
         {
             get

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/FieldCacheRewriteMethod.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/FieldCacheRewriteMethod.cs b/src/Lucene.Net/Search/FieldCacheRewriteMethod.cs
index d222765..e51248b 100644
--- a/src/Lucene.Net/Search/FieldCacheRewriteMethod.cs
+++ b/src/Lucene.Net/Search/FieldCacheRewriteMethod.cs
@@ -31,9 +31,10 @@ namespace Lucene.Net.Search
     using TermsEnum = Lucene.Net.Index.TermsEnum;
 
     /// <summary>
-    /// Rewrites MultiTermQueries into a filter, using the FieldCache for term enumeration.
-    /// <p>
-    /// this can be used to perform these queries against an unindexed docvalues field.
+    /// Rewrites <see cref="MultiTermQuery"/>s into a filter, using the <see cref="IFieldCache"/> for term enumeration.
+    /// <para/>
+    /// This can be used to perform these queries against an unindexed docvalues field.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -56,7 +57,7 @@ namespace Lucene.Net.Search
             protected internal readonly MultiTermQuery m_query;
 
             /// <summary>
-            /// Wrap a <seealso cref="MultiTermQuery"/> as a Filter.
+            /// Wrap a <see cref="MultiTermQuery"/> as a Filter.
             /// </summary>
             protected internal MultiTermQueryFieldCacheWrapperFilter(MultiTermQuery query)
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/FieldCacheTermsFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/FieldCacheTermsFilter.cs b/src/Lucene.Net/Search/FieldCacheTermsFilter.cs
index 24c018c..9998866 100644
--- a/src/Lucene.Net/Search/FieldCacheTermsFilter.cs
+++ b/src/Lucene.Net/Search/FieldCacheTermsFilter.cs
@@ -26,71 +26,71 @@ namespace Lucene.Net.Search
     using SortedDocValues = Lucene.Net.Index.SortedDocValues;
 
     /// <summary>
-    /// A <seealso cref="Filter"/> that only accepts documents whose single
+    /// A <see cref="Filter"/> that only accepts documents whose single
     /// term value in the specified field is contained in the
     /// provided set of allowed terms.
     ///
-    /// <p/>
+    /// <para/>
     ///
-    /// this is the same functionality as TermsFilter (from
+    /// This is the same functionality as TermsFilter (from
     /// queries/), except this filter requires that the
     /// field contains only a single term for all documents.
     /// Because of drastically different implementations, they
     /// also have different performance characteristics, as
     /// described below.
     ///
-    /// <p/>
+    /// <para/>
     ///
     /// The first invocation of this filter on a given field will
-    /// be slower, since a <seealso cref="SortedDocValues"/> must be
+    /// be slower, since a <see cref="SortedDocValues"/> must be
     /// created.  Subsequent invocations using the same field
     /// will re-use this cache.  However, as with all
-    /// functionality based on <seealso cref="FieldCache"/>, persistent RAM
+    /// functionality based on <see cref="IFieldCache"/>, persistent RAM
     /// is consumed to hold the cache, and is not freed until the
-    /// <seealso cref="IndexReader"/> is closed.  In contrast, TermsFilter
+    /// <see cref="Index.IndexReader"/> is disposed.  In contrast, TermsFilter
     /// has no persistent RAM consumption.
     ///
     ///
-    /// <p/>
+    /// <para/>
     ///
     /// With each search, this filter translates the specified
-    /// set of Terms into a private <seealso cref="FixedBitSet"/> keyed by
-    /// term number per unique <seealso cref="IndexReader"/> (normally one
+    /// set of <see cref="Index.Terms"/> into a private <see cref="FixedBitSet"/> keyed by
+    /// term number per unique <see cref="Index.IndexReader"/> (normally one
     /// reader per segment).  Then, during matching, the term
     /// number for each docID is retrieved from the cache and
-    /// then checked for inclusion using the <seealso cref="FixedBitSet"/>.
+    /// then checked for inclusion using the <see cref="FixedBitSet"/>.
     /// Since all testing is done using RAM resident data
     /// structures, performance should be very fast, most likely
     /// fast enough to not require further caching of the
-    /// DocIdSet for each possible combination of terms.
+    /// <see cref="DocIdSet"/> for each possible combination of terms.
     /// However, because docIDs are simply scanned linearly, an
     /// index with a great many small documents may find this
     /// linear scan too costly.
     ///
-    /// <p/>
+    /// <para/>
     ///
-    /// In contrast, TermsFilter builds up an <seealso cref="FixedBitSet"/>,
+    /// In contrast, TermsFilter builds up a <see cref="FixedBitSet"/>,
     /// keyed by docID, every time it's created, by enumerating
-    /// through all matching docs using <seealso cref="DocsEnum"/> to seek
+    /// through all matching docs using <see cref="Index.DocsEnum"/> to seek
     /// and scan through each term's docID list.  While there is
     /// no linear scan of all docIDs, besides the allocation of
-    /// the underlying array in the <seealso cref="FixedBitSet"/>, this
+    /// the underlying array in the <see cref="FixedBitSet"/>, this
     /// approach requires a number of "disk seeks" in proportion
     /// to the number of terms, which can be exceptionally costly
     /// when there are cache misses in the OS's IO cache.
     ///
-    /// <p/>
+    /// <para/>
     ///
     /// Generally, this filter will be slower on the first
     /// invocation for a given field, but subsequent invocations,
-    /// even if you change the allowed set of Terms, should be
+    /// even if you change the allowed set of <see cref="Index.Terms"/>, should be
     /// faster than TermsFilter, especially as the number of
-    /// Terms being matched increases.  If you are matching only
+    /// <see cref="Index.Terms"/> being matched increases.  If you are matching only
     /// a very small number of terms, and those terms in turn
     /// match a very small number of documents, TermsFilter may
     /// perform faster.
     ///
-    /// <p/>
+    /// <para/>
     ///
     /// Which filter is best is very application dependent.
     /// </summary>


[19/48] lucenenet git commit: Lucene.Net.Analysis.Stempel: Fixed XML documentation warnings

Posted by ni...@apache.org.
Lucene.Net.Analysis.Stempel: Fixed XML documentation warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/bed2088e
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/bed2088e
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/bed2088e

Branch: refs/heads/master
Commit: bed2088e2d2f8c9a79075ea23e29e6b6d5afb2ab
Parents: 6bde1ef
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Sun Jun 4 03:59:52 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Sun Jun 4 03:59:52 2017 +0700

----------------------------------------------------------------------
 src/Lucene.Net.Analysis.Stempel/Pl/PolishAnalyzer.cs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/bed2088e/src/Lucene.Net.Analysis.Stempel/Pl/PolishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Stempel/Pl/PolishAnalyzer.cs b/src/Lucene.Net.Analysis.Stempel/Pl/PolishAnalyzer.cs
index aba1488..4de2098 100644
--- a/src/Lucene.Net.Analysis.Stempel/Pl/PolishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Stempel/Pl/PolishAnalyzer.cs
@@ -141,11 +141,11 @@ namespace Lucene.Net.Analysis.Pl
         }
 
         /// <summary>
-        /// Creates a <see cref="Analyzer.TokenStreamComponents"/>
+        /// Creates a <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns>
-        /// A <see cref="Analyzer.TokenStreamComponents"/> built from an <see cref="StandardTokenizer"/>
+        /// A <see cref="TokenStreamComponents"/> built from an <see cref="StandardTokenizer"/>
         /// filtered with <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>, 
        /// <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is provided and <see cref="StempelFilter"/>.
         /// </returns>


[17/48] lucenenet git commit: Lucene.Net.Suggest: Fixed XML documentation warnings

Posted by ni...@apache.org.
Lucene.Net.Suggest: Fixed XML documentation warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/864dcf73
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/864dcf73
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/864dcf73

Branch: refs/heads/master
Commit: 864dcf735776cc48d24e519488469e8be90f4da3
Parents: 93eef42
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Sun Jun 4 03:46:46 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Sun Jun 4 03:46:46 2017 +0700

----------------------------------------------------------------------
 .../Suggest/Analyzing/AnalyzingInfixSuggester.cs        |  4 ++--
 .../Suggest/Analyzing/FreeTextSuggester.cs              |  6 +++---
 .../Suggest/Jaspell/JaspellTernarySearchTrie.cs         | 12 ++++++------
 3 files changed, 11 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/864dcf73/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingInfixSuggester.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingInfixSuggester.cs b/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingInfixSuggester.cs
index 344628f..edd912e 100644
--- a/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingInfixSuggester.cs
+++ b/src/Lucene.Net.Suggest/Suggest/Analyzing/AnalyzingInfixSuggester.cs
@@ -292,7 +292,7 @@ namespace Lucene.Net.Search.Suggest.Analyzing
         /// instead if you want to replace a previous suggestion.
         /// After adding or updating a batch of new suggestions,
         /// you must call <see cref="Refresh()"/> in the end in order to
-        /// see the suggestions in <see cref="DoLookup"/> 
+        /// see the suggestions in <see cref="DoLookup(string, IEnumerable{BytesRef}, int, bool, bool)"/> 
         /// </summary>
         public virtual void Add(BytesRef text, IEnumerable<BytesRef> contexts, long weight, BytesRef payload)
         {
@@ -306,7 +306,7 @@ namespace Lucene.Net.Search.Suggest.Analyzing
         /// this text is not already present you can use <see cref="Add"/> 
         /// instead.  After adding or updating a batch of
         /// new suggestions, you must call <see cref="Refresh()"/> in the
-        /// end in order to see the suggestions in <see cref="DoLookup"/> 
+        /// end in order to see the suggestions in <see cref="DoLookup(string, IEnumerable{BytesRef}, int, bool, bool)"/> 
         /// </summary>
         public virtual void Update(BytesRef text, IEnumerable<BytesRef> contexts, long weight, BytesRef payload)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/864dcf73/src/Lucene.Net.Suggest/Suggest/Analyzing/FreeTextSuggester.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Suggest/Suggest/Analyzing/FreeTextSuggester.cs b/src/Lucene.Net.Suggest/Suggest/Analyzing/FreeTextSuggester.cs
index a28007f..aeeb8be 100644
--- a/src/Lucene.Net.Suggest/Suggest/Analyzing/FreeTextSuggester.cs
+++ b/src/Lucene.Net.Suggest/Suggest/Analyzing/FreeTextSuggester.cs
@@ -39,9 +39,9 @@ namespace Lucene.Net.Search.Suggest.Analyzing
     //   - add pruning of low-freq ngrams?   
 
     /// <summary>
-    /// Builds an ngram model from the text sent to <see cref="Build"/>
+    /// Builds an ngram model from the text sent to <see cref="Build(IInputIterator, double)"/>
     /// and predicts based on the last grams-1 tokens in
-    /// the request sent to <see cref="DoLookup"/>.  This tries to
+    /// the request sent to <see cref="DoLookup(string, IEnumerable{BytesRef}, bool, int)"/>.  This tries to
     /// handle the "long tail" of suggestions for when the
     /// incoming query is a never before seen query string.
     /// 
@@ -61,7 +61,7 @@ namespace Lucene.Net.Search.Suggest.Analyzing
     /// "Large language models in machine translation"</a> for details.
     /// 
     /// </para>
-    /// <para> From <see cref="DoLookup"/>, the key of each result is the
+    /// <para> From <see cref="DoLookup(string, IEnumerable{BytesRef}, bool, int)"/>, the key of each result is the
     /// ngram token; the value is <see cref="long.MaxValue"/> * score (fixed
     /// point, cast to long).  Divide by <see cref="long.MaxValue"/> to get
     /// the score back, which ranges from 0.0 to 1.0.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/864dcf73/src/Lucene.Net.Suggest/Suggest/Jaspell/JaspellTernarySearchTrie.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Suggest/Suggest/Jaspell/JaspellTernarySearchTrie.cs b/src/Lucene.Net.Suggest/Suggest/Jaspell/JaspellTernarySearchTrie.cs
index 29e5276..7ed60d1 100644
--- a/src/Lucene.Net.Suggest/Suggest/Jaspell/JaspellTernarySearchTrie.cs
+++ b/src/Lucene.Net.Suggest/Suggest/Jaspell/JaspellTernarySearchTrie.cs
@@ -168,13 +168,13 @@ namespace Lucene.Net.Search.Suggest.Jaspell
         */
 
         /// <summary>
-        /// The default number of values returned by the <see cref="MatchAlmost"/>
+        /// The default number of values returned by the <see cref="MatchAlmost(string, int)"/>
         /// method.
         /// </summary>
         private int defaultNumReturnValues = -1;
 
         /// <summary>
-        /// the number of differences allowed in a call to the <see cref="MatchAlmost"/>
+        /// the number of differences allowed in a call to the <see cref="MatchAlmost(string, int)"/>
         /// <c>key</c>.
         /// </summary>
         private int matchAlmostDiff;
@@ -680,7 +680,7 @@ namespace Lucene.Net.Search.Suggest.Jaspell
         /// target key, where diff is equal to the last value set
         /// to the <see cref="MatchAlmostDiff"/> property.
         /// <para>
-        /// If the <see cref="MatchAlmost"/> method is called before the
+        /// If the <see cref="MatchAlmost(string, int)"/> method is called before the
         /// <see cref="MatchAlmostDiff"/> property has been called for the first time,
         /// then diff = 0.
         /// 
@@ -700,7 +700,7 @@ namespace Lucene.Net.Search.Suggest.Jaspell
         /// target key, where diff is equal to the last value set
         /// to the <see cref="MatchAlmostDiff"/> property.
         /// <para>
-        /// If the <see cref="MatchAlmost"/> method is called before the
+        /// If the <see cref="MatchAlmost(string, int)"/> method is called before the
         /// <see cref="MatchAlmostDiff"/> property has been called for the first time,
         /// then diff = 0.
         /// 
@@ -899,7 +899,7 @@ namespace Lucene.Net.Search.Suggest.Jaspell
 
         /// <summary>
         /// Sets the number of characters by which words can differ from target word
-        /// when calling the <see cref="MatchAlmost"/> method.
+        /// when calling the <see cref="MatchAlmost(string, int)"/> method.
         /// <para>
         /// Arguments less than 0 will set the char difference to 0, and arguments
         /// greater than 3 will set the char difference to 3.
@@ -931,7 +931,7 @@ namespace Lucene.Net.Search.Suggest.Jaspell
 
         /// <summary>
         /// Sets the default maximum number of values returned from the
-        /// <see cref="MatchPrefix"/> and <see cref="MatchAlmost"/> methods.
+        /// <see cref="MatchPrefix(string, int)"/> and <see cref="MatchAlmost(string, int)"/> methods.
         /// <para>
        /// The value should be set to -1 to get an unlimited number of return
        /// values. Note that the methods mentioned above provide overloaded versions


[34/48] lucenenet git commit: Lucene.Net.Codecs: Fixed XML documentation comments (excluding sub-namespaces)

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/FieldsProducer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/FieldsProducer.cs b/src/Lucene.Net/Codecs/FieldsProducer.cs
index d2b8f8a..5b9067b 100644
--- a/src/Lucene.Net/Codecs/FieldsProducer.cs
+++ b/src/Lucene.Net/Codecs/FieldsProducer.cs
@@ -23,39 +23,45 @@ namespace Lucene.Net.Codecs
 
     /// <summary>
     /// Abstract API that produces terms, doc, freq, prox, offset and
-    ///  payloads postings.
-    ///
+    /// payloads postings.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
-
     public abstract class FieldsProducer : Fields, IDisposable
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal FieldsProducer()
         {
         }
 
         // LUCENENET specific - implementing proper dispose pattern
+        /// <summary>
+        /// Disposes all resources used by this object.
+        /// </summary>
         public void Dispose()
         {
             Dispose(true);
             GC.SuppressFinalize(this);
         }
 
+        /// <summary>
+        /// Implementations must override and should dispose all resources used by this instance.
+        /// </summary>
         protected abstract void Dispose(bool disposing);
 
         /// <summary>
-        /// Returns approximate RAM bytes used </summary>
+        /// Returns approximate RAM bytes used. </summary>
         public abstract long RamBytesUsed();
 
         /// <summary>
         /// Checks consistency of this reader.
-        /// <p>
+        /// <para/>
         /// Note that this may be costly in terms of I/O, e.g.
         /// may involve computing a checksum value against large data files.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public abstract void CheckIntegrity();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/FilterCodec.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/FilterCodec.cs b/src/Lucene.Net/Codecs/FilterCodec.cs
index 81168f9..709545f 100644
--- a/src/Lucene.Net/Codecs/FilterCodec.cs
+++ b/src/Lucene.Net/Codecs/FilterCodec.cs
@@ -19,30 +19,32 @@ namespace Lucene.Net.Codecs
 
     /// <summary>
     /// A codec that forwards all its method calls to another codec.
-    /// <p>
+    /// <para/>
     /// Extend this class when you need to reuse the functionality of an existing
     /// codec. For example, if you want to build a codec that redefines Lucene46's
-    /// <seealso cref="LiveDocsFormat"/>:
-    /// <pre class="prettyprint">
-    ///   public final class CustomCodec extends FilterCodec {
+    /// <see cref="Codecs.LiveDocsFormat"/>:
+    /// <code>
+    ///     public sealed class CustomCodec : FilterCodec 
+    ///     {
+    ///         public CustomCodec()
+    ///             : base("CustomCodec", new Lucene46Codec())
+    ///         {
+    ///         }
     ///
-    ///     public CustomCodec() {
-    ///       super("CustomCodec", new Lucene46Codec());
+    ///         public override LiveDocsFormat LiveDocsFormat 
+    ///         {
+    ///             get { return new CustomLiveDocsFormat(); }
+    ///         }
     ///     }
+    /// </code>
     ///
-    ///     public LiveDocsFormat liveDocsFormat() {
-    ///       return new CustomLiveDocsFormat();
-    ///     }
-    ///
-    ///   }
-    /// </pre>
-    ///
-    /// <p><em>Please note:</em> Don't call <seealso cref="Codec#forName"/> from
-    /// the no-arg constructor of your own codec. When the SPI framework
-    /// loads your own Codec as SPI component, SPI has not yet fully initialized!
-    /// If you want to extend another Codec, instantiate it directly by calling
+    /// <para/>
+    /// <em>Please note:</em> Don't call <see cref="Codec.ForName(string)"/> from
+    /// the no-arg constructor of your own codec. When the <see cref="DefaultCodecFactory"/>
+    /// loads your own <see cref="Codec"/>, the <see cref="DefaultCodecFactory"/> has not yet fully initialized!
+    /// If you want to extend another <see cref="Codec"/>, instantiate it directly by calling
     /// its constructor.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public abstract class FilterCodec : Codec

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/LiveDocsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/LiveDocsFormat.cs b/src/Lucene.Net/Codecs/LiveDocsFormat.cs
index 3b5ba40..fb377a6 100644
--- a/src/Lucene.Net/Codecs/LiveDocsFormat.cs
+++ b/src/Lucene.Net/Codecs/LiveDocsFormat.cs
@@ -26,14 +26,15 @@ namespace Lucene.Net.Codecs
     using SegmentCommitInfo = Lucene.Net.Index.SegmentCommitInfo;
 
     /// <summary>
-    /// Format for live/deleted documents
+    /// Format for live/deleted documents.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public abstract class LiveDocsFormat
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal LiveDocsFormat()
         {
@@ -52,14 +53,14 @@ namespace Lucene.Net.Codecs
         public abstract IBits ReadLiveDocs(Directory dir, SegmentCommitInfo info, IOContext context);
 
         /// <summary>
-        /// Persist live docs bits.  Use {@link
-        ///  SegmentCommitInfo#getNextDelGen} to determine the
-        ///  generation of the deletes file you should write to.
+        /// Persist live docs bits.  Use 
+        /// <see cref="SegmentCommitInfo.NextDelGen"/> to determine the
+        /// generation of the deletes file you should write to.
         /// </summary>
         public abstract void WriteLiveDocs(IMutableBits bits, Directory dir, SegmentCommitInfo info, int newDelCount, IOContext context);
 
         /// <summary>
-        /// Records all files in use by this <seealso cref="SegmentCommitInfo"/> into the files argument. </summary>
+        /// Records all files in use by this <see cref="SegmentCommitInfo"/> into the files argument. </summary>
         public abstract void Files(SegmentCommitInfo info, ICollection<string> files);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsFormat.cs b/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsFormat.cs
index 8b0c6ea..e591999 100644
--- a/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsFormat.cs
@@ -35,10 +35,10 @@ namespace Lucene.Net.Codecs.Lucene41
     /// <ul>
     ///   <li>
     ///   <b>Packed Blocks and VInt Blocks</b>:
-    ///   <p>In packed blocks, integers are encoded with the same bit width (<seealso cref="PackedInts packed format"/>):
+    ///   <p>In packed blocks, integers are encoded with the same bit width packed format (<see cref="Util.Packed.PackedInt32s"/>):
     ///      the block size (i.e. number of integers inside block) is fixed (currently 128). Additionally blocks
     ///      that are all the same value are encoded in an optimized way.</p>
-    ///   <p>In VInt blocks, integers are encoded as <seealso cref="DataOutput#writeVInt VInt"/>:
+    ///   <p>In VInt blocks, integers are encoded as VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>):
     ///      the block size is variable.</p>
     ///   </li>
     ///

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/MappingMultiDocsAndPositionsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/MappingMultiDocsAndPositionsEnum.cs b/src/Lucene.Net/Codecs/MappingMultiDocsAndPositionsEnum.cs
index 15daee5..5c4d346 100644
--- a/src/Lucene.Net/Codecs/MappingMultiDocsAndPositionsEnum.cs
+++ b/src/Lucene.Net/Codecs/MappingMultiDocsAndPositionsEnum.cs
@@ -28,7 +28,7 @@ namespace Lucene.Net.Codecs
     /// <summary>
     /// Exposes flex API, merged from flex API of sub-segments,
     /// remapping docIDs (this is used for segment merging).
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public sealed class MappingMultiDocsAndPositionsEnum : DocsAndPositionsEnum
@@ -58,8 +58,8 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Sets the <seealso cref="MergeState"/>, which is used to re-map
-        ///  document IDs.
+        /// Gets or Sets the <see cref="Index.MergeState"/>, which is used to re-map
+        /// document IDs.
         /// </summary>
         public MergeState MergeState
         {
@@ -75,7 +75,7 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// How many sub-readers we are merging. </summary>
-        ///  <seealso cref= #getSubs  </seealso>
+        /// <seealso cref="Subs"/>
         public int NumSubs
         {
             get

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/MappingMultiDocsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/MappingMultiDocsEnum.cs b/src/Lucene.Net/Codecs/MappingMultiDocsEnum.cs
index b16b2ab..8074346 100644
--- a/src/Lucene.Net/Codecs/MappingMultiDocsEnum.cs
+++ b/src/Lucene.Net/Codecs/MappingMultiDocsEnum.cs
@@ -28,7 +28,7 @@ namespace Lucene.Net.Codecs
     /// <summary>
     /// Exposes flex API, merged from flex API of sub-segments,
     /// remapping docIDs (this is used for segment merging).
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public sealed class MappingMultiDocsEnum : DocsEnum
@@ -58,8 +58,8 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Sets the <seealso cref="MergeState"/>, which is used to re-map
-        ///  document IDs.
+        /// Sets the <see cref="MergeState"/>, which is used to re-map
+        /// document IDs.
         /// </summary>
         public MergeState MergeState
         {
@@ -75,7 +75,7 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// How many sub-readers we are merging. </summary>
-        ///  <seealso cref= #getSubs  </seealso>
+        /// <seealso cref="Subs"/>
         public int NumSubs
         {
             get

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/MultiLevelSkipListReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/MultiLevelSkipListReader.cs b/src/Lucene.Net/Codecs/MultiLevelSkipListReader.cs
index 0399aaa..63d25f9 100644
--- a/src/Lucene.Net/Codecs/MultiLevelSkipListReader.cs
+++ b/src/Lucene.Net/Codecs/MultiLevelSkipListReader.cs
@@ -25,20 +25,20 @@ namespace Lucene.Net.Codecs
     using MathUtil = Lucene.Net.Util.MathUtil;
 
     /// <summary>
-    /// this abstract class reads skip lists with multiple levels.
-    ///
-    /// See <seealso cref="MultiLevelSkipListWriter"/> for the information about the encoding
+    /// This abstract class reads skip lists with multiple levels.
+    /// <para/>
+    /// See <see cref="MultiLevelSkipListWriter"/> for the information about the encoding
     /// of the multi level skip lists.
-    ///
-    /// Subclasses must implement the abstract method <seealso cref="#readSkipData(int, IndexInput)"/>
+    /// <para/>
+    /// Subclasses must implement the abstract method <see cref="ReadSkipData(int, IndexInput)"/>
     /// which defines the actual format of the skip data.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
-
     public abstract class MultiLevelSkipListReader : IDisposable
     {
         /// <summary>
-        /// the maximum number of skip levels possible for this index </summary>
+        /// The maximum number of skip levels possible for this index. </summary>
         protected internal int m_maxNumberOfSkipLevels;
 
         // number of levels in this skip list
@@ -57,7 +57,7 @@ namespace Lucene.Net.Codecs
         private bool haveSkipped;
 
         /// <summary>
-        /// skipStream for each level. </summary>
+        /// SkipStream for each level. </summary>
         private IndexInput[] skipStream;
 
         /// <summary>
@@ -65,7 +65,7 @@ namespace Lucene.Net.Codecs
         private long[] skipPointer;
 
         /// <summary>
-        ///  skipInterval of each level. </summary>
+        /// SkipInterval of each level. </summary>
         private int[] skipInterval;
 
         /// <summary>
@@ -86,7 +86,7 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// childPointer of last read skip entry with docId &lt;=
-        ///  target.
+        /// target.
         /// </summary>
         private long lastChildPointer;
 
@@ -94,7 +94,7 @@ namespace Lucene.Net.Codecs
         private readonly int skipMultiplier;
 
         /// <summary>
-        /// Creates a {@code MultiLevelSkipListReader}. </summary>
+        /// Creates a <see cref="MultiLevelSkipListReader"/>. </summary>
         protected MultiLevelSkipListReader(IndexInput skipStream, int maxSkipLevels, int skipInterval, int skipMultiplier)
         {
             this.skipStream = new IndexInput[maxSkipLevels];
@@ -116,9 +116,9 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Creates a {@code MultiLevelSkipListReader}, where
-        ///  {@code skipInterval} and {@code skipMultiplier} are
-        ///  the same.
+        /// Creates a <see cref="MultiLevelSkipListReader"/>, where
+        /// <see cref="skipInterval"/> and <see cref="skipMultiplier"/> are
+        /// the same.
         /// </summary>
         protected internal MultiLevelSkipListReader(IndexInput skipStream, int maxSkipLevels, int skipInterval)
             : this(skipStream, maxSkipLevels, skipInterval, skipInterval)
@@ -126,8 +126,8 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Returns the id of the doc to which the last call of <seealso cref="#skipTo(int)"/>
-        ///  has skipped.
+        /// Returns the id of the doc to which the last call of <see cref="SkipTo(int)"/>
+        /// has skipped.
         /// </summary>
         public virtual int Doc
         {
@@ -139,7 +139,7 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// Skips entries to the first beyond the current whose document number is
-        ///  greater than or equal to <i>target</i>. Returns the current doc count.
+        /// greater than or equal to <paramref name="target"/>. Returns the current doc count.
         /// </summary>
         public virtual int SkipTo(int target)
         {
@@ -213,7 +213,7 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Seeks the skip entry on the given level </summary>
+        /// Seeks the skip entry on the given level. </summary>
         protected virtual void SeekChild(int level)
         {
             skipStream[level].Seek(lastChildPointer);
@@ -225,7 +225,10 @@ namespace Lucene.Net.Codecs
             }
         }
 
-        public void Dispose()
+        /// <summary>
+        /// Disposes all resources used by this object.
+        /// </summary>
+        public void Dispose() // LUCENENET TODO: API - implement dispose pattern
         {
             for (int i = 1; i < skipStream.Length; i++)
             {
@@ -255,7 +258,7 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Loads the skip levels </summary>
+        /// Loads the skip levels. </summary>
         private void LoadSkipLevels()
         {
             if (docCount <= skipInterval[0])
@@ -310,12 +313,12 @@ namespace Lucene.Net.Codecs
         /// <summary>
         /// Subclasses must implement the actual skip data encoding in this method.
         /// </summary>
-        /// <param name="level"> the level skip data shall be read from </param>
-        /// <param name="skipStream"> the skip stream to read from </param>
+        /// <param name="level"> The level skip data shall be read from. </param>
+        /// <param name="skipStream"> The skip stream to read from. </param>
         protected abstract int ReadSkipData(int level, IndexInput skipStream);
 
         /// <summary>
-        /// Copies the values of the last read skip entry on this <paramref name="level"/> </summary>
+        /// Copies the values of the last read skip entry on this <paramref name="level"/>. </summary>
         protected virtual void SetLastSkipData(int level)
         {
             lastDoc = m_skipDoc[level];
@@ -323,7 +326,7 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// used to buffer the top skip levels </summary>
+        /// Used to buffer the top skip levels. </summary>
         private sealed class SkipBuffer : IndexInput
         {
             private byte[] data;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/MultiLevelSkipListWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/MultiLevelSkipListWriter.cs b/src/Lucene.Net/Codecs/MultiLevelSkipListWriter.cs
index d7fe18b..2c5e960 100644
--- a/src/Lucene.Net/Codecs/MultiLevelSkipListWriter.cs
+++ b/src/Lucene.Net/Codecs/MultiLevelSkipListWriter.cs
@@ -24,9 +24,9 @@ namespace Lucene.Net.Codecs
     using RAMOutputStream = Lucene.Net.Store.RAMOutputStream;
 
     /// <summary>
-    /// this abstract class writes skip lists with multiple levels.
+    /// This abstract class writes skip lists with multiple levels.
     ///
-    /// <pre>
+    /// <code>
     ///
     /// Example for skipInterval = 3:
     ///                                                     c            (skip level 2)
@@ -47,30 +47,30 @@ namespace Lucene.Net.Codecs
     ///
     /// While this class takes care of writing the different skip levels,
     /// subclasses must define the actual format of the skip data.
-    /// </pre>
+    /// </code>
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
-
     public abstract class MultiLevelSkipListWriter
     {
         /// <summary>
-        /// number of levels in this skip list </summary>
+        /// Number of levels in this skip list. </summary>
         protected internal int m_numberOfSkipLevels;
 
         /// <summary>
-        /// the skip interval in the list with level = 0 </summary>
+        /// The skip interval in the list with level = 0. </summary>
         private int skipInterval;
 
         /// <summary>
-        /// skipInterval used for level &gt; 0 </summary>
+        /// SkipInterval used for level &gt; 0. </summary>
         private int skipMultiplier;
 
         /// <summary>
-        /// for every skip level a different buffer is used </summary>
+        /// For every skip level a different buffer is used. </summary>
         private RAMOutputStream[] skipBuffer;
 
         /// <summary>
-        /// Creates a {@code MultiLevelSkipListWriter}. </summary>
+        /// Creates a <see cref="MultiLevelSkipListWriter"/>. </summary>
         protected MultiLevelSkipListWriter(int skipInterval, int skipMultiplier, int maxSkipLevels, int df)
         {
             this.skipInterval = skipInterval;
@@ -94,9 +94,9 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Creates a {@code MultiLevelSkipListWriter}, where
-        ///  {@code skipInterval} and {@code skipMultiplier} are
-        ///  the same.
+        /// Creates a <see cref="MultiLevelSkipListWriter"/>, where
+        /// <see cref="skipInterval"/> and <see cref="skipMultiplier"/> are
+        /// the same.
         /// </summary>
         protected MultiLevelSkipListWriter(int skipInterval, int maxSkipLevels, int df)
             : this(skipInterval, skipInterval, maxSkipLevels, df)
@@ -115,7 +115,7 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Creates new buffers or empties the existing ones </summary>
+        /// Creates new buffers or empties the existing ones. </summary>
         public virtual void ResetSkip()
         {
             if (skipBuffer == null)
@@ -134,16 +134,16 @@ namespace Lucene.Net.Codecs
         /// <summary>
         /// Subclasses must implement the actual skip data encoding in this method.
         /// </summary>
-        /// <param name="level"> the level skip data shall be writing for </param>
-        /// <param name="skipBuffer"> the skip buffer to write to </param>
+        /// <param name="level"> The level skip data shall be writing for. </param>
+        /// <param name="skipBuffer"> The skip buffer to write to. </param>
         protected abstract void WriteSkipData(int level, IndexOutput skipBuffer);
 
         /// <summary>
         /// Writes the current skip data to the buffers. The current document frequency determines
        /// the max level the skip data is to be written to.
         /// </summary>
-        /// <param name="df"> the current document frequency </param>
-        /// <exception cref="IOException"> If an I/O error occurs </exception>
+        /// <param name="df"> The current document frequency. </param>
+        /// <exception cref="System.IO.IOException"> If an I/O error occurs. </exception>
         public virtual void BufferSkip(int df)
         {
             Debug.Assert(df % skipInterval == 0);
@@ -179,8 +179,8 @@ namespace Lucene.Net.Codecs
         /// <summary>
         /// Writes the buffered skip lists to the given output.
         /// </summary>
-        /// <param name="output"> the IndexOutput the skip lists shall be written to </param>
-        /// <returns> the pointer the skip list starts </returns>
+        /// <param name="output"> The <see cref="IndexOutput"/> the skip lists shall be written to. </param>
+        /// <returns> The pointer the skip list starts. </returns>
         public virtual long WriteSkip(IndexOutput output)
         {
             long skipPointer = output.GetFilePointer();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/NormsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/NormsFormat.cs b/src/Lucene.Net/Codecs/NormsFormat.cs
index 0bb97a9..c295444 100644
--- a/src/Lucene.Net/Codecs/NormsFormat.cs
+++ b/src/Lucene.Net/Codecs/NormsFormat.cs
@@ -27,26 +27,26 @@ namespace Lucene.Net.Codecs
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal NormsFormat()
         {
         }
 
         /// <summary>
-        /// Returns a <seealso cref="DocValuesConsumer"/> to write norms to the
-        ///  index.
+        /// Returns a <see cref="DocValuesConsumer"/> to write norms to the
+        /// index.
         /// </summary>
         public abstract DocValuesConsumer NormsConsumer(SegmentWriteState state);
 
         /// <summary>
-        /// Returns a <seealso cref="DocValuesProducer"/> to read norms from the index.
-        /// <p>
+        /// Returns a <see cref="DocValuesProducer"/> to read norms from the index.
+        /// <para/>
         /// NOTE: by the time this call returns, it must hold open any files it will
         /// need to use; else, those files may be deleted. Additionally, required files
         /// may be deleted during the execution of this call before there is a chance
-        /// to open them. Under these circumstances an IOException should be thrown by
-        /// the implementation. IOExceptions are expected and will automatically cause
+        /// to open them. Under these circumstances an <see cref="System.IO.IOException"/> should be thrown by
+        /// the implementation. <see cref="System.IO.IOException"/>s are expected and will automatically cause
         /// a retry of the segment opening logic with the newly revised segments.
         /// </summary>
         public abstract DocValuesProducer NormsProducer(SegmentReadState state);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/PostingsBaseFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/PostingsBaseFormat.cs b/src/Lucene.Net/Codecs/PostingsBaseFormat.cs
index f9b4876..a783fa5 100644
--- a/src/Lucene.Net/Codecs/PostingsBaseFormat.cs
+++ b/src/Lucene.Net/Codecs/PostingsBaseFormat.cs
@@ -21,9 +21,9 @@ namespace Lucene.Net.Codecs
     using SegmentWriteState = Lucene.Net.Index.SegmentWriteState;
 
     /// <summary>
-    /// Provides a <seealso cref="PostingsReaderBase"/> and {@link
-    /// PostingsWriterBase}.
-    ///
+    /// Provides a <see cref="Codecs.PostingsReaderBase"/> and 
+    /// <see cref="Codecs.PostingsWriterBase"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 
@@ -37,7 +37,7 @@ namespace Lucene.Net.Codecs
     {
         /// <summary>
         /// Unique name that's used to retrieve this codec when
-        ///  reading the index
+        /// reading the index.
         /// </summary>
         public string Name { get; private set; }
 
@@ -49,14 +49,14 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Creates the <seealso cref="PostingsReaderBase"/> for this
-        ///  format.
+        /// Creates the <see cref="Codecs.PostingsReaderBase"/> for this
+        /// format.
         /// </summary>
         public abstract PostingsReaderBase PostingsReaderBase(SegmentReadState state);
 
         /// <summary>
-        /// Creates the <seealso cref="PostingsWriterBase"/> for this
-        ///  format.
+        /// Creates the <see cref="Codecs.PostingsWriterBase"/> for this
+        /// format.
         /// </summary>
         public abstract PostingsWriterBase PostingsWriterBase(SegmentWriteState state);
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/PostingsConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/PostingsConsumer.cs b/src/Lucene.Net/Codecs/PostingsConsumer.cs
index b8615a7..c4dedfc 100644
--- a/src/Lucene.Net/Codecs/PostingsConsumer.cs
+++ b/src/Lucene.Net/Codecs/PostingsConsumer.cs
@@ -29,29 +29,29 @@ namespace Lucene.Net.Codecs
 
     /// <summary>
     /// Abstract API that consumes postings for an individual term.
-    /// <p>
+    /// <para/>
     /// The lifecycle is:
-    /// <ol>
-    ///    <li>PostingsConsumer is returned for each term by
-    ///        <seealso cref="TermsConsumer#startTerm(BytesRef)"/>.
-    ///    <li><seealso cref="#startDoc(int, int)"/> is called for each
+    /// <list type="number">
+    ///    <item><description>PostingsConsumer is returned for each term by
+    ///        <see cref="TermsConsumer.StartTerm(BytesRef)"/>.</description></item>
+    ///    <item><description><see cref="StartDoc(int, int)"/> is called for each
     ///        document where the term occurs, specifying id
-    ///        and term frequency for that document.
-    ///    <li>If positions are enabled for the field, then
-    ///        <seealso cref="#addPosition(int, BytesRef, int, int)"/>
+    ///        and term frequency for that document.</description></item>
+    ///    <item><description>If positions are enabled for the field, then
+    ///        <see cref="AddPosition(int, BytesRef, int, int)"/>
     ///        will be called for each occurrence in the
-    ///        document.
-    ///    <li><seealso cref="#finishDoc()"/> is called when the producer
-    ///        is done adding positions to the document.
-    /// </ol>
-    ///
+    ///        document.</description></item>
+    ///    <item><description><see cref="FinishDoc()"/> is called when the producer
+    ///        is done adding positions to the document.</description></item>
+    /// </list>
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public abstract class PostingsConsumer
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal PostingsConsumer()
         {
@@ -59,30 +59,30 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// Adds a new doc in this term.
-        /// <code>freq</code> will be -1 when term frequencies are omitted
+        /// <paramref name="freq"/> will be -1 when term frequencies are omitted
         /// for the field.
         /// </summary>
         public abstract void StartDoc(int docId, int freq);
 
         /// <summary>
-        /// Add a new position & payload, and start/end offset.  A
-        ///  null payload means no payload; a non-null payload with
-        ///  zero length also means no payload.  Caller may reuse
-        ///  the <seealso cref="BytesRef"/> for the payload between calls
-        ///  (method must fully consume the payload). <code>startOffset</code>
-        ///  and <code>endOffset</code> will be -1 when offsets are not indexed.
+        /// Add a new position &amp; payload, and start/end offset.  A
+        /// <c>null</c> <paramref name="payload"/> means no payload; a non-<c>null</c> <paramref name="payload"/> with
+        /// zero length also means no payload.  Caller may reuse
+        /// the <see cref="BytesRef"/> for the <paramref name="payload"/> between calls
+        /// (method must fully consume the payload). <paramref name="startOffset"/>
+        /// and <paramref name="endOffset"/> will be -1 when offsets are not indexed.
         /// </summary>
         public abstract void AddPosition(int position, BytesRef payload, int startOffset, int endOffset);
 
         /// <summary>
-        /// Called when we are done adding positions & payloads
-        ///  for each doc.
+        /// Called when we are done adding positions &amp; payloads
+        /// for each doc.
         /// </summary>
         public abstract void FinishDoc();
 
         /// <summary>
         /// Default merge impl: append documents, mapping around
-        ///  deletes
+        /// deletes.
         /// </summary>
         public virtual TermStats Merge(MergeState mergeState, IndexOptions indexOptions, DocsEnum postings, FixedBitSet visitedDocs)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/PostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/PostingsFormat.cs b/src/Lucene.Net/Codecs/PostingsFormat.cs
index bc34e65..2ea5dba 100644
--- a/src/Lucene.Net/Codecs/PostingsFormat.cs
+++ b/src/Lucene.Net/Codecs/PostingsFormat.cs
@@ -28,7 +28,7 @@ namespace Lucene.Net.Codecs
     /// Note, when extending this class, the name (<see cref="Name"/>) may be
     /// written into the index in certain configurations. In order for the segment
     /// to be read, the name must resolve to your implementation via <see cref="ForName(string)"/>.
-    /// this method uses <see cref="IPostingsFormatFactory.GetPostingsFormat(string)"/> to resolve format names.
+    /// This method uses <see cref="IPostingsFormatFactory.GetPostingsFormat(string)"/> to resolve format names.
     /// <para/>
     /// If you implement your own format:
     /// <list type="number">
@@ -105,14 +105,14 @@ namespace Lucene.Net.Codecs
         /// (such as when using <see cref="PerField.PerFieldPostingsFormat"/>): in such configurations,
         /// for the segment to be read this class should be registered by subclassing <see cref="DefaultPostingsFormatFactory"/> and
         /// calling <see cref="DefaultPostingsFormatFactory.ScanForPostingsFormats(System.Reflection.Assembly)"/> in the class constructor. 
-        /// The new <see cref="IPostingsFormatFactory"/> can be registered by calling <see cref="SetPostingsFormatFactory"/> at application startup.</summary>
+        /// The new <see cref="IPostingsFormatFactory"/> can be registered by calling <see cref="SetPostingsFormatFactory(IPostingsFormatFactory)"/> at application startup.</summary>
         protected PostingsFormat()
         {
             this.name = NamedServiceFactory<PostingsFormat>.GetServiceName(this.GetType());
         }
 
         /// <summary>
-        /// Returns this posting format's name </summary>
+        /// Returns this posting format's name. </summary>
         public string Name
         {
             get
@@ -122,7 +122,7 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Writes a new segment </summary>
+        /// Writes a new segment. </summary>
         public abstract FieldsConsumer FieldsConsumer(SegmentWriteState state);
 
         /// <summary>
@@ -131,8 +131,8 @@ namespace Lucene.Net.Codecs
         /// use; else, those files may be deleted.
         /// Additionally, required files may be deleted during the execution of
         /// this call before there is a chance to open them. Under these
-        /// circumstances an IOException should be thrown by the implementation.
-        /// IOExceptions are expected and will automatically cause a retry of the
+        /// circumstances an <see cref="System.IO.IOException"/> should be thrown by the implementation.
+        /// <see cref="System.IO.IOException"/>s are expected and will automatically cause a retry of the
         /// segment opening logic with the newly revised segments.
         /// </summary>
         public abstract FieldsProducer FieldsProducer(SegmentReadState state);
@@ -143,14 +143,14 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// looks up a format by name </summary>
+        /// Looks up a format by name. </summary>
         public static PostingsFormat ForName(string name)
         {
             return postingsFormatFactory.GetPostingsFormat(name);
         }
 
         /// <summary>
-        /// returns a list of all available format names </summary>
+        /// Returns a list of all available format names. </summary>
         public static ICollection<string> AvailablePostingsFormats()
         {
             if (postingsFormatFactory is IServiceListable)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/PostingsReaderBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/PostingsReaderBase.cs b/src/Lucene.Net/Codecs/PostingsReaderBase.cs
index 8a5688b..38cdb48 100644
--- a/src/Lucene.Net/Codecs/PostingsReaderBase.cs
+++ b/src/Lucene.Net/Codecs/PostingsReaderBase.cs
@@ -29,14 +29,15 @@ namespace Lucene.Net.Codecs
 
     /// <summary>
     /// The core terms dictionaries (BlockTermsReader,
-    ///  BlockTreeTermsReader) interact with a single instance
-    ///  of this class to manage creation of <seealso cref="DocsEnum"/> and
-    ///  <seealso cref="DocsAndPositionsEnum"/> instances.  It provides an
-    ///  IndexInput (termsIn) where this class may read any
-    ///  previously stored data that it had written in its
-    ///  corresponding <seealso cref="PostingsWriterBase"/> at indexing
-    ///  time.
-    ///  @lucene.experimental
+    /// <see cref="BlockTreeTermsReader"/>) interact with a single instance
+    /// of this class to manage creation of <see cref="DocsEnum"/> and
+    /// <see cref="DocsAndPositionsEnum"/> instances.  It provides an
+    /// <see cref="IndexInput"/> (termsIn) where this class may read any
+    /// previously stored data that it had written in its
+    /// corresponding <see cref="PostingsWriterBase"/> at indexing
+    /// time.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
 
     // TODO: find a better name; this defines the API that the
@@ -46,7 +47,7 @@ namespace Lucene.Net.Codecs
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal PostingsReaderBase()
         {
@@ -54,51 +55,58 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// Performs any initialization, such as reading and
-        ///  verifying the header from the provided terms
-        ///  dictionary <seealso cref="IndexInput"/>.
+        /// verifying the header from the provided terms
+        /// dictionary <see cref="IndexInput"/>.
         /// </summary>
         public abstract void Init(IndexInput termsIn);
 
         /// <summary>
-        /// Return a newly created empty TermState </summary>
+        /// Return a newly created empty <see cref="TermState"/>. </summary>
         public abstract BlockTermState NewTermState();
 
         /// <summary>
-        /// Actually decode metadata for next term </summary>
-        ///  <seealso cref= PostingsWriterBase#encodeTerm  </seealso>
+        /// Actually decode metadata for next term. </summary>
+        /// <seealso cref="PostingsWriterBase.EncodeTerm(long[], Store.DataOutput, FieldInfo, BlockTermState, bool)"/>
         public abstract void DecodeTerm(long[] longs, DataInput @in, FieldInfo fieldInfo, BlockTermState state, bool absolute);
 
         /// <summary>
         /// Must fully consume state, since after this call that
-        ///  TermState may be reused.
+        /// <see cref="TermState"/> may be reused.
         /// </summary>
         public abstract DocsEnum Docs(FieldInfo fieldInfo, BlockTermState state, IBits skipDocs, DocsEnum reuse, DocsFlags flags);
 
         /// <summary>
         /// Must fully consume state, since after this call that
-        ///  TermState may be reused.
+        /// <see cref="TermState"/> may be reused.
         /// </summary>
         public abstract DocsAndPositionsEnum DocsAndPositions(FieldInfo fieldInfo, BlockTermState state, IBits skipDocs, DocsAndPositionsEnum reuse, DocsAndPositionsFlags flags);
 
         /// <summary>
-        /// Returns approximate RAM bytes used </summary>
+        /// Returns approximate RAM bytes used. </summary>
         public abstract long RamBytesUsed();
 
         /// <summary>
         /// Checks consistency of this reader.
-        /// <p>
+        /// <para/>
         /// Note that this may be costly in terms of I/O, e.g.
         /// may involve computing a checksum value against large data files.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public abstract void CheckIntegrity();
 
+        /// <summary>
+        /// Disposes all resources used by this object.
+        /// </summary>
         public void Dispose()
         {
             Dispose(true);
             GC.SuppressFinalize(this);
         }
 
+        /// <summary>
+        /// Implementations must override and should dispose all resources used by this instance.
+        /// </summary>
         protected abstract void Dispose(bool disposing);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/PostingsWriterBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/PostingsWriterBase.cs b/src/Lucene.Net/Codecs/PostingsWriterBase.cs
index dfedbda..55b1e33 100644
--- a/src/Lucene.Net/Codecs/PostingsWriterBase.cs
+++ b/src/Lucene.Net/Codecs/PostingsWriterBase.cs
@@ -24,16 +24,17 @@ namespace Lucene.Net.Codecs
     using IndexOutput = Lucene.Net.Store.IndexOutput;
 
     /// <summary>
-    /// Extension of <seealso cref="PostingsConsumer"/> to support pluggable term dictionaries.
-    /// <p>
-    /// this class contains additional hooks to interact with the provided
-    /// term dictionaries such as <seealso cref="BlockTreeTermsWriter"/>. If you want
+    /// Extension of <see cref="PostingsConsumer"/> to support pluggable term dictionaries.
+    /// <para/>
+    /// This class contains additional hooks to interact with the provided
+    /// term dictionaries such as <see cref="BlockTreeTermsWriter"/>. If you want
     /// to re-use an existing implementation and are only interested in
     /// customizing the format of the postings list, extend this class
     /// instead.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// <seealso cref= PostingsReaderBase
-    /// @lucene.experimental </seealso>
+    /// <seealso cref="PostingsReaderBase"/>
     // TODO: find a better name; this defines the API that the
     // terms dict impls use to talk to a postings impl.
     // TermsDict + PostingsReader/WriterBase == PostingsConsumer/Producer
@@ -41,7 +42,7 @@ namespace Lucene.Net.Codecs
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal PostingsWriterBase()
         {
@@ -49,58 +50,64 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// Called once after startup, before any terms have been
-        ///  added.  Implementations typically write a header to
-        ///  the provided {@code termsOut}.
+        /// added.  Implementations typically write a header to
+        /// the provided <paramref name="termsOut"/>.
         /// </summary>
         public abstract void Init(IndexOutput termsOut);
 
         /// <summary>
-        /// Return a newly created empty TermState </summary>
+        /// Return a newly created empty <see cref="Index.TermState"/>. </summary>
         public abstract BlockTermState NewTermState();
 
         /// <summary>
-        /// Start a new term.  Note that a matching call to {@link
-        ///  #finishTerm(BlockTermState)} is done, only if the term has at least one
-        ///  document.
+        /// Start a new term.  Note that a matching call to 
+        /// <see cref="FinishTerm(BlockTermState)"/> is done, only if the term has at least one
+        /// document.
         /// </summary>
         public abstract void StartTerm();
 
         /// <summary>
-        /// Finishes the current term.  The provided {@link
-        ///  BlockTermState} contains the term's summary statistics,
-        ///  and will holds metadata from PBF when returned
+        /// Finishes the current term.  The provided 
+        /// <see cref="BlockTermState"/> contains the term's summary statistics,
+        /// and will hold metadata from PBF when returned.
         /// </summary>
         public abstract void FinishTerm(BlockTermState state);
 
         /// <summary>
-        /// Encode metadata as long[] and byte[]. {@code absolute} controls whether
+        /// Encode metadata as <see cref="T:long[]"/> and <see cref="T:byte[]"/>. <paramref name="absolute"/> controls whether
         /// current term is delta encoded according to latest term.
-        /// Usually elements in {@code longs} are file pointers, so each one always
-        /// increases when a new term is consumed. {@code out} is used to write generic
+        /// Usually elements in <paramref name="longs"/> are file pointers, so each one always
+        /// increases when a new term is consumed. <paramref name="out"/> is used to write generic
         /// bytes, which are not monotonic.
-        ///
-        /// NOTE: sometimes long[] might contain "don't care" values that are unused, e.g.
+        /// <para/>
+        /// NOTE: sometimes <see cref="T:long[]"/> might contain "don't care" values that are unused, e.g.
         /// the pointer to postings list may not be defined for some terms but is defined
-        /// for others, if it is designed to inline  some postings data in term dictionary.
+        /// for others, if it is designed to inline some postings data in term dictionary.
         /// In this case, the postings writer should always use the last value, so that each
-        /// element in metadata long[] remains monotonic.
+        /// element in metadata <see cref="T:long[]"/> remains monotonic.
         /// </summary>
         public abstract void EncodeTerm(long[] longs, DataOutput @out, FieldInfo fieldInfo, BlockTermState state, bool absolute);
 
         /// <summary>
         /// Sets the current field for writing, and returns the
-        /// fixed length of long[] metadata (which is fixed per
+        /// fixed length of <see cref="T:long[]"/> metadata (which is fixed per
         /// field), called when the writing switches to another field.
         /// </summary>
         // TODO: better name?
         public abstract int SetField(FieldInfo fieldInfo);
 
+        /// <summary>
+        /// Disposes all resources used by this object.
+        /// </summary>
         public void Dispose()
         {
             Dispose(true);
             GC.SuppressFinalize(this);
         }
 
+        /// <summary>
+        /// Implementations must override and should dispose all resources used by this instance.
+        /// </summary>
         protected abstract void Dispose(bool disposing);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/SegmentInfoFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/SegmentInfoFormat.cs b/src/Lucene.Net/Codecs/SegmentInfoFormat.cs
index 54f20a6..7e1b785 100644
--- a/src/Lucene.Net/Codecs/SegmentInfoFormat.cs
+++ b/src/Lucene.Net/Codecs/SegmentInfoFormat.cs
@@ -21,30 +21,30 @@ namespace Lucene.Net.Codecs
 
     /// <summary>
     /// Expert: Controls the format of the
-    /// <seealso cref="SegmentInfo"/> (segment metadata file).
-    /// <p>
+    /// <see cref="SegmentInfo"/> (segment metadata file).
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// <seealso cref= SegmentInfo
-    /// @lucene.experimental </seealso>
+    /// <seealso cref="SegmentInfo"/>
     public abstract class SegmentInfoFormat
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal SegmentInfoFormat()
         {
         }
 
         /// <summary>
-        /// Returns the <seealso cref="SegmentInfoReader"/> for reading
-        ///  <seealso cref="SegmentInfo"/> instances.
+        /// Returns the <see cref="Codecs.SegmentInfoReader"/> for reading
+        /// <see cref="SegmentInfo"/> instances.
         /// </summary>
         public abstract SegmentInfoReader SegmentInfoReader { get; }
 
         /// <summary>
-        /// Returns the <seealso cref="SegmentInfoWriter"/> for writing
-        ///  <seealso cref="SegmentInfo"/> instances.
+        /// Returns the <see cref="Codecs.SegmentInfoWriter"/> for writing
+        /// <see cref="SegmentInfo"/> instances.
         /// </summary>
         public abstract SegmentInfoWriter SegmentInfoWriter { get; }
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/SegmentInfoReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/SegmentInfoReader.cs b/src/Lucene.Net/Codecs/SegmentInfoReader.cs
index afa8e4e..4e0a4ba 100644
--- a/src/Lucene.Net/Codecs/SegmentInfoReader.cs
+++ b/src/Lucene.Net/Codecs/SegmentInfoReader.cs
@@ -22,26 +22,27 @@ namespace Lucene.Net.Codecs
     using SegmentInfo = Lucene.Net.Index.SegmentInfo;
 
     /// <summary>
-    /// Specifies an API for classes that can read <seealso cref="SegmentInfo"/> information.
+    /// Specifies an API for classes that can read <see cref="SegmentInfo"/> information.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
-
     public abstract class SegmentInfoReader
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal SegmentInfoReader()
         {
         }
 
         /// <summary>
-        /// Read <seealso cref="SegmentInfo"/> data from a directory. </summary>
-        /// <param name="directory"> directory to read from </param>
-        /// <param name="segmentName"> name of the segment to read </param>
-        /// <returns> infos instance to be populated with data </returns>
-        /// <exception cref="IOException"> If an I/O error occurs </exception>
+        /// Read <see cref="SegmentInfo"/> data from a directory. </summary>
+        /// <param name="directory"> Directory to read from. </param>
+        /// <param name="segmentName"> Name of the segment to read. </param>
+        /// <param name="context"> IO context. </param>
+        /// <returns> Infos instance to be populated with data. </returns>
+        /// <exception cref="System.IO.IOException"> If an I/O error occurs. </exception>
         public abstract SegmentInfo Read(Directory directory, string segmentName, IOContext context);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/SegmentInfoWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/SegmentInfoWriter.cs b/src/Lucene.Net/Codecs/SegmentInfoWriter.cs
index 2ba610a..8a73f8c 100644
--- a/src/Lucene.Net/Codecs/SegmentInfoWriter.cs
+++ b/src/Lucene.Net/Codecs/SegmentInfoWriter.cs
@@ -23,23 +23,23 @@ namespace Lucene.Net.Codecs
     using SegmentInfo = Lucene.Net.Index.SegmentInfo;
 
     /// <summary>
-    /// Specifies an API for classes that can write out <seealso cref="SegmentInfo"/> data.
+    /// Specifies an API for classes that can write out <see cref="SegmentInfo"/> data.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
-
     public abstract class SegmentInfoWriter
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal SegmentInfoWriter()
         {
         }
 
         /// <summary>
-        /// Write <seealso cref="SegmentInfo"/> data. </summary>
-        /// <exception cref="IOException"> If an I/O error occurs </exception>
+        /// Write <see cref="SegmentInfo"/> data. </summary>
+        /// <exception cref="System.IO.IOException"> If an I/O error occurs. </exception>
         public abstract void Write(Directory dir, SegmentInfo info, FieldInfos fis, IOContext ioContext);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/StoredFieldsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/StoredFieldsFormat.cs b/src/Lucene.Net/Codecs/StoredFieldsFormat.cs
index 8db0844..1e65f63 100644
--- a/src/Lucene.Net/Codecs/StoredFieldsFormat.cs
+++ b/src/Lucene.Net/Codecs/StoredFieldsFormat.cs
@@ -23,27 +23,27 @@ namespace Lucene.Net.Codecs
     using SegmentInfo = Lucene.Net.Index.SegmentInfo;
 
     /// <summary>
-    /// Controls the format of stored fields
+    /// Controls the format of stored fields.
     /// </summary>
     public abstract class StoredFieldsFormat
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal StoredFieldsFormat()
         {
         }
 
         /// <summary>
-        /// Returns a <seealso cref="StoredFieldsReader"/> to load stored
-        ///  fields.
+        /// Returns a <see cref="StoredFieldsReader"/> to load stored
+        /// fields.
         /// </summary>
         public abstract StoredFieldsReader FieldsReader(Directory directory, SegmentInfo si, FieldInfos fn, IOContext context);
 
         /// <summary>
-        /// Returns a <seealso cref="StoredFieldsWriter"/> to write stored
-        ///  fields.
+        /// Returns a <see cref="StoredFieldsWriter"/> to write stored
+        /// fields.
         /// </summary>
         public abstract StoredFieldsWriter FieldsWriter(Directory directory, SegmentInfo si, IOContext context);
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/StoredFieldsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/StoredFieldsReader.cs b/src/Lucene.Net/Codecs/StoredFieldsReader.cs
index 5a093b2..969f541 100644
--- a/src/Lucene.Net/Codecs/StoredFieldsReader.cs
+++ b/src/Lucene.Net/Codecs/StoredFieldsReader.cs
@@ -23,46 +23,54 @@ namespace Lucene.Net.Codecs
 
     /// <summary>
     /// Codec API for reading stored fields.
-    /// 
-    /// You need to implement visitDocument(int, StoredFieldVisitor) to
-    /// read the stored fields for a document, implement clone( (creating
-    /// clones of any IndexInputs used, etc), and close() 
-    /// 
+    /// <para/>
+    /// You need to implement <see cref="VisitDocument(int, StoredFieldVisitor)"/> to
+    /// read the stored fields for a document, implement <see cref="Clone()"/> (creating
+    /// clones of any <see cref="Store.IndexInput"/>s used, etc), and <see cref="Dispose(bool)"/>
+    /// to cleanup any allocated resources.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public abstract class StoredFieldsReader : IDisposable
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal StoredFieldsReader()
         {
         }
 
         /// <summary>
-        /// Visit the stored fields for document <code>n</code> </summary>
+        /// Visit the stored fields for document <paramref name="n"/>. </summary>
         public abstract void VisitDocument(int n, StoredFieldVisitor visitor);
 
         public abstract object Clone();
 
+        /// <summary>
+        /// Disposes all resources used by this object.
+        /// </summary>
         public void Dispose()
         {
             Dispose(true);
             GC.SuppressFinalize(this);
         }
 
+        /// <summary>
+        /// Implementations must override and should dispose all resources used by this instance.
+        /// </summary>
         protected abstract void Dispose(bool disposing);
 
         /// <summary>
-        /// Returns approximate RAM bytes used </summary>
+        /// Returns approximate RAM bytes used. </summary>
         public abstract long RamBytesUsed();
 
         /// <summary>
         /// Checks consistency of this reader.
-        /// 
+        /// <para/>
         /// Note that this may be costly in terms of I/O, e.g.
         /// may involve computing a checksum value against large data files.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public abstract void CheckIntegrity();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/StoredFieldsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/StoredFieldsWriter.cs b/src/Lucene.Net/Codecs/StoredFieldsWriter.cs
index 29ac364..15f7112 100644
--- a/src/Lucene.Net/Codecs/StoredFieldsWriter.cs
+++ b/src/Lucene.Net/Codecs/StoredFieldsWriter.cs
@@ -30,24 +30,24 @@ namespace Lucene.Net.Codecs
 
     /// <summary>
     /// Codec API for writing stored fields:
-    /// <p>
-    /// <ol>
-    ///   <li>For every document, <seealso cref="#startDocument(int)"/> is called,
-    ///       informing the Codec how many fields will be written.
-    ///   <li><seealso cref="#writeField(FieldInfo, IndexableField)"/> is called for
-    ///       each field in the document.
-    ///   <li>After all documents have been written, <seealso cref="#finish(FieldInfos, int)"/>
-    ///       is called for verification/sanity-checks.
-    ///   <li>Finally the writer is closed (<seealso cref="#close()"/>)
-    /// </ol>
-    ///
+    /// <para/>
+    /// <list type="number">
+    ///   <item><description>For every document, <see cref="StartDocument(int)"/> is called,
+    ///       informing the <see cref="Codec"/> how many fields will be written.</description></item>
+    ///   <item><description><see cref="WriteField(FieldInfo, IIndexableField)"/> is called for
+    ///       each field in the document.</description></item>
+    ///   <item><description>After all documents have been written, <see cref="Finish(FieldInfos, int)"/>
+    ///       is called for verification/sanity-checks.</description></item>
+    ///   <item><description>Finally the writer is disposed (<see cref="Dispose(bool)"/>)</description></item>
+    /// </list>
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public abstract class StoredFieldsWriter : IDisposable
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal StoredFieldsWriter()
         {
@@ -55,10 +55,10 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// Called before writing the stored fields of the document.
-        ///  <seealso cref="#writeField(FieldInfo, IndexableField)"/> will be called
-        ///  <code>numStoredFields</code> times. Note that this is
-        ///  called even if the document has no stored fields, in
-        ///  this case <code>numStoredFields</code> will be zero.
+        /// <see cref="WriteField(FieldInfo, IIndexableField)"/> will be called
+        /// <paramref name="numStoredFields"/> times. Note that this is
+        /// called even if the document has no stored fields, in
+        /// this case <paramref name="numStoredFields"/> will be zero.
         /// </summary>
         public abstract void StartDocument(int numStoredFields);
 
@@ -74,28 +74,28 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// Aborts writing entirely, implementation should remove
-        ///  any partially-written files, etc.
+        /// any partially-written files, etc.
         /// </summary>
         public abstract void Abort();
 
         /// <summary>
-        /// Called before <seealso cref="#close()"/>, passing in the number
-        ///  of documents that were written. Note that this is
-        ///  intentionally redundant (equivalent to the number of
-        ///  calls to <seealso cref="#startDocument(int)"/>, but a Codec should
-        ///  check that this is the case to detect the JRE bug described
-        ///  in LUCENE-1282.
+        /// Called before <see cref="Dispose()"/>, passing in the number
+        /// of documents that were written. Note that this is
+        /// intentionally redundant (equivalent to the number of
+        /// calls to <see cref="StartDocument(int)"/>, but a <see cref="Codec"/> should
+        /// check that this is the case to detect the bug described
+        /// in LUCENE-1282.
         /// </summary>
         public abstract void Finish(FieldInfos fis, int numDocs);
 
         /// <summary>
         /// Merges in the stored fields from the readers in
-        ///  <code>mergeState</code>. The default implementation skips
-        ///  over deleted documents, and uses <seealso cref="#startDocument(int)"/>,
-        ///  <seealso cref="#writeField(FieldInfo, IndexableField)"/>, and <seealso cref="#finish(FieldInfos, int)"/>,
-        ///  returning the number of documents that were written.
-        ///  Implementations can override this method for more sophisticated
-        ///  merging (bulk-byte copying, etc).
+        /// <paramref name="mergeState"/>. The default implementation skips
+        /// over deleted documents, and uses <see cref="StartDocument(int)"/>,
+        /// <see cref="WriteField(FieldInfo, IIndexableField)"/>, and <see cref="Finish(FieldInfos, int)"/>,
+        /// returning the number of documents that were written.
+        /// Implementations can override this method for more sophisticated
+        /// merging (bulk-byte copying, etc).
         /// </summary>
         public virtual int Merge(MergeState mergeState)
         {
@@ -128,7 +128,8 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// sugar method for startDocument() + writeField() for every stored field in the document </summary>
+        /// Sugar method for <see cref="StartDocument(int)"/> + <see cref="WriteField(FieldInfo, IIndexableField)"/> 
+        /// for every stored field in the document. </summary>
         protected void AddDocument<T1>(IEnumerable<T1> doc, FieldInfos fieldInfos) where T1 : Lucene.Net.Index.IIndexableField
         {
             int storedCount = 0;
@@ -153,12 +154,18 @@ namespace Lucene.Net.Codecs
             FinishDocument();
         }
 
+        /// <summary>
+        /// Disposes all resources used by this object.
+        /// </summary>
         public void Dispose()
         {
             Dispose(true);
             GC.SuppressFinalize(this);
         }
 
+        /// <summary>
+        /// Implementations must override and should dispose all resources used by this instance.
+        /// </summary>
         protected abstract void Dispose(bool disposing);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/TermStats.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/TermStats.cs b/src/Lucene.Net/Codecs/TermStats.cs
index 9481a6c..013a21f 100644
--- a/src/Lucene.Net/Codecs/TermStats.cs
+++ b/src/Lucene.Net/Codecs/TermStats.cs
@@ -22,19 +22,19 @@ namespace Lucene.Net.Codecs
     /// <summary>
     /// Holder for per-term statistics.
     /// </summary>
-    /// <seealso cref= TermsEnum#docFreq </seealso>
-    /// <seealso cref= TermsEnum#totalTermFreq </seealso>
+    /// <seealso cref="Index.TermsEnum.DocFreq"/>
+    /// <seealso cref="Index.TermsEnum.TotalTermFreq"/>
     public class TermStats
     {
         /// <summary>
         /// How many documents have at least one occurrence of
-        ///  this term.
+        /// this term.
         /// </summary>
         public int DocFreq { get; private set; }
 
         /// <summary>
         /// Total number of times this term occurs across all
-        ///  documents in the field.
+        /// documents in the field.
         /// </summary>
         public long TotalTermFreq { get; private set; }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/TermVectorsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/TermVectorsFormat.cs b/src/Lucene.Net/Codecs/TermVectorsFormat.cs
index 84cef97..025c2ba 100644
--- a/src/Lucene.Net/Codecs/TermVectorsFormat.cs
+++ b/src/Lucene.Net/Codecs/TermVectorsFormat.cs
@@ -23,27 +23,27 @@ namespace Lucene.Net.Codecs
     using SegmentInfo = Lucene.Net.Index.SegmentInfo;
 
     /// <summary>
-    /// Controls the format of term vectors
+    /// Controls the format of term vectors.
     /// </summary>
     public abstract class TermVectorsFormat
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal TermVectorsFormat()
         {
         }
 
         /// <summary>
-        /// Returns a <seealso cref="TermVectorsReader"/> to read term
-        ///  vectors.
+        /// Returns a <see cref="TermVectorsReader"/> to read term
+        /// vectors.
         /// </summary>
         public abstract TermVectorsReader VectorsReader(Directory directory, SegmentInfo segmentInfo, FieldInfos fieldInfos, IOContext context);
 
         /// <summary>
-        /// Returns a <seealso cref="TermVectorsWriter"/> to write term
-        ///  vectors.
+        /// Returns a <see cref="TermVectorsWriter"/> to write term
+        /// vectors.
         /// </summary>
         public abstract TermVectorsWriter VectorsWriter(Directory directory, SegmentInfo segmentInfo, IOContext context);
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/TermVectorsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/TermVectorsReader.cs b/src/Lucene.Net/Codecs/TermVectorsReader.cs
index c106b7b..1f5d0bd 100644
--- a/src/Lucene.Net/Codecs/TermVectorsReader.cs
+++ b/src/Lucene.Net/Codecs/TermVectorsReader.cs
@@ -25,52 +25,59 @@ namespace Lucene.Net.Codecs
 
     /// <summary>
     /// Codec API for reading term vectors:
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public abstract class TermVectorsReader : IDisposable
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal TermVectorsReader()
         {
         }
 
         /// <summary>
-        /// Returns term vectors for this document, or null if
-        ///  term vectors were not indexed. If offsets are
-        ///  available they are in an <seealso cref="OffsetAttribute"/>
-        ///  available from the <seealso cref="DocsAndPositionsEnum"/>.
+        /// Returns term vectors for this document, or <c>null</c> if
+        /// term vectors were not indexed. If offsets are
+        /// available they are in an <see cref="OffsetAttribute"/>
+        /// available from the <see cref="DocsAndPositionsEnum"/>.
         /// </summary>
         public abstract Fields Get(int doc);
 
         /// <summary>
-        /// Returns approximate RAM bytes used </summary>
+        /// Returns approximate RAM bytes used. </summary>
         public abstract long RamBytesUsed();
 
         /// <summary>
         /// Checks consistency of this reader.
-        /// <p>
+        /// <para/>
         /// Note that this may be costly in terms of I/O, e.g.
         /// may involve computing a checksum value against large data files.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public abstract void CheckIntegrity();
 
         /// <summary>
         /// Create a clone that one caller at a time may use to
-        ///  read term vectors.
+        /// read term vectors.
         /// </summary>
         public abstract object Clone();
 
+        /// <summary>
+        /// Disposes all resources used by this object.
+        /// </summary>
         public void Dispose()
         {
             Dispose(true);
             GC.SuppressFinalize(this);
         }
 
+        /// <summary>
+        /// Implementations must override and should dispose all resources used by this instance.
+        /// </summary>
         protected abstract void Dispose(bool disposing);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/TermVectorsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/TermVectorsWriter.cs b/src/Lucene.Net/Codecs/TermVectorsWriter.cs
index dd195c3..9d91956 100644
--- a/src/Lucene.Net/Codecs/TermVectorsWriter.cs
+++ b/src/Lucene.Net/Codecs/TermVectorsWriter.cs
@@ -37,31 +37,31 @@ namespace Lucene.Net.Codecs
 
     /// <summary>
     /// Codec API for writing term vectors:
-    /// <p>
-    /// <ol>
-    ///   <li>For every document, <seealso cref="#startDocument(int)"/> is called,
-    ///       informing the Codec how many fields will be written.
-    ///   <li><seealso cref="#startField(FieldInfo, int, boolean, boolean, boolean)"/> is called for
+    /// <para/>
+    /// <list type="number">
+    ///   <item><description>For every document, <see cref="StartDocument(int)"/> is called,
+    ///       informing the <see cref="Codec"/> how many fields will be written.</description></item>
+    ///   <item><description><see cref="StartField(FieldInfo, int, bool, bool, bool)"/> is called for
     ///       each field in the document, informing the codec how many terms
     ///       will be written for that field, and whether or not positions,
-    ///       offsets, or payloads are enabled.
-    ///   <li>Within each field, <seealso cref="#startTerm(BytesRef, int)"/> is called
-    ///       for each term.
-    ///   <li>If offsets and/or positions are enabled, then
-    ///       <seealso cref="#addPosition(int, int, int, BytesRef)"/> will be called for each term
-    ///       occurrence.
-    ///   <li>After all documents have been written, <seealso cref="#finish(FieldInfos, int)"/>
-    ///       is called for verification/sanity-checks.
-    ///   <li>Finally the writer is closed (<seealso cref="#close()"/>)
-    /// </ol>
-    ///
+    ///       offsets, or payloads are enabled.</description></item>
+    ///   <item><description>Within each field, <see cref="StartTerm(BytesRef, int)"/> is called
+    ///       for each term.</description></item>
+    ///   <item><description>If offsets and/or positions are enabled, then
+    ///       <see cref="AddPosition(int, int, int, BytesRef)"/> will be called for each term
+    ///       occurrence.</description></item>
+    ///   <item><description>After all documents have been written, <see cref="Finish(FieldInfos, int)"/>
+    ///       is called for verification/sanity-checks.</description></item>
+    ///   <item><description>Finally the writer is disposed (<see cref="Dispose(bool)"/>)</description></item>
+    /// </list>
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public abstract class TermVectorsWriter : IDisposable
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal TermVectorsWriter()
         {
@@ -69,11 +69,11 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// Called before writing the term vectors of the document.
-        ///  <seealso cref="#startField(FieldInfo, int, boolean, boolean, boolean)"/> will
-        ///  be called <code>numVectorFields</code> times. Note that if term
-        ///  vectors are enabled, this is called even if the document
-        ///  has no vector fields, in this case <code>numVectorFields</code>
-        ///  will be zero.
+        /// <see cref="StartField(FieldInfo, int, bool, bool, bool)"/> will
+        /// be called <paramref name="numVectorFields"/> times. Note that if term
+        /// vectors are enabled, this is called even if the document
+        /// has no vector fields, in this case <paramref name="numVectorFields"/>
+        /// will be zero.
         /// </summary>
         public abstract void StartDocument(int numVectorFields);
 
@@ -85,7 +85,7 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// Called before writing the terms of the field.
-        ///  <seealso cref="#startTerm(BytesRef, int)"/> will be called <code>numTerms</code> times.
+        /// <see cref="StartTerm(BytesRef, int)"/> will be called <paramref name="numTerms"/> times.
         /// </summary>
         public abstract void StartField(FieldInfo info, int numTerms, bool positions, bool offsets, bool payloads);
 
@@ -96,10 +96,10 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Adds a term and its term frequency <code>freq</code>.
+        /// Adds a <paramref name="term"/> and its term frequency <paramref name="freq"/>.
         /// If this field has positions and/or offsets enabled, then
-        /// <seealso cref="#addPosition(int, int, int, BytesRef)"/> will be called
-        /// <code>freq</code> times respectively.
+        /// <see cref="AddPosition(int, int, int, BytesRef)"/> will be called
+        /// <paramref name="freq"/> times respectively.
         /// </summary>
         public abstract void StartTerm(BytesRef term, int freq);
 
@@ -110,36 +110,37 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Adds a term position and offsets </summary>
+        /// Adds a term <paramref name="position"/> and offsets. </summary>
         public abstract void AddPosition(int position, int startOffset, int endOffset, BytesRef payload);
 
         /// <summary>
         /// Aborts writing entirely, implementation should remove
-        ///  any partially-written files, etc.
+        /// any partially-written files, etc.
         /// </summary>
         public abstract void Abort();
 
         /// <summary>
-        /// Called before <seealso cref="#close()"/>, passing in the number
-        ///  of documents that were written. Note that this is
-        ///  intentionally redundant (equivalent to the number of
-        ///  calls to <seealso cref="#startDocument(int)"/>, but a Codec should
-        ///  check that this is the case to detect the JRE bug described
-        ///  in LUCENE-1282.
+        /// Called before <see cref="Dispose(bool)"/>, passing in the number
+        /// of documents that were written. Note that this is
+        /// intentionally redundant (equivalent to the number of
+        /// calls to <see cref="StartDocument(int)"/>, but a <see cref="Codec"/> should
+        /// check that this is the case to detect the bug described
+        /// in LUCENE-1282.
         /// </summary>
         public abstract void Finish(FieldInfos fis, int numDocs);
 
         /// <summary>
-        /// Called by IndexWriter when writing new segments.
-        /// <p>
-        /// this is an expert API that allows the codec to consume
+        /// Called by <see cref="Index.IndexWriter"/> when writing new segments.
+        /// <para/>
+        /// This is an expert API that allows the codec to consume
         /// positions and offsets directly from the indexer.
-        /// <p>
-        /// The default implementation calls <seealso cref="#addPosition(int, int, int, BytesRef)"/>,
+        /// <para/>
+        /// The default implementation calls <see cref="AddPosition(int, int, int, BytesRef)"/>,
         /// but subclasses can override this if they want to efficiently write
         /// all the positions, then all the offsets, for example.
-        /// <p>
+        /// <para/>
         /// NOTE: this API is extremely expert and subject to change or removal!!!
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         // TODO: we should probably nuke this and make a more efficient 4.x format
@@ -206,14 +207,14 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// Merges in the term vectors from the readers in
-        ///  <code>mergeState</code>. The default implementation skips
-        ///  over deleted documents, and uses <seealso cref="#startDocument(int)"/>,
-        ///  <seealso cref="#startField(FieldInfo, int, boolean, boolean, boolean)"/>,
-        ///  <seealso cref="#startTerm(BytesRef, int)"/>, <seealso cref="#addPosition(int, int, int, BytesRef)"/>,
-        ///  and <seealso cref="#finish(FieldInfos, int)"/>,
-        ///  returning the number of documents that were written.
-        ///  Implementations can override this method for more sophisticated
-        ///  merging (bulk-byte copying, etc).
+        /// <paramref name="mergeState"/>. The default implementation skips
+        /// over deleted documents, and uses <see cref="StartDocument(int)"/>,
+        /// <see cref="StartField(FieldInfo, int, bool, bool, bool)"/>,
+        /// <see cref="StartTerm(BytesRef, int)"/>, <see cref="AddPosition(int, int, int, BytesRef)"/>,
+        /// and <see cref="Finish(FieldInfos, int)"/>,
+        /// returning the number of documents that were written.
+        /// Implementations can override this method for more sophisticated
+        /// merging (bulk-byte copying, etc).
         /// </summary>
         public virtual int Merge(MergeState mergeState)
         {
@@ -245,7 +246,7 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// Safe (but, slowish) default method to write every
-        ///  vector field in the document.
+        /// vector field in the document.
         /// </summary>
         protected void AddAllDocVectors(Fields vectors, MergeState mergeState)
         {
@@ -350,17 +351,23 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Return the BytesRef Comparer used to sort terms
-        ///  before feeding to this API.
+        /// Gets the <see cref="T:IComparer{BytesRef}"/> used to sort terms
+        /// before feeding to this API.
         /// </summary>
         public abstract IComparer<BytesRef> Comparer { get; }
 
+        /// <summary>
+        /// Disposes all resources used by this object.
+        /// </summary>
         public void Dispose()
         {
             Dispose(true);
             GC.SuppressFinalize(this);
         }
 
+        /// <summary>
+        /// Implementations must override and should dispose all resources used by this instance.
+        /// </summary>
         protected abstract void Dispose(bool disposing);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/TermsConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/TermsConsumer.cs b/src/Lucene.Net/Codecs/TermsConsumer.cs
index ca896bf..2789ef1 100644
--- a/src/Lucene.Net/Codecs/TermsConsumer.cs
+++ b/src/Lucene.Net/Codecs/TermsConsumer.cs
@@ -34,29 +34,29 @@ namespace Lucene.Net.Codecs
 
     /// <summary>
     /// Abstract API that consumes terms for an individual field.
-    /// <p>
+    /// <para/>
     /// The lifecycle is:
-    /// <ol>
-    ///   <li>TermsConsumer is returned for each field
-    ///       by <seealso cref="FieldsConsumer#addField(FieldInfo)"/>.
-    ///   <li>TermsConsumer returns a <seealso cref="PostingsConsumer"/> for
-    ///       each term in <seealso cref="#startTerm(BytesRef)"/>.
-    ///   <li>When the producer (e.g. IndexWriter)
+    /// <list type="number">
+    ///   <item><description>TermsConsumer is returned for each field
+    ///       by <see cref="FieldsConsumer.AddField(FieldInfo)"/>.</description></item>
+    ///   <item><description>TermsConsumer returns a <see cref="PostingsConsumer"/> for
+    ///       each term in <see cref="StartTerm(BytesRef)"/>.</description></item>
+    ///   <item><description>When the producer (e.g. <see cref="Index.IndexWriter"/>)
     ///       is done adding documents for the term, it calls
-    ///       <seealso cref="#finishTerm(BytesRef, TermStats)"/>, passing in
-    ///       the accumulated term statistics.
-    ///   <li>Producer calls <seealso cref="#finish(long, long, int)"/> with
+    ///       <see cref="FinishTerm(BytesRef, TermStats)"/>, passing in
+    ///       the accumulated term statistics.</description></item>
+    ///   <item><description>Producer calls <see cref="Finish(long, long, int)"/> with
     ///       the accumulated collection statistics when it is finished
-    ///       adding terms to the field.
-    /// </ol>
-    ///
+    ///       adding terms to the field.</description></item>
+    /// </list>
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public abstract class TermsConsumer
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal TermsConsumer()
         {
@@ -64,28 +64,28 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// Starts a new term in this field; this may be called
-        ///  with no corresponding call to finish if the term had
-        ///  no docs.
+        /// with no corresponding call to finish if the term had
+        /// no docs.
         /// </summary>
         public abstract PostingsConsumer StartTerm(BytesRef text);
 
         /// <summary>
-        /// Finishes the current term; numDocs must be > 0.
-        ///  <code>stats.totalTermFreq</code> will be -1 when term
-        ///  frequencies are omitted for the field.
+        /// Finishes the current term; the document count (<c>stats.DocFreq</c>) must be &gt; 0.
+        /// <c>stats.TotalTermFreq</c> will be -1 when term
+        /// frequencies are omitted for the field.
         /// </summary>
         public abstract void FinishTerm(BytesRef text, TermStats stats);
 
         /// <summary>
         /// Called when we are done adding terms to this field.
-        ///  <code>sumTotalTermFreq</code> will be -1 when term
-        ///  frequencies are omitted for the field.
+        /// <paramref name="sumTotalTermFreq"/> will be -1 when term
+        /// frequencies are omitted for the field.
         /// </summary>
         public abstract void Finish(long sumTotalTermFreq, long sumDocFreq, int docCount);
 
         /// <summary>
-        /// Return the BytesRef Comparer used to sort terms
-        ///  before feeding to this API.
+        /// Gets the <see cref="T:IComparer{BytesRef}"/> used to sort terms
+        /// before feeding to this API.
         /// </summary>
         public abstract IComparer<BytesRef> Comparer { get; }
 
@@ -94,7 +94,7 @@ namespace Lucene.Net.Codecs
         private MappingMultiDocsAndPositionsEnum postingsEnum;
 
         /// <summary>
-        /// Default merge impl </summary>
+        /// Default merge impl. </summary>
         public virtual void Merge(MergeState mergeState, IndexOptions indexOptions, TermsEnum termsEnum)
         {
             BytesRef term;


[42/48] lucenenet git commit: Lucene.Net.Codecs.Lucene3x: Fixed XML documentation comment warnings

Posted by ni...@apache.org.
Lucene.Net.Codecs.Lucene3x: Fixed XML documentation comment warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/95b5d4bb
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/95b5d4bb
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/95b5d4bb

Branch: refs/heads/master
Commit: 95b5d4bba0f1ce86aa65076885f168716dea851c
Parents: 27cdd04
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Mon Jun 5 14:38:17 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Tue Jun 6 06:58:41 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |  3 +-
 src/Lucene.Net/Codecs/Lucene3x/Lucene3xCodec.cs |  5 +-
 .../Codecs/Lucene3x/Lucene3xFieldInfosFormat.cs |  6 +--
 .../Codecs/Lucene3x/Lucene3xFieldInfosReader.cs |  3 +-
 .../Codecs/Lucene3x/Lucene3xFields.cs           |  5 +-
 .../Codecs/Lucene3x/Lucene3xNormsFormat.cs      |  6 +--
 .../Codecs/Lucene3x/Lucene3xNormsProducer.cs    | 13 +++---
 .../Codecs/Lucene3x/Lucene3xPostingsFormat.cs   | 17 ++++---
 .../Lucene3x/Lucene3xSegmentInfoFormat.cs       | 24 +++++-----
 .../Lucene3x/Lucene3xSegmentInfoReader.cs       |  9 ++--
 .../Codecs/Lucene3x/Lucene3xSkipListReader.cs   | 10 ++--
 .../Lucene3x/Lucene3xStoredFieldsFormat.cs      |  1 -
 .../Lucene3x/Lucene3xStoredFieldsReader.cs      | 23 +++++----
 .../Lucene3x/Lucene3xTermVectorsFormat.cs       |  6 +--
 .../Lucene3x/Lucene3xTermVectorsReader.cs       | 13 ++++--
 .../Codecs/Lucene3x/SegmentTermDocs.cs          |  7 +--
 .../Codecs/Lucene3x/SegmentTermEnum.cs          | 35 +++++++-------
 .../Codecs/Lucene3x/SegmentTermPositions.cs     |  6 +--
 src/Lucene.Net/Codecs/Lucene3x/TermBuffer.cs    |  4 +-
 src/Lucene.Net/Codecs/Lucene3x/TermInfo.cs      |  9 ++--
 .../Codecs/Lucene3x/TermInfosReader.cs          | 25 +++++-----
 .../Codecs/Lucene3x/TermInfosReaderIndex.cs     | 49 ++++++++++----------
 22 files changed, 138 insertions(+), 141 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c8a36fb..5c39dc4 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -52,8 +52,7 @@ helpers to help with that, see for examples see our [Java style methods to avoid
 
 1. Lucene.Net.Core (project)
    1. Codecs.Compressing (namespace)
-   2. Codecs.Lucene3x (namespace)
-   3. Util.Packed (namespace)
+   2. Util.Packed (namespace)
 2. Lucene.Net.Codecs (project)
    1. Appending (namespace)
    2. BlockTerms (namespace)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/Lucene3xCodec.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xCodec.cs b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xCodec.cs
index f3ae770..773958b 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xCodec.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xCodec.cs
@@ -28,7 +28,6 @@ namespace Lucene.Net.Codecs.Lucene3x
 
     /// <summary>
     /// Supports the Lucene 3.x index format (readonly) </summary>
-    /// @deprecated Only for reading existing 3.x indexes
     [Obsolete("Only for reading existing 3.x indexes")]
     [CodecName("Lucene3x")] // LUCENENET specific - using CodecName attribute to ensure the default name passed from subclasses is the same as this class name
     public class Lucene3xCodec : Codec
@@ -51,7 +50,7 @@ namespace Lucene.Net.Codecs.Lucene3x
         private readonly Lucene3xNormsFormat normsFormat = new Lucene3xNormsFormat();
 
         /// <summary>
-        /// Extension of compound file for doc store files </summary>
+        /// Extension of compound file for doc store files. </summary>
         internal const string COMPOUND_FILE_STORE_EXTENSION = "cfx";
 
         // TODO: this should really be a different impl
@@ -120,7 +119,7 @@ namespace Lucene.Net.Codecs.Lucene3x
 
         /// <summary>
         /// Returns file names for shared doc stores, if any, else
-        /// null.
+        /// <c>null</c>.
         /// </summary>
         public static ISet<string> GetDocStoreFiles(SegmentInfo info)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/Lucene3xFieldInfosFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xFieldInfosFormat.cs b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xFieldInfosFormat.cs
index 329369f..5ab5488 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xFieldInfosFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xFieldInfosFormat.cs
@@ -20,10 +20,10 @@ namespace Lucene.Net.Codecs.Lucene3x
      */
 
     /// <summary>
-    /// Lucene3x ReadOnly FieldInfosFromat implementation </summary>
-    /// @deprecated (4.0) this is only used to read indexes created
-    /// before 4.0.
+    /// Lucene3x ReadOnly <see cref="FieldInfosFormat"/> implementation.
+    /// <para/>
     /// @lucene.experimental
+    /// </summary>
     [Obsolete("(4.0) this is only used to read indexes created before 4.0")]
     internal class Lucene3xFieldInfosFormat : FieldInfosFormat
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/Lucene3xFieldInfosReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xFieldInfosReader.cs b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xFieldInfosReader.cs
index 8377418..3e3d0ac 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xFieldInfosReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xFieldInfosReader.cs
@@ -35,12 +35,11 @@ namespace Lucene.Net.Codecs.Lucene3x
 
     /// <summary>
     /// @lucene.experimental </summary>
-    /// @deprecated Only for reading existing 3.x indexes
     [Obsolete("Only for reading existing 3.x indexes")]
     internal class Lucene3xFieldInfosReader : FieldInfosReader
     {
         /// <summary>
-        /// Extension of field infos </summary>
+        /// Extension of field infos. </summary>
         internal const string FIELD_INFOS_EXTENSION = "fnm";
 
         // First used in 2.9; prior to 2.9 there was no format header

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/Lucene3xFields.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xFields.cs b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xFields.cs
index e34ddbe..81aae0d 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xFields.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xFields.cs
@@ -43,8 +43,9 @@ namespace Lucene.Net.Codecs.Lucene3x
 
     /// <summary>
     /// Exposes flex API on a pre-flex index, as a codec.
-    /// @lucene.experimental </summary>
-    /// @deprecated (4.0)
+    /// <para/>
+    /// @lucene.experimental 
+    /// </summary>
     [Obsolete("(4.0)")]
     internal class Lucene3xFields : FieldsProducer
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/Lucene3xNormsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xNormsFormat.cs b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xNormsFormat.cs
index 21b2cbd..356cd8b 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xNormsFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xNormsFormat.cs
@@ -23,10 +23,10 @@ namespace Lucene.Net.Codecs.Lucene3x
     using SegmentWriteState = Lucene.Net.Index.SegmentWriteState;
 
     /// <summary>
-    /// Lucene3x ReadOnly NormsFormat implementation </summary>
-    /// @deprecated (4.0) this is only used to read indexes created
-    /// before 4.0.
+    /// Lucene3x ReadOnly <see cref="NormsFormat"/> implementation.
+    /// <para/>
     /// @lucene.experimental
+    /// </summary>
     [Obsolete("(4.0) this is only used to read indexes created before 4.0.")]
     internal class Lucene3xNormsFormat : NormsFormat
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/Lucene3xNormsProducer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xNormsProducer.cs b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xNormsProducer.cs
index 00faced..ed32a6f 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xNormsProducer.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xNormsProducer.cs
@@ -41,22 +41,23 @@ namespace Lucene.Net.Codecs.Lucene3x
     using StringHelper = Lucene.Net.Util.StringHelper;
 
     /// <summary>
-    /// Reads Lucene 3.x norms format and exposes it via DocValues API
-    /// @lucene.experimental </summary>
-    /// @deprecated Only for reading existing 3.x indexes
+    /// Reads Lucene 3.x norms format and exposes it via <see cref="Index.DocValues"/> API.
+    /// <para/>
+    /// @lucene.experimental 
+    /// </summary>
     [Obsolete("Only for reading existing 3.x indexes")]
     internal class Lucene3xNormsProducer : DocValuesProducer
     {
         /// <summary>
-        /// norms header placeholder </summary>
+        /// Norms header placeholder. </summary>
         internal static readonly sbyte[] NORMS_HEADER = { (sbyte)'N', (sbyte)'R', (sbyte)'M', -1 };
 
         /// <summary>
-        /// Extension of norms file </summary>
+        /// Extension of norms file. </summary>
         internal const string NORMS_EXTENSION = "nrm";
 
         /// <summary>
-        /// Extension of separate norms file </summary>
+        /// Extension of separate norms file. </summary>
         internal const string SEPARATE_NORMS_EXTENSION = "s";
 
         private readonly IDictionary<string, NormsDocValues> norms = new Dictionary<string, NormsDocValues>();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/Lucene3xPostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xPostingsFormat.cs b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xPostingsFormat.cs
index c5e4c1b..0d368ff 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xPostingsFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xPostingsFormat.cs
@@ -23,26 +23,25 @@ namespace Lucene.Net.Codecs.Lucene3x
 
     /// <summary>
     /// Codec that reads the pre-flex-indexing postings
-    ///  format.  It does not provide a writer because newly
-    ///  written segments should use the Codec configured on IndexWriter.
-    /// </summary>
-    /// @deprecated (4.0) this is only used to read indexes created
-    /// before 4.0.
+    /// format.  It does not provide a writer because newly
+    /// written segments should use the <see cref="Codec"/> configured on <see cref="Index.IndexWriter"/>.
+    /// <para/>
     /// @lucene.experimental
-    [Obsolete("(4.0) this is only used to read indexes created")]
+    /// </summary>
+    [Obsolete("(4.0) this is only used to read indexes created before 4.0.")]
     [PostingsFormatName("Lucene3x")] // LUCENENET specific - using PostingsFormatName attribute to ensure the default name passed from subclasses is the same as this class name
     internal class Lucene3xPostingsFormat : PostingsFormat
     {
         /// <summary>
-        /// Extension of terms file </summary>
+        /// Extension of terms file. </summary>
         public const string TERMS_EXTENSION = "tis";
 
         /// <summary>
-        /// Extension of terms index file </summary>
+        /// Extension of terms index file. </summary>
         public const string TERMS_INDEX_EXTENSION = "tii";
 
         /// <summary>
-        /// Extension of freq postings file </summary>
+        /// Extension of freq postings file. </summary>
         public const string FREQ_EXTENSION = "frq";
 
         /// <summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoFormat.cs b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoFormat.cs
index 86c55de..da18377 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoFormat.cs
@@ -23,23 +23,23 @@ namespace Lucene.Net.Codecs.Lucene3x
     using SegmentInfo = Lucene.Net.Index.SegmentInfo;
 
     /// <summary>
-    /// Lucene3x ReadOnly SegmentInfoFormat implementation </summary>
-    /// @deprecated (4.0) this is only used to read indexes created
-    /// before 4.0.
+    /// Lucene3x ReadOnly <see cref="SegmentInfoFormat"/> implementation.
+    /// <para/>
     /// @lucene.experimental
-    [Obsolete("(4.0) this is only used to read indexes created")]
+    /// </summary>
+    [Obsolete("(4.0) this is only used to read indexes created before 4.0.")]
     public class Lucene3xSegmentInfoFormat : SegmentInfoFormat
     {
         private readonly SegmentInfoReader reader = new Lucene3xSegmentInfoReader();
 
         /// <summary>
-        /// this format adds optional per-segment String
-        ///  diagnostics storage, and switches userData to Map
+        /// This format adds optional per-segment String
+        /// diagnostics storage, and switches userData to Map.
         /// </summary>
         public static readonly int FORMAT_DIAGNOSTICS = -9;
 
         /// <summary>
-        /// Each segment records whether it has term vectors </summary>
+        /// Each segment records whether it has term vectors. </summary>
         public static readonly int FORMAT_HAS_VECTORS = -10;
 
         /// <summary>
@@ -48,7 +48,7 @@ namespace Lucene.Net.Codecs.Lucene3x
 
         /// <summary>
         /// Extension used for saving each SegmentInfo, once a 3.x
-        ///  index is first committed to with 4.0.
+        /// index is first committed to with 4.0.
         /// </summary>
         public static readonly string UPGRADED_SI_EXTENSION = "si";
 
@@ -80,22 +80,22 @@ namespace Lucene.Net.Codecs.Lucene3x
         public static readonly string NORMGEN_KEY = typeof(Lucene3xSegmentInfoFormat).Name + ".normgen";
         public static readonly string NORMGEN_PREFIX = typeof(Lucene3xSegmentInfoFormat).Name + ".normfield";
 
-        /// <returns> if this segment shares stored fields & vectors, this
-        ///         offset is where in that file this segment's docs begin  </returns>
+        /// <returns> If this segment shares stored fields &amp; vectors, this
+        ///         offset is where in that file this segment's docs begin.  </returns>
         public static int GetDocStoreOffset(SegmentInfo si)
         {
             string v = si.GetAttribute(DS_OFFSET_KEY);
             return v == null ? -1 : Convert.ToInt32(v, CultureInfo.InvariantCulture);
         }
 
-        /// <returns> name used to derive fields/vectors file we share with other segments </returns>
+        /// <returns> Name used to derive fields/vectors file we share with other segments. </returns>
         public static string GetDocStoreSegment(SegmentInfo si)
         {
             string v = si.GetAttribute(DS_NAME_KEY);
             return v == null ? si.Name : v;
         }
 
-        /// <returns> whether doc store files are stored in compound file (*.cfx) </returns>
+        /// <returns> Whether doc store files are stored in compound file (*.cfx). </returns>
         public static bool GetDocStoreIsCompoundFile(SegmentInfo si)
         {
             string v = si.GetAttribute(DS_COMPOUND_KEY);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoReader.cs b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoReader.cs
index da6a314..2e4b015 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoReader.cs
@@ -36,9 +36,10 @@ namespace Lucene.Net.Codecs.Lucene3x
     using SegmentInfos = Lucene.Net.Index.SegmentInfos;
 
     /// <summary>
-    /// Lucene 3x implementation of <seealso cref="SegmentInfoReader"/>.
-    /// @lucene.experimental </summary>
-    /// @deprecated Only for reading existing 3.x indexes
+    /// Lucene 3x implementation of <see cref="SegmentInfoReader"/>.
+    /// <para/>
+    /// @lucene.experimental 
+    /// </summary>
     [Obsolete("Only for reading existing 3.x indexes")]
     public class Lucene3xSegmentInfoReader : SegmentInfoReader
     {
@@ -138,7 +139,7 @@ namespace Lucene.Net.Codecs.Lucene3x
         }
 
         /// <summary>
-        /// reads from legacy 3.x segments_N </summary>
+        /// Reads from legacy 3.x segments_N. </summary>
         private SegmentCommitInfo ReadLegacySegmentInfo(Directory dir, int format, IndexInput input)
         {
             // check that it is a format we can understand

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSkipListReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSkipListReader.cs b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSkipListReader.cs
index baa16b3..05f64e8 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSkipListReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSkipListReader.cs
@@ -22,9 +22,7 @@ namespace Lucene.Net.Codecs.Lucene3x
 
     using IndexInput = Lucene.Net.Store.IndexInput;
 
-    /// @deprecated (4.0) this is only used to read indexes created
-    /// before 4.0.
-    [Obsolete("(4.0) this is only used to read indexes created")]
+    [Obsolete("(4.0) this is only used to read indexes created before 4.0.")]
     internal sealed class Lucene3xSkipListReader : MultiLevelSkipListReader
     {
         private bool currentFieldStoresPayloads;
@@ -58,7 +56,7 @@ namespace Lucene.Net.Codecs.Lucene3x
 
         /// <summary>
         /// Returns the freq pointer of the doc to which the last call of
-        /// <seealso cref="MultiLevelSkipListReader#skipTo(int)"/> has skipped.
+        /// <see cref="MultiLevelSkipListReader.SkipTo(int)"/> has skipped.
         /// </summary>
         public long FreqPointer
         {
@@ -70,7 +68,7 @@ namespace Lucene.Net.Codecs.Lucene3x
 
         /// <summary>
         /// Returns the prox pointer of the doc to which the last call of
-        /// <seealso cref="MultiLevelSkipListReader#skipTo(int)"/> has skipped.
+        /// <see cref="MultiLevelSkipListReader.SkipTo(int)"/> has skipped.
         /// </summary>
         public long ProxPointer
         {
@@ -82,7 +80,7 @@ namespace Lucene.Net.Codecs.Lucene3x
 
         /// <summary>
         /// Returns the payload length of the payload stored just before
-        /// the doc to which the last call of <seealso cref="MultiLevelSkipListReader#skipTo(int)"/>
+        /// the doc to which the last call of <see cref="MultiLevelSkipListReader.SkipTo(int)"/>
         /// has skipped.
         /// </summary>
         public int PayloadLength

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/Lucene3xStoredFieldsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xStoredFieldsFormat.cs b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xStoredFieldsFormat.cs
index 051dc79..af10590 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xStoredFieldsFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xStoredFieldsFormat.cs
@@ -24,7 +24,6 @@ namespace Lucene.Net.Codecs.Lucene3x
     using IOContext = Lucene.Net.Store.IOContext;
     using SegmentInfo = Lucene.Net.Index.SegmentInfo;
 
-    /// @deprecated Only for reading existing 3.x indexes
     [Obsolete("Only for reading existing 3.x indexes")]
     internal class Lucene3xStoredFieldsFormat : StoredFieldsFormat
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/Lucene3xStoredFieldsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xStoredFieldsReader.cs b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xStoredFieldsReader.cs
index d30d7a2..8e019f2 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xStoredFieldsReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xStoredFieldsReader.cs
@@ -38,21 +38,20 @@ namespace Lucene.Net.Codecs.Lucene3x
 
     /// <summary>
     /// Class responsible for access to stored document fields.
-    /// <p/>
+    /// <para/>
     /// It uses &lt;segment&gt;.fdt and &lt;segment&gt;.fdx; files.
     /// </summary>
-    /// @deprecated Only for reading existing 3.x indexes
     [Obsolete("Only for reading existing 3.x indexes")]
     internal sealed class Lucene3xStoredFieldsReader : StoredFieldsReader, IDisposable
     {
         private const int FORMAT_SIZE = 4;
 
         /// <summary>
-        /// Extension of stored fields file </summary>
+        /// Extension of stored fields file. </summary>
         public const string FIELDS_EXTENSION = "fdt";
 
         /// <summary>
-        /// Extension of stored fields index file </summary>
+        /// Extension of stored fields index file. </summary>
         public const string FIELDS_INDEX_EXTENSION = "fdx";
 
         // Lucene 3.0: Removal of compressed fields
@@ -100,10 +99,10 @@ namespace Lucene.Net.Codecs.Lucene3x
 
         /// <summary>
         /// Returns a cloned FieldsReader that shares open
-        ///  IndexInputs with the original one.  It is the caller's
-        ///  job not to close the original FieldsReader until all
-        ///  clones are called (eg, currently SegmentReader manages
-        ///  this logic).
+        /// IndexInputs with the original one.  It is the caller's
+        /// job not to close the original FieldsReader until all
+        /// clones are called (eg, currently SegmentReader manages
+        /// this logic).
         /// </summary>
         public override object Clone()
         {
@@ -228,7 +227,7 @@ namespace Lucene.Net.Codecs.Lucene3x
             }
         }
 
-        /// <exception cref="ObjectDisposedException"> if this FieldsReader is closed </exception>
+        /// <exception cref="ObjectDisposedException"> If this FieldsReader is disposed. </exception>
         private void EnsureOpen()
         {
             if (closed)
@@ -238,10 +237,10 @@ namespace Lucene.Net.Codecs.Lucene3x
         }
 
         /// <summary>
-        /// Closes the underlying <seealso cref="Lucene.Net.Store.IndexInput"/> streams.
-        /// this means that the Fields values will not be accessible.
+        /// Closes the underlying <see cref="Lucene.Net.Store.IndexInput"/> streams.
+        /// This means that the Fields values will not be accessible.
         /// </summary>
-        /// <exception cref="IOException"> If there is a low-level I/O error. </exception>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error. </exception>
         protected override void Dispose(bool disposing)
         {
             if (disposing)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/Lucene3xTermVectorsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xTermVectorsFormat.cs b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xTermVectorsFormat.cs
index c222b97..8e1bdd7 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xTermVectorsFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xTermVectorsFormat.cs
@@ -27,10 +27,10 @@ namespace Lucene.Net.Codecs.Lucene3x
     using SegmentInfo = Lucene.Net.Index.SegmentInfo;
 
     /// <summary>
-    /// Lucene3x ReadOnly TermVectorsFormat implementation </summary>
-    /// @deprecated (4.0) this is only used to read indexes created
-    /// before 4.0.
+    /// Lucene3x ReadOnly <see cref="TermVectorsFormat"/> implementation.
+    /// <para/>
     /// @lucene.experimental
+    /// </summary>
     [Obsolete("(4.0) this is only used to read indexes created before 4.0.")]
     internal class Lucene3xTermVectorsFormat : TermVectorsFormat
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/Lucene3xTermVectorsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xTermVectorsReader.cs b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xTermVectorsReader.cs
index dbce339..7fbdf32 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xTermVectorsReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xTermVectorsReader.cs
@@ -42,7 +42,6 @@ namespace Lucene.Net.Codecs.Lucene3x
     using Terms = Lucene.Net.Index.Terms;
     using TermsEnum = Lucene.Net.Index.TermsEnum;
 
-    /// @deprecated Only for reading existing 3.x indexes
     [Obsolete("Only for reading existing 3.x indexes")]
     internal class Lucene3xTermVectorsReader : TermVectorsReader
     {
@@ -67,15 +66,15 @@ namespace Lucene.Net.Codecs.Lucene3x
         public const sbyte STORE_OFFSET_WITH_TERMVECTOR = 0x2;
 
         /// <summary>
-        /// Extension of vectors fields file </summary>
+        /// Extension of vectors fields file. </summary>
         public const string VECTORS_FIELDS_EXTENSION = "tvf";
 
         /// <summary>
-        /// Extension of vectors documents file </summary>
+        /// Extension of vectors documents file. </summary>
         public const string VECTORS_DOCUMENTS_EXTENSION = "tvd";
 
         /// <summary>
-        /// Extension of vectors index file </summary>
+        /// Extension of vectors index file. </summary>
         public const string VECTORS_INDEX_EXTENSION = "tvx";
 
         private readonly FieldInfos fieldInfos;
@@ -211,7 +210,11 @@ namespace Lucene.Net.Codecs.Lucene3x
             }
         }
 
-        /// <summary>The number of documents in the reader. NOTE: This was size() in Lucene.</summary>
+        /// <summary>
+        /// The number of documents in the reader. 
+        /// <para/>
+        /// NOTE: This was size() in Lucene.
+        /// </summary>
         internal virtual int Count
         {
             get { return size; }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/SegmentTermDocs.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/SegmentTermDocs.cs b/src/Lucene.Net/Codecs/Lucene3x/SegmentTermDocs.cs
index 9c0b3eb..ad4ae16 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/SegmentTermDocs.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/SegmentTermDocs.cs
@@ -27,8 +27,9 @@ namespace Lucene.Net.Codecs.Lucene3x
     using IndexOptions = Lucene.Net.Index.IndexOptions;
     using Term = Lucene.Net.Index.Term;
 
-    /// @deprecated (4.0)
-    ///  @lucene.experimental
+    /// <summary>
+    /// @lucene.experimental
+    /// </summary>
     [Obsolete("(4.0)")]
     internal class SegmentTermDocs
     {
@@ -256,7 +257,7 @@ namespace Lucene.Net.Codecs.Lucene3x
         }
 
         /// <summary>
-        /// Overridden by SegmentTermPositions to skip in prox stream. </summary>
+        /// Overridden by <see cref="SegmentTermPositions"/> to skip in prox stream. </summary>
         protected internal virtual void SkipProx(long proxPointer, int payloadLength)
         {
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/SegmentTermEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/SegmentTermEnum.cs b/src/Lucene.Net/Codecs/Lucene3x/SegmentTermEnum.cs
index 68d5a73..7782028 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/SegmentTermEnum.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/SegmentTermEnum.cs
@@ -26,10 +26,9 @@ namespace Lucene.Net.Codecs.Lucene3x
     using IndexInput = Lucene.Net.Store.IndexInput;
     using Term = Lucene.Net.Index.Term;
 
-    /// @deprecated (4.0) No longer used with flex indexing, except for
-    /// reading old segments
+    /// <summary>
     /// @lucene.experimental
-
+    /// </summary>
     [Obsolete("(4.0) No longer used with flex indexing, except for reading old segments")]
     internal sealed class SegmentTermEnum : IDisposable
     {
@@ -204,7 +203,7 @@ namespace Lucene.Net.Codecs.Lucene3x
 
         /// <summary>
         /// Returns the current Term in the enumeration.
-        /// Initially invalid, valid after next() called for the first time.
+        /// Initially invalid, valid after <see cref="Next()"/> called for the first time.
         /// </summary>
         public Term Term()
         {
@@ -212,15 +211,15 @@ namespace Lucene.Net.Codecs.Lucene3x
         }
 
         /// <summary>
-        /// Returns the previous Term enumerated. Initially null. </summary>
+        /// Returns the previous Term enumerated. Initially <c>null</c>. </summary>
         internal Term Prev()
         {
             return prevBuffer.ToTerm();
         }
 
         /// <summary>
-        /// Returns the current TermInfo in the enumeration.
-        /// Initially invalid, valid after next() called for the first time.
+        /// Returns the current <see cref="Lucene3x.TermInfo"/> in the enumeration.
+        /// Initially invalid, valid after <see cref="Next()"/> called for the first time.
         /// </summary>
         internal TermInfo TermInfo()
         {
@@ -228,8 +227,8 @@ namespace Lucene.Net.Codecs.Lucene3x
         }
 
         /// <summary>
-        /// Sets the argument to the current TermInfo in the enumeration.
-        /// Initially invalid, valid after next() called for the first time.
+        /// Sets the argument to the current <see cref="Lucene3x.TermInfo"/> in the enumeration.
+        /// Initially invalid, valid after <see cref="Next()"/> called for the first time.
         /// </summary>
         internal void TermInfo(TermInfo ti)
         {
@@ -237,25 +236,27 @@ namespace Lucene.Net.Codecs.Lucene3x
         }
 
         /// <summary>
-        /// Returns the docFreq from the current TermInfo in the enumeration.
-        /// Initially invalid, valid after next() called for the first time.
+        /// Returns the docFreq from the current <see cref="Lucene3x.TermInfo"/> in the enumeration.
+        /// Initially invalid, valid after <see cref="Next()"/> called for the first time.
         /// </summary>
         public int DocFreq
         {
             get { return termInfo.DocFreq; }
         }
 
-        /* Returns the freqPointer from the current TermInfo in the enumeration.
-          Initially invalid, valid after next() called for the first time.*/
-
+        /// <summary>
+        /// Returns the freqPointer from the current <see cref="Lucene3x.TermInfo"/> in the enumeration.
+        /// Initially invalid, valid after <see cref="Next()"/> called for the first time.
+        /// </summary>
         internal long FreqPointer
         {
             get { return termInfo.FreqPointer; }
         }
 
-        /* Returns the proxPointer from the current TermInfo in the enumeration.
-          Initially invalid, valid after next() called for the first time.*/
-
+        /// <summary>
+        /// Returns the proxPointer from the current <see cref="Lucene3x.TermInfo"/> in the enumeration.
+        /// Initially invalid, valid after <see cref="Next()"/> called for the first time.
+        /// </summary>
         internal long ProxPointer
         {
             get { return termInfo.ProxPointer; }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/SegmentTermPositions.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/SegmentTermPositions.cs b/src/Lucene.Net/Codecs/Lucene3x/SegmentTermPositions.cs
index 8a8c762..beaf93d 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/SegmentTermPositions.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/SegmentTermPositions.cs
@@ -27,8 +27,8 @@ namespace Lucene.Net.Codecs.Lucene3x
     using Term = Lucene.Net.Index.Term;
 
     /// <summary>
-    /// @lucene.experimental </summary>
-    /// @deprecated (4.0)
+    /// @lucene.experimental 
+    /// </summary>
     [Obsolete("(4.0)")]
     internal sealed class SegmentTermPositions : SegmentTermDocs
     {
@@ -155,7 +155,7 @@ namespace Lucene.Net.Codecs.Lucene3x
         }
 
         /// <summary>
-        /// Called by super.skipTo(). </summary>
+        /// Called by <c>base.SkipTo()</c>. </summary>
         protected internal override void SkipProx(long proxPointer, int payloadLength)
         {
             // we save the pointer, we might have to skip there lazily

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/TermBuffer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/TermBuffer.cs b/src/Lucene.Net/Codecs/Lucene3x/TermBuffer.cs
index d414e67..1c457dc 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/TermBuffer.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/TermBuffer.cs
@@ -29,8 +29,8 @@ namespace Lucene.Net.Codecs.Lucene3x
     using Term = Lucene.Net.Index.Term;
 
     /// <summary>
-    /// @lucene.experimental </summary>
-    /// @deprecated (4.0)
+    /// @lucene.experimental 
+    /// </summary>
     [Obsolete("(4.0)")]
     internal sealed class TermBuffer
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/TermInfo.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/TermInfo.cs b/src/Lucene.Net/Codecs/Lucene3x/TermInfo.cs
index d4caa08..09f3411 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/TermInfo.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/TermInfo.cs
@@ -20,12 +20,9 @@ namespace Lucene.Net.Codecs.Lucene3x
      */
 
     /// <summary>
-    /// A TermInfo is the record of information stored for a
-    /// term </summary>
-    /// @deprecated (4.0) this class is no longer used in flexible
-    /// indexing.
-
-    [Obsolete]
+    /// A <see cref="TermInfo"/> is the record of information stored for a
+    /// term. </summary>
+    [Obsolete("(4.0) this class is no longer used in flexible indexing.")]
     internal class TermInfo
     {
         /// <summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/TermInfosReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/TermInfosReader.cs b/src/Lucene.Net/Codecs/Lucene3x/TermInfosReader.cs
index 5af2731..90360e8 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/TermInfosReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/TermInfosReader.cs
@@ -32,13 +32,13 @@ namespace Lucene.Net.Codecs.Lucene3x
     using Term = Lucene.Net.Index.Term;
 
     /// <summary>
-    /// this stores a monotonically increasing set of <Term, TermInfo> pairs in a
-    /// Directory.  Pairs are accessed either by Term or by ordinal position the
-    /// set </summary>
-    /// @deprecated (4.0) this class has been replaced by
-    /// FormatPostingsTermsDictReader, except for reading old segments.
+    /// This stores a monotonically increasing set of <c>Term, TermInfo</c> pairs in a
+    /// Directory.  Pairs are accessed either by <see cref="Term"/> or by ordinal position the
+    /// set.
+    /// <para/>
     /// @lucene.experimental
-    [Obsolete("(4.0) this class has been replaced by")]
+    /// </summary>
+    [Obsolete("(4.0) this class has been replaced by FormatPostingsTermsDictReader, except for reading old segments.")]
     internal sealed class TermInfosReader : IDisposable
     {
         private readonly Directory directory;
@@ -98,7 +98,7 @@ namespace Lucene.Net.Codecs.Lucene3x
         private readonly DoubleBarrelLRUCache<CloneableTerm, TermInfoAndOrd> termsCache = new DoubleBarrelLRUCache<CloneableTerm, TermInfoAndOrd>(DEFAULT_CACHE_SIZE);
 
         /// <summary>
-        /// Per-thread resources managed by ThreadLocal
+        /// Per-thread resources managed by ThreadLocal.
         /// </summary>
         private sealed class ThreadResources
         {
@@ -186,7 +186,8 @@ namespace Lucene.Net.Codecs.Lucene3x
         }
 
         /// <summary>
-        /// Returns the number of term/value pairs in the set. 
+        /// Returns the number of term/value pairs in the set.
+        /// <para/>
         /// NOTE: This was size() in Lucene.
         /// </summary>
         internal long Count
@@ -221,14 +222,14 @@ namespace Lucene.Net.Codecs.Lucene3x
         }
 
         /// <summary>
-        /// Returns the TermInfo for a Term in the set, or null. </summary>
+        /// Returns the <see cref="TermInfo"/> for a <see cref="Term"/> in the set, or <c>null</c>. </summary>
         internal TermInfo Get(Term term)
         {
             return Get(term, false);
         }
 
         /// <summary>
-        /// Returns the TermInfo for a Term in the set, or null. </summary>
+        /// Returns the <see cref="TermInfo"/> for a <see cref="Term"/> in the set, or <c>null</c>. </summary>
         private TermInfo Get(Term term, bool mustSeekEnum)
         {
             if (size == 0)
@@ -391,7 +392,7 @@ namespace Lucene.Net.Codecs.Lucene3x
         }
 
         /// <summary>
-        /// Returns the position of a Term in the set or -1. </summary>
+        /// Returns the position of a <see cref="Term"/> in the set or -1. </summary>
         internal long GetPosition(Term term)
         {
             if (size == 0)
@@ -420,7 +421,7 @@ namespace Lucene.Net.Codecs.Lucene3x
         }
 
         /// <summary>
-        /// Returns an enumeration of all the Terms and TermInfos in the set. </summary>
+        /// Returns an enumeration of all the <see cref="Term"/>s and <see cref="TermInfo"/>s in the set. </summary>
         public SegmentTermEnum Terms()
         {
             return (SegmentTermEnum)origEnum.Clone();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/95b5d4bb/src/Lucene.Net/Codecs/Lucene3x/TermInfosReaderIndex.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene3x/TermInfosReaderIndex.cs b/src/Lucene.Net/Codecs/Lucene3x/TermInfosReaderIndex.cs
index 14f3744..eb8c2c5 100644
--- a/src/Lucene.Net/Codecs/Lucene3x/TermInfosReaderIndex.cs
+++ b/src/Lucene.Net/Codecs/Lucene3x/TermInfosReaderIndex.cs
@@ -32,11 +32,10 @@ namespace Lucene.Net.Codecs.Lucene3x
     using Term = Lucene.Net.Index.Term;
 
     /// <summary>
-    /// this stores a monotonically increasing set of <Term, TermInfo> pairs in an
-    /// index segment. Pairs are accessed either by Term or by ordinal position the
-    /// set. The Terms and TermInfo are actually serialized and stored into a byte
-    /// array and pointers to the position of each are stored in a int array. </summary>
-    /// @deprecated Only for reading existing 3.x indexes
+    /// This stores a monotonically increasing set of <c>Term, TermInfo</c> pairs in an
+    /// index segment. Pairs are accessed either by <see cref="Term"/> or by ordinal position in the
+    /// set. The <see cref="Index.Terms"/> and <see cref="TermInfo"/> are actually serialized and stored into a byte
+    /// array and pointers to the position of each are stored in a <see cref="int"/> array. </summary>
     [Obsolete("Only for reading existing 3.x indexes")]
     internal class TermInfosReaderIndex
     {
@@ -54,14 +53,14 @@ namespace Lucene.Net.Codecs.Lucene3x
         /// Loads the segment information at segment load time.
         /// </summary>
         /// <param name="indexEnum">
-        ///          the term enum. </param>
+        ///          The term enum. </param>
         /// <param name="indexDivisor">
-        ///          the index divisor. </param>
+        ///          The index divisor. </param>
         /// <param name="tiiFileLength">
-        ///          the size of the tii file, used to approximate the size of the
+        ///          The size of the tii file, used to approximate the size of the
         ///          buffer. </param>
         /// <param name="totalIndexInterval">
-        ///          the total index interval. </param>
+        ///          The total index interval. </param>
         public TermInfosReaderIndex(SegmentTermEnum indexEnum, int indexDivisor, long tiiFileLength, int totalIndexInterval)
         {
             this.totalIndexInterval = totalIndexInterval;
@@ -161,8 +160,8 @@ namespace Lucene.Net.Codecs.Lucene3x
         /// Binary search for the given term.
         /// </summary>
         /// <param name="term">
-        ///          the term to locate. </param>
-        /// <exception cref="IOException"> If there is a low-level I/O error. </exception>
+        ///          The term to locate. </param>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error. </exception>
         internal virtual int GetIndexOffset(Term term)
         {
             int lo = 0;
@@ -193,9 +192,9 @@ namespace Lucene.Net.Codecs.Lucene3x
         /// Gets the term at the given position.  For testing.
         /// </summary>
         /// <param name="termIndex">
-        ///          the position to read the term from the index. </param>
-        /// <returns> the term. </returns>
-        /// <exception cref="IOException"> If there is a low-level I/O error. </exception>
+        ///          The position to read the term from the index. </param>
+        /// <returns> The term. </returns>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error. </exception>
         internal virtual Term GetTerm(int termIndex)
         {
             PagedBytesDataInput input = (PagedBytesDataInput)dataInput.Clone();
@@ -221,11 +220,11 @@ namespace Lucene.Net.Codecs.Lucene3x
         /// term index. ie It returns negative N when term is less than index term;
         /// </summary>
         /// <param name="term">
-        ///          the given term. </param>
+        ///          The given term. </param>
         /// <param name="termIndex">
-        ///          the index of the of term to compare. </param>
+        ///          The index of the term to compare. </param>
         /// <returns> int. </returns>
-        /// <exception cref="IOException"> If there is a low-level I/O error. </exception>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error. </exception>
         internal virtual int CompareTo(Term term, int termIndex)
         {
             return CompareTo(term, termIndex, (PagedBytesDataInput)dataInput.Clone(), new BytesRef());
@@ -236,13 +235,13 @@ namespace Lucene.Net.Codecs.Lucene3x
         /// compare. If equal compare terms.
         /// </summary>
         /// <param name="term">
-        ///          the term to compare. </param>
+        ///          The term to compare. </param>
         /// <param name="termIndex">
-        ///          the position of the term in the input to compare </param>
+        ///          The position of the term in the input to compare. </param>
         /// <param name="input">
-        ///          the input buffer. </param>
+        ///          The input buffer. </param>
         /// <returns> int. </returns>
-        /// <exception cref="IOException"> If there is a low-level I/O error. </exception>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error. </exception>
         private int CompareTo(Term term, int termIndex, PagedBytesDataInput input, BytesRef reuse)
         {
             // if term field does not equal mid's field index, then compare fields
@@ -262,13 +261,13 @@ namespace Lucene.Net.Codecs.Lucene3x
         /// Compares the fields before checking the text of the terms.
         /// </summary>
         /// <param name="term">
-        ///          the given term. </param>
+        ///          The given term. </param>
         /// <param name="termIndex">
-        ///          the term that exists in the data block. </param>
+        ///          The term that exists in the data block. </param>
         /// <param name="input">
-        ///          the data block. </param>
+        ///          The data block. </param>
         /// <returns> int. </returns>
-        /// <exception cref="IOException"> If there is a low-level I/O error. </exception>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error. </exception>
         private int CompareField(Term term, int termIndex, PagedBytesDataInput input)
         {
             input.SetPosition(indexToDataOffset.Get(termIndex));


[04/48] lucenenet git commit: Lucene.Net.Search: Fixed up documentation comments

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/TermRangeQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/TermRangeQuery.cs b/src/Lucene.Net/Search/TermRangeQuery.cs
index 13c0998..87c7579 100644
--- a/src/Lucene.Net/Search/TermRangeQuery.cs
+++ b/src/Lucene.Net/Search/TermRangeQuery.cs
@@ -28,16 +28,17 @@ namespace Lucene.Net.Search
     using ToStringUtils = Lucene.Net.Util.ToStringUtils;
 
     /// <summary>
-    /// A Query that matches documents within an range of terms.
+    /// A <see cref="Query"/> that matches documents within a range of terms.
     ///
-    /// <p>this query matches the documents looking for terms that fall into the
-    /// supplied range according to {@link
-    /// Byte#compareTo(Byte)}. It is not intended
-    /// for numerical ranges; use <seealso cref="NumericRangeQuery"/> instead.
+    /// <para/>This query matches the documents looking for terms that fall into the
+    /// supplied range according to 
+    /// <see cref="byte.CompareTo(byte)"/>. It is not intended
+    /// for numerical ranges; use <see cref="NumericRangeQuery"/> instead.
     ///
-    /// <p>this query uses the {@link
-    /// MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT}
+    /// <para/>This query uses the
+    /// <see cref="MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT"/>
     /// rewrite method.
+    /// <para/>
     /// @since 2.9
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -51,25 +52,25 @@ namespace Lucene.Net.Search
         private bool includeUpper;
 
         /// <summary>
-        /// Constructs a query selecting all terms greater/equal than <code>lowerTerm</code>
-        /// but less/equal than <code>upperTerm</code>.
+        /// Constructs a query selecting all terms greater/equal than <paramref name="lowerTerm"/>
+        /// but less/equal than <paramref name="upperTerm"/>.
         ///
-        /// <p>
-        /// If an endpoint is null, it is said
+        /// <para/>
+        /// If an endpoint is <c>null</c>, it is said
         /// to be "open". Either or both endpoints may be open.  Open endpoints may not
         /// be exclusive (you can't select all but the first or last term without
         /// explicitly specifying the term to exclude.)
         /// </summary>
         /// <param name="field"> The field that holds both lower and upper terms. </param>
         /// <param name="lowerTerm">
-        ///          The term text at the lower end of the range </param>
+        ///          The term text at the lower end of the range. </param>
         /// <param name="upperTerm">
-        ///          The term text at the upper end of the range </param>
+        ///          The term text at the upper end of the range. </param>
         /// <param name="includeLower">
-        ///          If true, the <code>lowerTerm</code> is
+        ///          If true, the <paramref name="lowerTerm"/> is
         ///          included in the range. </param>
         /// <param name="includeUpper">
-        ///          If true, the <code>upperTerm</code> is
+        ///          If true, the <paramref name="upperTerm"/> is
         ///          included in the range. </param>
         public TermRangeQuery(string field, BytesRef lowerTerm, BytesRef upperTerm, bool includeLower, bool includeUpper)
             : base(field)
@@ -81,7 +82,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Factory that creates a new TermRangeQuery using Strings for term text.
+        /// Factory that creates a new <see cref="TermRangeQuery"/> using <see cref="string"/>s for term text.
         /// </summary>
         public static TermRangeQuery NewStringRange(string field, string lowerTerm, string upperTerm, bool includeLower, bool includeUpper)
         {
@@ -111,14 +112,14 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns <code>true</code> if the lower endpoint is inclusive </summary>
+        /// Returns <c>true</c> if the lower endpoint is inclusive </summary>
         public virtual bool IncludesLower
         {
             get { return includeLower; }
         }
 
         /// <summary>
-        /// Returns <code>true</code> if the upper endpoint is inclusive </summary>
+        /// Returns <c>true</c> if the upper endpoint is inclusive </summary>
         public virtual bool IncludesUpper
         {
             get { return includeUpper; }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/TermRangeTermsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/TermRangeTermsEnum.cs b/src/Lucene.Net/Search/TermRangeTermsEnum.cs
index e33124b..f1b2381 100644
--- a/src/Lucene.Net/Search/TermRangeTermsEnum.cs
+++ b/src/Lucene.Net/Search/TermRangeTermsEnum.cs
@@ -25,11 +25,11 @@ namespace Lucene.Net.Search
     using TermsEnum = Lucene.Net.Index.TermsEnum;
 
     /// <summary>
-    /// Subclass of FilteredTermEnum for enumerating all terms that match the
+    /// Subclass of <see cref="FilteredTermsEnum"/> for enumerating all terms that match the
     /// specified range parameters.
-    /// <p>Term enumerations are always ordered by
-    /// <seealso cref="#getComparer"/>.  Each term in the enumeration is
-    /// greater than all that precede it.</p>
+    /// <para>Term enumerations are always ordered by
+    /// <see cref="FilteredTermsEnum.Comparer"/>.  Each term in the enumeration is
+    /// greater than all that precede it.</para>
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -43,10 +43,10 @@ namespace Lucene.Net.Search
         private readonly IComparer<BytesRef> termComp;
 
         /// <summary>
-        /// Enumerates all terms greater/equal than <code>lowerTerm</code>
-        /// but less/equal than <code>upperTerm</code>.
+        /// Enumerates all terms greater/equal than <paramref name="lowerTerm"/>
+        /// but less/equal than <paramref name="upperTerm"/>.
         ///
-        /// If an endpoint is null, it is said to be "open". Either or both
+        /// If an endpoint is <c>null</c>, it is said to be "open". Either or both
         /// endpoints may be open.  Open endpoints may not be exclusive
         /// (you can't select all but the first or last term without
         /// explicitly specifying the term to exclude.)
@@ -58,9 +58,9 @@ namespace Lucene.Net.Search
         /// <param name="upperTerm">
         ///          The term text at the upper end of the range </param>
         /// <param name="includeLower">
-        ///          If true, the <code>lowerTerm</code> is included in the range. </param>
+        ///          If true, the <paramref name="lowerTerm"/> is included in the range. </param>
         /// <param name="includeUpper">
-        ///          If true, the <code>upperTerm</code> is included in the range. </param>
+        ///          If true, the <paramref name="upperTerm"/> is included in the range. </param>
         public TermRangeTermsEnum(TermsEnum tenum, BytesRef lowerTerm, BytesRef upperTerm, bool includeLower, bool includeUpper)
             : base(tenum)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/TermScorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/TermScorer.cs b/src/Lucene.Net/Search/TermScorer.cs
index 4ddb81a..bf71c84 100644
--- a/src/Lucene.Net/Search/TermScorer.cs
+++ b/src/Lucene.Net/Search/TermScorer.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Search
     using Similarity = Lucene.Net.Search.Similarities.Similarity;
 
     /// <summary>
-    /// Expert: A <code>Scorer</code> for documents matching a <code>Term</code>.
+    /// Expert: A <see cref="Scorer"/> for documents matching a <see cref="Index.Term"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -35,14 +35,14 @@ namespace Lucene.Net.Search
         private readonly Similarity.SimScorer docScorer;
 
         /// <summary>
-        /// Construct a <code>TermScorer</code>.
+        /// Construct a <see cref="TermScorer"/>.
         /// </summary>
         /// <param name="weight">
-        ///          The weight of the <code>Term</code> in the query. </param>
+        ///          The weight of the <see cref="Index.Term"/> in the query. </param>
         /// <param name="td">
-        ///          An iterator over the documents matching the <code>Term</code>. </param>
+        ///          An iterator over the documents matching the <see cref="Index.Term"/>. </param>
         /// <param name="docScorer">
-        ///          The </code>Similarity.SimScorer</code> implementation
+        ///          The <see cref="Similarity.SimScorer"/> implementation
         ///          to be used for score computations. </param>
         internal TermScorer(Weight weight, DocsEnum td, Similarity.SimScorer docScorer)
             : base(weight)
@@ -62,9 +62,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Advances to the next document matching the query. <br>
+        /// Advances to the next document matching the query.
         /// </summary>
-        /// <returns> the document matching the query or NO_MORE_DOCS if there are no more documents. </returns>
+        /// <returns> The document matching the query or <see cref="DocIdSetIterator.NO_MORE_DOCS"/> if there are no more documents. </returns>
         public override int NextDoc()
         {
             return docsEnum.NextDoc();
@@ -78,12 +78,13 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Advances to the first match beyond the current whose document number is
-        /// greater than or equal to a given target. <br>
-        /// The implementation uses <seealso cref="docsEnum#advance(int)"/>.
+        /// greater than or equal to a given target.
+        /// <para/>
+        /// The implementation uses <see cref="DocIdSetIterator.Advance(int)"/>.
         /// </summary>
         /// <param name="target">
         ///          The target document number. </param>
-        /// <returns> the matching document or NO_MORE_DOCS if none exist. </returns>
+        /// <returns> The matching document or <see cref="DocIdSetIterator.NO_MORE_DOCS"/> if none exist. </returns>
         public override int Advance(int target)
         {
             return docsEnum.Advance(target);
@@ -95,7 +96,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns a string representation of this <code>TermScorer</code>. </summary>
+        /// Returns a string representation of this <see cref="TermScorer"/>. </summary>
         public override string ToString()
         {
             return "scorer(" + m_weight + ")";

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/TermStatistics.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/TermStatistics.cs b/src/Lucene.Net/Search/TermStatistics.cs
index 55bb270..5bb162f 100644
--- a/src/Lucene.Net/Search/TermStatistics.cs
+++ b/src/Lucene.Net/Search/TermStatistics.cs
@@ -24,6 +24,7 @@ namespace Lucene.Net.Search
 
     /// <summary>
     /// Contains statistics for a specific term
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -35,6 +36,9 @@ namespace Lucene.Net.Search
         private readonly long docFreq;
         private readonly long totalTermFreq;
 
+        /// <summary>
+        /// Sole constructor.
+        /// </summary>
         public TermStatistics(BytesRef term, long docFreq, long totalTermFreq)
         {
             Debug.Assert(docFreq >= 0);
@@ -45,23 +49,23 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// returns the term text </summary>
+        /// Returns the term text </summary>
         public BytesRef Term
         {
             get { return term; }
         }
 
         /// <summary>
-        /// returns the number of documents this term occurs in </summary>
-        /// <seealso cref= TermsEnum#docFreq()  </seealso>
+        /// Returns the number of documents this term occurs in </summary>
+        /// <seealso cref="Index.TermsEnum.DocFreq"/>
         public long DocFreq
         {
             get { return docFreq; }
         }
 
         /// <summary>
-        /// returns the total number of occurrences of this term </summary>
-        /// <seealso cref= TermsEnum#totalTermFreq()  </seealso>
+        /// Returns the total number of occurrences of this term </summary>
+        /// <seealso cref="Index.TermsEnum.TotalTermFreq"/>
         public long TotalTermFreq
         {
             get { return totalTermFreq; }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/TimeLimitingCollector.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/TimeLimitingCollector.cs b/src/Lucene.Net/Search/TimeLimitingCollector.cs
index 2bb2689..3ec1412 100644
--- a/src/Lucene.Net/Search/TimeLimitingCollector.cs
+++ b/src/Lucene.Net/Search/TimeLimitingCollector.cs
@@ -28,10 +28,10 @@ namespace Lucene.Net.Search
     using Counter = Lucene.Net.Util.Counter;
 
     /// <summary>
-    /// The <seealso cref="TimeLimitingCollector"/> is used to timeout search requests that
+    /// The <see cref="TimeLimitingCollector"/> is used to timeout search requests that
     /// take longer than the maximum allowed search time limit. After this time is
     /// exceeded, the search thread is stopped by throwing a
-    /// <seealso cref="TimeExceededException"/>.
+    /// <see cref="TimeExceededException"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -116,11 +116,11 @@ namespace Lucene.Net.Search
         private int docBase;
 
         /// <summary>
-        /// Create a TimeLimitedCollector wrapper over another <seealso cref="ICollector"/> with a specified timeout. </summary>
-        /// <param name="collector"> the wrapped <seealso cref="ICollector"/> </param>
-        /// <param name="clock"> the timer clock </param>
-        /// <param name="ticksAllowed"> max time allowed for collecting
-        /// hits after which <seealso cref="TimeExceededException"/> is thrown </param>
+        /// Create a <see cref="TimeLimitingCollector"/> wrapper over another <see cref="ICollector"/> with a specified timeout. </summary>
+        /// <param name="collector"> The wrapped <see cref="ICollector"/> </param>
+        /// <param name="clock"> The timer clock </param>
+        /// <param name="ticksAllowed"> Max time allowed for collecting
+        /// hits after which <see cref="TimeExceededException"/> is thrown </param>
         public TimeLimitingCollector(ICollector collector, Counter clock, long ticksAllowed)
         {
             this.collector = collector;
@@ -133,18 +133,20 @@ namespace Lucene.Net.Search
         /// initialized once the first reader is passed to the collector.
         /// To include operations executed in prior to the actual document collection
         /// set the baseline through this method in your prelude.
-        /// <p>
+        /// <para>
         /// Example usage:
-        /// <pre class="prettyprint">
-        ///   Counter clock = ...;
-        ///   long baseline = clock.get();
-        ///   // ... prepare search
-        ///   TimeLimitingCollector collector = new TimeLimitingCollector(c, clock, numTicks);
-        ///   collector.setBaseline(baseline);
-        ///   indexSearcher.search(query, collector);
-        /// </pre>
-        /// </p> </summary>
-        /// <seealso cref= #setBaseline()  </seealso>
+        /// <code>
+        ///     // Counter is in the Lucene.Net.Util namespace
+        ///     Counter clock = Counter.NewCounter(true);
+        ///     long baseline = clock.Get();
+        ///     // ... prepare search
+        ///     TimeLimitingCollector collector = new TimeLimitingCollector(c, clock, numTicks);
+        ///     collector.SetBaseline(baseline);
+        ///     indexSearcher.Search(query, collector);
+        /// </code>
+        /// </para> 
+        /// </summary>
+        /// <seealso cref="SetBaseline()"/>
         public virtual void SetBaseline(long clockTime)
         {
             t0 = clockTime;
@@ -152,7 +154,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Syntactic sugar for <seealso cref="#setBaseline(long)"/> using <seealso cref="Counter#get()"/>
+        /// Syntactic sugar for <see cref="SetBaseline(long)"/> using <see cref="Counter.Get()"/>
         /// on the clock passed to the constructor.
         /// </summary>
         public virtual void SetBaseline()
@@ -162,11 +164,11 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Checks if this time limited collector is greedy in collecting the last hit.
-        /// A non greedy collector, upon a timeout, would throw a <seealso cref="TimeExceededException"/>
+        /// A non greedy collector, upon a timeout, would throw a <see cref="TimeExceededException"/>
         /// without allowing the wrapped collector to collect current doc. A greedy one would
         /// first allow the wrapped hit collector to collect current doc and only then
-        /// throw a <seealso cref="TimeExceededException"/>. </summary>
-        /// <seealso cref= #setGreedy(boolean) </seealso>
+        /// throw a <see cref="TimeExceededException"/>. 
+        /// </summary>
         public virtual bool IsGreedy
         {
             get
@@ -180,11 +182,11 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Calls <seealso cref="ICollector#collect(int)"/> on the decorated <seealso cref="ICollector"/>
+        /// Calls <see cref="ICollector.Collect(int)"/> on the decorated <see cref="ICollector"/>
         /// unless the allowed time has passed, in which case it throws an exception.
         /// </summary>
         /// <exception cref="TimeExceededException">
-        ///           if the time allowed has exceeded. </exception>
+        ///           If the time allowed has exceeded. </exception>
         public virtual void Collect(int doc)
         {
             long time = clock.Get();
@@ -223,26 +225,27 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// this is so the same timer can be used with a multi-phase search process such as grouping.
-        /// We don't want to create a new TimeLimitingCollector for each phase because that would
+        /// This is so the same timer can be used with a multi-phase search process such as grouping.
+        /// We don't want to create a new <see cref="TimeLimitingCollector"/> for each phase because that would
         /// reset the timer for each phase.  Once time is up subsequent phases need to timeout quickly.
         /// </summary>
-        /// <param name="collector"> The actual collector performing search functionality </param>
+        /// <param name="collector"> The actual collector performing search functionality. </param>
         public virtual void SetCollector(ICollector collector)
         {
             this.collector = collector;
         }
 
         /// <summary>
-        /// Returns the global TimerThreads <seealso cref="Counter"/>
-        /// <p>
-        /// Invoking this creates may create a new instance of <seealso cref="TimerThread"/> iff
-        /// the global <seealso cref="TimerThread"/> has never been accessed before. The thread
+        /// Returns the global <see cref="TimerThread"/>'s <see cref="Counter"/>
+        /// <para>
+        /// Invoking this may create a new instance of <see cref="TimerThread"/> iff
+        /// the global <see cref="TimerThread"/> has never been accessed before. The thread
         /// returned from this method is started on creation and will be alive unless
-        /// you stop the <seealso cref="TimerThread"/> via <seealso cref="TimerThread#stopTimer()"/>.
-        /// </p> </summary>
-        /// <returns> the global TimerThreads <seealso cref="Counter"/>
-        /// @lucene.experimental </returns>
+        /// you stop the <see cref="TimerThread"/> via <see cref="TimerThread.StopTimer()"/>.
+        /// </para> 
+        /// @lucene.experimental
+        /// </summary>
+        /// <returns> The global <see cref="TimerThread"/>'s <see cref="Counter"/> </returns>
         public static Counter GlobalCounter
         {
             get
@@ -252,16 +255,16 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns the global <seealso cref="TimerThread"/>.
-        /// <p>
-        /// Invoking this creates may create a new instance of <seealso cref="TimerThread"/> iff
-        /// the global <seealso cref="TimerThread"/> has never been accessed before. The thread
+        /// Returns the global <see cref="TimerThread"/>.
+        /// <para>
+        /// Invoking this may create a new instance of <see cref="TimerThread"/> iff
+        /// the global <see cref="TimerThread"/> has never been accessed before. The thread
         /// returned from this method is started on creation and will be alive unless
-        /// you stop the <seealso cref="TimerThread"/> via <seealso cref="TimerThread#stopTimer()"/>.
-        /// </p>
+        /// you stop the <see cref="TimerThread"/> via <see cref="TimerThread.StopTimer()"/>.
+        /// </para>
+        /// @lucene.experimental
         /// </summary>
-        /// <returns> the global <seealso cref="TimerThread"/>
-        /// @lucene.experimental </returns>
+        /// <returns> The global <see cref="TimerThread"/> </returns>
         public static TimerThread GlobalTimerThread
         {
             get
@@ -286,7 +289,8 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Thread used to timeout search requests.
-        /// Can be stopped completely with <seealso cref="TimerThread#stopTimer()"/>
+        /// Can be stopped completely with <see cref="TimerThread.StopTimer()"/>
+        /// <para/>
         /// @lucene.experimental
         /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -368,7 +372,6 @@ namespace Lucene.Net.Search
 
             /// <summary>
             /// Return the timer resolution. </summary>
-            /// <seealso cref= #setResolution(long) </seealso>
             public long Resolution
             {
                 get

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/TopDocs.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/TopDocs.cs b/src/Lucene.Net/Search/TopDocs.cs
index c25123c..fec0de7 100644
--- a/src/Lucene.Net/Search/TopDocs.cs
+++ b/src/Lucene.Net/Search/TopDocs.cs
@@ -23,9 +23,9 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// Represents hits returned by {@link
-    /// IndexSearcher#search(Query,Filter,int)} and {@link
-    /// IndexSearcher#search(Query,int)}.
+    /// Represents hits returned by 
+    /// <see cref="IndexSearcher.Search(Query,Filter,int)"/> and 
+    /// <see cref="IndexSearcher.Search(Query,int)"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -48,7 +48,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Returns the maximum score value encountered. Note that in case
-        /// scores are not tracked, this returns <seealso cref="Float#NaN"/>.
+        /// scores are not tracked, this returns <see cref="float.NaN"/>.
         /// </summary>
         public virtual float MaxScore
         {
@@ -63,7 +63,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Constructs a TopDocs with a default maxScore=Float.NaN. </summary>
+        /// Constructs a <see cref="TopDocs"/> with a default <c>maxScore=System.Single.NaN</c>. </summary>
         internal TopDocs(int totalHits, ScoreDoc[] scoreDocs)
             : this(totalHits, scoreDocs, float.NaN)
         {
@@ -250,16 +250,16 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns a new TopDocs, containing topN results across
-        ///  the provided TopDocs, sorting by the specified {@link
-        ///  Sort}.  Each of the TopDocs must have been sorted by
-        ///  the same Sort, and sort field values must have been
-        ///  filled (ie, <code>fillFields=true</code> must be
-        ///  passed to {@link
-        ///  TopFieldCollector#create}.
-        ///
-        /// <p>Pass sort=null to merge sort by score descending.
+        /// Returns a new <see cref="TopDocs"/>, containing <paramref name="topN"/> results across
+        /// the provided <see cref="TopDocs"/>, sorting by the specified 
+        /// <see cref="Sort"/>.  Each of the <see cref="TopDocs"/> must have been sorted by
+        /// the same <see cref="Sort"/>, and sort field values must have been
+        /// filled (ie, <c>fillFields=true</c> must be
+        /// passed to
+        /// <see cref="TopFieldCollector.Create(Sort, int, bool, bool, bool, bool)"/>.
         ///
+        /// <para/>Pass <paramref name="sort"/>=null to merge sort by score descending.
+        /// <para/>
         /// @lucene.experimental
         /// </summary>
         public static TopDocs Merge(Sort sort, int topN, TopDocs[] shardHits)
@@ -268,8 +268,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Same as <seealso cref="#merge(Sort, int, TopDocs[])"/> but also slices the result at the same time based
-        /// on the provided start and size. The return TopDocs will always have a scoreDocs with length of at most size.
+        /// Same as <see cref="Merge(Sort, int, TopDocs[])"/> but also slices the result at the same time based
+        /// on the provided <paramref name="start"/> and <paramref name="size"/>. The returned <c>TopDocs</c> will always have a scoreDocs with length of 
+        /// at most <paramref name="size"/>.
         /// </summary>
         public static TopDocs Merge(Sort sort, int start, int size, TopDocs[] shardHits)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/TopDocsCollector.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/TopDocsCollector.cs b/src/Lucene.Net/Search/TopDocsCollector.cs
index 211e2ef..cb8b956 100644
--- a/src/Lucene.Net/Search/TopDocsCollector.cs
+++ b/src/Lucene.Net/Search/TopDocsCollector.cs
@@ -22,15 +22,16 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// A base class for all collectors that return a <seealso cref="TopDocs"/> output. this
+    /// A base class for all collectors that return a <see cref="TopDocs"/> output. This
     /// collector allows easy extension by providing a single constructor which
-    /// accepts a <seealso cref="PriorityQueue"/> as well as protected members for that
-    /// priority queue and a counter of the number of total hits.<br>
+    /// accepts a <see cref="Util.PriorityQueue{T}"/> as well as protected members for that
+    /// priority queue and a counter of the number of total hits.
+    /// <para/>
     /// Extending classes can override any of the methods to provide their own
     /// implementation, as well as avoid the use of the priority queue entirely by
-    /// passing null to <seealso cref="#TopDocsCollector(PriorityQueue)"/>. In that case
+    /// passing null to <see cref="TopDocsCollector(Util.PriorityQueue{T})"/>. In that case
     /// however, you might want to consider overriding all methods, in order to avoid
-    /// a NullPointerException.
+    /// a <see cref="NullReferenceException"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -38,15 +39,15 @@ namespace Lucene.Net.Search
     public abstract class TopDocsCollector<T> : ICollector, ITopDocsCollector where T : ScoreDoc
     {
         /// <summary>
-        /// this is used in case topDocs() is called with illegal parameters, or there
-        ///  simply aren't (enough) results.
+        /// This is used in case <see cref="GetTopDocs()"/> is called with illegal parameters, or there
+        /// simply aren't (enough) results.
         /// </summary>
         protected static readonly TopDocs EMPTY_TOPDOCS = new TopDocs(0, new ScoreDoc[0], float.NaN);
 
         /// <summary>
         /// The priority queue which holds the top documents. Note that different
-        /// implementations of PriorityQueue give different meaning to 'top documents'.
-        /// HitQueue for example aggregates the top scoring documents, while other PQ
+        /// implementations of <see cref="PriorityQueue{T}"/> give different meaning to 'top documents'.
+        /// <see cref="HitQueue"/> for example aggregates the top scoring documents, while other priority queue
         /// implementations may hold documents sorted by other criteria.
         /// </summary>
         protected PriorityQueue<T> m_pq;
@@ -55,14 +56,17 @@ namespace Lucene.Net.Search
         /// The total number of documents that the collector encountered. </summary>
         protected int m_totalHits;
 
+        /// <summary>
+        /// Sole constructor.
+        /// </summary>
         protected TopDocsCollector(PriorityQueue<T> pq)
         {
             this.m_pq = pq;
         }
 
         /// <summary>
-        /// Populates the results array with the ScoreDoc instances. this can be
-        /// overridden in case a different ScoreDoc type should be returned.
+        /// Populates the results array with the <see cref="ScoreDoc"/> instances. This can be
+        /// overridden in case a different <see cref="ScoreDoc"/> type should be returned.
         /// </summary>
         protected virtual void PopulateResults(ScoreDoc[] results, int howMany)
         {
@@ -73,10 +77,10 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns a <seealso cref="TopDocs"/> instance containing the given results. If
-        /// <code>results</code> is null it means there are no results to return,
-        /// either because there were 0 calls to collect() or because the arguments to
-        /// topDocs were invalid.
+        /// Returns a <see cref="TopDocs"/> instance containing the given results. If
+        /// <paramref name="results"/> is <c>null</c> it means there are no results to return,
+        /// either because there were 0 calls to <see cref="Collect(int)"/> or because the arguments to
+        /// <see cref="TopDocs"/> were invalid.
         /// </summary>
         protected virtual TopDocs NewTopDocs(ScoreDoc[] results, int start)
         {
@@ -98,7 +102,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// The number of valid PQ entries 
+        /// The number of valid priority queue entries 
         /// </summary>
         protected virtual int TopDocsCount
         {
@@ -122,44 +126,48 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns the documents in the rage [start .. pq.size()) that were collected
-        /// by this collector. Note that if start >= pq.size(), an empty TopDocs is
-        /// returned.<br>
-        /// this method is convenient to call if the application always asks for the
-        /// last results, starting from the last 'page'.<br>
+        /// Returns the documents in the range [<paramref name="start"/> .. pq.Count) that were collected
+        /// by this collector. Note that if <paramref name="start"/> &gt;= pq.Count, an empty <see cref="TopDocs"/> is
+        /// returned.
+        /// <para/>
+        /// This method is convenient to call if the application always asks for the
+        /// last results, starting from the last 'page'.
+        /// <para/>
         /// <b>NOTE:</b> you cannot call this method more than once for each search
         /// execution. If you need to call it more than once, passing each time a
-        /// different <code>start</code>, you should call <seealso cref="#topDocs()"/> and work
-        /// with the returned <seealso cref="TopDocs"/> object, which will contain all the
+        /// different <paramref name="start"/>, you should call <see cref="GetTopDocs()"/> and work
+        /// with the returned <see cref="TopDocs"/> object, which will contain all the
         /// results this search execution collected.
         /// </summary>
         public virtual TopDocs GetTopDocs(int start)
         {
             // In case pq was populated with sentinel values, there might be less
-            // results than pq.size(). Therefore return all results until either
-            // pq.size() or totalHits.
+            // results than pq.Count. Therefore return all results until either
+            // pq.Count or totalHits.
             return GetTopDocs(start, TopDocsCount);
         }
 
         /// <summary>
-        /// Returns the documents in the rage [start .. start+howMany) that were
-        /// collected by this collector. Note that if start >= pq.size(), an empty
-        /// TopDocs is returned, and if pq.size() - start &lt; howMany, then only the
-        /// available documents in [start .. pq.size()) are returned.<br>
-        /// this method is useful to call in case pagination of search results is
+        /// Returns the documents in the range [<paramref name="start"/> .. <paramref name="start"/>+<paramref name="howMany"/>) that were
+        /// collected by this collector. Note that if <paramref name="start"/> &gt;= pq.Count, an empty
+        /// <see cref="TopDocs"/> is returned, and if pq.Count - <paramref name="start"/> &lt; <paramref name="howMany"/>, then only the
+        /// available documents in [<paramref name="start"/> .. pq.Count) are returned.
+        /// <para/>
+        /// This method is useful to call in case pagination of search results is
         /// allowed by the search application, as well as it attempts to optimize the
-        /// memory used by allocating only as much as requested by howMany.<br>
+        /// memory used by allocating only as much as requested by <paramref name="howMany"/>.
+        /// <para/>
         /// <b>NOTE:</b> you cannot call this method more than once for each search
         /// execution. If you need to call it more than once, passing each time a
-        /// different range, you should call <seealso cref="#topDocs()"/> and work with the
-        /// returned <seealso cref="TopDocs"/> object, which will contain all the results this
+        /// different range, you should call <see cref="GetTopDocs()"/> and work with the
+        /// returned <see cref="TopDocs"/> object, which will contain all the results this
         /// search execution collected.
         /// </summary>
         public virtual TopDocs GetTopDocs(int start, int howMany)
         {
             // In case pq was populated with sentinel values, there might be less
-            // results than pq.size(). Therefore return all results until either
-            // pq.size() or totalHits.
+            // results than pq.Count. Therefore return all results until either
+            // pq.Count or totalHits.
             int size = TopDocsCount;
 
             // Don't bother to throw an exception, just return an empty TopDocs in case
@@ -197,7 +205,7 @@ namespace Lucene.Net.Search
         /// Called before successive calls to <see cref="Collect(int)"/>. Implementations
         /// that need the score of the current document (passed-in to
         /// <see cref="Collect(int)"/>), should save the passed-in <see cref="Scorer"/> and call
-        /// scorer.Score() when needed.
+        /// <see cref="Scorer.GetScore()"/> when needed.
         /// </summary>
         public abstract void SetScorer(Scorer scorer);
 
@@ -219,11 +227,11 @@ namespace Lucene.Net.Search
         /// <summary>
         /// Called before collecting from each <see cref="AtomicReaderContext"/>. All doc ids in
         /// <see cref="Collect(int)"/> will correspond to <see cref="Index.IndexReaderContext.Reader"/>.
-        ///
-        /// Add <see cref="AtomicReaderContext#docBase"/> to the current <see cref="Index.IndexReaderContext.Reader"/>'s
+        /// <para/>
+        /// Add <see cref="AtomicReaderContext.DocBase"/> to the current <see cref="Index.IndexReaderContext.Reader"/>'s
         /// internal document id to re-base ids in <see cref="Collect(int)"/>.
         /// </summary>
-        /// <param name="context">next atomic reader context </param>
+        /// <param name="context">Next atomic reader context </param>
         public abstract void SetNextReader(AtomicReaderContext context);
 
         /// <summary>
@@ -252,9 +260,46 @@ namespace Lucene.Net.Search
     public interface ITopDocsCollector : ICollector
     {
         // From TopDocsCollector<T>
+        /// <summary>
+        /// The total number of documents that matched this query. </summary>
         int TotalHits { get; }
+
+        /// <summary>
+        /// Returns the top docs that were collected by this collector. </summary>
         TopDocs GetTopDocs();
+
+        /// <summary>
+        /// Returns the documents in the range [<paramref name="start"/> .. pq.Count) that were collected
+        /// by this collector. Note that if <paramref name="start"/> &gt;= pq.Count, an empty <see cref="TopDocs"/> is
+        /// returned.
+        /// <para/>
+        /// This method is convenient to call if the application always asks for the
+        /// last results, starting from the last 'page'.
+        /// <para/>
+        /// <b>NOTE:</b> you cannot call this method more than once for each search
+        /// execution. If you need to call it more than once, passing each time a
+        /// different <paramref name="start"/>, you should call <see cref="GetTopDocs()"/> and work
+        /// with the returned <see cref="TopDocs"/> object, which will contain all the
+        /// results this search execution collected.
+        /// </summary>
         TopDocs GetTopDocs(int start);
+
+        /// <summary>
+        /// Returns the documents in the range [<paramref name="start"/> .. <paramref name="start"/>+<paramref name="howMany"/>) that were
+        /// collected by this collector. Note that if <paramref name="start"/> &gt;= pq.Count, an empty
+        /// <see cref="TopDocs"/> is returned, and if pq.Count - <paramref name="start"/> &lt; <paramref name="howMany"/>, then only the
+        /// available documents in [<paramref name="start"/> .. pq.Count) are returned.
+        /// <para/>
+        /// This method is useful to call in case pagination of search results is
+        /// allowed by the search application, as well as it attempts to optimize the
+        /// memory used by allocating only as much as requested by <paramref name="howMany"/>.
+        /// <para/>
+        /// <b>NOTE:</b> you cannot call this method more than once for each search
+        /// execution. If you need to call it more than once, passing each time a
+        /// different range, you should call <see cref="GetTopDocs()"/> and work with the
+        /// returned <see cref="TopDocs"/> object, which will contain all the results this
+        /// search execution collected.
+        /// </summary>
         TopDocs GetTopDocs(int start, int howMany);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/TopFieldCollector.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/TopFieldCollector.cs b/src/Lucene.Net/Search/TopFieldCollector.cs
index 9e07166..34911b0 100644
--- a/src/Lucene.Net/Search/TopFieldCollector.cs
+++ b/src/Lucene.Net/Search/TopFieldCollector.cs
@@ -24,12 +24,12 @@ namespace Lucene.Net.Search
     using Entry = Lucene.Net.Search.FieldValueHitQueue.Entry;
 
     /// <summary>
-    /// A <seealso cref="ICollector"/> that sorts by <seealso cref="SortField"/> using
-    /// <seealso cref="FieldComparer"/>s.
-    /// <p/>
-    /// See the <seealso cref="#create(Lucene.Net.Search.Sort, int, boolean, boolean, boolean, boolean)"/> method
-    /// for instantiating a TopFieldCollector.
-    ///
+    /// A <see cref="ICollector"/> that sorts by <see cref="SortField"/> using
+    /// <see cref="FieldComparer"/>s.
+    /// <para/>
+    /// See the <see cref="Create(Lucene.Net.Search.Sort, int, bool, bool, bool, bool)"/> method
+    /// for instantiating a <see cref="TopFieldCollector"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -42,10 +42,10 @@ namespace Lucene.Net.Search
         // always compare lower than a real hit; this would
         // save having to check queueFull on each insert
 
-        /*
-         * Implements a TopFieldCollector over one SortField criteria, without
-         * tracking document scores and maxScore.
-         */
+        /// <summary>
+        /// Implements a <see cref="TopFieldCollector"/> over one <see cref="SortField"/> criteria, without
+        /// tracking document scores and maxScore.
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -115,11 +115,11 @@ namespace Lucene.Net.Search
             }
         }
 
-        /*
-         * Implements a TopFieldCollector over one SortField criteria, without
-         * tracking document scores and maxScore, and assumes out of orderness in doc
-         * Ids collection.
-         */
+        /// <summary>
+        /// Implements a <see cref="TopFieldCollector"/> over one <see cref="SortField"/> criteria, without
+        /// tracking document scores and maxScore, and assumes out of orderness in doc
+        /// Ids collection.
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -167,10 +167,10 @@ namespace Lucene.Net.Search
             }
         }
 
-        /*
-         * Implements a TopFieldCollector over one SortField criteria, while tracking
-         * document scores but no maxScore.
-         */
+        /// <summary>
+        /// Implements a <see cref="TopFieldCollector"/> over one <see cref="SortField"/> criteria, while tracking
+        /// document scores but no maxScore.
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -235,11 +235,11 @@ namespace Lucene.Net.Search
             }
         }
 
-        /*
-         * Implements a TopFieldCollector over one SortField criteria, while tracking
-         * document scores but no maxScore, and assumes out of orderness in doc Ids
-         * collection.
-         */
+        /// <summary>
+        /// Implements a <see cref="TopFieldCollector"/> over one <see cref="SortField"/> criteria, while tracking
+        /// document scores but no maxScore, and assumes out of orderness in doc Ids
+        /// collection.
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -293,10 +293,10 @@ namespace Lucene.Net.Search
             }
         }
 
-        /*
-         * Implements a TopFieldCollector over one SortField criteria, with tracking
-         * document scores and maxScore.
-         */
+        /// <summary>
+        /// Implements a <see cref="TopFieldCollector"/> over one <see cref="SortField"/> criteria, with tracking
+        /// document scores and maxScore.
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -362,11 +362,11 @@ namespace Lucene.Net.Search
             }
         }
 
-        /*
-         * Implements a TopFieldCollector over one SortField criteria, with tracking
-         * document scores and maxScore, and assumes out of orderness in doc Ids
-         * collection.
-         */
+        /// <summary>
+        /// Implements a <see cref="TopFieldCollector"/> over one <see cref="SortField"/> criteria, with tracking
+        /// document scores and maxScore, and assumes out of orderness in doc Ids
+        /// collection.
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -419,10 +419,10 @@ namespace Lucene.Net.Search
             }
         }
 
-        /*
-         * Implements a TopFieldCollector over multiple SortField criteria, without
-         * tracking document scores and maxScore.
-         */
+        /// <summary>
+        /// Implements a <see cref="TopFieldCollector"/> over multiple <see cref="SortField"/> criteria, without
+        /// tracking document scores and maxScore.
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -527,11 +527,11 @@ namespace Lucene.Net.Search
             }
         }
 
-        /*
-         * Implements a TopFieldCollector over multiple SortField criteria, without
-         * tracking document scores and maxScore, and assumes out of orderness in doc
-         * Ids collection.
-         */
+        /// <summary>
+        /// Implements a <see cref="TopFieldCollector"/> over multiple <see cref="SortField"/> criteria, without
+        /// tracking document scores and maxScore, and assumes out of orderness in doc
+        /// Ids collection.
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -612,10 +612,10 @@ namespace Lucene.Net.Search
             }
         }
 
-        /*
-         * Implements a TopFieldCollector over multiple SortField criteria, with
-         * tracking document scores and maxScore.
-         */
+        /// <summary>
+        /// Implements a <see cref="TopFieldCollector"/> over multiple <see cref="SortField"/> criteria, with
+        /// tracking document scores and maxScore.
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -710,11 +710,11 @@ namespace Lucene.Net.Search
             }
         }
 
-        /*
-         * Implements a TopFieldCollector over multiple SortField criteria, with
-         * tracking document scores and maxScore, and assumes out of orderness in doc
-         * Ids collection.
-         */
+        /// <summary>
+        /// Implements a <see cref="TopFieldCollector"/> over multiple <see cref="SortField"/> criteria, with
+        /// tracking document scores and maxScore, and assumes out of orderness in doc
+        /// Ids collection.
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -800,10 +800,10 @@ namespace Lucene.Net.Search
             }
         }
 
-        /*
-         * Implements a TopFieldCollector over multiple SortField criteria, with
-         * tracking document scores and maxScore.
-         */
+        /// <summary>
+        /// Implements a <see cref="TopFieldCollector"/> over multiple <see cref="SortField"/> criteria, with
+        /// tracking document scores and maxScore.
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -896,11 +896,11 @@ namespace Lucene.Net.Search
             }
         }
 
-        /*
-         * Implements a TopFieldCollector over multiple SortField criteria, with
-         * tracking document scores and maxScore, and assumes out of orderness in doc
-         * Ids collection.
-         */
+        /// <summary>
+        /// Implements a <see cref="TopFieldCollector"/> over multiple <see cref="SortField"/> criteria, with
+        /// tracking document scores and maxScore, and assumes out of orderness in doc
+        /// Ids collection.
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -992,9 +992,9 @@ namespace Lucene.Net.Search
             }
         }
 
-        /*
-         * Implements a TopFieldCollector when after != null.
-         */
+        /// <summary>
+        /// Implements a <see cref="TopFieldCollector"/> when after != null.
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -1218,82 +1218,82 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a new <seealso cref="TopFieldCollector"/> from the given
+        /// Creates a new <see cref="TopFieldCollector"/> from the given
         /// arguments.
         ///
-        /// <p><b>NOTE</b>: The instances returned by this method
+        /// <para/><b>NOTE</b>: The instances returned by this method
         /// pre-allocate a full array of length
-        /// <code>numHits</code>.
+        /// <paramref name="numHits"/>.
         /// </summary>
         /// <param name="sort">
-        ///          the sort criteria (SortFields). </param>
+        ///          The sort criteria (<see cref="SortField"/>s). </param>
         /// <param name="numHits">
-        ///          the number of results to collect. </param>
+        ///          The number of results to collect. </param>
         /// <param name="fillFields">
-        ///          specifies whether the actual field values should be returned on
-        ///          the results (FieldDoc). </param>
+        ///          Specifies whether the actual field values should be returned on
+        ///          the results (<see cref="FieldDoc"/>). </param>
         /// <param name="trackDocScores">
-        ///          specifies whether document scores should be tracked and set on the
-        ///          results. Note that if set to false, then the results' scores will
-        ///          be set to Float.NaN. Setting this to true affects performance, as
+        ///          Specifies whether document scores should be tracked and set on the
+        ///          results. Note that if set to <c>false</c>, then the results' scores will
+        ///          be set to <see cref="float.NaN"/>. Setting this to <c>true</c> affects performance, as
         ///          it incurs the score computation on each competitive result.
         ///          Therefore if document scores are not required by the application,
-        ///          it is recommended to set it to false. </param>
+        ///          it is recommended to set it to <c>false</c>. </param>
         /// <param name="trackMaxScore">
-        ///          specifies whether the query's maxScore should be tracked and set
-        ///          on the resulting <seealso cref="TopDocs"/>. Note that if set to false,
-        ///          <seealso cref="TopDocs#getMaxScore()"/> returns Float.NaN. Setting this to
-        ///          true affects performance as it incurs the score computation on
-        ///          each result. Also, setting this true automatically sets
-        ///          <code>trackDocScores</code> to true as well. </param>
+        ///          Specifies whether the query's maxScore should be tracked and set
+        ///          on the resulting <see cref="TopDocs"/>. Note that if set to <c>false</c>,
+        ///          <see cref="TopDocs.MaxScore"/> returns <see cref="float.NaN"/>. Setting this to
+        ///          <c>true</c> affects performance as it incurs the score computation on
+        ///          each result. Also, setting this <c>true</c> automatically sets
+        ///          <paramref name="trackDocScores"/> to <c>true</c> as well. </param>
         /// <param name="docsScoredInOrder">
-        ///          specifies whether documents are scored in doc Id order or not by
-        ///          the given <seealso cref="Scorer"/> in <seealso cref="#setScorer(Scorer)"/>. </param>
-        /// <returns> a <seealso cref="TopFieldCollector"/> instance which will sort the results by
+        ///          Specifies whether documents are scored in doc Id order or not by
+        ///          the given <see cref="Scorer"/> in <see cref="ICollector.SetScorer(Scorer)"/>. </param>
+        /// <returns> A <see cref="TopFieldCollector"/> instance which will sort the results by
         ///         the sort criteria. </returns>
-        /// <exception cref="IOException"> if there is a low-level I/O error </exception>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error </exception>
         public static TopFieldCollector Create(Sort sort, int numHits, bool fillFields, bool trackDocScores, bool trackMaxScore, bool docsScoredInOrder)
         {
             return Create(sort, numHits, null, fillFields, trackDocScores, trackMaxScore, docsScoredInOrder);
         }
 
         /// <summary>
-        /// Creates a new <seealso cref="TopFieldCollector"/> from the given
+        /// Creates a new <see cref="TopFieldCollector"/> from the given
         /// arguments.
         ///
-        /// <p><b>NOTE</b>: The instances returned by this method
+        /// <para/><b>NOTE</b>: The instances returned by this method
         /// pre-allocate a full array of length
-        /// <code>numHits</code>.
+        /// <paramref name="numHits"/>.
         /// </summary>
         /// <param name="sort">
-        ///          the sort criteria (SortFields). </param>
+        ///          The sort criteria (<see cref="SortField"/>s). </param>
         /// <param name="numHits">
-        ///          the number of results to collect. </param>
+        ///          The number of results to collect. </param>
         /// <param name="after">
-        ///          only hits after this FieldDoc will be collected </param>
+        ///          Only hits after this <see cref="FieldDoc"/> will be collected </param>
         /// <param name="fillFields">
-        ///          specifies whether the actual field values should be returned on
-        ///          the results (FieldDoc). </param>
+        ///          Specifies whether the actual field values should be returned on
+        ///          the results (<see cref="FieldDoc"/>). </param>
         /// <param name="trackDocScores">
-        ///          specifies whether document scores should be tracked and set on the
-        ///          results. Note that if set to false, then the results' scores will
-        ///          be set to Float.NaN. Setting this to true affects performance, as
+        ///          Specifies whether document scores should be tracked and set on the
+        ///          results. Note that if set to <c>false</c>, then the results' scores will
+        ///          be set to <see cref="float.NaN"/>. Setting this to <c>true</c> affects performance, as
         ///          it incurs the score computation on each competitive result.
         ///          Therefore if document scores are not required by the application,
-        ///          it is recommended to set it to false. </param>
+        ///          it is recommended to set it to <c>false</c>. </param>
         /// <param name="trackMaxScore">
-        ///          specifies whether the query's maxScore should be tracked and set
-        ///          on the resulting <seealso cref="TopDocs"/>. Note that if set to false,
-        ///          <seealso cref="TopDocs#getMaxScore()"/> returns Float.NaN. Setting this to
-        ///          true affects performance as it incurs the score computation on
-        ///          each result. Also, setting this true automatically sets
-        ///          <code>trackDocScores</code> to true as well. </param>
+        ///          Specifies whether the query's maxScore should be tracked and set
+        ///          on the resulting <see cref="TopDocs"/>. Note that if set to <c>false</c>,
+        ///          <see cref="TopDocs.MaxScore"/> returns <see cref="float.NaN"/>. Setting this to
+        ///          <c>true</c> affects performance as it incurs the score computation on
+        ///          each result. Also, setting this <c>true</c> automatically sets
+        ///          <paramref name="trackDocScores"/> to <c>true</c> as well. </param>
         /// <param name="docsScoredInOrder">
-        ///          specifies whether documents are scored in doc Id order or not by
-        ///          the given <seealso cref="Scorer"/> in <seealso cref="#setScorer(Scorer)"/>. </param>
-        /// <returns> a <seealso cref="TopFieldCollector"/> instance which will sort the results by
+        ///          Specifies whether documents are scored in doc Id order or not by
+        ///          the given <see cref="Scorer"/> in <see cref="ICollector.SetScorer(Scorer)"/>. </param>
+        /// <returns> A <see cref="TopFieldCollector"/> instance which will sort the results by
         ///         the sort criteria. </returns>
-        /// <exception cref="IOException"> if there is a low-level I/O error </exception>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error </exception>
         public static TopFieldCollector Create(Sort sort, int numHits, FieldDoc after, bool fillFields, bool trackDocScores, bool trackMaxScore, bool docsScoredInOrder)
         {
             if (sort.fields.Length == 0)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/TopFieldDocs.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/TopFieldDocs.cs b/src/Lucene.Net/Search/TopFieldDocs.cs
index b67a1c2..f0dcd68 100644
--- a/src/Lucene.Net/Search/TopFieldDocs.cs
+++ b/src/Lucene.Net/Search/TopFieldDocs.cs
@@ -22,8 +22,8 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// Represents hits returned by {@link
-    /// IndexSearcher#search(Query,Filter,int,Sort)}.
+    /// Represents hits returned by 
+    /// <see cref="IndexSearcher.Search(Query, Filter, int, Sort)"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/TopScoreDocCollector.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/TopScoreDocCollector.cs b/src/Lucene.Net/Search/TopScoreDocCollector.cs
index 683ca47..53ec973 100644
--- a/src/Lucene.Net/Search/TopScoreDocCollector.cs
+++ b/src/Lucene.Net/Search/TopScoreDocCollector.cs
@@ -23,15 +23,15 @@ namespace Lucene.Net.Search
     using AtomicReaderContext = Lucene.Net.Index.AtomicReaderContext;
 
     /// <summary>
-    /// A <seealso cref="ICollector"/> implementation that collects the top-scoring hits,
-    /// returning them as a <seealso cref="TopDocs"/>. this is used by <seealso cref="IndexSearcher"/> to
-    /// implement <seealso cref="TopDocs"/>-based search. Hits are sorted by score descending
+    /// A <see cref="ICollector"/> implementation that collects the top-scoring hits,
+    /// returning them as a <see cref="TopDocs"/>. This is used by <see cref="IndexSearcher"/> to
+    /// implement <see cref="TopDocs"/>-based search. Hits are sorted by score descending
     /// and then (when the scores are tied) docID ascending. When you create an
     /// instance of this collector you should know in advance whether documents are
     /// going to be collected in doc Id order or not.
     ///
-    /// <p><b>NOTE</b>: The values <seealso cref="Float#NaN"/> and
-    /// <seealso cref="Float#NEGATIVE_INFINITY"/> are not valid scores.  this
+    /// <para/><b>NOTE</b>: The values <see cref="float.NaN"/> and
+    /// <see cref="float.NegativeInfinity"/> are not valid scores.  This
     /// collector will not properly collect hits with such
     /// scores.
     /// </summary>
@@ -261,13 +261,13 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a new <seealso cref="TopScoreDocCollector"/> given the number of hits to
+        /// Creates a new <see cref="TopScoreDocCollector"/> given the number of hits to
         /// collect and whether documents are scored in order by the input
-        /// <seealso cref="Scorer"/> to <seealso cref="#setScorer(Scorer)"/>.
+        /// <see cref="Scorer"/> to <see cref="SetScorer(Scorer)"/>.
         ///
-        /// <p><b>NOTE</b>: The instances returned by this method
+        /// <para/><b>NOTE</b>: The instances returned by this method
         /// pre-allocate a full array of length
-        /// <code>numHits</code>, and fill the array with sentinel
+        /// <paramref name="numHits"/>, and fill the array with sentinel
         /// objects.
         /// </summary>
         public static TopScoreDocCollector Create(int numHits, bool docsScoredInOrder)
@@ -276,13 +276,13 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a new <seealso cref="TopScoreDocCollector"/> given the number of hits to
+        /// Creates a new <see cref="TopScoreDocCollector"/> given the number of hits to
         /// collect, the bottom of the previous page, and whether documents are scored in order by the input
-        /// <seealso cref="Scorer"/> to <seealso cref="#setScorer(Scorer)"/>.
+        /// <see cref="Scorer"/> to <see cref="SetScorer(Scorer)"/>.
         ///
-        /// <p><b>NOTE</b>: The instances returned by this method
+        /// <para/><b>NOTE</b>: The instances returned by this method
         /// pre-allocate a full array of length
-        /// <code>numHits</code>, and fill the array with sentinel
+        /// <paramref name="numHits"/>, and fill the array with sentinel
         /// objects.
         /// </summary>
         public static TopScoreDocCollector Create(int numHits, ScoreDoc after, bool docsScoredInOrder)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/TopTermsRewrite.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/TopTermsRewrite.cs b/src/Lucene.Net/Search/TopTermsRewrite.cs
index 8f58ca5..dd5db58 100644
--- a/src/Lucene.Net/Search/TopTermsRewrite.cs
+++ b/src/Lucene.Net/Search/TopTermsRewrite.cs
@@ -31,10 +31,16 @@ namespace Lucene.Net.Search
     using TermsEnum = Lucene.Net.Index.TermsEnum;
     using TermState = Lucene.Net.Index.TermState;
 
+    internal interface ITopTermsRewrite
+    {
+        int Count { get; } // LUCENENET NOTE: This was size() in Lucene.
+    }
+
     /// <summary>
     /// Base rewrite method for collecting only the top terms
     /// via a priority queue.
-    /// @lucene.internal Only public to be accessible by spans package.
+    /// <para/>
+    /// @lucene.internal - Only public to be accessible by spans package.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -45,10 +51,10 @@ namespace Lucene.Net.Search
         private readonly int size;
 
         /// <summary>
-        /// Create a TopTermsBooleanQueryRewrite for
+        /// Create a <see cref="TopTermsRewrite{Q}"/> for
         /// at most <paramref name="count"/> terms.
-        /// <p>
-        /// NOTE: if <seealso cref="BooleanQuery#getMaxClauseCount"/> is smaller than
+        /// <para/>
+        /// NOTE: if <see cref="BooleanQuery.MaxClauseCount"/> is smaller than
         /// <paramref name="count"/>, then it will be used instead.
         /// </summary>
         public TopTermsRewrite(int count)
@@ -57,7 +63,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// return the maximum priority queue size.
+        /// Return the maximum priority queue size.
+        /// <para/>
         /// NOTE: This was size() in Lucene.
         /// </summary>
         public virtual int Count
@@ -69,7 +76,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// return the maximum size of the priority queue (for boolean rewrites this is BooleanQuery#getMaxClauseCount). </summary>
+        /// Return the maximum size of the priority queue (for boolean rewrites this is <see cref="BooleanQuery.MaxClauseCount"/>). </summary>
         protected abstract int MaxSize { get; }
 
         public override Query Rewrite(IndexReader reader, MultiTermQuery query)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/Weight.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Weight.cs b/src/Lucene.Net/Search/Weight.cs
index 889f5ed..c977afd 100644
--- a/src/Lucene.Net/Search/Weight.cs
+++ b/src/Lucene.Net/Search/Weight.cs
@@ -24,32 +24,34 @@ namespace Lucene.Net.Search
 
     /// <summary>
     /// Expert: Calculate query weights and build query scorers.
-    /// <p>
-    /// The purpose of <seealso cref="Weight"/> is to ensure searching does not modify a
-    /// <seealso cref="Query"/>, so that a <seealso cref="Query"/> instance can be reused. <br>
-    /// <seealso cref="IndexSearcher"/> dependent state of the query should reside in the
-    /// <seealso cref="Weight"/>. <br>
-    /// <seealso cref="AtomicReader"/> dependent state should reside in the <seealso cref="Scorer"/>.
-    /// <p>
-    /// Since <seealso cref="Weight"/> creates <seealso cref="Scorer"/> instances for a given
-    /// <seealso cref="AtomicReaderContext"/> (<seealso cref="#scorer(AtomicReaderContext, Bits)"/>)
+    /// <para/>
+    /// The purpose of <see cref="Weight"/> is to ensure searching does not modify a
+    /// <see cref="Search.Query"/>, so that a <see cref="Search.Query"/> instance can be reused.
+    /// <para/>
+    /// <see cref="IndexSearcher"/> dependent state of the query should reside in the
+    /// <see cref="Weight"/>.
+    /// <para/>
+    /// <see cref="Index.AtomicReader"/> dependent state should reside in the <see cref="Scorer"/>.
+    /// <para/>
+    /// Since <see cref="Weight"/> creates <see cref="Scorer"/> instances for a given
+    /// <see cref="AtomicReaderContext"/> (<see cref="GetScorer(AtomicReaderContext, IBits)"/>)
     /// callers must maintain the relationship between the searcher's top-level
-    /// <seealso cref="IndexReaderContext"/> and the context used to create a <seealso cref="Scorer"/>.
-    /// <p>
-    /// A <code>Weight</code> is used in the following way:
-    /// <ol>
-    /// <li>A <code>Weight</code> is constructed by a top-level query, given a
-    /// <code>IndexSearcher</code> (<seealso cref="Query#createWeight(IndexSearcher)"/>).
-    /// <li>The <seealso cref="#getValueForNormalization()"/> method is called on the
-    /// <code>Weight</code> to compute the query normalization factor
-    /// <seealso cref="Similarity#queryNorm(float)"/> of the query clauses contained in the
-    /// query.
-    /// <li>The query normalization factor is passed to <seealso cref="#normalize(float, float)"/>. At
-    /// this point the weighting is complete.
-    /// <li>A <code>Scorer</code> is constructed by
-    /// <seealso cref="#scorer(AtomicReaderContext, Bits)"/>.
-    /// </ol>
-    ///
+    /// <see cref="Index.IndexReaderContext"/> and the context used to create a <see cref="Scorer"/>.
+    /// <para/>
+    /// A <see cref="Weight"/> is used in the following way:
+    /// <list type="number">
+    ///     <item><description>A <see cref="Weight"/> is constructed by a top-level query, given a
+    ///         <see cref="IndexSearcher"/> (<see cref="Query.CreateWeight(IndexSearcher)"/>).</description></item>
+    ///     <item><description>The <see cref="GetValueForNormalization()"/> method is called on the
+    ///         <see cref="Weight"/> to compute the query normalization factor
+    ///         <see cref="Similarities.Similarity.QueryNorm(float)"/> of the query clauses contained in the
+    ///         query.</description></item>
+    ///     <item><description>The query normalization factor is passed to <see cref="Normalize(float, float)"/>. At
+    ///         this point the weighting is complete.</description></item>
+    ///     <item><description>A <see cref="Scorer"/> is constructed by
+    ///         <see cref="GetScorer(AtomicReaderContext, IBits)"/>.</description></item>
+    /// </list>
+    /// <para/>
     /// @since 2.9
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -60,10 +62,10 @@ namespace Lucene.Net.Search
         /// <summary>
         /// An explanation of the score computation for the named document.
         /// </summary>
-        /// <param name="context"> the readers context to create the <seealso cref="Explanation"/> for. </param>
-        /// <param name="doc"> the document's id relative to the given context's reader </param>
-        /// <returns> an Explanation for the score </returns>
-        /// <exception cref="IOException"> if an <seealso cref="IOException"/> occurs </exception>
+        /// <param name="context"> The readers context to create the <see cref="Explanation"/> for. </param>
+        /// <param name="doc"> The document's id relative to the given context's reader </param>
+        /// <returns> An <see cref="Explanation"/> for the score </returns>
+        /// <exception cref="System.IO.IOException"> if an <see cref="System.IO.IOException"/> occurs </exception>
         public abstract Explanation Explain(AtomicReaderContext context, int doc);
 
         /// <summary>
@@ -79,53 +81,54 @@ namespace Lucene.Net.Search
         public abstract void Normalize(float norm, float topLevelBoost);
 
         /// <summary>
-        /// Returns a <seealso cref="Scorer"/> which scores documents in/out-of order according
-        /// to <code>scoreDocsInOrder</code>.
-        /// <p>
-        /// <b>NOTE:</b> even if <code>scoreDocsInOrder</code> is false, it is
-        /// recommended to check whether the returned <code>Scorer</code> indeed scores
-        /// documents out of order (i.e., call <seealso cref="#scoresDocsOutOfOrder()"/>), as
-        /// some <code>Scorer</code> implementations will always return documents
-        /// in-order.<br>
-        /// <b>NOTE:</b> null can be returned if no documents will be scored by this
+        /// Returns a <see cref="Scorer"/> which scores documents in/out-of order according
+        /// to <c>scoreDocsInOrder</c>.
+        /// <para/>
+        /// <b>NOTE:</b> even if <c>scoreDocsInOrder</c> is <c>false</c>, it is
+        /// recommended to check whether the returned <see cref="Scorer"/> indeed scores
+        /// documents out of order (i.e., call <see cref="ScoresDocsOutOfOrder"/>), as
+        /// some <see cref="Scorer"/> implementations will always return documents
+        /// in-order.
+        /// <para/>
+        /// <b>NOTE:</b> <c>null</c> can be returned if no documents will be scored by this
         /// query.
         /// </summary>
         /// <param name="context">
-        ///          the <seealso cref="AtomicReaderContext"/> for which to return the <seealso cref="Scorer"/>. </param>
+        ///          The <see cref="AtomicReaderContext"/> for which to return the <see cref="Scorer"/>. </param>
         /// <param name="acceptDocs">
-        ///          Bits that represent the allowable docs to match (typically deleted docs
+        ///          <see cref="IBits"/> that represent the allowable docs to match (typically deleted docs
         ///          but possibly filtering other documents)
         /// </param>
-        /// <returns> a <seealso cref="Scorer"/> which scores documents in/out-of order. </returns>
-        /// <exception cref="IOException"> if there is a low-level I/O error </exception>
+        /// <returns> A <see cref="Scorer"/> which scores documents in/out-of order. </returns>
+        /// <exception cref="System.IO.IOException"> if there is a low-level I/O error </exception>
         public abstract Scorer GetScorer(AtomicReaderContext context, IBits acceptDocs);
 
         /// <summary>
-        /// Optional method, to return a <seealso cref="BulkScorer"/> to
-        /// score the query and send hits to a <seealso cref="ICollector"/>.
+        /// Optional method, to return a <see cref="BulkScorer"/> to
+        /// score the query and send hits to a <see cref="ICollector"/>.
         /// Only queries that have a different top-level approach
         /// need to override this; the default implementation
-        /// pulls a normal <seealso cref="Scorer"/> and iterates and
+        /// pulls a normal <see cref="Scorer"/> and iterates and
         /// collects the resulting hits.
         /// </summary>
         /// <param name="context">
-        ///          the <seealso cref="AtomicReaderContext"/> for which to return the <seealso cref="Scorer"/>. </param>
+        ///          The <see cref="AtomicReaderContext"/> for which to return the <see cref="Scorer"/>. </param>
         /// <param name="scoreDocsInOrder">
-        ///          specifies whether in-order scoring of documents is required. Note
-        ///          that if set to false (i.e., out-of-order scoring is required),
+        ///          Specifies whether in-order scoring of documents is required. Note
+        ///          that if set to <c>false</c> (i.e., out-of-order scoring is required),
         ///          this method can return whatever scoring mode it supports, as every
         ///          in-order scorer is also an out-of-order one. However, an
-        ///          out-of-order scorer may not support <seealso cref="Scorer#nextDoc()"/>
-        ///          and/or <seealso cref="Scorer#advance(int)"/>, therefore it is recommended to
+        ///          out-of-order scorer may not support <see cref="DocIdSetIterator.NextDoc()"/>
+        ///          and/or <see cref="DocIdSetIterator.Advance(int)"/>, therefore it is recommended to
         ///          request an in-order scorer if use of these
         ///          methods is required. </param>
         /// <param name="acceptDocs">
-        ///          Bits that represent the allowable docs to match (typically deleted docs
+        ///          <see cref="IBits"/> that represent the allowable docs to match (typically deleted docs
         ///          but possibly filtering other documents)
         /// </param>
-        /// <returns> a <seealso cref="BulkScorer"/> which scores documents and
+        /// <returns> A <see cref="BulkScorer"/> which scores documents and
         /// passes them to a collector. </returns>
-        /// <exception cref="IOException"> if there is a low-level I/O error </exception>
+        /// <exception cref="System.IO.IOException"> if there is a low-level I/O error </exception>
         public virtual BulkScorer GetBulkScorer(AtomicReaderContext context, bool scoreDocsInOrder, IBits acceptDocs)
         {
             Scorer scorer = GetScorer(context, acceptDocs);
@@ -141,7 +144,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Just wraps a Scorer and performs top scoring using it. </summary>
+        /// Just wraps a <see cref="Scorer"/> and performs top scoring using it. </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -204,15 +207,15 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns true iff this implementation scores docs only out of order. this
-        /// method is used in conjunction with <seealso cref="ICollector"/>'s
-        /// <seealso cref="ICollector#acceptsDocsOutOfOrder() acceptsDocsOutOfOrder"/> and
-        /// <seealso cref="#bulkScorer(AtomicReaderContext, boolean, Bits)"/> to
-        /// create a matching <seealso cref="Scorer"/> instance for a given <seealso cref="ICollector"/>, or
+        /// Returns <c>true</c> if this implementation scores docs only out of order. This
+        /// method is used in conjunction with <see cref="ICollector"/>'s
+        /// <see cref="ICollector.AcceptsDocsOutOfOrder"/> and
+        /// <see cref="GetBulkScorer(AtomicReaderContext, bool, IBits)"/> to
+        /// create a matching <see cref="Scorer"/> instance for a given <see cref="ICollector"/>, or
         /// vice versa.
-        /// <p>
-        /// <b>NOTE:</b> the default implementation returns <code>false</code>, i.e.
-        /// the <code>Scorer</code> scores documents in-order.
+        /// <para/>
+        /// <b>NOTE:</b> the default implementation returns <c>false</c>, i.e.
+        /// the <see cref="Scorer"/> scores documents in-order.
         /// </summary>
         public virtual bool ScoresDocsOutOfOrder
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/WildcardQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/WildcardQuery.cs b/src/Lucene.Net/Search/WildcardQuery.cs
index 52f0d12..4a76450 100644
--- a/src/Lucene.Net/Search/WildcardQuery.cs
+++ b/src/Lucene.Net/Search/WildcardQuery.cs
@@ -29,19 +29,19 @@ namespace Lucene.Net.Search
     using ToStringUtils = Lucene.Net.Util.ToStringUtils;
 
     /// <summary>
-    /// Implements the wildcard search query. Supported wildcards are <code>*</code>, which
-    /// matches any character sequence (including the empty one), and <code>?</code>,
+    /// Implements the wildcard search query. Supported wildcards are <c>*</c>, which
+    /// matches any character sequence (including the empty one), and <c>?</c>,
     /// which matches any single character. '\' is the escape character.
-    /// <p>
+    /// <para/>
     /// Note this query can be slow, as it
     /// needs to iterate over many terms. In order to prevent extremely slow WildcardQueries,
-    /// a Wildcard term should not start with the wildcard <code>*</code>
+    /// a Wildcard term should not start with the wildcard <c>*</c>
     ///
-    /// <p>this query uses the {@link
-    /// MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT}
+    /// <para/>This query uses the 
+    /// <see cref="MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT"/>
     /// rewrite method.
     /// </summary>
-    /// <seealso cref= AutomatonQuery </seealso>
+    /// <seealso cref="AutomatonQuery"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -60,7 +60,7 @@ namespace Lucene.Net.Search
         public const char WILDCARD_ESCAPE = '\\';
 
         /// <summary>
-        /// Constructs a query for terms matching <code>term</code>.
+        /// Constructs a query for terms matching <paramref name="term"/>.
         /// </summary>
         public WildcardQuery(Term term)
             : base(term, ToAutomaton(term))
@@ -69,6 +69,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Convert Lucene wildcard syntax into an automaton.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public static Automaton ToAutomaton(Term wildcardquery)


[26/48] lucenenet git commit: Lucene.Net.Util: Fixed XML Documentation comments, types beginning with H-Z

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/PriorityQueue.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/PriorityQueue.cs b/src/Lucene.Net/Util/PriorityQueue.cs
index a2f4e07..36d5be5 100644
--- a/src/Lucene.Net/Util/PriorityQueue.cs
+++ b/src/Lucene.Net/Util/PriorityQueue.cs
@@ -23,16 +23,16 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// A PriorityQueue maintains a partial ordering of its elements such that the
-    /// element with least priority can always be found in constant time. It is represented as a
-    /// Min-Heap so that Add()'s and Pop()'s require log(size) time.
+    /// A <see cref="PriorityQueue{T}"/> maintains a partial ordering of its elements such that the
+    /// element with least priority can always be found in constant time. Add()'s and Pop()'s
+    /// require log(size) time.
     ///
-    /// <p><b>NOTE</b>: this class will pre-allocate a full array of
-    /// length <code>maxSize+1</code> if instantiated via the
-    /// <seealso cref="#PriorityQueue(int,boolean)"/> constructor with
-    /// <code>prepopulate</code> set to <code>true</code>. That maximum
+    /// <para/><b>NOTE</b>: this class will pre-allocate a full array of
+    /// length <c>maxSize+1</c> if instantiated via the
+    /// <see cref="PriorityQueue(int, bool)"/> constructor with
+    /// <c>prepopulate</c> set to <c>true</c>. That maximum
     /// size can grow as we insert elements over the time.
-    ///
+    /// <para/>
     /// @lucene.internal
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -102,48 +102,48 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Determines the ordering of objects in this priority queue.  Subclasses
-        ///  must define this one method. </summary>
-        ///  <returns> <code>true</code> iff parameter <tt>a</tt> is less than parameter <tt>b</tt>. </returns>
+        /// must define this one method. </summary>
+        /// <returns> <c>true</c> if parameter <paramref name="a"/> is less than parameter <paramref name="b"/>. </returns>
         protected internal abstract bool LessThan(T a, T b);
 
         /// <summary>
-        /// this method can be overridden by extending classes to return a sentinel
-        /// object which will be used by the <seealso cref="PriorityQueue#PriorityQueue(int,boolean)"/>
+        /// This method can be overridden by extending classes to return a sentinel
+        /// object which will be used by the <see cref="PriorityQueue(int, bool)"/>
         /// constructor to fill the queue, so that the code which uses that queue can always
         /// assume it's full and only change the top without attempting to insert any new
-        /// object.<br>
-        ///
+        /// object.
+        /// <para/>
         /// Those sentinel values should always compare worse than any non-sentinel
-        /// value (i.e., <seealso cref="#lessThan"/> should always favor the
-        /// non-sentinel values).<br>
-        ///
-        /// By default, this method returns false, which means the queue will not be
+        /// value (i.e., <see cref="LessThan(T, T)"/> should always favor the
+        /// non-sentinel values).
+        /// <para/>
+        /// By default, this method returns <c>false</c>, which means the queue will not be
         /// filled with sentinel values. Otherwise, the value returned will be used to
-        /// pre-populate the queue. Adds sentinel values to the queue.<br>
-        ///
+        /// pre-populate the queue. Adds sentinel values to the queue.
+        /// <para/>
         /// If this method is extended to return a non-null value, then the following
         /// usage pattern is recommended:
         ///
-        /// <pre class="prettyprint">
-        /// // extends getSentinelObject() to return a non-null value.
+        /// <code>
+        /// // extends GetSentinelObject() to return a non-null value.
         /// PriorityQueue&lt;MyObject&gt; pq = new MyQueue&lt;MyObject&gt;(numHits);
         /// // save the 'top' element, which is guaranteed to not be null.
-        /// MyObject pqTop = pq.top();
+        /// MyObject pqTop = pq.Top;
         /// &lt;...&gt;
         /// // now in order to add a new element, which is 'better' than top (after
         /// // you've verified it is better), it is as simple as:
-        /// pqTop.change().
-        /// pqTop = pq.updateTop();
-        /// </pre>
-        ///
-        /// <b>NOTE:</b> if this method returns a non-null value, it will be called by
-        /// the <seealso cref="PriorityQueue#PriorityQueue(int,boolean)"/> constructor
-        /// <seealso cref="#size()"/> times, relying on a new object to be returned and will not
-        /// check if it's null again. Therefore you should ensure any call to this
+        /// pqTop.Change().
+        /// pqTop = pq.UpdateTop();
+        /// </code>
+        /// <para/>
+        /// <b>NOTE:</b> if this method returns a non-<c>null</c> value, it will be called by
+        /// the <see cref="PriorityQueue(int, bool)"/> constructor
+        /// <see cref="Count"/> times, relying on a new object to be returned and will not
+        /// check if it's <c>null</c> again. Therefore you should ensure any call to this
         /// method creates a new instance and behaves consistently, e.g., it cannot
-        /// return null if it previously returned non-null.
+        /// return <c>null</c> if it previously returned non-<c>null</c>.
         /// </summary>
-        /// <returns> the sentinel object to use to pre-populate the queue, or null if
+        /// <returns> The sentinel object to use to pre-populate the queue, or <c>null</c> if
         ///         sentinel objects are not supported. </returns>
         protected virtual T GetSentinelObject()
         {
@@ -151,11 +151,11 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Adds an Object to a PriorityQueue in log(size) time. If one tries to add
-        /// more objects than maxSize from initialize and it is not possible to resize
-        /// the heap, an <seealso cref="IndexOutOfRangeException"/> is thrown.
+        /// Adds an Object to a <see cref="PriorityQueue{T}"/> in log(size) time. If one tries to add
+        /// more objects than <see cref="maxSize"/> from initialize and it is not possible to resize
+        /// the heap, an <see cref="IndexOutOfRangeException"/> is thrown.
         /// </summary>
-        /// <returns> the new 'top' element in the queue. </returns>
+        /// <returns> The new 'top' element in the queue. </returns>
         public T Add(T element)
         {
             size++;
@@ -165,14 +165,14 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Adds an Object to a PriorityQueue in log(size) time.
+        /// Adds an Object to a <see cref="PriorityQueue{T}"/> in log(size) time.
         /// It returns the object (if any) that was
-        /// dropped off the heap because it was full. this can be
+        /// dropped off the heap because it was full. This can be
         /// the given parameter (in case it is smaller than the
         /// full heap's minimum, and couldn't be added), or another
         /// object that was previously the smallest value in the
-        /// heap and now has been replaced by a larger one, or null
-        /// if the queue wasn't yet full with maxSize elements.
+        /// heap and now has been replaced by a larger one, or <c>null</c>
+        /// if the queue wasn't yet full with <see cref="maxSize"/> elements.
         /// </summary>
         public virtual T InsertWithOverflow(T element)
         {
@@ -195,8 +195,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the least element of the PriorityQueue in constant time.
-        /// Returns null if the queue is empty. </summary>
+        /// Returns the least element of the <see cref="PriorityQueue{T}"/> in constant time.
+        /// Returns <c>null</c> if the queue is empty. </summary>
         public T Top
         {
             get
@@ -209,8 +209,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Removes and returns the least element of the PriorityQueue in log(size)
-        ///  time.
+        /// Removes and returns the least element of the <see cref="PriorityQueue{T}"/> in log(size)
+        /// time.
         /// </summary>
         public T Pop()
         {
@@ -233,20 +233,20 @@ namespace Lucene.Net.Util
         /// Should be called when the Object at top changes values. Still log(n) worst
         /// case, but it's at least twice as fast to
         ///
-        /// <pre class="prettyprint">
-        /// pq.top().change();
-        /// pq.updateTop();
-        /// </pre>
+        /// <code>
+        /// pq.Top.Change();
+        /// pq.UpdateTop();
+        /// </code>
         ///
         /// instead of
         ///
-        /// <pre class="prettyprint">
-        /// o = pq.pop();
-        /// o.change();
-        /// pq.push(o);
-        /// </pre>
+        /// <code>
+        /// o = pq.Pop();
+        /// o.Change();
+        /// pq.Push(o);
+        /// </code>
         /// </summary>
-        /// <returns> the new 'top' element. </returns>
+        /// <returns> The new 'top' element. </returns>
         public T UpdateTop()
         {
             DownHeap();
@@ -254,7 +254,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the number of elements currently stored in the PriorityQueue.
+        /// Returns the number of elements currently stored in the <see cref="PriorityQueue{T}"/>.
         /// NOTE: This was size() in Lucene.
         /// </summary>
         public int Count
@@ -263,7 +263,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Removes all entries from the PriorityQueue. </summary>
+        /// Removes all entries from the <see cref="PriorityQueue{T}"/>. </summary>
         public void Clear()
         {
             for (int i = 0; i <= size; i++)
@@ -312,7 +312,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// this method returns the internal heap array as T[].
+        /// This method returns the internal heap array as T[].
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         [WritableArray]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/QueryBuilder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/QueryBuilder.cs b/src/Lucene.Net/Util/QueryBuilder.cs
index 18cd2d0..286b4e2 100644
--- a/src/Lucene.Net/Util/QueryBuilder.cs
+++ b/src/Lucene.Net/Util/QueryBuilder.cs
@@ -36,18 +36,18 @@ namespace Lucene.Net.Util
     using TokenStream = Lucene.Net.Analysis.TokenStream;
 
     /// <summary>
-    /// Creates queries from the <seealso cref="Analyzer"/> chain.
-    /// <p>
+    /// Creates queries from the <see cref="Analyzer"/> chain.
+    /// <para/>
     /// Example usage:
-    /// <pre class="prettyprint">
-    ///   QueryBuilder builder = new QueryBuilder(analyzer);
-    ///   Query a = builder.createBooleanQuery("body", "just a test");
-    ///   Query b = builder.createPhraseQuery("body", "another test");
-    ///   Query c = builder.createMinShouldMatchQuery("body", "another test", 0.5f);
-    /// </pre>
-    /// <p>
-    /// this can also be used as a subclass for query parsers to make it easier
-    /// to interact with the analysis chain. Factory methods such as {@code newTermQuery}
+    /// <code>
+    ///     QueryBuilder builder = new QueryBuilder(analyzer);
+    ///     Query a = builder.CreateBooleanQuery("body", "just a test");
+    ///     Query b = builder.CreatePhraseQuery("body", "another test");
+    ///     Query c = builder.CreateMinShouldMatchQuery("body", "another test", 0.5f);
+    /// </code>
+    /// <para/>
+    /// This can also be used as a subclass for query parsers to make it easier
+    /// to interact with the analysis chain. Factory methods such as <see cref="NewTermQuery(Term)"/>
     /// are provided so that the generated queries can be customized.
     /// </summary>
     public class QueryBuilder
@@ -56,7 +56,7 @@ namespace Lucene.Net.Util
         private bool enablePositionIncrements = true;
 
         /// <summary>
-        /// Creates a new QueryBuilder using the given analyzer. </summary>
+        /// Creates a new <see cref="QueryBuilder"/> using the given analyzer. </summary>
         public QueryBuilder(Analyzer analyzer)
         {
             this.analyzer = analyzer;
@@ -64,12 +64,12 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Creates a boolean query from the query text.
-        /// <p>
-        /// this is equivalent to {@code createBooleanQuery(field, queryText, Occur.SHOULD)} </summary>
-        /// <param name="field"> field name </param>
-        /// <param name="queryText"> text to be passed to the analyzer </param>
-        /// <returns> {@code TermQuery} or {@code BooleanQuery}, based on the analysis
-        ///         of {@code queryText} </returns>
+        /// <para/>
+        /// This is equivalent to <c>CreateBooleanQuery(field, queryText, Occur.SHOULD)</c> </summary>
+        /// <param name="field"> Field name. </param>
+        /// <param name="queryText"> Text to be passed to the analyzer. </param>
+        /// <returns> <see cref="TermQuery"/> or <see cref="BooleanQuery"/>, based on the analysis
+        ///         of <paramref name="queryText"/>. </returns>
         public virtual Query CreateBooleanQuery(string field, string queryText)
         {
             return CreateBooleanQuery(field, queryText, Occur.SHOULD);
@@ -77,12 +77,12 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Creates a boolean query from the query text.
-        /// <p> </summary>
-        /// <param name="field"> field name </param>
-        /// <param name="queryText"> text to be passed to the analyzer </param>
-        /// <param name="operator"> operator used for clauses between analyzer tokens. </param>
-        /// <returns> {@code TermQuery} or {@code BooleanQuery}, based on the analysis
-        ///         of {@code queryText} </returns>
+        /// </summary>
+        /// <param name="field"> Field name. </param>
+        /// <param name="queryText"> Text to be passed to the analyzer. </param>
+        /// <param name="operator"> Operator used for clauses between analyzer tokens. </param>
+        /// <returns> <see cref="TermQuery"/> or <see cref="BooleanQuery"/>, based on the analysis
+        ///         of <paramref name="queryText"/>. </returns>
         public virtual Query CreateBooleanQuery(string field, string queryText, Occur @operator)
         {
             if (@operator != Occur.SHOULD && @operator != Occur.MUST)
@@ -94,12 +94,12 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Creates a phrase query from the query text.
-        /// <p>
-        /// this is equivalent to {@code createPhraseQuery(field, queryText, 0)} </summary>
-        /// <param name="field"> field name </param>
-        /// <param name="queryText"> text to be passed to the analyzer </param>
-        /// <returns> {@code TermQuery}, {@code BooleanQuery}, {@code PhraseQuery}, or
-        ///         {@code MultiPhraseQuery}, based on the analysis of {@code queryText} </returns>
+        /// <para/>
+        /// This is equivalent to <c>CreatePhraseQuery(field, queryText, 0)</c> </summary>
+        /// <param name="field"> Field name. </param>
+        /// <param name="queryText"> Text to be passed to the analyzer. </param>
+        /// <returns> <see cref="TermQuery"/>, <see cref="BooleanQuery"/>, <see cref="PhraseQuery"/>, or
+        ///         <see cref="MultiPhraseQuery"/>, based on the analysis of <paramref name="queryText"/>. </returns>
         public virtual Query CreatePhraseQuery(string field, string queryText)
         {
             return CreatePhraseQuery(field, queryText, 0);
@@ -107,12 +107,12 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Creates a phrase query from the query text.
-        /// <p> </summary>
-        /// <param name="field"> field name </param>
-        /// <param name="queryText"> text to be passed to the analyzer </param>
+        /// </summary>
+        /// <param name="field"> Field name. </param>
+        /// <param name="queryText"> Text to be passed to the analyzer. </param>
         /// <param name="phraseSlop"> number of other words permitted between words in query phrase </param>
-        /// <returns> {@code TermQuery}, {@code BooleanQuery}, {@code PhraseQuery}, or
-        ///         {@code MultiPhraseQuery}, based on the analysis of {@code queryText} </returns>
+        /// <returns> <see cref="TermQuery"/>, <see cref="BooleanQuery"/>, <see cref="PhraseQuery"/>, or
+        ///         <see cref="MultiPhraseQuery"/>, based on the analysis of <paramref name="queryText"/>. </returns>
         public virtual Query CreatePhraseQuery(string field, string queryText, int phraseSlop)
         {
             return CreateFieldQuery(analyzer, Occur.MUST, field, queryText, true, phraseSlop);
@@ -120,12 +120,12 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Creates a minimum-should-match query from the query text.
-        /// <p> </summary>
-        /// <param name="field"> field name </param>
-        /// <param name="queryText"> text to be passed to the analyzer </param>
+        /// </summary>
+        /// <param name="field"> Field name. </param>
+        /// <param name="queryText"> Text to be passed to the analyzer. </param>
        /// <param name="fraction"> Fraction of query terms <c>[0..1]</c> that should match. </param>
-        /// <returns> {@code TermQuery} or {@code BooleanQuery}, based on the analysis
-        ///         of {@code queryText} </returns>
+        /// <returns> <see cref="TermQuery"/> or <see cref="BooleanQuery"/>, based on the analysis
+        ///         of <paramref name="queryText"/>. </returns>
         public virtual Query CreateMinShouldMatchQuery(string field, string queryText, float fraction)
         {
             if (float.IsNaN(fraction) || fraction < 0 || fraction > 1)
@@ -149,8 +149,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the analyzer. </summary>
-        /// <seealso cref= #setAnalyzer(Analyzer) </seealso>
+        /// Gets or Sets the analyzer. </summary>
         public virtual Analyzer Analyzer
         {
             get
@@ -164,8 +163,15 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns true if position increments are enabled. </summary>
-        /// <seealso cref= #setEnablePositionIncrements(boolean) </seealso>
+        /// Gets or Sets whether position increments are enabled.
+        /// <para/>
+        /// When <c>true</c>, result phrase and multi-phrase queries will
+        /// be aware of position increments.
+        /// Useful when e.g. a StopFilter increases the position increment of
+        /// the token that follows an omitted token.
+        /// <para/>
+        /// Default: true.
+        /// </summary>
         public virtual bool EnablePositionIncrements
         {
             get
@@ -180,16 +186,16 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Creates a query from the analysis chain.
-        /// <p>
+        /// <para/>
         /// Expert: this is more useful for subclasses such as queryparsers.
-        /// If using this class directly, just use <seealso cref="#createBooleanQuery(String, String)"/>
-        /// and <seealso cref="#createPhraseQuery(String, String)"/> </summary>
-        /// <param name="analyzer"> analyzer used for this query </param>
-        /// <param name="operator"> default boolean operator used for this query </param>
-        /// <param name="field"> field to create queries against </param>
-        /// <param name="queryText"> text to be passed to the analysis chain </param>
-        /// <param name="quoted"> true if phrases should be generated when terms occur at more than one position </param>
-        /// <param name="phraseSlop"> slop factor for phrase/multiphrase queries </param>
+        /// If using this class directly, just use <see cref="CreateBooleanQuery(string, string)"/>
+        /// and <see cref="CreatePhraseQuery(string, string)"/>. </summary>
+        /// <param name="analyzer"> Analyzer used for this query. </param>
+        /// <param name="operator"> Default boolean operator used for this query. </param>
+        /// <param name="field"> Field to create queries against. </param>
+        /// <param name="queryText"> Text to be passed to the analysis chain. </param>
+        /// <param name="quoted"> <c>true</c> if phrases should be generated when terms occur at more than one position. </param>
+        /// <param name="phraseSlop"> Slop factor for phrase/multiphrase queries. </param>
         protected Query CreateFieldQuery(Analyzer analyzer, Occur @operator, string field, string queryText, bool quoted, int phraseSlop)
         {
             Debug.Assert(@operator == Occur.SHOULD || @operator == Occur.MUST);
@@ -439,42 +445,46 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Builds a new BooleanQuery instance.
-        /// <p>
-        /// this is intended for subclasses that wish to customize the generated queries. </summary>
-        /// <param name="disableCoord"> disable coord </param>
-        /// <returns> new BooleanQuery instance </returns>
+        /// Builds a new <see cref="BooleanQuery"/> instance.
+        /// <para/>
+        /// This is intended for subclasses that wish to customize the generated queries. 
+        /// </summary>
+        /// <param name="disableCoord"> Disable coord. </param>
+        /// <returns> New <see cref="BooleanQuery"/> instance. </returns>
         protected virtual BooleanQuery NewBooleanQuery(bool disableCoord)
         {
             return new BooleanQuery(disableCoord);
         }
 
         /// <summary>
-        /// Builds a new TermQuery instance.
-        /// <p>
-        /// this is intended for subclasses that wish to customize the generated queries. </summary>
-        /// <param name="term"> term </param>
-        /// <returns> new TermQuery instance </returns>
+        /// Builds a new <see cref="TermQuery"/> instance.
+        /// <para/>
+        /// This is intended for subclasses that wish to customize the generated queries. 
+        /// </summary>
+        /// <param name="term"> Term. </param>
+        /// <returns> New <see cref="TermQuery"/> instance. </returns>
         protected virtual Query NewTermQuery(Term term)
         {
             return new TermQuery(term);
         }
 
         /// <summary>
-        /// Builds a new PhraseQuery instance.
-        /// <p>
-        /// this is intended for subclasses that wish to customize the generated queries. </summary>
-        /// <returns> new PhraseQuery instance </returns>
+        /// Builds a new <see cref="PhraseQuery"/> instance.
+        /// <para/>
+        /// This is intended for subclasses that wish to customize the generated queries. 
+        /// </summary>
+        /// <returns> New <see cref="PhraseQuery"/> instance. </returns>
         protected virtual PhraseQuery NewPhraseQuery()
         {
             return new PhraseQuery();
         }
 
         /// <summary>
-        /// Builds a new MultiPhraseQuery instance.
-        /// <p>
-        /// this is intended for subclasses that wish to customize the generated queries. </summary>
-        /// <returns> new MultiPhraseQuery instance </returns>
+        /// Builds a new <see cref="MultiPhraseQuery"/> instance.
+        /// <para/>
+        /// This is intended for subclasses that wish to customize the generated queries. 
+        /// </summary>
+        /// <returns> New <see cref="MultiPhraseQuery"/> instance. </returns>
         protected virtual MultiPhraseQuery NewMultiPhraseQuery()
         {
             return new MultiPhraseQuery();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/RamUsageEstimator.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/RamUsageEstimator.cs b/src/Lucene.Net/Util/RamUsageEstimator.cs
index 6e0fca7..5ae74a8 100644
--- a/src/Lucene.Net/Util/RamUsageEstimator.cs
+++ b/src/Lucene.Net/Util/RamUsageEstimator.cs
@@ -27,13 +27,13 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// Estimates the size (memory representation) of Java objects.
+    /// Estimates the size (memory representation) of .NET objects.
+    /// <para/>
+    /// @lucene.internal
     /// </summary>
-    /// <seealso cref= #sizeOf(Object) </seealso>
-    /// <seealso cref= #shallowSizeOf(Object) </seealso>
-    /// <seealso cref= #shallowSizeOfInstance(Class)
-    ///
-    /// @lucene.internal </seealso>
+    /// <seealso cref="SizeOf(object)"/>
+    /// <seealso cref="ShallowSizeOf(object)"/>
+    /// <seealso cref="ShallowSizeOfInstance(Type)"/>
     public sealed class RamUsageEstimator
     {
         ///// <summary>
@@ -84,7 +84,7 @@ namespace Lucene.Net.Util
         public const int NUM_BYTES_DOUBLE = 8;
 
         /// <summary>
-        /// Number of bytes this jvm uses to represent an object reference.
+        /// Number of bytes this .NET runtime uses to represent an object reference.
         /// </summary>
         public static readonly int NUM_BYTES_OBJECT_REF;
 
@@ -99,7 +99,7 @@ namespace Lucene.Net.Util
         public static readonly int NUM_BYTES_ARRAY_HEADER;
 
         /// <summary>
-        /// A constant specifying the object alignment boundary inside the JVM. Objects will
+        /// A constant specifying the object alignment boundary inside the .NET runtime. Objects will
         /// always take a full multiple of this constant, possibly wasting some space.
         /// </summary>
         public static readonly int NUM_BYTES_OBJECT_ALIGNMENT;
@@ -234,14 +234,14 @@ namespace Lucene.Net.Util
             //JVM_INFO_STRING = "[JVM: " + Constants.JVM_NAME + ", " + Constants.JVM_VERSION + ", " + Constants.JVM_VENDOR + ", " + Constants.JAVA_VENDOR + ", " + Constants.JAVA_VERSION + "]";
         }
 
-        /// <summary>
-        /// A handle to <code>sun.misc.Unsafe</code>.
-        /// </summary>
+        ///// <summary>
+        ///// A handle to <code>sun.misc.Unsafe</code>.
+        ///// </summary>
         //private static readonly object TheUnsafe;
 
-        /// <summary>
-        /// A handle to <code>sun.misc.Unsafe#fieldOffset(Field)</code>.
-        /// </summary>
+        ///// <summary>
+        ///// A handle to <code>sun.misc.Unsafe#fieldOffset(Field)</code>.
+        ///// </summary>
         //private static readonly Method ObjectFieldOffsetMethod;
 
         /// <summary>
@@ -276,7 +276,7 @@ namespace Lucene.Net.Util
         //}
 
         /// <summary>
-        /// Aligns an object size to be the next multiple of <seealso cref="#NUM_BYTES_OBJECT_ALIGNMENT"/>.
+        /// Aligns an object size to be the next multiple of <see cref="NUM_BYTES_OBJECT_ALIGNMENT"/>.
         /// </summary>
         public static long AlignObjectSize(long size)
         {
@@ -285,7 +285,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the size in bytes of the byte[] object. </summary>
+        /// Returns the size in bytes of the <see cref="T:byte[]"/> object. </summary>
         // LUCENENET specific overload for CLS compliance
         public static long SizeOf(byte[] arr)
         {
@@ -293,7 +293,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the size in bytes of the sbyte[] object. </summary>
+        /// Returns the size in bytes of the <see cref="T:sbyte[]"/> object. </summary>
         [CLSCompliant(false)]
         public static long SizeOf(sbyte[] arr)
         {
@@ -301,62 +301,64 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the size in bytes of the boolean[] object. </summary>
+        /// Returns the size in bytes of the <see cref="T:bool[]"/> object. </summary>
         public static long SizeOf(bool[] arr)
         {
             return AlignObjectSize((long)NUM_BYTES_ARRAY_HEADER + arr.Length);
         }
 
         /// <summary>
-        /// Returns the size in bytes of the char[] object. </summary>
+        /// Returns the size in bytes of the <see cref="T:char[]"/> object. </summary>
         public static long SizeOf(char[] arr)
         {
             return AlignObjectSize((long)NUM_BYTES_ARRAY_HEADER + (long)NUM_BYTES_CHAR * arr.Length);
         }
 
         /// <summary>
-        /// Returns the size in bytes of the short[] object. </summary>
+        /// Returns the size in bytes of the <see cref="T:short[]"/> object. </summary>
         public static long SizeOf(short[] arr)
         {
             return AlignObjectSize((long)NUM_BYTES_ARRAY_HEADER + (long)NUM_BYTES_INT16 * arr.Length);
         }
 
         /// <summary>
-        /// Returns the size in bytes of the int[] object. </summary>
+        /// Returns the size in bytes of the <see cref="T:int[]"/> object. </summary>
         public static long SizeOf(int[] arr)
         {
             return AlignObjectSize((long)NUM_BYTES_ARRAY_HEADER + (long)NUM_BYTES_INT32 * arr.Length);
         }
 
         /// <summary>
-        /// Returns the size in bytes of the float[] object. </summary>
+        /// Returns the size in bytes of the <see cref="T:float[]"/> object. </summary>
         public static long SizeOf(float[] arr)
         {
             return AlignObjectSize((long)NUM_BYTES_ARRAY_HEADER + (long)NUM_BYTES_SINGLE * arr.Length);
         }
 
         /// <summary>
-        /// Returns the size in bytes of the long[] object. </summary>
+        /// Returns the size in bytes of the <see cref="T:long[]"/> object. </summary>
         public static long SizeOf(long[] arr)
         {
             return AlignObjectSize((long)NUM_BYTES_ARRAY_HEADER + (long)NUM_BYTES_INT64 * arr.Length);
         }
 
         /// <summary>
-        /// Returns the size in bytes of the double[] object. </summary>
+        /// Returns the size in bytes of the <see cref="T:double[]"/> object. </summary>
         public static long SizeOf(double[] arr)
         {
             return AlignObjectSize((long)NUM_BYTES_ARRAY_HEADER + (long)NUM_BYTES_DOUBLE * arr.Length);
         }
 
+        // LUCENENET TODO: API - Add SizeOf() overloads for ulong, ushort, uint
+
         /// <summary>
         /// Estimates the RAM usage by the given object. It will
         /// walk the object tree and sum up all referenced objects.
         ///
-        /// <p><b>Resource Usage:</b> this method internally uses a set of
+        /// <para><b>Resource Usage:</b> this method internally uses a set of
         /// every object seen during traversals so it does allocate memory
         /// (it isn't side-effect free). After the method exits, this memory
-        /// should be GCed.</p>
+        /// should be GCed.</para>
         /// </summary>
         public static long SizeOf(object obj)
         {
@@ -367,8 +369,8 @@ namespace Lucene.Net.Util
         /// Estimates a "shallow" memory usage of the given object. For arrays, this will be the
         /// memory taken by array storage (no subreferences will be followed). For objects, this
         /// will be the memory taken by the fields.
-        ///
-        /// JVM object alignments are also applied.
+        /// <para/>
+        /// .NET object alignments are also applied.
         /// </summary>
         public static long ShallowSizeOf(object obj)
         {
@@ -389,11 +391,11 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Returns the shallow instance size in bytes an instance of the given class would occupy.
-        /// this works with all conventional classes and primitive types, but not with arrays
+        /// This works with all conventional classes and primitive types, but not with arrays
         /// (the size then depends on the number of elements and varies from object to object).
         /// </summary>
-        /// <seealso cref= #shallowSizeOf(Object) </seealso>
-        /// <exception cref="IllegalArgumentException"> if {@code clazz} is an array class.  </exception>
+        /// <seealso cref="ShallowSizeOf(object)"/>
+        /// <exception cref="ArgumentException"> if <paramref name="clazz"/> is an array class. </exception>
         public static long ShallowSizeOfInstance(Type clazz)
         {
             if (clazz.IsArray)
@@ -428,7 +430,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Return shallow size of any <code>array</code>.
+        /// Return shallow size of any <paramref name="array"/>.
         /// </summary>
         private static long ShallowSizeOfArray(Array array)
         {
@@ -597,11 +599,11 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// this method returns the maximum representation size of an object. <code>sizeSoFar</code>
-        /// is the object's size measured so far. <code>f</code> is the field being probed.
+        /// This method returns the maximum representation size of an object. <paramref name="sizeSoFar"/>
+        /// is the object's size measured so far. <paramref name="f"/> is the field being probed.
         ///
-        /// <p>The returned offset will be the maximum of whatever was measured so far and
-        /// <code>f</code> field's offset and representation size (unaligned).
+        /// <para/>The returned offset will be the maximum of whatever was measured so far and
+        /// <paramref name="f"/> field's offset and representation size (unaligned).
         /// </summary>
         private static long AdjustForField(long sizeSoFar, FieldInfo f)
         {
@@ -629,7 +631,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns <code>size</code> in human-readable units (GB, MB, KB or bytes).
+        /// Returns <c>size</c> in human-readable units (GB, MB, KB or bytes).
         /// </summary>
         public static string HumanReadableUnits(long bytes)
         {
@@ -637,7 +639,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns <code>size</code> in human-readable units (GB, MB, KB or bytes).
+        /// Returns <c>size</c> in human-readable units (GB, MB, KB or bytes).
         /// </summary>
         public static string HumanReadableUnits(long bytes, IFormatProvider df)
         {
@@ -661,8 +663,8 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Return a human-readable size of a given object. </summary>
-        /// <seealso cref= #sizeOf(Object) </seealso>
-        /// <seealso cref= #humanReadableUnits(long) </seealso>
+        /// <seealso cref="SizeOf(object)"/>
+        /// <seealso cref="HumanReadableUnits(long)"/>
         public static string HumanSizeOf(object @object)
         {
             return HumanReadableUnits(SizeOf(@object));
@@ -670,10 +672,10 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// An identity hash set implemented using open addressing. No null keys are allowed.
-        ///
+        /// <para/>
         /// TODO: If this is useful outside this class, make it public - needs some work
         /// </summary>
-        public sealed class IdentityHashSet<KType> : IEnumerable<KType>
+        public sealed class IdentityHashSet<KType> : IEnumerable<KType> // LUCENENET TODO: API - This was internal in Lucene
         {
             /// <summary>
             /// Default load factor.
@@ -714,8 +716,8 @@ namespace Lucene.Net.Util
             private int resizeThreshold;
 
             /// <summary>
-            /// Creates a hash set with the default capacity of 16.
-            /// load factor of {@value #DEFAULT_LOAD_FACTOR}. `
+            /// Creates a hash set with the default capacity of 16,
+            /// load factor of <see cref="DEFAULT_LOAD_FACTOR"/>. 
             /// </summary>
             public IdentityHashSet()
                 : this(16, DEFAULT_LOAD_FACTOR)
@@ -724,7 +726,7 @@ namespace Lucene.Net.Util
 
             /// <summary>
             /// Creates a hash set with the given capacity, load factor of
-            /// {@value #DEFAULT_LOAD_FACTOR}.
+            /// <see cref="DEFAULT_LOAD_FACTOR"/>.
             /// </summary>
             public IdentityHashSet(int initialCapacity)
                 : this(initialCapacity, DEFAULT_LOAD_FACTOR)
@@ -794,11 +796,12 @@ namespace Lucene.Net.Util
             /// <summary>
             /// Rehash via MurmurHash.
             ///
-            /// <p>The implementation is based on the
+            /// <para/>The implementation is based on the
             /// finalization step from Austin Appleby's
-            /// <code>MurmurHash3</code>.
+            /// <c>MurmurHash3</c>.
+            /// 
+            /// See <a target="_blank" href="http://sites.google.com/site/murmurhash/">http://sites.google.com/site/murmurhash/</a>.
             /// </summary>
-            /// <seealso cref= "http://sites.google.com/site/murmurhash/" </seealso>
             private static int Rehash(object o)
             {
                 int k = RuntimeHelpers.GetHashCode(o);
@@ -842,7 +845,7 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Allocate internal buffers for a given capacity.
+            /// Allocate internal buffers for a given <paramref name="capacity"/>.
             /// </summary>
             /// <param name="capacity">
             ///          New capacity (must be a power of two). </param>
@@ -898,6 +901,8 @@ namespace Lucene.Net.Util
                 get { return Assigned; }
             }
 
+            // LUCENENET TODO: API - bring back this IsEmpty property (doesn't work the same as !Any())
+
             //public bool Empty // LUCENENET NOTE: in .NET we can just use !Any() on IEnumerable<T>
             //{
             //    get

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/RecyclingByteBlockAllocator.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/RecyclingByteBlockAllocator.cs b/src/Lucene.Net/Util/RecyclingByteBlockAllocator.cs
index f92aa3f..d423f29 100644
--- a/src/Lucene.Net/Util/RecyclingByteBlockAllocator.cs
+++ b/src/Lucene.Net/Util/RecyclingByteBlockAllocator.cs
@@ -21,12 +21,12 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// A <seealso cref="ByteBlockPool.Allocator"/> implementation that recycles unused byte
+    /// A <see cref="ByteBlockPool.Allocator"/> implementation that recycles unused byte
     /// blocks in a buffer and reuses them in subsequent calls to
-    /// <seealso cref="#getByteBlock()"/>.
-    /// <p>
-    /// Note: this class is not thread-safe
-    /// </p>
+    /// <see cref="GetByteBlock()"/>.
+    /// <para>
+    /// Note: this class is not thread-safe.
+    /// </para>
     /// @lucene.internal
     /// </summary>
     public sealed class RecyclingByteBlockAllocator : ByteBlockPool.Allocator
@@ -38,14 +38,14 @@ namespace Lucene.Net.Util
         public const int DEFAULT_BUFFERED_BLOCKS = 64;
 
         /// <summary>
-        /// Creates a new <seealso cref="RecyclingByteBlockAllocator"/>
+        /// Creates a new <see cref="RecyclingByteBlockAllocator"/>
         /// </summary>
         /// <param name="blockSize">
-        ///          the block size in bytes </param>
+        ///          The block size in bytes. </param>
         /// <param name="maxBufferedBlocks">
-        ///          maximum number of buffered byte block </param>
+        ///          Maximum number of buffered byte blocks. </param>
         /// <param name="bytesUsed">
-        ///          <seealso cref="Counter"/> reference counting internally allocated bytes </param>
+        ///          <see cref="Counter"/> reference counting internally allocated bytes. </param>
         public RecyclingByteBlockAllocator(int blockSize, int maxBufferedBlocks, Counter bytesUsed)
             : base(blockSize)
         {
@@ -55,22 +55,21 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a new <seealso cref="RecyclingByteBlockAllocator"/>.
+        /// Creates a new <see cref="RecyclingByteBlockAllocator"/>.
         /// </summary>
         /// <param name="blockSize">
-        ///          the block size in bytes </param>
+        ///          The block size in bytes. </param>
         /// <param name="maxBufferedBlocks">
-        ///          maximum number of buffered byte block </param>
+        ///          Maximum number of buffered byte blocks. </param>
         public RecyclingByteBlockAllocator(int blockSize, int maxBufferedBlocks)
             : this(blockSize, maxBufferedBlocks, Counter.NewCounter(false))
         {
         }
 
         /// <summary>
-        /// Creates a new <seealso cref="RecyclingByteBlockAllocator"/> with a block size of
-        /// <seealso cref="ByteBlockPool#BYTE_BLOCK_SIZE"/>, upper buffered docs limit of
-        /// <seealso cref="#DEFAULT_BUFFERED_BLOCKS"/> ({@value #DEFAULT_BUFFERED_BLOCKS}).
-        ///
+        /// Creates a new <see cref="RecyclingByteBlockAllocator"/> with a block size of
+        /// <see cref="ByteBlockPool.BYTE_BLOCK_SIZE"/>, upper buffered docs limit of
+        /// <see cref="DEFAULT_BUFFERED_BLOCKS"/> (64).
         /// </summary>
         public RecyclingByteBlockAllocator()
             : this(ByteBlockPool.BYTE_BLOCK_SIZE, 64, Counter.NewCounter(false))
@@ -113,19 +112,19 @@ namespace Lucene.Net.Util
             Debug.Assert(bytesUsed.Get() >= 0);
         }
 
-        /// <returns> the number of currently buffered blocks </returns>
+        /// <returns> The number of currently buffered blocks. </returns>
         public int NumBufferedBlocks
         {
             get { return freeBlocks; }
         }
 
-        /// <returns> the number of bytes currently allocated by this <seealso cref="Allocator"/> </returns>
+        /// <returns> The number of bytes currently allocated by this <see cref="ByteBlockPool.Allocator"/>. </returns>
         public long BytesUsed
         {
             get { return bytesUsed.Get(); }
         }
 
-        /// <returns> the maximum number of buffered byte blocks </returns>
+        /// <returns> The maximum number of buffered byte blocks. </returns>
         public int MaxBufferedBlocks
         {
             get { return maxBufferedBlocks; }
@@ -135,8 +134,8 @@ namespace Lucene.Net.Util
         /// Removes the given number of byte blocks from the buffer if possible.
         /// </summary>
         /// <param name="num">
-        ///          the number of byte blocks to remove </param>
-        /// <returns> the number of actually removed buffers </returns>
+        ///          The number of byte blocks to remove. </param>
+        /// <returns> The number of actually removed buffers. </returns>
         public int FreeBlocks(int num)
         {
             Debug.Assert(num >= 0, "free blocks must be >= 0 but was: " + num);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/RecyclingIntBlockAllocator.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/RecyclingIntBlockAllocator.cs b/src/Lucene.Net/Util/RecyclingIntBlockAllocator.cs
index 073ea2d..9547e42 100644
--- a/src/Lucene.Net/Util/RecyclingIntBlockAllocator.cs
+++ b/src/Lucene.Net/Util/RecyclingIntBlockAllocator.cs
@@ -23,11 +23,11 @@ namespace Lucene.Net.Util
     using Allocator = Lucene.Net.Util.Int32BlockPool.Allocator;
 
     /// <summary>
-    /// A <seealso cref="Allocator"/> implementation that recycles unused int
+    /// An <see cref="Allocator"/> implementation that recycles unused <see cref="int"/>
     /// blocks in a buffer and reuses them in subsequent calls to
-    /// <seealso cref="#getIntBlock()"/>.
+    /// <see cref="GetInt32Block()"/>.
     /// <para>
-    /// Note: this class is not thread-safe
+    /// Note: this class is not thread-safe.
     /// </para>
     /// <para>
     /// NOTE: This was RecyclingIntBlockAllocator in Lucene
@@ -43,14 +43,14 @@ namespace Lucene.Net.Util
         public const int DEFAULT_BUFFERED_BLOCKS = 64;
 
         /// <summary>
-        /// Creates a new <seealso cref="RecyclingInt32BlockAllocator"/>
+        /// Creates a new <see cref="RecyclingInt32BlockAllocator"/>.
         /// </summary>
         /// <param name="blockSize">
-        ///          the block size in bytes </param>
+        ///          The block size in bytes. </param>
         /// <param name="maxBufferedBlocks">
-        ///          maximum number of buffered int block </param>
+        ///          Maximum number of buffered int blocks. </param>
         /// <param name="bytesUsed">
-        ///          <seealso cref="Counter"/> reference counting internally allocated bytes </param>
+        ///          <see cref="Counter"/> reference counting internally allocated bytes. </param>
         public RecyclingInt32BlockAllocator(int blockSize, int maxBufferedBlocks, Counter bytesUsed)
             : base(blockSize)
         {
@@ -60,12 +60,12 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a new <seealso cref="RecyclingInt32BlockAllocator"/>.
+        /// Creates a new <see cref="RecyclingInt32BlockAllocator"/>.
         /// </summary>
         /// <param name="blockSize">
-        ///          the size of each block returned by this allocator </param>
+        ///          The size of each block returned by this allocator. </param>
         /// <param name="maxBufferedBlocks">
-        ///          maximum number of buffered int blocks </param>
+        ///          Maximum number of buffered int blocks. </param>
         public RecyclingInt32BlockAllocator(int blockSize, int maxBufferedBlocks)
             : this(blockSize, maxBufferedBlocks, Counter.NewCounter(false))
         {
@@ -75,7 +75,6 @@ namespace Lucene.Net.Util
         /// Creates a new <see cref="RecyclingInt32BlockAllocator"/> with a block size of
         /// <see cref="Int32BlockPool.INT32_BLOCK_SIZE"/>, upper buffered docs limit of
         /// <see cref="DEFAULT_BUFFERED_BLOCKS"/>.
-        ///
         /// </summary>
         public RecyclingInt32BlockAllocator()
             : this(Int32BlockPool.INT32_BLOCK_SIZE, 64, Counter.NewCounter(false))
@@ -124,19 +123,19 @@ namespace Lucene.Net.Util
             Debug.Assert(bytesUsed.Get() >= 0);
         }
 
-        /// <returns> the number of currently buffered blocks </returns>
+        /// <returns> The number of currently buffered blocks. </returns>
         public int NumBufferedBlocks
         {
             get { return freeBlocks; }
         }
 
-        /// <returns> the number of bytes currently allocated by this <seealso cref="Allocator"/> </returns>
+        /// <returns> The number of bytes currently allocated by this <see cref="Allocator"/>. </returns>
         public long BytesUsed 
         {
             get { return bytesUsed.Get(); }
         }
 
-        /// <returns> the maximum number of buffered byte blocks </returns>
+        /// <returns> The maximum number of buffered byte blocks. </returns>
         public int MaxBufferedBlocks
         {
             get { return maxBufferedBlocks; }
@@ -146,8 +145,8 @@ namespace Lucene.Net.Util
         /// Removes the given number of int blocks from the buffer if possible.
         /// </summary>
         /// <param name="num">
-        ///          the number of int blocks to remove </param>
-        /// <returns> the number of actually removed buffers </returns>
+        ///          The number of int blocks to remove. </param>
+        /// <returns> The number of actually removed buffers. </returns>
         public int FreeBlocks(int num)
         {
             Debug.Assert(num >= 0, "free blocks must be >= 0 but was: " + num);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/RollingBuffer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/RollingBuffer.cs b/src/Lucene.Net/Util/RollingBuffer.cs
index defd7c7..e28c30a 100644
--- a/src/Lucene.Net/Util/RollingBuffer.cs
+++ b/src/Lucene.Net/Util/RollingBuffer.cs
@@ -20,8 +20,15 @@ namespace Lucene.Net.Util
      * limitations under the License.
      */
 
+    /// <summary>
+    /// LUCENENET specific class to allow referencing static members of
+    /// <see cref="RollingBuffer{T}"/> without referencing its generic closing type.
+    /// </summary>
     public static class RollingBuffer
     {
+        /// <summary>
+        /// Implement to reset an instance
+        /// </summary>
         public interface IResettable
         {
             void Reset();
@@ -29,10 +36,10 @@ namespace Lucene.Net.Util
     }
 
     /// <summary>
-    /// Acts like forever growing T[], but internally uses a
-    ///  circular buffer to reuse instances of T.
-    ///
-    ///  @lucene.internal
+    /// Acts like forever growing <see cref="T:T[]"/>, but internally uses a
+    /// circular buffer to reuse instances of <typeparamref name="T"/>.
+    /// <para/>
+    /// @lucene.internal
     /// </summary>
     public abstract class RollingBuffer<T>
         where T : RollingBuffer.IResettable
@@ -101,9 +108,9 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Get T instance for this absolute position;
-        ///  this is allowed to be arbitrarily far "in the
-        ///  future" but cannot be before the last freeBefore.
+        /// Get <typeparamref name="T"/> instance for this absolute position;
+        /// this is allowed to be arbitrarily far "in the
+        /// future" but cannot be before the last <see cref="FreeBefore(int)"/>.
         /// </summary>
         public virtual T Get(int pos)
         {
@@ -138,7 +145,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Returns the maximum position looked up, or -1 if no
-        ///  position has been looked up sinc reset/init.
+        /// position has been looked up since <see cref="Reset()"/>/init.
         /// </summary>
         public virtual int MaxPos
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/SPIClassIterator.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/SPIClassIterator.cs b/src/Lucene.Net/Util/SPIClassIterator.cs
index a2a3f02..8a75309 100644
--- a/src/Lucene.Net/Util/SPIClassIterator.cs
+++ b/src/Lucene.Net/Util/SPIClassIterator.cs
@@ -27,13 +27,12 @@ namespace Lucene.Net.Util
 
     /// <summary>
     /// Helper class for loading SPI classes from classpath (META-INF files).
-    /// this is a light impl of <seealso cref="java.util.ServiceLoader"/> but is guaranteed to
+    /// This is a light impl of <c>java.util.ServiceLoader</c> but is guaranteed to
     /// be bug-free regarding classpath order and does not instantiate or initialize
     /// the classes found.
-    ///
+    /// <para/>
     /// @lucene.internal
     /// </summary>
-    ///
     public class SPIClassIterator<S> : IEnumerable<Type>
     {
         private static HashSet<Type> types;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/SentinelIntSet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/SentinelIntSet.cs b/src/Lucene.Net/Util/SentinelIntSet.cs
index 079c212..d908c9c 100644
--- a/src/Lucene.Net/Util/SentinelIntSet.cs
+++ b/src/Lucene.Net/Util/SentinelIntSet.cs
@@ -23,23 +23,25 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// A native int hash-based set where one value is reserved to mean "EMPTY" internally. The space overhead is fairly low
-    /// as there is only one power-of-two sized int[] to hold the values.  The set is re-hashed when adding a value that
-    /// would make it >= 75% full.  Consider extending and over-riding <seealso cref="#hash(int)"/> if the values might be poor
+    /// A native <see cref="int"/> hash-based set where one value is reserved to mean "EMPTY" internally. The space overhead is fairly low
+    /// as there is only one power-of-two sized <see cref="T:int[]"/> to hold the values.  The set is re-hashed when adding a value that
+    /// would make it >= 75% full.  Consider extending and over-riding <see cref="Hash(int)"/> if the values might be poor
     /// hash keys; Lucene docids should be fine.
     /// The internal fields are exposed publicly to enable more efficient use at the expense of better O-O principles.
     /// <para/>
     /// To iterate over the integers held in this set, simply use code like this:
     /// <code>
     /// SentinelIntSet set = ...
-    /// for (int v : set.keys) {
-    ///   if (v == set.emptyVal)
-    ///     continue;
-    ///   //use v...
-    /// }</code>
+    /// foreach (int v in set.keys) 
+    /// {
+    ///     if (v == set.EmptyVal)
+    ///         continue;
+    ///     //use v...
+    /// }
+    /// </code>
     /// <para/>
     /// NOTE: This was SentinelIntSet in Lucene
-    ///
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public class SentinelInt32Set
@@ -61,13 +63,13 @@ namespace Lucene.Net.Util
         public int EmptyVal { get; private set; }
 
         /// <summary>
-        /// the count at which a rehash should be done </summary>
+        /// The count at which a rehash should be done. </summary>
         public int RehashCount { get; set; }
 
         ///
         /// <param name="size">  The minimum number of elements this set should be able to hold without rehashing
-        ///              (i.e. the slots are guaranteed not to change) </param>
-        /// <param name="emptyVal"> The integer value to use for EMPTY </param>
+        ///              (i.e. the slots are guaranteed not to change). </param>
+        /// <param name="emptyVal"> The integer value to use for EMPTY. </param>
         public SentinelInt32Set(int size, int emptyVal)
         {
             this.EmptyVal = emptyVal;
@@ -109,7 +111,7 @@ namespace Lucene.Net.Util
         //}
 
         /// <summary>
-        /// (internal) Returns the slot for this key </summary>
+        /// (internal) Returns the slot for this key. </summary>
         public virtual int GetSlot(int key)
         {
             Debug.Assert(key != EmptyVal);
@@ -129,7 +131,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// (internal) Returns the slot for this key, or -slot-1 if not found </summary>
+        /// (internal) Returns the slot for this key, or -slot-1 if not found. </summary>
         public virtual int Find(int key)
         {
             Debug.Assert(key != EmptyVal);
@@ -191,7 +193,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// (internal) Rehashes by doubling {@code int[] key} and filling with the old values. </summary>
+        /// (internal) Rehashes by doubling key (<see cref="T:int[]"/>) and filling with the old values. </summary>
         public virtual void Rehash()
         {
             int newSize = keys.Length << 1;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/SetOnce.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/SetOnce.cs b/src/Lucene.Net/Util/SetOnce.cs
index 63c1b1d..cc74d54 100644
--- a/src/Lucene.Net/Util/SetOnce.cs
+++ b/src/Lucene.Net/Util/SetOnce.cs
@@ -96,6 +96,9 @@ namespace Lucene.Net.Util
 #endif
     public sealed class AlreadySetException : InvalidOperationException
     {
+        /// <summary>
+        /// Initializes a new instance of <see cref="AlreadySetException"/>.
+        /// </summary>
         public AlreadySetException()
             : base("The object cannot be set twice!")
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/SloppyMath.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/SloppyMath.cs b/src/Lucene.Net/Util/SloppyMath.cs
index 295e842..e7a3d09 100644
--- a/src/Lucene.Net/Util/SloppyMath.cs
+++ b/src/Lucene.Net/Util/SloppyMath.cs
@@ -60,16 +60,17 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Returns the trigonometric cosine of an angle.
-        /// <p>
+        /// <para/>
         /// Error is around 1E-15.
-        /// <p>
+        /// <para/>
         /// Special cases:
-        /// <ul>
-        ///  <li>If the argument is {@code NaN} or an infinity, then the result is {@code NaN}.
-        /// </ul> </summary>
-        /// <param name="a"> an angle, in radians. </param>
-        /// <returns> the cosine of the argument. </returns>
-        /// <seealso cref= Math#cos(double) </seealso>
+        /// <list type="bullet">
+        ///     <item><description>If the argument is <see cref="double.NaN"/> or an infinity, then the result is <see cref="double.NaN"/>.</description></item>
+        /// </list> 
+        /// </summary>
+        /// <param name="a"> An angle, in radians. </param>
+        /// <returns> The cosine of the argument. </returns>
+        /// <seealso cref="Math.Cos(double)"/>
         public static double Cos(double a)
         {
             if (a < 0.0)
@@ -93,17 +94,18 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Returns the arc sine of a value.
-        /// <p>
+        /// <para/>
         /// The returned angle is in the range <i>-pi</i>/2 through <i>pi</i>/2.
         /// Error is around 1E-7.
-        /// <p>
+        /// <para/>
         /// Special cases:
-        /// <ul>
-        ///  <li>If the argument is {@code NaN} or its absolute value is greater than 1, then the result is {@code NaN}.
-        /// </ul> </summary>
+        /// <list type="bullet">
+        ///     <item><description>If the argument is <see cref="double.NaN"/> or its absolute value is greater than 1, then the result is <see cref="double.NaN"/>.</description></item>
+        /// </list> 
+        /// </summary>
         /// <param name="a"> the value whose arc sine is to be returned. </param>
         /// <returns> arc sine of the argument </returns>
-        /// <seealso cref= Math#asin(double) </seealso>
+        /// <seealso cref="Math.Asin(double)"/>
         // because asin(-x) = -asin(x), asin(x) only needs to be computed on [0,1].
         // ---> we only have to compute asin(x) on [0,1].
         // For values not close to +-1, we use look-up tables;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/SmallFloat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/SmallFloat.cs b/src/Lucene.Net/Util/SmallFloat.cs
index 7eb3b07..cbad7ab 100644
--- a/src/Lucene.Net/Util/SmallFloat.cs
+++ b/src/Lucene.Net/Util/SmallFloat.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Util
     /// Floating point numbers smaller than 32 bits.
     /// <para/>
     /// NOTE: This was SmallFloat in Lucene
-    ///
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public class SmallSingle
@@ -36,15 +36,15 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Converts a 32 bit <see cref="float"/> to an 8 bit <see cref="float"/>.
-        /// <br>Values less than zero are all mapped to zero.
-        /// <br>Values are truncated (rounded down) to the nearest 8 bit value.
-        /// <br>Values between zero and the smallest representable value
+        /// <para/>Values less than zero are all mapped to zero.
+        /// <para/>Values are truncated (rounded down) to the nearest 8 bit value.
+        /// <para/>Values between zero and the smallest representable value
         /// are rounded up.
         /// </summary>
-        /// <param name="f"> the 32 bit <see cref="float"/> to be converted to an 8 bit <see cref="float"/> (<see cref="byte"/>)  </param>
-        /// <param name="numMantissaBits"> the number of mantissa bits to use in the byte, with the remainder to be used in the exponent </param>
-        /// <param name="zeroExp"> the zero-point in the range of exponent values </param>
-        /// <returns> the 8 bit float representation </returns>
+        /// <param name="f"> The 32 bit <see cref="float"/> to be converted to an 8 bit <see cref="float"/> (<see cref="byte"/>).  </param>
+        /// <param name="numMantissaBits"> The number of mantissa bits to use in the byte, with the remainder to be used in the exponent. </param>
+        /// <param name="zeroExp"> The zero-point in the range of exponent values. </param>
+        /// <returns> The 8 bit float representation. </returns>
         // LUCENENET specific overload for CLS compliance
         public static byte SingleToByte(float f, int numMantissaBits, int zeroExp)
         {
@@ -60,10 +60,10 @@ namespace Lucene.Net.Util
         /// <para/>
         /// NOTE: This was floatToByte() in Lucene
         /// </summary>
-        /// <param name="f"> the 32 bit <see cref="float"/> to be converted to an 8 bit <see cref="float"/> (<see cref="sbyte"/>) </param>
-        /// <param name="numMantissaBits"> the number of mantissa bits to use in the byte, with the remainder to be used in the exponent </param>
-        /// <param name="zeroExp"> the zero-point in the range of exponent values </param>
-        /// <returns> the 8 bit float representation </returns>
+        /// <param name="f"> The 32 bit <see cref="float"/> to be converted to an 8 bit <see cref="float"/> (<see cref="sbyte"/>). </param>
+        /// <param name="numMantissaBits"> The number of mantissa bits to use in the byte, with the remainder to be used in the exponent. </param>
+        /// <param name="zeroExp"> The zero-point in the range of exponent values. </param>
+        /// <returns> The 8 bit float representation. </returns>
         [CLSCompliant(false)]
         public static sbyte SingleToSByte(float f, int numMantissaBits, int zeroExp)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/Sorter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Sorter.cs b/src/Lucene.Net/Util/Sorter.cs
index 1819541..79d2321 100644
--- a/src/Lucene.Net/Util/Sorter.cs
+++ b/src/Lucene.Net/Util/Sorter.cs
@@ -21,6 +21,7 @@ namespace Lucene.Net.Util
 
     /// <summary>
     /// Base class for sorting algorithms implementations.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public abstract class Sorter
@@ -34,19 +35,19 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Compare entries found in slots <code>i</code> and <code>j</code>.
-        ///  The contract for the returned value is the same as
-        ///  <seealso cref="Comparer#compare(Object, Object)"/>.
+        /// Compare entries found in slots <paramref name="i"/> and <paramref name="j"/>.
+        /// The contract for the returned value is the same as
+        /// <see cref="System.Collections.Generic.IComparer{T}.Compare(T, T)"/>.
         /// </summary>
         protected abstract int Compare(int i, int j);
 
         /// <summary>
-        /// Swap values at slots <code>i</code> and <code>j</code>. </summary>
+        /// Swap values at slots <paramref name="i"/> and <paramref name="j"/>. </summary>
         protected abstract void Swap(int i, int j);
 
         /// <summary>
-        /// Sort the slice which starts at <code>from</code> (inclusive) and ends at
-        ///  <code>to</code> (exclusive).
+        /// Sort the slice which starts at <paramref name="from"/> (inclusive) and ends at
+        /// <paramref name="to"/> (exclusive).
         /// </summary>
         public abstract void Sort(int from, int to);
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/StringHelper.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/StringHelper.cs b/src/Lucene.Net/Util/StringHelper.cs
index b0fd150..1e1ebc4 100644
--- a/src/Lucene.Net/Util/StringHelper.cs
+++ b/src/Lucene.Net/Util/StringHelper.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Util
 
     /// <summary>
     /// Methods for manipulating strings.
-    ///
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public abstract class StringHelper
@@ -42,11 +42,11 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Compares two <seealso cref="BytesRef"/>, element by element, and returns the
+        /// Compares two <see cref="BytesRef"/>, element by element, and returns the
         /// number of elements common to both arrays.
         /// </summary>
-        /// <param name="left"> The first <seealso cref="BytesRef"/> to compare </param>
-        /// <param name="right"> The second <seealso cref="BytesRef"/> to compare </param>
+        /// <param name="left"> The first <see cref="BytesRef"/> to compare. </param>
+        /// <param name="right"> The second <see cref="BytesRef"/> to compare. </param>
         /// <returns> The number of common elements. </returns>
         public static int BytesDifference(BytesRef left, BytesRef right)
         {
@@ -69,8 +69,10 @@ namespace Lucene.Net.Util
         {
         }
 
-        /// <returns> a Comparer over versioned strings such as X.YY.Z
-        /// @lucene.internal </returns>
+        /// <summary> Returns a <see cref="T:IComparer{string}"/> over versioned strings such as X.YY.Z
+        /// <para/>
+        /// @lucene.internal
+        /// </summary>
         public static IComparer<string> VersionComparer
         {
             get
@@ -139,30 +141,30 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns <code>true</code> iff the ref starts with the given prefix.
-        /// Otherwise <code>false</code>.
+        /// Returns <c>true</c> if the <paramref name="ref"/> starts with the given <paramref name="prefix"/>.
+        /// Otherwise <c>false</c>.
         /// </summary>
         /// <param name="ref">
-        ///          the <seealso cref="BytesRef"/> to test </param>
+        ///          The <see cref="BytesRef"/> to test. </param>
         /// <param name="prefix">
-        ///          the expected prefix </param>
-        /// <returns> Returns <code>true</code> iff the ref starts with the given prefix.
-        ///         Otherwise <code>false</code>. </returns>
+        ///          The expected prefix. </param>
+        /// <returns> Returns <c>true</c> if the <paramref name="ref"/> starts with the given <paramref name="prefix"/>.
+        ///         Otherwise <c>false</c>. </returns>
         public static bool StartsWith(BytesRef @ref, BytesRef prefix)
         {
             return SliceEquals(@ref, prefix, 0);
         }
 
         /// <summary>
-        /// Returns <code>true</code> iff the ref ends with the given suffix. Otherwise
-        /// <code>false</code>.
+        /// Returns <c>true</c> if the <paramref name="ref"/> ends with the given <paramref name="suffix"/>. Otherwise
+        /// <c>false</c>.
         /// </summary>
         /// <param name="ref">
-        ///          the <seealso cref="BytesRef"/> to test </param>
+        ///          The <see cref="BytesRef"/> to test. </param>
         /// <param name="suffix">
-        ///          the expected suffix </param>
-        /// <returns> Returns <code>true</code> iff the ref ends with the given suffix.
-        ///         Otherwise <code>false</code>. </returns>
+        ///          The expected suffix. </param>
+        /// <returns> Returns <c>true</c> if the <paramref name="ref"/> ends with the given <paramref name="suffix"/>.
+        ///         Otherwise <c>false</c>. </returns>
         public static bool EndsWith(BytesRef @ref, BytesRef suffix)
         {
             return SliceEquals(@ref, suffix, @ref.Length - suffix.Length);
@@ -190,7 +192,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Pass this as the seed to <seealso cref="#murmurhash3_x86_32"/>. </summary>
+        /// Pass this as the seed to <see cref="Murmurhash3_x86_32(byte[], int, int, int)"/>. </summary>
 
         // Poached from Guava: set a different salt/seed
         // for each JVM instance, to frustrate hash key collision
@@ -238,7 +240,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Returns the MurmurHash3_x86_32 hash.
-        /// Original source/tests at https://github.com/yonik/java_util/
+        /// Original source/tests at <a href="https://github.com/yonik/java_util/">https://github.com/yonik/java_util/</a>. 
         /// </summary>
         public static int Murmurhash3_x86_32(byte[] data, int offset, int len, int seed)
         {
@@ -296,6 +298,10 @@ namespace Lucene.Net.Util
             return h1;
         }
 
+        /// <summary>
+        /// Returns the MurmurHash3_x86_32 hash.
+        /// Original source/tests at <a href="https://github.com/yonik/java_util/">https://github.com/yonik/java_util/</a>. 
+        /// </summary>
         public static int Murmurhash3_x86_32(BytesRef bytes, int seed)
         {
             return Murmurhash3_x86_32(bytes.Bytes, bytes.Offset, bytes.Length, seed);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/TimSorter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/TimSorter.cs b/src/Lucene.Net/Util/TimSorter.cs
index fd4eb55..680edc1 100644
--- a/src/Lucene.Net/Util/TimSorter.cs
+++ b/src/Lucene.Net/Util/TimSorter.cs
@@ -21,20 +21,22 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// <seealso cref="Sorter"/> implementation based on the
+    /// <see cref="Sorter"/> implementation based on the
     /// <a href="http://svn.python.org/projects/python/trunk/Objects/listsort.txt">TimSort</a>
     /// algorithm.
-    /// <p>this implementation is especially good at sorting partially-sorted
+    /// <para/>This implementation is especially good at sorting partially-sorted
     /// arrays and sorts small arrays with binary sort.
-    /// <p><b>NOTE</b>:There are a few differences with the original implementation:<ul>
-    /// <li><a name="maxTempSlots"/>The extra amount of memory to perform merges is
-    /// configurable. this allows small merges to be very fast while large merges
-    /// will be performed in-place (slightly slower). You can make sure that the
-    /// fast merge routine will always be used by having <code>maxTempSlots</code>
-    /// equal to half of the length of the slice of data to sort.
-    /// <li>Only the fast merge routine can gallop (the one that doesn't run
-    /// in-place) and it only gallops on the longest slice.
-    /// </ul>
+    /// <para/><b>NOTE</b>: There are a few differences with the original implementation:
+    /// <list type="bullet">
+    ///     <item><description><a name="maxTempSlots"/>The extra amount of memory to perform merges is
+    ///         configurable. This allows small merges to be very fast while large merges
+    ///         will be performed in-place (slightly slower). You can make sure that the
+    ///         fast merge routine will always be used by having <c>maxTempSlots</c>
+    ///         equal to half of the length of the slice of data to sort.</description></item>
+    ///     <item><description>Only the fast merge routine can gallop (the one that doesn't run
+    ///         in-place) and it only gallops on the longest slice.</description></item>
+    /// </list>
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public abstract class TimSorter : Sorter
@@ -51,8 +53,8 @@ namespace Lucene.Net.Util
         internal int[] runEnds;
 
         /// <summary>
-        /// Create a new <seealso cref="TimSorter"/>. </summary>
-        /// <param name="maxTempSlots"> the <a href="#maxTempSlots">maximum amount of extra memory to run merges</a> </param>
+        /// Create a new <see cref="TimSorter"/>. </summary>
+        /// <param name="maxTempSlots"> The <a href="#maxTempSlots">maximum amount of extra memory to run merges</a> </param>
         protected TimSorter(int maxTempSlots)
             : base()
         {
@@ -61,7 +63,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Minimum run length for an array of length <code>length</code>. </summary>
+        /// Minimum run length for an array of length <paramref name="length"/>. </summary>
         internal static int MinRun(int length)
         {
             Debug.Assert(length >= MINRUN);
@@ -106,7 +108,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Compute the length of the next run, make the run sorted and return its
-        ///  length.
+        /// length.
         /// </summary>
         internal virtual int NextRun()
         {
@@ -227,6 +229,10 @@ namespace Lucene.Net.Util
             }
         }
 
+        /// <summary>
+        /// Sort the slice which starts at <paramref name="from"/> (inclusive) and ends at
+        /// <paramref name="to"/> (exclusive).
+        /// </summary>
         public override void Sort(int from, int to)
         {
             CheckRange(from, to);
@@ -448,23 +454,23 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Copy data from slot <code>src</code> to slot <code>dest</code>. </summary>
+        /// Copy data from slot <paramref name="src"/> to slot <paramref name="dest"/>. </summary>
         protected abstract void Copy(int src, int dest);
 
         /// <summary>
-        /// Save all elements between slots <code>i</code> and <code>i+len</code>
-        ///  into the temporary storage.
+        /// Save all elements between slots <paramref name="i"/> and <paramref name="i"/>+<paramref name="len"/>
+        /// into the temporary storage.
         /// </summary>
         protected abstract void Save(int i, int len);
 
         /// <summary>
-        /// Restore element <code>j</code> from the temporary storage into slot <code>i</code>. </summary>
+        /// Restore element <paramref name="j"/> from the temporary storage into slot <paramref name="i"/>. </summary>
         protected abstract void Restore(int i, int j);
 
         /// <summary>
-        /// Compare element <code>i</code> from the temporary storage with element
-        ///  <code>j</code> from the slice to sort, similarly to
-        ///  <seealso cref="#compare(int, int)"/>.
+        /// Compare element <paramref name="i"/> from the temporary storage with element
+        /// <paramref name="j"/> from the slice to sort, similarly to
+        /// <see cref="Sorter.Compare(int, int)"/>.
         /// </summary>
         protected abstract int CompareSaved(int i, int j);
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/ToStringUtils.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/ToStringUtils.cs b/src/Lucene.Net/Util/ToStringUtils.cs
index af1e127..cab76c6 100644
--- a/src/Lucene.Net/Util/ToStringUtils.cs
+++ b/src/Lucene.Net/Util/ToStringUtils.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// Helper methods to ease implementing <seealso cref="Object#toString()"/>.
+    /// Helper methods to ease implementing <see cref="object.ToString()"/>.
     /// </summary>
     public sealed class ToStringUtils
     {
@@ -30,7 +30,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// for printing boost only if not 1.0
+        /// For printing boost only if not 1.0.
         /// </summary>
         public static string Boost(float boost)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/UnicodeUtil.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/UnicodeUtil.cs b/src/Lucene.Net/Util/UnicodeUtil.cs
index 0e39d13..bfa7aa5 100644
--- a/src/Lucene.Net/Util/UnicodeUtil.cs
+++ b/src/Lucene.Net/Util/UnicodeUtil.cs
@@ -90,21 +90,19 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// Class to encode java's UTF16 char[] into UTF8 byte[]
-    /// without always allocating a new byte[] as
-    /// String.getBytes(StandardCharsets.UTF_8) does.
-    ///
+    /// Class to encode .NET's UTF16 <see cref="T:char[]"/> into UTF8 <see cref="T:byte[]"/>
+    /// without always allocating a new <see cref="T:byte[]"/> as
+    /// <see cref="Encoding.GetBytes(string)"/> of <see cref="Encoding.UTF8"/> does.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
-
     public static class UnicodeUtil
     {
         /// <summary>
         /// A binary term consisting of a number of 0xff bytes, likely to be bigger than other terms
-        ///  (e.g. collation keys) one would normally encounter, and definitely bigger than any UTF-8 terms.
-        ///  <p>
-        ///  WARNING: this is not a valid UTF8 Term
-        ///
+        /// (e.g. collation keys) one would normally encounter, and definitely bigger than any UTF-8 terms.
+        /// <para/>
+        /// WARNING: this is not a valid UTF8 Term
         /// </summary>
         public static readonly BytesRef BIG_TERM = new BytesRef(new byte[] { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }); // TODO this is unrelated here find a better place for it
 
@@ -113,7 +111,6 @@ namespace Lucene.Net.Util
         public const int UNI_SUR_LOW_START = 0xDC00;
         public const int UNI_SUR_LOW_END = 0xDFFF;
         public const int UNI_REPLACEMENT_CHAR = 0xFFFD;
-        //private const int MIN_SUPPLEMENTARY_CODE_POINT = 0x10000;
 
         private const long UNI_MAX_BMP = 0x0000FFFF;
 
@@ -123,8 +120,8 @@ namespace Lucene.Net.Util
         private const int SURROGATE_OFFSET = Character.MIN_SUPPLEMENTARY_CODE_POINT - (UNI_SUR_HIGH_START << (int)HALF_SHIFT) - UNI_SUR_LOW_START;
 
         /// <summary>
-        /// Encode characters from a char[] source, starting at
-        ///  offset for length chars. After encoding, result.offset will always be 0.
+        /// Encode characters from a <see cref="T:char[]"/> <paramref name="source"/>, starting at
+        /// <paramref name="offset"/> for <paramref name="length"/> chars. After encoding, <c>result.Offset</c> will always be 0.
         /// </summary>
         // TODO: broken if incoming result.offset != 0
         public static void UTF16toUTF8(char[] source, int offset, int length, BytesRef result)
@@ -191,8 +188,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Encode characters from this <see cref="ICharSequence"/>, starting at offset
-        ///  for length characters. After encoding, result.offset will always be 0.
+        /// Encode characters from this <see cref="ICharSequence"/>, starting at <paramref name="offset"/>
+        /// for <paramref name="length"/> characters. After encoding, <c>result.Offset</c> will always be 0.
         /// </summary>
         // TODO: broken if incoming result.offset != 0
         public static void UTF16toUTF8(ICharSequence s, int offset, int length, BytesRef result)
@@ -258,8 +255,9 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Encode characters from this <see cref="string"/>, starting at offset
-        /// for length characters. After encoding, result.offset will always be 0.
+        /// Encode characters from this <see cref="string"/>, starting at <paramref name="offset"/>
+        /// for <paramref name="length"/> characters. After encoding, <c>result.Offset</c> will always be 0.
+        /// <para/>
         /// LUCENENET specific.
         /// </summary>
         // TODO: broken if incoming result.offset != 0
@@ -566,12 +564,12 @@ namespace Lucene.Net.Util
         /// <summary>
         /// Returns the number of code points in this UTF8 sequence.
         ///
-        /// <p>this method assumes valid UTF8 input. this method
-        /// <strong>does not perform</strong> full UTF8 validation, it will check only the
+        /// <para/>This method assumes valid UTF8 input. This method
+        /// <b>does not perform</b> full UTF8 validation, it will check only the
         /// first byte of each codepoint (for multi-byte sequences any bytes after
         /// the head are skipped).
         /// </summary>
-        /// <exception cref="IllegalArgumentException"> If invalid codepoint header byte occurs or the
+        /// <exception cref="ArgumentException"> If invalid codepoint header byte occurs or the
         ///    content is prematurely truncated. </exception>
         public static int CodePointCount(BytesRef utf8)
         {
@@ -603,12 +601,12 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// <p>this method assumes valid UTF8 input. this method
-        /// <strong>does not perform</strong> full UTF8 validation, it will check only the
+        /// This method assumes valid UTF8 input. This method
+        /// <b>does not perform</b> full UTF8 validation, it will check only the
         /// first byte of each codepoint (for multi-byte sequences any bytes after
         /// the head are skipped).
         /// </summary>
-        /// <exception cref="IllegalArgumentException"> If invalid codepoint header byte occurs or the
+        /// <exception cref="ArgumentException"> If invalid codepoint header byte occurs or the
         ///    content is prematurely truncated. </exception>
         public static void UTF8toUTF32(BytesRef utf8, Int32sRef utf32)
         {
@@ -674,30 +672,30 @@ namespace Lucene.Net.Util
         private const int TRAIL_SURROGATE_MASK_ = 0x3FF;
 
         /// <summary>
-        /// Trail surrogate minimum value </summary>
+        /// Trail surrogate minimum value. </summary>
         private const int TRAIL_SURROGATE_MIN_VALUE = 0xDC00;
 
         /// <summary>
-        /// Lead surrogate minimum value </summary>
+        /// Lead surrogate minimum value. </summary>
         private const int LEAD_SURROGATE_MIN_VALUE = 0xD800;
 
         /// <summary>
-        /// The minimum value for Supplementary code points </summary>
+        /// The minimum value for Supplementary code points. </summary>
         private const int SUPPLEMENTARY_MIN_VALUE = 0x10000;
 
         /// <summary>
-        /// Value that all lead surrogate starts with </summary>
+        /// Value that all lead surrogate starts with. </summary>
         private static readonly int LEAD_SURROGATE_OFFSET_ = LEAD_SURROGATE_MIN_VALUE - (SUPPLEMENTARY_MIN_VALUE >> LEAD_SURROGATE_SHIFT_);
 
         /// <summary>
-        /// Cover JDK 1.5 API. Create a String from an array of codePoints.
+        /// Cover JDK 1.5 API. Create a String from an array of <paramref name="codePoints"/>.
         /// </summary>
-        /// <param name="codePoints"> The code array </param>
-        /// <param name="offset"> The start of the text in the code point array </param>
-        /// <param name="count"> The number of code points </param>
-        /// <returns> a String representing the code points between offset and count </returns>
-        /// <exception cref="IllegalArgumentException"> If an invalid code point is encountered </exception>
-        /// <exception cref="IndexOutOfBoundsException"> If the offset or count are out of bounds. </exception>
+        /// <param name="codePoints"> The code array. </param>
+        /// <param name="offset"> The start of the text in the code point array. </param>
+        /// <param name="count"> The number of code points. </param>
+        /// <returns> a String representing the code points between offset and count. </returns>
+        /// <exception cref="ArgumentException"> If an invalid code point is encountered. </exception>
+        /// <exception cref="IndexOutOfRangeException"> If the offset or count are out of bounds. </exception>
         public static string NewString(int[] codePoints, int offset, int count)
         {
             char[] chars = ToCharArray(codePoints, offset, count);
@@ -706,13 +704,13 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Generates char array that represents the provided input code points.
-        /// 
+        /// <para/>
         /// LUCENENET specific.
         /// </summary>
-        /// <param name="codePoints"> The code array </param>
-        /// <param name="offset"> The start of the text in the code point array </param>
-        /// <param name="count"> The number of code points </param>
-        /// <returns> a char array representing the code points between offset and count </returns>
+        /// <param name="codePoints"> The code array. </param>
+        /// <param name="offset"> The start of the text in the code point array. </param>
+        /// <param name="count"> The number of code points. </param>
+        /// <returns> a char array representing the code points between offset and count. </returns>
         // LUCENENET NOTE: This code was originally in the NewString() method (above).
         // It has been refactored from the original to remove the exception throw/catch and
        // instead proactively resizes the array rather than relying on exceptions + copy operations
@@ -811,11 +809,11 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Interprets the given byte array as UTF-8 and converts to UTF-16. The <seealso cref="CharsRef"/> will be extended if
+        /// Interprets the given byte array as UTF-8 and converts to UTF-16. The <see cref="CharsRef"/> will be extended if
         /// it doesn't provide enough space to hold the worst case of each byte becoming a UTF-16 codepoint.
-        /// <p>
+        /// <para/>
         /// NOTE: Full characters are read, even if this reads past the length passed (and
-        /// can result in an ArrayOutOfBoundsException if invalid UTF-8 is passed).
+        /// can result in an <see cref="IndexOutOfRangeException"/> if invalid UTF-8 is passed).
         /// Explicit checks for valid UTF-8 are not performed.
         /// </summary>
         // TODO: broken if chars.offset != 0
@@ -862,8 +860,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Utility method for <seealso cref="#UTF8toUTF16(byte[], int, int, CharsRef)"/> </summary>
-        /// <seealso cref= #UTF8toUTF16(byte[], int, int, CharsRef) </seealso>
+        /// Utility method for <see cref="UTF8toUTF16(byte[], int, int, CharsRef)"/> </summary>
+        /// <seealso cref="UTF8toUTF16(byte[], int, int, CharsRef)"/>
         public static void UTF8toUTF16(BytesRef bytesRef, CharsRef chars)
         {
             UTF8toUTF16(bytesRef.Bytes, bytesRef.Offset, bytesRef.Length, chars);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/Version.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Version.cs b/src/Lucene.Net/Util/Version.cs
index 6a39cda..dc5b96c 100644
--- a/src/Lucene.Net/Util/Version.cs
+++ b/src/Lucene.Net/Util/Version.cs
@@ -25,10 +25,10 @@ namespace Lucene.Net.Util
     /// Use by certain classes to match version compatibility
     /// across releases of Lucene.
     ///
-    /// <p><b>WARNING</b>: When changing the version parameter
+    /// <para><b>WARNING</b>: When changing the version parameter
     /// that you supply to components in Lucene, do not simply
     /// change the version at search-time, but instead also adjust
-    /// your indexing code to match, and re-index.</p>
+    /// your indexing code to match, and re-index.</para>
     /// </summary>
     public enum LuceneVersion
     {
@@ -124,16 +124,16 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Match settings and bugs in Lucene's 4.8 release.
-        ///  <p>
-        ///  Use this to get the latest &amp; greatest settings, bug
-        ///  fixes, etc, for Lucene.
+        /// <para/>
+        /// Use this to get the latest &amp; greatest settings, bug
+        /// fixes, etc, for Lucene.
         /// </summary>
         LUCENE_48,
 
         /* Add new constants for later versions **here** to respect order! */
 
         /// <summary>
-        /// <p><b>WARNING</b>: if you use this setting, and then
+        /// <para/><b>WARNING</b>: if you use this setting, and then
         /// upgrade to a newer release of Lucene, sizable changes
         /// may happen.  If backwards compatibility is important
         /// then you should instead explicitly specify an actual


[10/48] lucenenet git commit: Lucene.Net.Search: Fixed up documentation comments

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/FieldCache.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/FieldCache.cs b/src/Lucene.Net/Search/FieldCache.cs
index f922244..0aa0729 100644
--- a/src/Lucene.Net/Search/FieldCache.cs
+++ b/src/Lucene.Net/Search/FieldCache.cs
@@ -37,104 +37,102 @@ namespace Lucene.Net.Search
     /// <summary>
     /// Expert: Maintains caches of term values.
     ///
-    /// <p>Created: May 19, 2004 11:13:14 AM
-    ///
+    /// <para/>Created: May 19, 2004 11:13:14 AM
+    /// <para/>
+    /// @lucene.internal
+    /// <para/>
     /// @since   lucene 1.4 </summary>
-    /// <seealso cref=Lucene.Net.Util.FieldCacheSanityChecker</seealso>
+    /// <seealso cref="Lucene.Net.Util.FieldCacheSanityChecker"/>
     public interface IFieldCache
     {
         /// <summary>
         /// Checks the internal cache for an appropriate entry, and if none is found,
-        ///  reads the terms in <code>field</code> and returns a bit set at the size of
-        ///  <code>reader.maxDoc()</code>, with turned on bits for each docid that
-        ///  does have a value for this field.
+        /// reads the terms in <paramref name="field"/> and returns a bit set at the size of
+        /// <c>reader.MaxDoc</c>, with turned on bits for each docid that
+        /// does have a value for this field.
         /// </summary>
         IBits GetDocsWithField(AtomicReader reader, string field);
 
         /// <summary>
         /// Checks the internal cache for an appropriate entry, and if none is
-        /// found, reads the terms in <code>field</code> as a single byte and returns an array
-        /// of size <code>reader.maxDoc()</code> of the value each document
+        /// found, reads the terms in <paramref name="field"/> as a single <see cref="byte"/> and returns an array
+        /// of size <c>reader.MaxDoc</c> of the value each document
         /// has in the given field. </summary>
         /// <param name="reader">  Used to get field values. </param>
-        /// <param name="field">   Which field contains the single byte values. </param>
-        /// <param name="setDocsWithField">  If true then <seealso cref="#getDocsWithField"/> will
-        ///        also be computed and stored in the FieldCache. </param>
+        /// <param name="field">   Which field contains the single <see cref="byte"/> values. </param>
+        /// <param name="setDocsWithField">  If true then <see cref="GetDocsWithField(AtomicReader, string)"/> will
+        ///        also be computed and stored in the <see cref="IFieldCache"/>. </param>
         /// <returns> The values in the given field for each document. </returns>
         /// <exception cref="IOException">  If any error occurs. </exception>
-        /// @deprecated (4.4) Index as a numeric field using <seealso cref="IntField"/> and then use <seealso cref="#getInts(AtomicReader, String, boolean)"/> instead.
-        [Obsolete("(4.4) Index as a numeric field using IntField and then use GetInt32s(AtomicReader, string, bool) instead.")]
+        [Obsolete("(4.4) Index as a numeric field using Int32Field and then use GetInt32s(AtomicReader, string, bool) instead.")]
         FieldCache.Bytes GetBytes(AtomicReader reader, string field, bool setDocsWithField);
 
         /// <summary>
         /// Checks the internal cache for an appropriate entry, and if none is found,
-        /// reads the terms in <code>field</code> as bytes and returns an array of
-        /// size <code>reader.maxDoc()</code> of the value each document has in the
+        /// reads the terms in <paramref name="field"/> as bytes and returns an array of
+        /// size <c>reader.MaxDoc</c> of the value each document has in the
         /// given field. </summary>
         /// <param name="reader">  Used to get field values. </param>
-        /// <param name="field">   Which field contains the bytes. </param>
-        /// <param name="parser">  Computes byte for string values. </param>
-        /// <param name="setDocsWithField">  If true then <seealso cref="#getDocsWithField"/> will
-        ///        also be computed and stored in the FieldCache. </param>
+        /// <param name="field">   Which field contains the <see cref="byte"/>s. </param>
+        /// <param name="parser">  Computes <see cref="byte"/> for string values. </param>
+        /// <param name="setDocsWithField">  If true then <see cref="GetDocsWithField(AtomicReader, string)"/> will
+        ///        also be computed and stored in the <see cref="IFieldCache"/>. </param>
         /// <returns> The values in the given field for each document. </returns>
         /// <exception cref="IOException">  If any error occurs. </exception>
-        /// @deprecated (4.4) Index as a numeric field using <seealso cref="IntField"/> and then use <seealso cref="#getInts(AtomicReader, String, boolean)"/> instead.
-        [Obsolete("(4.4) Index as a numeric field using IntField and then use GetInt32s(AtomicReader, string, bool) instead.")]
+        [Obsolete("(4.4) Index as a numeric field using Int32Field and then use GetInt32s(AtomicReader, string, bool) instead.")]
         FieldCache.Bytes GetBytes(AtomicReader reader, string field, FieldCache.IByteParser parser, bool setDocsWithField);
 
         /// <summary>
         /// Checks the internal cache for an appropriate entry, and if none is
-        /// found, reads the terms in <code>field</code> as shorts and returns an array
-        /// of size <code>reader.maxDoc()</code> of the value each document
+        /// found, reads the terms in <paramref name="field"/> as <see cref="short"/>s and returns an array
+        /// of size <c>reader.MaxDoc</c> of the value each document
         /// has in the given field. 
         /// <para/>
         /// NOTE: this was getShorts() in Lucene
         /// </summary>
         /// <param name="reader">  Used to get field values. </param>
-        /// <param name="field">   Which field contains the shorts. </param>
-        /// <param name="setDocsWithField">  If true then <seealso cref="#getDocsWithField"/> will
-        ///        also be computed and stored in the FieldCache. </param>
+        /// <param name="field">   Which field contains the <see cref="short"/>s. </param>
+        /// <param name="setDocsWithField">  If true then <see cref="GetDocsWithField(AtomicReader, string)"/> will
+        ///        also be computed and stored in the <see cref="IFieldCache"/>. </param>
         /// <returns> The values in the given field for each document. </returns>
         /// <exception cref="IOException">  If any error occurs. </exception>
-        /// @deprecated (4.4) Index as a numeric field using <seealso cref="IntField"/> and then use <seealso cref="#getInts(AtomicReader, String, boolean)"/> instead.
         [Obsolete("(4.4) Index as a numeric field using Int32Field and then use GetInt32s(AtomicReader, string, bool) instead.")]
         FieldCache.Int16s GetInt16s(AtomicReader reader, string field, bool setDocsWithField);
 
         /// <summary>
         /// Checks the internal cache for an appropriate entry, and if none is found,
-        /// reads the terms in <code>field</code> as shorts and returns an array of
-        /// size <code>reader.maxDoc()</code> of the value each document has in the
+        /// reads the terms in <paramref name="field"/> as shorts and returns an array of
+        /// size <c>reader.MaxDoc</c> of the value each document has in the
         /// given field. 
         /// <para/>
         /// NOTE: this was getShorts() in Lucene
         /// </summary>
         /// <param name="reader">  Used to get field values. </param>
-        /// <param name="field">   Which field contains the shorts. </param>
-        /// <param name="parser">  Computes short for string values. </param>
-        /// <param name="setDocsWithField">  If true then <seealso cref="#getDocsWithField"/> will
-        ///        also be computed and stored in the FieldCache. </param>
+        /// <param name="field">   Which field contains the <see cref="short"/>s. </param>
+        /// <param name="parser">  Computes <see cref="short"/> for string values. </param>
+        /// <param name="setDocsWithField">  If true then <see cref="GetDocsWithField(AtomicReader, string)"/> will
+        ///        also be computed and stored in the <see cref="IFieldCache"/>. </param>
         /// <returns> The values in the given field for each document. </returns>
         /// <exception cref="IOException">  If any error occurs. </exception>
-        /// @deprecated (4.4) Index as a numeric field using <seealso cref="IntField"/> and then use <seealso cref="#getInts(AtomicReader, String, boolean)"/> instead.
         [Obsolete("(4.4) Index as a numeric field using Int32Field and then use GetInt32s(AtomicReader, string, bool) instead.")]
         FieldCache.Int16s GetInt16s(AtomicReader reader, string field, FieldCache.IInt16Parser parser, bool setDocsWithField);
 
         /// <summary>
-        /// Returns an <seealso cref="FieldCache.Int32s"/> over the values found in documents in the given
+        /// Returns an <see cref="FieldCache.Int32s"/> over the values found in documents in the given
         /// field.
         /// <para/>
         /// NOTE: this was getInts() in Lucene
         /// </summary>
-        /// <seealso cref= #getInts(AtomicReader, String, IntParser, boolean) </seealso>
+        /// <seealso cref="GetInt32s(AtomicReader, string, FieldCache.IInt32Parser, bool)"/>
         FieldCache.Int32s GetInt32s(AtomicReader reader, string field, bool setDocsWithField);
 
         /// <summary>
-        /// Returns an <seealso cref="FieldCache.Int32s"/> over the values found in documents in the given
-        /// field. If the field was indexed as <seealso cref="NumericDocValuesField"/>, it simply
-        /// uses <seealso cref="AtomicReader#getNumericDocValues(String)"/> to read the values.
+        /// Returns an <see cref="FieldCache.Int32s"/> over the values found in documents in the given
+        /// field. If the field was indexed as <see cref="Documents.NumericDocValuesField"/>, it simply
+        /// uses <see cref="AtomicReader.GetNumericDocValues(string)"/> to read the values.
         /// Otherwise, it checks the internal cache for an appropriate entry, and if
-        /// none is found, reads the terms in <code>field</code> as ints and returns
-        /// an array of size <code>reader.maxDoc()</code> of the value each document
+        /// none is found, reads the terms in <paramref name="field"/> as <see cref="int"/>s and returns
+        /// an array of size <c>reader.MaxDoc</c> of the value each document
         /// has in the given field.
         /// <para/>
         /// NOTE: this was getInts() in Lucene
@@ -142,35 +140,35 @@ namespace Lucene.Net.Search
         /// <param name="reader">
         ///          Used to get field values. </param>
         /// <param name="field">
-        ///          Which field contains the longs. </param>
+        ///          Which field contains the <see cref="int"/>s. </param>
         /// <param name="parser">
-        ///          Computes int for string values. May be {@code null} if the
-        ///          requested field was indexed as <seealso cref="NumericDocValuesField"/> or
-        ///          <seealso cref="IntField"/>. </param>
+        ///          Computes <see cref="int"/> for string values. May be <c>null</c> if the
+        ///          requested field was indexed as <see cref="Documents.NumericDocValuesField"/> or
+        ///          <see cref="Documents.Int32Field"/>. </param>
         /// <param name="setDocsWithField">
-        ///          If true then <seealso cref="#getDocsWithField"/> will also be computed and
-        ///          stored in the FieldCache. </param>
+        ///          If true then <see cref="GetDocsWithField(AtomicReader, string)"/> will also be computed and
+        ///          stored in the <see cref="IFieldCache"/>. </param>
         /// <returns> The values in the given field for each document. </returns>
         /// <exception cref="IOException">
         ///           If any error occurs. </exception>
         FieldCache.Int32s GetInt32s(AtomicReader reader, string field, FieldCache.IInt32Parser parser, bool setDocsWithField);
 
         /// <summary>
-        /// Returns a <seealso cref="Floats"/> over the values found in documents in the given
+        /// Returns a <see cref="FieldCache.Singles"/> over the values found in documents in the given
         /// field.
         /// <para/>
         /// NOTE: this was getFloats() in Lucene
         /// </summary>
-        /// <seealso cref= #getFloats(AtomicReader, String, FloatParser, boolean) </seealso>
+        /// <seealso cref="GetSingles(AtomicReader, string, FieldCache.ISingleParser, bool)"/>
         FieldCache.Singles GetSingles(AtomicReader reader, string field, bool setDocsWithField);
 
         /// <summary>
-        /// Returns a <seealso cref="Floats"/> over the values found in documents in the given
-        /// field. If the field was indexed as <seealso cref="NumericDocValuesField"/>, it simply
-        /// uses <seealso cref="AtomicReader#getNumericDocValues(String)"/> to read the values.
+        /// Returns a <see cref="FieldCache.Singles"/> over the values found in documents in the given
+        /// field. If the field was indexed as <see cref="Documents.NumericDocValuesField"/>, it simply
+        /// uses <see cref="AtomicReader.GetNumericDocValues(string)"/> to read the values.
         /// Otherwise, it checks the internal cache for an appropriate entry, and if
-        /// none is found, reads the terms in <code>field</code> as floats and returns
-        /// an array of size <code>reader.maxDoc()</code> of the value each document
+        /// none is found, reads the terms in <paramref name="field"/> as <see cref="float"/>s and returns
+        /// an array of size <c>reader.MaxDoc</c> of the value each document
         /// has in the given field.
         /// <para/>
         /// NOTE: this was getFloats() in Lucene
@@ -178,35 +176,35 @@ namespace Lucene.Net.Search
         /// <param name="reader">
         ///          Used to get field values. </param>
         /// <param name="field">
-        ///          Which field contains the floats. </param>
+        ///          Which field contains the <see cref="float"/>s. </param>
         /// <param name="parser">
-        ///          Computes float for string values. May be {@code null} if the
-        ///          requested field was indexed as <seealso cref="NumericDocValuesField"/> or
-        ///          <seealso cref="FloatField"/>. </param>
+        ///          Computes <see cref="float"/> for string values. May be <c>null</c> if the
+        ///          requested field was indexed as <see cref="Documents.NumericDocValuesField"/> or
+        ///          <see cref="Documents.SingleField"/>. </param>
         /// <param name="setDocsWithField">
-        ///          If true then <seealso cref="#getDocsWithField"/> will also be computed and
-        ///          stored in the FieldCache. </param>
+        ///          If true then <see cref="GetDocsWithField(AtomicReader, string)"/> will also be computed and
+        ///          stored in the <see cref="IFieldCache"/>. </param>
         /// <returns> The values in the given field for each document. </returns>
         /// <exception cref="IOException">
         ///           If any error occurs. </exception>
         FieldCache.Singles GetSingles(AtomicReader reader, string field, FieldCache.ISingleParser parser, bool setDocsWithField);
 
         /// <summary>
-        /// Returns a <seealso cref="Longs"/> over the values found in documents in the given
+        /// Returns a <see cref="FieldCache.Int64s"/> over the values found in documents in the given
         /// field.
         /// <para/>
         /// NOTE: this was getLongs() in Lucene
         /// </summary>
-        /// <seealso cref= #getLongs(AtomicReader, String, LongParser, boolean) </seealso>
+        /// <seealso cref="GetInt64s(AtomicReader, string, FieldCache.IInt64Parser, bool)"/>
         FieldCache.Int64s GetInt64s(AtomicReader reader, string field, bool setDocsWithField);
 
         /// <summary>
-        /// Returns a <seealso cref="Longs"/> over the values found in documents in the given
-        /// field. If the field was indexed as <seealso cref="NumericDocValuesField"/>, it simply
-        /// uses <seealso cref="AtomicReader#getNumericDocValues(String)"/> to read the values.
+        /// Returns a <see cref="FieldCache.Int64s"/> over the values found in documents in the given
+        /// field. If the field was indexed as <see cref="Documents.NumericDocValuesField"/>, it simply
+        /// uses <see cref="AtomicReader.GetNumericDocValues(string)"/> to read the values.
         /// Otherwise, it checks the internal cache for an appropriate entry, and if
-        /// none is found, reads the terms in <code>field</code> as longs and returns
-        /// an array of size <code>reader.maxDoc()</code> of the value each document
+        /// none is found, reads the terms in <paramref name="field"/> as <see cref="long"/>s and returns
+        /// an array of size <c>reader.MaxDoc</c> of the value each document
         /// has in the given field.
         /// <para/>
         /// NOTE: this was getLongs() in Lucene
@@ -214,46 +212,46 @@ namespace Lucene.Net.Search
         /// <param name="reader">
         ///          Used to get field values. </param>
         /// <param name="field">
-        ///          Which field contains the longs. </param>
+        ///          Which field contains the <see cref="long"/>s. </param>
         /// <param name="parser">
-        ///          Computes long for string values. May be {@code null} if the
-        ///          requested field was indexed as <seealso cref="NumericDocValuesField"/> or
-        ///          <seealso cref="LongField"/>. </param>
+        ///          Computes <see cref="long"/> for string values. May be <c>null</c> if the
+        ///          requested field was indexed as <see cref="Documents.NumericDocValuesField"/> or
+        ///          <see cref="Documents.Int64Field"/>. </param>
         /// <param name="setDocsWithField">
-        ///          If true then <seealso cref="#getDocsWithField"/> will also be computed and
-        ///          stored in the FieldCache. </param>
+        ///          If true then <see cref="GetDocsWithField(AtomicReader, string)"/> will also be computed and
+        ///          stored in the <see cref="IFieldCache"/>. </param>
         /// <returns> The values in the given field for each document. </returns>
         /// <exception cref="IOException">
         ///           If any error occurs. </exception>
         FieldCache.Int64s GetInt64s(AtomicReader reader, string field, FieldCache.IInt64Parser parser, bool setDocsWithField);
 
         /// <summary>
-        /// Returns a <seealso cref="Doubles"/> over the values found in documents in the given
+        /// Returns a <see cref="FieldCache.Doubles"/> over the values found in documents in the given
         /// field.
         /// </summary>
-        /// <seealso cref= #getDoubles(AtomicReader, String, DoubleParser, boolean) </seealso>
+        /// <seealso cref="GetDoubles(AtomicReader, string, FieldCache.IDoubleParser, bool)"/>
         FieldCache.Doubles GetDoubles(AtomicReader reader, string field, bool setDocsWithField);
 
         /// <summary>
-        /// Returns a <seealso cref="Doubles"/> over the values found in documents in the given
-        /// field. If the field was indexed as <seealso cref="NumericDocValuesField"/>, it simply
-        /// uses <seealso cref="AtomicReader#getNumericDocValues(String)"/> to read the values.
+        /// Returns a <see cref="FieldCache.Doubles"/> over the values found in documents in the given
+        /// field. If the field was indexed as <see cref="Documents.NumericDocValuesField"/>, it simply
+        /// uses <see cref="AtomicReader.GetNumericDocValues(string)"/> to read the values.
         /// Otherwise, it checks the internal cache for an appropriate entry, and if
-        /// none is found, reads the terms in <code>field</code> as doubles and returns
-        /// an array of size <code>reader.maxDoc()</code> of the value each document
+        /// none is found, reads the terms in <paramref name="field"/> as <see cref="double"/>s and returns
+        /// an array of size <c>reader.MaxDoc</c> of the value each document
         /// has in the given field.
         /// </summary>
         /// <param name="reader">
         ///          Used to get field values. </param>
         /// <param name="field">
-        ///          Which field contains the longs. </param>
+        ///          Which field contains the <see cref="double"/>s. </param>
         /// <param name="parser">
-        ///          Computes double for string values. May be {@code null} if the
-        ///          requested field was indexed as <seealso cref="NumericDocValuesField"/> or
-        ///          <seealso cref="DoubleField"/>. </param>
+        ///          Computes <see cref="double"/> for string values. May be <c>null</c> if the
+        ///          requested field was indexed as <see cref="Documents.NumericDocValuesField"/> or
+        ///          <see cref="Documents.DoubleField"/>. </param>
         /// <param name="setDocsWithField">
-        ///          If true then <seealso cref="#getDocsWithField"/> will also be computed and
-        ///          stored in the FieldCache. </param>
+        ///          If true then <see cref="GetDocsWithField(AtomicReader, string)"/> will also be computed and
+        ///          stored in the <see cref="IFieldCache"/>. </param>
         /// <returns> The values in the given field for each document. </returns>
         /// <exception cref="IOException">
         ///           If any error occurs. </exception>
@@ -261,32 +259,32 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Checks the internal cache for an appropriate entry, and if none
-        /// is found, reads the term values in <code>field</code>
-        /// and returns a <seealso cref="BinaryDocValues"/> instance, providing a
-        /// method to retrieve the term (as a BytesRef) per document. </summary>
+        /// is found, reads the term values in <paramref name="field"/>
+        /// and returns a <see cref="BinaryDocValues"/> instance, providing a
+        /// method to retrieve the term (as a <see cref="BytesRef"/>) per document. </summary>
         /// <param name="reader">  Used to get field values. </param>
         /// <param name="field">   Which field contains the strings. </param>
-        /// <param name="setDocsWithField">  If true then <seealso cref="#getDocsWithField"/> will
-        ///        also be computed and stored in the FieldCache. </param>
+        /// <param name="setDocsWithField">  If true then <see cref="GetDocsWithField(AtomicReader, string)"/> will
+        ///        also be computed and stored in the <see cref="IFieldCache"/>. </param>
         /// <returns> The values in the given field for each document. </returns>
         /// <exception cref="IOException">  If any error occurs. </exception>
         BinaryDocValues GetTerms(AtomicReader reader, string field, bool setDocsWithField);
 
         /// <summary>
-        /// Expert: just like <seealso cref="#getTerms(AtomicReader,String,boolean)"/>,
-        ///  but you can specify whether more RAM should be consumed in exchange for
-        ///  faster lookups (default is "true").  Note that the
-        ///  first call for a given reader and field "wins",
-        ///  subsequent calls will share the same cache entry.
+        /// Expert: just like <see cref="GetTerms(AtomicReader, string, bool)"/>,
+        /// but you can specify whether more RAM should be consumed in exchange for
+        /// faster lookups (default is "true").  Note that the
+        /// first call for a given reader and field "wins",
+        /// subsequent calls will share the same cache entry.
         /// </summary>
         BinaryDocValues GetTerms(AtomicReader reader, string field, bool setDocsWithField, float acceptableOverheadRatio);
 
         /// <summary>
         /// Checks the internal cache for an appropriate entry, and if none
-        /// is found, reads the term values in <code>field</code>
-        /// and returns a <seealso cref="SortedDocValues"/> instance,
+        /// is found, reads the term values in <paramref name="field"/>
+        /// and returns a <see cref="SortedDocValues"/> instance,
         /// providing methods to retrieve sort ordinals and terms
-        /// (as a ByteRef) per document. </summary>
+        /// (as a <see cref="BytesRef"/>) per document. </summary>
         /// <param name="reader">  Used to get field values. </param>
         /// <param name="field">   Which field contains the strings. </param>
         /// <returns> The values in the given field for each document. </returns>
@@ -294,61 +292,57 @@ namespace Lucene.Net.Search
         SortedDocValues GetTermsIndex(AtomicReader reader, string field);
 
         /// <summary>
-        /// Expert: just like {@link
-        ///  #getTermsIndex(AtomicReader,String)}, but you can specify
-        ///  whether more RAM should be consumed in exchange for
-        ///  faster lookups (default is "true").  Note that the
-        ///  first call for a given reader and field "wins",
-        ///  subsequent calls will share the same cache entry.
+        /// Expert: just like 
+        /// <see cref="GetTermsIndex(AtomicReader, string)"/>, but you can specify
+        /// whether more RAM should be consumed in exchange for
+        /// faster lookups (default is "true").  Note that the
+        /// first call for a given reader and field "wins",
+        /// subsequent calls will share the same cache entry.
         /// </summary>
         SortedDocValues GetTermsIndex(AtomicReader reader, string field, float acceptableOverheadRatio);
 
         /// <summary>
         /// Checks the internal cache for an appropriate entry, and if none is found, reads the term values
-        /// in <code>field</code> and returns a <seealso cref="DocTermOrds"/> instance, providing a method to retrieve
+        /// in <paramref name="field"/> and returns a <see cref="SortedSetDocValues"/> instance, providing a method to retrieve
         /// the terms (as ords) per document.
         /// </summary>
-        /// <param name="reader">  Used to build a <seealso cref="DocTermOrds"/> instance </param>
+        /// <param name="reader">  Used to build a <see cref="SortedSetDocValues"/> instance </param>
         /// <param name="field">   Which field contains the strings. </param>
-        /// <returns> a <seealso cref="DocTermOrds"/> instance </returns>
+        /// <returns> a <see cref="SortedSetDocValues"/> instance </returns>
         /// <exception cref="IOException">  If any error occurs. </exception>
         SortedSetDocValues GetDocTermOrds(AtomicReader reader, string field);
 
-        /// <summary>
-        /// EXPERT: A unique Identifier/Description for each item in the FieldCache.
-        /// Can be useful for logging/debugging.
-        /// @lucene.experimental
-        /// </summary>
+        // LUCENENET specific CacheEntry moved to FieldCache static class
 
         /// <summary>
-        /// EXPERT: Generates an array of CacheEntry objects representing all items
-        /// currently in the FieldCache.
-        /// <p>
-        /// NOTE: These CacheEntry objects maintain a strong reference to the
-        /// Cached Values.  Maintaining references to a CacheEntry the AtomicIndexReader
+        /// EXPERT: Generates an array of <see cref="FieldCache.CacheEntry"/> objects representing all items
+        /// currently in the <see cref="IFieldCache"/>.
+        /// <para>
+        /// NOTE: These <see cref="FieldCache.CacheEntry"/> objects maintain a strong reference to the
+        /// Cached Values.  Maintaining references to a <see cref="FieldCache.CacheEntry"/> the <see cref="AtomicReader"/>
         /// associated with it has garbage collected will prevent the Value itself
-        /// from being garbage collected when the Cache drops the WeakReference.
-        /// </p>
+        /// from being garbage collected when the Cache drops the <see cref="WeakReference"/>.
+        /// </para>
         /// @lucene.experimental
         /// </summary>
         FieldCache.CacheEntry[] GetCacheEntries();
 
         /// <summary>
-        /// <p>
+        /// <para>
         /// EXPERT: Instructs the FieldCache to forcibly expunge all entries
-        /// from the underlying caches.  this is intended only to be used for
+        /// from the underlying caches.  This is intended only to be used for
         /// test methods as a way to ensure a known base state of the Cache
-        /// (with out needing to rely on GC to free WeakReferences).
+        /// (without needing to rely on GC to free <see cref="WeakReference"/>s).
         /// It should not be relied on for "Cache maintenance" in general
         /// application code.
-        /// </p>
+        /// </para>
         /// @lucene.experimental
         /// </summary>
         void PurgeAllCaches();
 
         /// <summary>
         /// Expert: drops all cache entries associated with this
-        /// reader <seealso cref="IndexReader#getCoreCacheKey"/>.  NOTE: this cache key must
+        /// reader <see cref="Index.IndexReader.CoreCacheKey"/>.  NOTE: this cache key must
         /// precisely match the reader that the cache entry is
         /// keyed on. If you pass a top-level reader, it usually
         /// will have no effect as Lucene now caches at the segment
@@ -357,32 +351,40 @@ namespace Lucene.Net.Search
         void PurgeByCacheKey(object coreCacheKey);
 
         /// <summary>
-        /// If non-null, FieldCacheImpl will warn whenever
+        /// If non-null, <see cref="FieldCacheImpl"/> will warn whenever
         /// entries are created that are not sane according to
-        /// <seealso cref="Lucene.Net.Util.FieldCacheSanityChecker"/>.
+        /// <see cref="Lucene.Net.Util.FieldCacheSanityChecker"/>.
         /// </summary>
         TextWriter InfoStream { set; get; }
     }
 
-    // LUCENENET TODO: Copy documentation from Lucene
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
     public static class FieldCache 
     {
+        /// <summary>
+        /// Field values as 8-bit signed bytes
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
         public abstract class Bytes
         {
+            /// <summary>
+            /// Return a single Byte representation of this field's value.
+            /// </summary>
             public abstract byte Get(int docID);
 
+            /// <summary>
+            /// Zero value for every document
+            /// </summary>
             public static readonly Bytes EMPTY = new EmptyBytes();
 
 #if FEATURE_SERIALIZABLE
             [Serializable]
 #endif
-            public sealed class EmptyBytes : Bytes
+            private sealed class EmptyBytes : Bytes
             {
                 public override byte Get(int docID)
                 {
@@ -392,6 +394,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
+        /// Field values as 16-bit signed shorts
+        /// <para/>
         /// NOTE: This was Shorts in Lucene
         /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -399,17 +403,20 @@ namespace Lucene.Net.Search
 #endif
         public abstract class Int16s
         {
+            /// <summary>
+            /// Return a <see cref="short"/> representation of this field's value.
+            /// </summary>
             public abstract short Get(int docID);
 
-            public static readonly Int16s EMPTY = new EmptyInt16s();
-
             /// <summary>
-            /// NOTE: This was EmptyShorts in Lucene
+            /// Zero value for every document
             /// </summary>
+            public static readonly Int16s EMPTY = new EmptyInt16s();
+
 #if FEATURE_SERIALIZABLE
             [Serializable]
 #endif
-            public sealed class EmptyInt16s : Int16s
+            private sealed class EmptyInt16s : Int16s
             {
                 public override short Get(int docID)
                 {
@@ -419,6 +426,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
+        /// Field values as 32-bit signed integers
+        /// <para/>
         /// NOTE: This was Ints in Lucene
         /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -426,17 +435,20 @@ namespace Lucene.Net.Search
 #endif
         public abstract class Int32s
         {
+            /// <summary>
+            /// Return an <see cref="int"/> representation of this field's value.
+            /// </summary>
             public abstract int Get(int docID);
 
-            public static readonly Int32s EMPTY = new EmptyInt32s();
-
             /// <summary>
-            /// NOTE: This was EmptyInts in Lucene
+            /// Zero value for every document
             /// </summary>
+            public static readonly Int32s EMPTY = new EmptyInt32s();
+
 #if FEATURE_SERIALIZABLE
             [Serializable]
 #endif
-            public sealed class EmptyInt32s : Int32s
+            private sealed class EmptyInt32s : Int32s
             {
                 public override int Get(int docID)
                 {
@@ -446,6 +458,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
+        /// Field values as 64-bit signed long integers
+        /// <para/>
         /// NOTE: This was Longs in Lucene
         /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -453,17 +467,20 @@ namespace Lucene.Net.Search
 #endif
         public abstract class Int64s
         {
+            /// <summary>
+            /// Return a <see cref="long"/> representation of this field's value.
+            /// </summary>
             public abstract long Get(int docID);
 
-            public static readonly Int64s EMPTY = new EmptyInt64s();
-
             /// <summary>
-            /// NOTE: This was EmptyLongs in Lucene
+            /// Zero value for every document
             /// </summary>
+            public static readonly Int64s EMPTY = new EmptyInt64s();
+
 #if FEATURE_SERIALIZABLE
             [Serializable]
 #endif
-            public sealed class EmptyInt64s : Int64s
+            private sealed class EmptyInt64s : Int64s
             {
                 public override long Get(int docID)
                 {
@@ -473,6 +490,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
+        /// Field values as 32-bit floats
+        /// <para/>
         /// NOTE: This was Floats in Lucene
         /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -480,17 +499,20 @@ namespace Lucene.Net.Search
 #endif
         public abstract class Singles
         {
+            /// <summary>
+            /// Return a <see cref="float"/> representation of this field's value.
+            /// </summary>
             public abstract float Get(int docID);
 
-            public static readonly Singles EMPTY = new EmptySingles();
-
             /// <summary>
-            /// NOTE: This was EmptySingles in Lucene
+            /// Zero value for every document
             /// </summary>
+            public static readonly Singles EMPTY = new EmptySingles();
+
 #if FEATURE_SERIALIZABLE
             [Serializable]
 #endif
-            public sealed class EmptySingles : Singles
+            private sealed class EmptySingles : Singles
             {
                 public override float Get(int docID)
                 {
@@ -499,19 +521,29 @@ namespace Lucene.Net.Search
             }
         }
 
+        /// <summary>
+        /// Field values as 64-bit doubles
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
         public abstract class Doubles
         {
+            /// <summary>
+            /// Return a <see cref="double"/> representation of this field's value.
+            /// </summary>
+            /// <param name="docID">The document ID to look up.</param>
             public abstract double Get(int docID);
 
+            /// <summary>
+            /// Zero value for every document
+            /// </summary>
             public static readonly Doubles EMPTY = new EmptyDoubles();
 
 #if FEATURE_SERIALIZABLE
             [Serializable]
 #endif
-            public sealed class EmptyDoubles : Doubles
+            private sealed class EmptyDoubles : Doubles
             {
                 public override double Get(int docID)
                 {
@@ -520,6 +552,9 @@ namespace Lucene.Net.Search
             }
         }
 
+        /// <summary>
+        /// Placeholder indicating creation of this cache is currently in-progress.
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -528,73 +563,129 @@ namespace Lucene.Net.Search
             internal object Value { get; set; }
         }
 
+        /// <summary>
+        /// Marker interface as super-interface to all parsers. It
+        /// is used to specify a custom parser to
+        /// <see cref="SortField.SortField(string, IParser)"/>.
+        /// </summary>
         public interface IParser
         {
+            /// <summary>
+            /// Pulls a <see cref="Index.TermsEnum"/> from the given <see cref="Index.Terms"/>. This method allows certain parsers
+            /// to filter the actual <see cref="Index.TermsEnum"/> before the field cache is filled.
+            /// </summary>
+            /// <param name="terms">The <see cref="Index.Terms"/> instance to create the <see cref="Index.TermsEnum"/> from.</param>
+            /// <returns>A possibly filtered <see cref="Index.TermsEnum"/> instance, this method must not return <c>null</c>.</returns>
+            /// <exception cref="System.IO.IOException">If an <see cref="IOException"/> occurs</exception>
             TermsEnum TermsEnum(Terms terms);
         }
 
+        /// <summary>
+        /// Interface to parse bytes from document fields.
+        /// </summary>
+        /// <seealso cref="IFieldCache.GetBytes(AtomicReader, string, IByteParser, bool)"/>
+        [Obsolete]
         public interface IByteParser : IParser
         {
+            /// <summary>
+            /// Return a single Byte representation of this field's value.
+            /// </summary>
             byte ParseByte(BytesRef term);
         }
 
         /// <summary>
+        /// Interface to parse <see cref="short"/>s from document fields.
+        /// <para/>
         /// NOTE: This was ShortParser in Lucene
         /// </summary>
+        /// <seealso cref="IFieldCache.GetInt16s(AtomicReader, string, IInt16Parser, bool)"/>
+        [Obsolete]
         public interface IInt16Parser : IParser
         {
             /// <summary>
+            /// Return a <see cref="short"/> representation of this field's value.
+            /// <para/>
             /// NOTE: This was parseShort() in Lucene
             /// </summary>
             short ParseInt16(BytesRef term);
         }
 
         /// <summary>
+        /// Interface to parse <see cref="int"/>s from document fields.
+        /// <para/>
         /// NOTE: This was IntParser in Lucene
         /// </summary>
+        /// <seealso cref="IFieldCache.GetInt32s(AtomicReader, string, IInt32Parser, bool)"/>
         public interface IInt32Parser : IParser
         {
             /// <summary>
+            /// Return an <see cref="int"/> representation of this field's value.
+            /// <para/>
             /// NOTE: This was parseInt() in Lucene
             /// </summary>
             int ParseInt32(BytesRef term);
         }
 
         /// <summary>
+        /// Interface to parse <see cref="float"/>s from document fields.
+        /// <para/>
         /// NOTE: This was FloatParser in Lucene
         /// </summary>
         public interface ISingleParser : IParser
         {
             /// <summary>
+            /// Return a <see cref="float"/> representation of this field's value.
+            /// <para/>
             /// NOTE: This was parseFloat() in Lucene
             /// </summary>
             float ParseSingle(BytesRef term);
         }
 
         /// <summary>
+        /// Interface to parse <see cref="long"/>s from document fields.
+        /// <para/>
         /// NOTE: This was LongParser in Lucene
         /// </summary>
+        /// <seealso cref="IFieldCache.GetInt64s(AtomicReader, string, IInt64Parser, bool)"/>
         public interface IInt64Parser : IParser
         {
             /// <summary>
+            /// Return a <see cref="long"/> representation of this field's value.
+            /// <para/>
             /// NOTE: This was parseLong() in Lucene
             /// </summary>
             long ParseInt64(BytesRef term);
         }
 
+        /// <summary>
+        /// Interface to parse <see cref="double"/>s from document fields.
+        /// </summary>
+        /// <seealso cref="IFieldCache.GetDoubles(AtomicReader, string, IDoubleParser, bool)"/>
         public interface IDoubleParser : IParser
         {
+            /// <summary>
+            /// Return a <see cref="double"/> representation of this field's value.
+            /// </summary>
             double ParseDouble(BytesRef term);
         }
 
+        /// <summary>
+        /// Expert: The cache used internally by sorting and range query classes.
+        /// </summary>
         public static IFieldCache DEFAULT = new FieldCacheImpl();
 
-        public static readonly IByteParser DEFAULT_BYTE_PARSER = new AnonymousByteParser();
+        /// <summary>
+        /// The default parser for byte values, which are encoded by <see cref="sbyte.ToString(string, IFormatProvider)"/>
+        /// using <see cref="CultureInfo.InvariantCulture"/>.
+        /// </summary>
+        [Obsolete]
+        public static readonly IByteParser DEFAULT_BYTE_PARSER = new ByteParser();
 
+        [Obsolete]
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
-        private sealed class AnonymousByteParser : IByteParser
+        private sealed class ByteParser : IByteParser
         {
             public byte ParseByte(BytesRef term)
             {
@@ -617,14 +708,19 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
+        /// The default parser for <see cref="short"/> values, which are encoded by <see cref="short.ToString(string, IFormatProvider)"/>
+        /// using <see cref="CultureInfo.InvariantCulture"/>.
+        /// <para/>
         /// NOTE: This was DEFAULT_SHORT_PARSER in Lucene
         /// </summary>
-        public static readonly IInt16Parser DEFAULT_INT16_PARSER = new AnonymousInt16Parser();
+        [Obsolete]
+        public static readonly IInt16Parser DEFAULT_INT16_PARSER = new Int16Parser();
 
+        [Obsolete]
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
-        private sealed class AnonymousInt16Parser : IInt16Parser
+        private sealed class Int16Parser : IInt16Parser
         {
             /// <summary>
             /// NOTE: This was parseShort() in Lucene
@@ -650,14 +746,19 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
+        /// The default parser for <see cref="int"/> values, which are encoded by <see cref="int.ToString(string, IFormatProvider)"/>
+        /// using <see cref="CultureInfo.InvariantCulture"/>.
+        /// <para/>
         /// NOTE: This was DEFAULT_INT_PARSER in Lucene
         /// </summary>
-        public static readonly IInt32Parser DEFAULT_INT32_PARSER = new AnonymousInt32Parser();
+        [Obsolete]
+        public static readonly IInt32Parser DEFAULT_INT32_PARSER = new Int32Parser();
 
+        [Obsolete]
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
-        private sealed class AnonymousInt32Parser : IInt32Parser
+        private sealed class Int32Parser : IInt32Parser
         {
             /// <summary>
             /// NOTE: This was parseInt() in Lucene
@@ -683,14 +784,19 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
+        /// The default parser for <see cref="float"/> values, which are encoded by <see cref="float.ToString(string, IFormatProvider)"/>
+        /// using <see cref="CultureInfo.InvariantCulture"/>.
+        /// <para/>
         /// NOTE: This was DEFAULT_FLOAT_PARSER in Lucene
         /// </summary>
-        public static readonly ISingleParser DEFAULT_SINGLE_PARSER = new AnonymousSingleParser();
+        [Obsolete]
+        public static readonly ISingleParser DEFAULT_SINGLE_PARSER = new SingleParser();
 
+        [Obsolete]
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
-        private sealed class AnonymousSingleParser : ISingleParser
+        private sealed class SingleParser : ISingleParser
         {
             /// <summary>
             /// NOTE: This was parseFloat() in Lucene
@@ -723,14 +829,19 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
+        /// The default parser for <see cref="long"/> values, which are encoded by <see cref="long.ToString(string, IFormatProvider)"/>
+        /// using <see cref="CultureInfo.InvariantCulture"/>.
+        /// <para/>
         /// NOTE: This was DEFAULT_LONG_PARSER in Lucene
         /// </summary>
-        public static readonly IInt64Parser DEFAULT_INT64_PARSER = new AnonymousInt64Parser();
+        [Obsolete]
+        public static readonly IInt64Parser DEFAULT_INT64_PARSER = new Int64Parser();
 
+        [Obsolete]
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
-        private sealed class AnonymousInt64Parser : IInt64Parser
+        private sealed class Int64Parser : IInt64Parser
         {
             /// <summary>
             /// NOTE: This was parseLong() in Lucene
@@ -755,12 +866,18 @@ namespace Lucene.Net.Search
             }
         }
 
-        public static readonly IDoubleParser DEFAULT_DOUBLE_PARSER = new AnonymousDoubleParser();
+        /// <summary>
+        /// The default parser for <see cref="double"/> values, which are encoded by <see cref="double.ToString(string, IFormatProvider)"/>
+        /// using <see cref="CultureInfo.InvariantCulture"/>.
+        /// </summary>
+        [Obsolete]
+        public static readonly IDoubleParser DEFAULT_DOUBLE_PARSER = new DoubleParser();
 
+        [Obsolete]
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
-        private sealed class AnonymousDoubleParser : IDoubleParser
+        private sealed class DoubleParser : IDoubleParser
         {
             public double ParseDouble(BytesRef term)
             {
@@ -794,14 +911,17 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
+        /// A parser instance for <see cref="int"/> values encoded by <see cref="NumericUtils"/>, e.g. when indexed
+        /// via <see cref="Documents.Int32Field"/>/<see cref="Analysis.NumericTokenStream"/>.
+        /// <para/>
         /// NOTE: This was NUMERIC_UTILS_INT_PARSER in Lucene
         /// </summary>
-        public static readonly IInt32Parser NUMERIC_UTILS_INT32_PARSER = new AnonymousNumericUtilsInt32Parser();
+        public static readonly IInt32Parser NUMERIC_UTILS_INT32_PARSER = new NumericUtilsInt32Parser();
 
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
-        private sealed class AnonymousNumericUtilsInt32Parser : IInt32Parser
+        private sealed class NumericUtilsInt32Parser : IInt32Parser
         {
             /// <summary>
             /// NOTE: This was parseInt() in Lucene
@@ -823,14 +943,17 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
+        /// A parser instance for <see cref="float"/> values encoded with <see cref="NumericUtils"/>, e.g. when indexed
+        /// via <see cref="Documents.SingleField"/>/<see cref="Analysis.NumericTokenStream"/>.
+        /// <para/>
         /// NOTE: This was NUMERIC_UTILS_FLOAT_PARSER in Lucene
         /// </summary>
-        public static readonly ISingleParser NUMERIC_UTILS_SINGLE_PARSER = new AnonymousNumericUtilsSingleParser();
+        public static readonly ISingleParser NUMERIC_UTILS_SINGLE_PARSER = new NumericUtilsSingleParser();
 
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
-        private sealed class AnonymousNumericUtilsSingleParser : ISingleParser
+        private sealed class NumericUtilsSingleParser : ISingleParser
         {
             /// <summary>
             /// NOTE: This was parseFloat() in Lucene
@@ -852,14 +975,17 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
+        /// A parser instance for <see cref="long"/> values encoded by <see cref="NumericUtils"/>, e.g. when indexed
+        /// via <see cref="Documents.Int64Field"/>/<see cref="Analysis.NumericTokenStream"/>.
+        /// <para/>
         /// NOTE: This was NUMERIC_UTILS_LONG_PARSER in Lucene
         /// </summary>
-        public static readonly IInt64Parser NUMERIC_UTILS_INT64_PARSER = new AnonymousNumericUtilsInt64Parser();
+        public static readonly IInt64Parser NUMERIC_UTILS_INT64_PARSER = new NumericUtilsInt64Parser();
 
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
-        private sealed class AnonymousNumericUtilsInt64Parser : IInt64Parser
+        private sealed class NumericUtilsInt64Parser : IInt64Parser
         {
             /// <summary>
             /// NOTE: This was parseLong() in Lucene
@@ -880,12 +1006,16 @@ namespace Lucene.Net.Search
             }
         }
 
-        public static readonly IDoubleParser NUMERIC_UTILS_DOUBLE_PARSER = new AnonymousNumericUtilsDoubleParser();
+        /// <summary>
+        /// A parser instance for <see cref="double"/> values encoded with <see cref="NumericUtils"/>, e.g. when indexed
+        /// via <see cref="Documents.DoubleField"/>/<see cref="Analysis.NumericTokenStream"/>.
+        /// </summary>
+        public static readonly IDoubleParser NUMERIC_UTILS_DOUBLE_PARSER = new NumericUtilsDoubleParser();
 
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
-        private sealed class AnonymousNumericUtilsDoubleParser : IDoubleParser
+        private sealed class NumericUtilsDoubleParser : IDoubleParser
         {
             public double ParseDouble(BytesRef term)
             {
@@ -904,6 +1034,12 @@ namespace Lucene.Net.Search
         }
 
         // .NET Port: skipping down to about line 681 of java version. The actual interface methods of FieldCache are in IFieldCache below.
+        /// <summary>
+        /// EXPERT: A unique Identifier/Description for each item in the <see cref="IFieldCache"/>. 
+        /// Can be useful for logging/debugging.
+        /// <para/>
+        /// @lucene.experimental
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -953,12 +1089,20 @@ namespace Lucene.Net.Search
                 get { return value; }
             }
 
+            /// <summary>
+            /// Computes (and stores) the estimated size of the cache <see cref="Value"/>
+            /// </summary>
+            /// <seealso cref="EstimatedSize"/>
             public void EstimateSize()
             {
                 long bytesUsed = RamUsageEstimator.SizeOf(Value);
                 size = RamUsageEstimator.HumanReadableUnits(bytesUsed);
             }
 
+            /// <summary>
+            /// The most recently estimated size of the value, <c>null</c> unless 
+            /// <see cref="EstimateSize()"/> has been called.
+            /// </summary>
             public string EstimatedSize
             {
                 get { return size; }
@@ -983,542 +1127,4 @@ namespace Lucene.Net.Search
             }
         }
     }
-    
-//  // LUCENENET NOTE: refactoring because of enum nonsense
-//  public static readonly FieldCache DEFAULT = new FieldCacheImpl();
-
-//  private class FieldCache_ByteParserAnonymousInnerClassHelper : FieldCache_ByteParser
-//  {
-//      public FieldCache_ByteParserAnonymousInnerClassHelper()
-//      {
-//      }
-
-//      public virtual sbyte ParseByte(BytesRef term)
-//      {
-//      // TODO: would be far better to directly parse from
-//      // UTF8 bytes... but really users should use
-//      // IntField, instead, which already decodes
-//      // directly from byte[]
-//      return (sbyte)Convert.ToByte(term.Utf8ToString());
-//      }
-//    public override string ToString()
-//    {
-//      return typeof(FieldCache).Name + ".DEFAULT_BYTE_PARSER";
-//    }
-//    public virtual TermsEnum TermsEnum(Terms terms)
-//    {
-//      return terms.Iterator(null);
-//    }
-//  }
-
-//  private class FieldCache_ShortParserAnonymousInnerClassHelper : FieldCache_ShortParser
-//  {
-//      public FieldCache_ShortParserAnonymousInnerClassHelper()
-//      {
-//      }
-
-//      public virtual short ParseShort(BytesRef term)
-//      {
-//      // TODO: would be far better to directly parse from
-//      // UTF8 bytes... but really users should use
-//      // IntField, instead, which already decodes
-//      // directly from byte[]
-//      return Convert.ToInt16(term.Utf8ToString());
-//      }
-//    public override string ToString()
-//    {
-//      return typeof(FieldCache).Name + ".DEFAULT_SHORT_PARSER";
-//    }
-
-//    public virtual TermsEnum TermsEnum(Terms terms)
-//    {
-//      return terms.Iterator(null);
-//    }
-//  }
-
-//  private class FieldCache_IntParserAnonymousInnerClassHelper : FieldCache_IntParser
-//  {
-//      public FieldCache_IntParserAnonymousInnerClassHelper()
-//      {
-//      }
-
-//      public virtual int ParseInt(BytesRef term)
-//      {
-//      // TODO: would be far better to directly parse from
-//      // UTF8 bytes... but really users should use
-//      // IntField, instead, which already decodes
-//      // directly from byte[]
-//      return Convert.ToInt32(term.Utf8ToString());
-//      }
-
-//    public virtual TermsEnum TermsEnum(Terms terms)
-//    {
-//      return terms.Iterator(null);
-//    }
-
-//    public override string ToString()
-//    {
-//      return typeof(FieldCache).Name + ".DEFAULT_INT_PARSER";
-//    }
-//  }
-
-//  private class FieldCache_FloatParserAnonymousInnerClassHelper : FieldCache_FloatParser
-//  {
-//      public FieldCache_FloatParserAnonymousInnerClassHelper()
-//      {
-//      }
-
-//      public virtual float ParseFloat(BytesRef term)
-//      {
-//      // TODO: would be far better to directly parse from
-//      // UTF8 bytes... but really users should use
-//      // FloatField, instead, which already decodes
-//      // directly from byte[]
-//      return Convert.ToSingle(term.Utf8ToString());
-//      }
-
-//    public virtual TermsEnum TermsEnum(Terms terms)
-//    {
-//      return terms.Iterator(null);
-//    }
-
-//    public override string ToString()
-//    {
-//      return typeof(FieldCache).Name + ".DEFAULT_FLOAT_PARSER";
-//    }
-//  }
-
-//  private class FieldCache_LongParserAnonymousInnerClassHelper : FieldCache_LongParser
-//  {
-//      public FieldCache_LongParserAnonymousInnerClassHelper()
-//      {
-//      }
-
-//      public virtual long ParseLong(BytesRef term)
-//      {
-//      // TODO: would be far better to directly parse from
-//      // UTF8 bytes... but really users should use
-//      // LongField, instead, which already decodes
-//      // directly from byte[]
-//      return Convert.ToInt64(term.Utf8ToString());
-//      }
-
-//    public virtual TermsEnum TermsEnum(Terms terms)
-//    {
-//      return terms.Iterator(null);
-//    }
-
-//    public override string ToString()
-//    {
-//      return typeof(FieldCache).Name + ".DEFAULT_LONG_PARSER";
-//    }
-//  }
-
-//  private class FieldCache_DoubleParserAnonymousInnerClassHelper : FieldCache_DoubleParser
-//  {
-//      public FieldCache_DoubleParserAnonymousInnerClassHelper()
-//      {
-//      }
-
-//      public virtual double ParseDouble(BytesRef term)
-//      {
-//      // TODO: would be far better to directly parse from
-//      // UTF8 bytes... but really users should use
-//      // DoubleField, instead, which already decodes
-//      // directly from byte[]
-//      return Convert.ToDouble(term.Utf8ToString());
-//      }
-
-//    public virtual TermsEnum TermsEnum(Terms terms)
-//    {
-//      return terms.Iterator(null);
-//    }
-
-//    public override string ToString()
-//    {
-//      return typeof(FieldCache).Name + ".DEFAULT_DOUBLE_PARSER";
-//    }
-//  }
-
-//  private class FieldCache_IntParserAnonymousInnerClassHelper2 : FieldCache_IntParser
-//  {
-//      public FieldCache_IntParserAnonymousInnerClassHelper2()
-//      {
-//      }
-
-//      public override int ParseInt(BytesRef term)
-//      {
-//        return NumericUtils.PrefixCodedToInt(term);
-//      }
-
-//      public override TermsEnum TermsEnum(Terms terms)
-//      {
-//        return NumericUtils.FilterPrefixCodedInts(terms.Iterator(null));
-//      }
-
-//      public override string ToString()
-//      {
-//        return typeof(FieldCache).Name + ".NUMERIC_UTILS_INT_PARSER";
-//      }
-//  }
-
-//  private class FieldCache_FloatParserAnonymousInnerClassHelper2 : FieldCache_FloatParser
-//  {
-//      public FieldCache_FloatParserAnonymousInnerClassHelper2()
-//      {
-//      }
-
-//      public override float ParseFloat(BytesRef term)
-//      {
-//        return NumericUtils.SortableIntToFloat(NumericUtils.PrefixCodedToInt(term));
-//      }
-//      public override string ToString()
-//      {
-//        return typeof(FieldCache).Name + ".NUMERIC_UTILS_FLOAT_PARSER";
-//      }
-
-//      public override TermsEnum TermsEnum(Terms terms)
-//      {
-//        return NumericUtils.FilterPrefixCodedInts(terms.Iterator(null));
-//      }
-//  }
-
-//  private class FieldCache_LongParserAnonymousInnerClassHelper2 : FieldCache_LongParser
-//  {
-//      public FieldCache_LongParserAnonymousInnerClassHelper2()
-//      {
-//      }
-
-//      public override long ParseLong(BytesRef term)
-//      {
-//        return NumericUtils.PrefixCodedToLong(term);
-//      }
-//      public override string ToString()
-//      {
-//        return typeof(FieldCache).Name + ".NUMERIC_UTILS_LONG_PARSER";
-//      }
-
-//      public override TermsEnum TermsEnum(Terms terms)
-//      {
-//        return NumericUtils.FilterPrefixCodedLongs(terms.Iterator(null));
-//      }
-//  }
-
-//  private class FieldCache_DoubleParserAnonymousInnerClassHelper2 : FieldCache_DoubleParser
-//  {
-//      public FieldCache_DoubleParserAnonymousInnerClassHelper2()
-//      {
-//      }
-
-//      public override double ParseDouble(BytesRef term)
-//      {
-//        return NumericUtils.SortableLongToDouble(NumericUtils.PrefixCodedToLong(term));
-//      }
-//      public override string ToString()
-//      {
-//        return typeof(FieldCache).Name + ".NUMERIC_UTILS_DOUBLE_PARSER";
-//      }
-
-//      public override TermsEnum TermsEnum(Terms terms)
-//      {
-//        return NumericUtils.FilterPrefixCodedLongs(terms.Iterator(null));
-//      }
-//  }
-//}
-
-//  public abstract class FieldCache_Bytes
-//  {
-//    /// <summary>
-//    /// Return a single Byte representation of this field's value. </summary>
-//    public abstract sbyte Get(int docID);
-
-//    /// <summary>
-//    /// Zero value for every document </summary>
-//    public static readonly FieldCache_Bytes EMPTY = new FieldCache_BytesAnonymousInnerClassHelper();
-
-//    private class FieldCache_BytesAnonymousInnerClassHelper : FieldCache_Bytes
-//    {
-//        public FieldCache_BytesAnonymousInnerClassHelper()
-//        {
-//        }
-
-//      public override sbyte Get(int docID)
-//      {
-//        return 0;
-//      }
-//    }
-//  }
-
-//  public abstract class FieldCache_Shorts
-//  {
-//    /// <summary>
-//    /// Return a short representation of this field's value. </summary>
-//    public abstract short Get(int docID);
-
-//    /// <summary>
-//    /// Zero value for every document </summary>
-//    public static readonly FieldCache_Shorts EMPTY = new FieldCache_ShortsAnonymousInnerClassHelper();
-
-//    private class FieldCache_ShortsAnonymousInnerClassHelper : FieldCache_Shorts
-//    {
-//        public FieldCache_ShortsAnonymousInnerClassHelper()
-//        {
-//        }
-
-//      public override short Get(int docID)
-//      {
-//        return 0;
-//      }
-//    }
-//  }
-
-//  public abstract class FieldCache_Ints
-//  {
-//    /// <summary>
-//    /// Return an integer representation of this field's value. </summary>
-//    public abstract int Get(int docID);
-
-//    /// <summary>
-//    /// Zero value for every document </summary>
-//    public static readonly FieldCache_Ints EMPTY = new FieldCache_IntsAnonymousInnerClassHelper();
-
-//    private class FieldCache_IntsAnonymousInnerClassHelper : FieldCache_Ints
-//    {
-//        public FieldCache_IntsAnonymousInnerClassHelper()
-//        {
-//        }
-
-//      public override int Get(int docID)
-//      {
-//        return 0;
-//      }
-//    }
-//  }
-
-//  public abstract class FieldCache_Longs
-//  {
-//    /// <summary>
-//    /// Return an long representation of this field's value. </summary>
-//    public abstract long Get(int docID);
-
-//    /// <summary>
-//    /// Zero value for every document </summary>
-//    public static readonly FieldCache_Longs EMPTY = new FieldCache_LongsAnonymousInnerClassHelper();
-
-//    private class FieldCache_LongsAnonymousInnerClassHelper : FieldCache_Longs
-//    {
-//        public FieldCache_LongsAnonymousInnerClassHelper()
-//        {
-//        }
-
-//      public override long Get(int docID)
-//      {
-//        return 0;
-//      }
-//    }
-//  }
-
-//  public abstract class FieldCache_Floats
-//  {
-//    /// <summary>
-//    /// Return an float representation of this field's value. </summary>
-//    public abstract float Get(int docID);
-
-//    /// <summary>
-//    /// Zero value for every document </summary>
-//    public static readonly FieldCache_Floats EMPTY = new FieldCache_FloatsAnonymousInnerClassHelper();
-
-//    private class FieldCache_FloatsAnonymousInnerClassHelper : FieldCache_Floats
-//    {
-//        public FieldCache_FloatsAnonymousInnerClassHelper()
-//        {
-//        }
-
-//      public override float Get(int docID)
-//      {
-//        return 0;
-//      }
-//    }
-//  }
-
-//  public abstract class FieldCache_Doubles
-//  {
-//    /// <summary>
-//    /// Return an double representation of this field's value. </summary>
-//    public abstract double Get(int docID);
-
-//    /// <summary>
-//    /// Zero value for every document </summary>
-//    public static readonly FieldCache_Doubles EMPTY = new FieldCache_DoublesAnonymousInnerClassHelper();
-
-//    private class FieldCache_DoublesAnonymousInnerClassHelper : FieldCache_Doubles
-//    {
-//        public FieldCache_DoublesAnonymousInnerClassHelper()
-//        {
-//        }
-
-//      public override double Get(int docID)
-//      {
-//        return 0;
-//      }
-//    }
-//  }
-
-//  public sealed class FieldCache_CreationPlaceholder
-//  {
-//    internal object Value;
-//  }
-
-//  public interface FieldCache_Parser
-//  {
-//    /// <summary>
-//    /// Pulls a <seealso cref="TermsEnum"/> from the given <seealso cref="Terms"/>. this method allows certain parsers
-//    /// to filter the actual TermsEnum before the field cache is filled.
-//    /// </summary>
-//    /// <param name="terms"> the <seealso cref="Terms"/> instance to create the <seealso cref="TermsEnum"/> from. </param>
-//    /// <returns> a possibly filtered <seealso cref="TermsEnum"/> instance, this method must not return <code>null</code>. </returns>
-//    /// <exception cref="IOException"> if an <seealso cref="IOException"/> occurs </exception>
-//    TermsEnum TermsEnum(Terms terms);
-//  }
-
-//  [Obsolete]
-//  public interface FieldCache_ByteParser : FieldCache_Parser
-//  {
-//    /// <summary>
-//    /// Return a single Byte representation of this field's value. </summary>
-//    sbyte ParseByte(BytesRef term);
-//  }
-
-//  [Obsolete]
-//  public interface FieldCache_ShortParser : FieldCache_Parser
-//  {
-//    /// <summary>
-//    /// Return a short representation of this field's value. </summary>
-//    short ParseShort(BytesRef term);
-//  }
-
-//  public interface FieldCache_IntParser : FieldCache_Parser
-//  {
-//    /// <summary>
-//    /// Return an integer representation of this field's value. </summary>
-//    int ParseInt(BytesRef term);
-//  }
-
-//  public interface FieldCache_FloatParser : FieldCache_Parser
-//  {
-//    /// <summary>
-//    /// Return an float representation of this field's value. </summary>
-//    float ParseFloat(BytesRef term);
-//  }
-
-//  public interface FieldCache_LongParser : FieldCache_Parser
-//  {
-//    /// <summary>
-//    /// Return an long representation of this field's value. </summary>
-//    long ParseLong(BytesRef term);
-//  }
-
-//  public interface FieldCache_DoubleParser : FieldCache_Parser
-//  {
-//    /// <summary>
-//    /// Return an double representation of this field's value. </summary>
-//    double ParseDouble(BytesRef term);
-//  }
-
-//  public sealed class FieldCache_CacheEntry
-//  {
-//    private readonly object readerKey;
-//    private readonly string fieldName;
-//    private readonly Type cacheType;
-//    private readonly object custom;
-//    private readonly object value;
-//    private string Size;
-
-//    public FieldCache_CacheEntry(object readerKey, string fieldName, Type cacheType, object custom, object value)
-//    {
-//      this.readerKey = readerKey;
-//      this.fieldName = fieldName;
-//      this.cacheType = cacheType;
-//      this.custom = custom;
-//      this.value = value;
-//    }
-
-//    public object ReaderKey
-//    {
-//        get
-//        {
-//          return readerKey;
-//        }
-//    }
-
-//    public string FieldName
-//    {
-//        get
-//        {
-//          return fieldName;
-//        }
-//    }
-
-//    public Type CacheType
-//    {
-//        get
-//        {
-//          return cacheType;
-//        }
-//    }
-
-//    public object Custom
-//    {
-//        get
-//        {
-//          return custom;
-//        }
-//    }
-
-//    public object Value
-//    {
-//        get
-//        {
-//          return value;
-//        }
-//    }
-
-//    /// <summary>
-//    /// Computes (and stores) the estimated size of the cache Value </summary>
-//    /// <seealso cref= #getEstimatedSize </seealso>
-//    public void EstimateSize()
-//    {
-//      long bytesUsed = RamUsageEstimator.SizeOf(Value);
-//      Size = RamUsageEstimator.HumanReadableUnits(bytesUsed);
-//    }
-
-//    /// <summary>
-//    /// The most recently estimated size of the value, null unless
-//    /// estimateSize has been called.
-//    /// </summary>
-//    public string EstimatedSize
-//    {
-//        get
-//        {
-//          return Size;
-//        }
-//    }
-
-//    public override string ToString()
-//    {
-//      StringBuilder b = new StringBuilder();
-//      b.Append("'").Append(ReaderKey).Append("'=>");
-//      b.Append("'").Append(FieldName).Append("',");
-//      b.Append(CacheType).Append(",").Append(Custom);
-//      b.Append("=>").Append(Value.GetType().Name).Append("#");
-//      b.Append(System.Runtime.CompilerServices.RuntimeHelpers.GetHashCode(Value));
-
-//      string s = EstimatedSize;
-//      if (null != s)
-//      {
-//        b.Append(" (size =~ ").Append(s).Append(')');
-//      }
-
-//      return b.ToString();
-//    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/FieldCacheDocIdSet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/FieldCacheDocIdSet.cs b/src/Lucene.Net/Search/FieldCacheDocIdSet.cs
index cdf63a4..4fd7a90 100644
--- a/src/Lucene.Net/Search/FieldCacheDocIdSet.cs
+++ b/src/Lucene.Net/Search/FieldCacheDocIdSet.cs
@@ -24,12 +24,13 @@ namespace Lucene.Net.Search
     using OpenBitSet = Lucene.Net.Util.OpenBitSet;
 
     /// <summary>
-    /// Base class for DocIdSet to be used with FieldCache. The implementation
+    /// Base class for <see cref="DocIdSet"/> to be used with <see cref="IFieldCache"/>. The implementation
     /// of its iterator is very stupid and slow if the implementation of the
-    /// <seealso cref="#matchDoc"/> method is not optimized, as iterators simply increment
-    /// the document id until {@code matchDoc(int)} returns true. Because of this
-    /// {@code matchDoc(int)} must be as fast as possible and in no case do any
+    /// <see cref="MatchDoc(int)"/> method is not optimized, as iterators simply increment
+    /// the document id until <see cref="MatchDoc(int)"/> returns <c>true</c>. Because of this
+    /// <see cref="MatchDoc(int)"/> must be as fast as possible and in no case do any
     /// I/O.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -47,12 +48,12 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// this method checks, if a doc is a hit
+        /// This method checks if a doc is a hit
         /// </summary>
         protected internal abstract bool MatchDoc(int doc);
 
         /// <summary>
-        /// this DocIdSet is always cacheable (does not go back
+        /// This DocIdSet is always cacheable (does not go back
         /// to the reader for iteration)
         /// </summary>
         public override sealed bool IsCacheable

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/FieldCacheImpl.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/FieldCacheImpl.cs b/src/Lucene.Net/Search/FieldCacheImpl.cs
index 9242770..30ca516 100644
--- a/src/Lucene.Net/Search/FieldCacheImpl.cs
+++ b/src/Lucene.Net/Search/FieldCacheImpl.cs
@@ -51,7 +51,7 @@ namespace Lucene.Net.Search
     /// <summary>
     /// Expert: The default cache implementation, storing all values in memory.
     /// A WeakHashMap is used for storage.
-    ///
+    /// <para/>
     /// @since   lucene 1.4
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -234,7 +234,7 @@ namespace Lucene.Net.Search
 
             /// <summary>
             /// Sets the key to the value for the provided reader;
-            ///  if the key is already set then this doesn't change it.
+            /// if the key is already set then this doesn't change it.
             /// </summary>
             public virtual void Put(AtomicReader reader, CacheKey key, object value)
             {
@@ -363,7 +363,7 @@ namespace Lucene.Net.Search
             }
 
             /// <summary>
-            /// Two of these are equal iff they reference the same field and type. </summary>
+            /// Two of these are equal if and only if they reference the same field and type. </summary>
             public override bool Equals(object o)
             {
                 if (o is CacheKey)
@@ -492,13 +492,26 @@ namespace Lucene.Net.Search
             caches[typeof(DocsWithFieldCache)].Put(reader, new CacheKey(field, null), bits);
         }
 
-        // inherit javadocs
+        /// <summary>
+        /// Checks the internal cache for an appropriate entry, and if none is
+        /// found, reads the terms in <paramref name="field"/> as a single <see cref="byte"/> and returns an array
+        /// of size <c>reader.MaxDoc</c> of the value each document
+        /// has in the given field. </summary>
+        /// <param name="reader">  Used to get field values. </param>
+        /// <param name="field">   Which field contains the single <see cref="byte"/> values. </param>
+        /// <param name="setDocsWithField">  If true then <see cref="GetDocsWithField(AtomicReader, string)"/> will
+        ///        also be computed and stored in the <see cref="IFieldCache"/>. </param>
+        /// <returns> The values in the given field for each document. </returns>
+        /// <exception cref="IOException">  If any error occurs. </exception>
+        [Obsolete("(4.4) Index as a numeric field using Int32Field and then use GetInt32s(AtomicReader, string, bool) instead.")]
         public virtual FieldCache.Bytes GetBytes(AtomicReader reader, string field, bool setDocsWithField)
         {
             return GetBytes(reader, field, null, setDocsWithField);
         }
 
+#pragma warning disable 612, 618
         public virtual FieldCache.Bytes GetBytes(AtomicReader reader, string field, FieldCache.IByteParser parser, bool setDocsWithField)
+#pragma warning restore 612, 618
         {
             NumericDocValues valuesIn = reader.GetNumericDocValues(field);
             if (valuesIn != null)
@@ -579,13 +592,17 @@ namespace Lucene.Net.Search
             {
                 int maxDoc = reader.MaxDoc;
                 sbyte[] values;
+#pragma warning disable 612, 618
                 FieldCache.IByteParser parser = (FieldCache.IByteParser)key.custom;
+#pragma warning restore 612, 618
                 if (parser == null)
                 {
                     // Confusing: must delegate to wrapper (vs simply
                     // setting parser = DEFAULT_INT16_PARSER) so cache
                     // key includes DEFAULT_INT16_PARSER:
+#pragma warning disable 612, 618
                     return wrapper.GetBytes(reader, key.field, FieldCache.DEFAULT_BYTE_PARSER, setDocsWithField);
+#pragma warning restore 612, 618
                 }
 
                 values = new sbyte[maxDoc];
@@ -608,9 +625,11 @@ namespace Lucene.Net.Search
             private class UninvertAnonymousInnerClassHelper : Uninvert
             {
                 private readonly sbyte[] values;
+#pragma warning disable 612, 618
                 private readonly FieldCache.IByteParser parser;
 
                 public UninvertAnonymousInnerClassHelper(sbyte[] values, FieldCache.IByteParser parser)
+#pragma warning restore 612, 618
                 {
                     this.values = values;
                     this.parser = parser;
@@ -635,20 +654,45 @@ namespace Lucene.Net.Search
             }
         }
 
-        // inherit javadocs
         /// <summary>
+        /// Checks the internal cache for an appropriate entry, and if none is
+        /// found, reads the terms in <paramref name="field"/> as <see cref="short"/>s and returns an array
+        /// of size <c>reader.MaxDoc</c> of the value each document
+        /// has in the given field. 
+        /// <para/>
         /// NOTE: this was getShorts() in Lucene
         /// </summary>
+        /// <param name="reader">  Used to get field values. </param>
+        /// <param name="field">   Which field contains the <see cref="short"/>s. </param>
+        /// <param name="setDocsWithField">  If true then <see cref="GetDocsWithField(AtomicReader, string)"/> will
+        ///        also be computed and stored in the <see cref="IFieldCache"/>. </param>
+        /// <returns> The values in the given field for each document. </returns>
+        /// <exception cref="IOException">  If any error occurs. </exception>
+        [Obsolete("(4.4) Index as a numeric field using Int32Field and then use GetInt32s(AtomicReader, string, bool) instead.")]
         public virtual FieldCache.Int16s GetInt16s(AtomicReader reader, string field, bool setDocsWithField)
         {
             return GetInt16s(reader, field, null, setDocsWithField);
         }
 
-        // inherit javadocs
         /// <summary>
+        /// Checks the internal cache for an appropriate entry, and if none is found,
+        /// reads the terms in <paramref name="field"/> as <see cref="short"/>s and returns an array of
+        /// size <c>reader.MaxDoc</c> of the value each document has in the
+        /// given field. 
+        /// <para/>
         /// NOTE: this was getShorts() in Lucene
         /// </summary>
+        /// <param name="reader">  Used to get field values. </param>
+        /// <param name="field">   Which field contains the <see cref="short"/>s. </param>
+        /// <param name="parser">  Computes <see cref="short"/> for string values. </param>
+        /// <param name="setDocsWithField">  If true then <see cref="GetDocsWithField(AtomicReader, string)"/> will
+        ///        also be computed and stored in the <see cref="IFieldCache"/>. </param>
+        /// <returns> The values in the given field for each document. </returns>
+        /// <exception cref="IOException">  If any error occurs. </exception>
+        [Obsolete("(4.4) Index as a numeric field using Int32Field and then use GetInt32s(AtomicReader, string, bool) instead.")]
+#pragma warning disable 612, 618
         public virtual FieldCache.Int16s GetInt16s(AtomicReader reader, string field, FieldCache.IInt16Parser parser, bool setDocsWithField)
+#pragma warning restore 612, 618
         {
             NumericDocValues valuesIn = reader.GetNumericDocValues(field);
             if (valuesIn != null)
@@ -735,6 +779,7 @@ namespace Lucene.Net.Search
             {
                 int maxDoc = reader.MaxDoc;
                 short[] values;
+#pragma warning disable 612, 618
                 FieldCache.IInt16Parser parser = (FieldCache.IInt16Parser)key.custom;
                 if (parser == null)
                 {
@@ -743,6 +788,7 @@ namespace Lucene.Net.Search
                     // key includes DEFAULT_INT16_PARSER:
                     return wrapper.GetInt16s(reader, key.field, FieldCache.DEFAULT_INT16_PARSER, setDocsWithField);
                 }
+#pragma warning restore 612, 618
 
                 values = new short[maxDoc];
                 Uninvert u = new UninvertAnonymousInnerClassHelper(this, values, parser);
@@ -764,9 +810,11 @@ namespace Lucene.Net.Search
                 private readonly Int16Cache outerInstance;
 
                 private short[] values;
+#pragma warning disable 612, 618
                 private FieldCache.IInt16Parser parser;
 
                 public UninvertAnonymousInnerClassHelper(Int16Cache outerInstance, short[] values, FieldCache.IInt16Parser parser)
+#pragma warning restore 612, 618
                 {
                     this.outerInstance = outerInstance;
                     this.values = values;
@@ -792,18 +840,43 @@ namespace Lucene.Net.Search
             }
         }
 
-        // inherit javadocs
         /// <summary>
+        /// Returns an <see cref="FieldCache.Int32s"/> over the values found in documents in the given
+        /// field.
+        /// <para/>
         /// NOTE: this was getInts() in Lucene
         /// </summary>
+        /// <seealso cref="GetInt32s(AtomicReader, string, FieldCache.IInt32Parser, bool)"/>
         public virtual FieldCache.Int32s GetInt32s(AtomicReader reader, string field, bool setDocsWithField)
         {
             return GetInt32s(reader, field, null, setDocsWithField);
         }
 
         /// <summary>
+        /// Returns an <see cref="FieldCache.Int32s"/> over the values found in documents in the given
+        /// field. If the field was indexed as <see cref="Documents.NumericDocValuesField"/>, it simply
+        /// uses <see cref="AtomicReader.GetNumericDocValues(string)"/> to read the values.
+        /// Otherwise, it checks the internal cache for an appropriate entry, and if
+        /// none is found, reads the terms in <paramref name="field"/> as <see cref="int"/>s and returns
+        /// an array of size <c>reader.MaxDoc</c> of the value each document
+        /// has in the given field.
+        /// <para/>
         /// NOTE: this was getInts() in Lucene
         /// </summary>
+        /// <param name="reader">
+        ///          Used to get field values. </param>
+        /// <param name="field">
+        ///          Which field contains the <see cref="int"/>s. </param>
+        /// <param name="parser">
+        ///          Computes <see cref="int"/> for string values. May be <c>null</c> if the
+        ///          requested field was indexed as <see cref="Documents.NumericDocValuesField"/> or
+        ///          <see cref="Documents.Int32Field"/>. </param>
+        /// <param name="setDocsWithField">
+        ///          If true then <see cref="GetDocsWithField(AtomicReader, string)"/> will also be computed and
+        ///          stored in the <see cref="IFieldCache"/>. </param>
+        /// <returns> The values in the given field for each document. </returns>
+        /// <exception cref="IOException">
+        ///           If any error occurs. </exception>
         public virtual FieldCache.Int32s GetInt32s(AtomicReader reader, string field, FieldCache.IInt32Parser parser, bool setDocsWithField)
         {
             NumericDocValues valuesIn = reader.GetNumericDocValues(field);
@@ -936,7 +1009,9 @@ namespace Lucene.Net.Search
                     // DEFAULT_INT32_PARSER/NUMERIC_UTILS_INT32_PARSER:
                     try
                     {
+#pragma warning disable 612, 618
                         return wrapper.GetInt32s(reader, key.field, FieldCache.DEFAULT_INT32_PARSER, setDocsWithField);
+#pragma warning restore 612, 618
                     }
                     catch (System.FormatException)
                     {
@@ -1222,7 +1297,9 @@ namespace Lucene.Net.Search
                     // DEFAULT_SINGLE_PARSER/NUMERIC_UTILS_SINGLE_PARSER:
                     try
                     {
+#pragma warning disable 612, 618
                         return wrapper.GetSingles(reader, key.field, FieldCache.DEFAULT_SINGLE_PARSER, setDocsWithField);
+#pragma warning restore 612, 618
                     }
                     catch (System.FormatException)
                     {
@@ -1405,7 +1482,9 @@ namespace Lucene.Net.Search
                     // DEFAULT_INT64_PARSER/NUMERIC_UTILS_INT64_PARSER:
                     try
                     {
+#pragma warning disable 612, 618
                         return wrapper.GetInt64s(reader, key.field, FieldCache.DEFAULT_INT64_PARSER, setDocsWithField);
+#pragma warning restore 612, 618
                     }
                     catch (System.FormatException)
                     {
@@ -1590,7 +1669,9 @@ namespace Lucene.Net.Search
                     // DEFAULT_DOUBLE_PARSER/NUMERIC_UTILS_DOUBLE_PARSER:
                     try
                     {
+#pragma warning disable 612, 618
                         return wrapper.GetDoubles(reader, key.field, FieldCache.DEFAULT_DOUBLE_PARSER, setDocsWithField);
+#pragma warning restore 612, 618
                     }
                     catch (System.FormatException)
                     {


[12/48] lucenenet git commit: Lucene.Net.Search: Fixed up documentation comments

Posted by ni...@apache.org.
Lucene.Net.Search: Fixed up documentation comments


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/b2db5313
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/b2db5313
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/b2db5313

Branch: refs/heads/master
Commit: b2db5313fd2dd81e07a8f6ca64fde2b6b89b945a
Parents: 396db51
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Sat Jun 3 22:51:17 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Sat Jun 3 22:52:41 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |    5 +-
 src/Lucene.Net.Tests/Search/TestFieldCache.cs   |    4 +-
 src/Lucene.Net.Tests/Search/TestSort.cs         |    4 +
 .../Util/TestFieldCacheSanityChecker.cs         |    4 +-
 src/Lucene.Net/Lucene.Net.csproj                |    3 +-
 src/Lucene.Net/Search/AutomatonQuery.cs         |   30 +-
 src/Lucene.Net/Search/BitsFilteredDocIdSet.cs   |   26 +-
 src/Lucene.Net/Search/BooleanClause.cs          |   18 +-
 src/Lucene.Net/Search/BooleanQuery.cs           |   67 +-
 src/Lucene.Net/Search/BooleanScorer.cs          |   66 +-
 src/Lucene.Net/Search/BooleanScorer2.cs         |   28 +-
 src/Lucene.Net/Search/BoostAttribute.cs         |   18 +-
 src/Lucene.Net/Search/BoostAttributeImpl.cs     |    8 +-
 src/Lucene.Net/Search/BulkScorer.cs             |   14 +-
 src/Lucene.Net/Search/CachingCollector.cs       |   61 +-
 src/Lucene.Net/Search/CachingWrapperFilter.cs   |   21 +-
 src/Lucene.Net/Search/CollectionStatistics.cs   |   22 +-
 .../Search/CollectionTerminatedException.cs     |   12 +-
 src/Lucene.Net/Search/Collector.cs              |  163 +--
 src/Lucene.Net/Search/ComplexExplanation.cs     |   13 +-
 .../Search/ConstantScoreAutoRewrite.cs          |   29 +-
 src/Lucene.Net/Search/ConstantScoreQuery.cs     |   26 +-
 .../Search/ControlledRealTimeReopenThread.cs    |   10 +-
 src/Lucene.Net/Search/DisjunctionMaxQuery.cs    |   82 +-
 src/Lucene.Net/Search/DisjunctionMaxScorer.cs   |   22 +-
 src/Lucene.Net/Search/DisjunctionScorer.cs      |   18 +-
 src/Lucene.Net/Search/DisjunctionSumScorer.cs   |    8 +-
 src/Lucene.Net/Search/DocIdSet.cs               |   30 +-
 src/Lucene.Net/Search/DocIdSetIterator.cs       |   76 +-
 src/Lucene.Net/Search/DocTermOrdsRangeFilter.cs |   18 +-
 .../Search/DocTermOrdsRewriteMethod.cs          |   11 +-
 src/Lucene.Net/Search/Explanation.cs            |   14 +-
 src/Lucene.Net/Search/FakeScorer.cs             |    4 +-
 src/Lucene.Net/Search/FieldCache.cs             | 1014 ++++++------------
 src/Lucene.Net/Search/FieldCacheDocIdSet.cs     |   13 +-
 src/Lucene.Net/Search/FieldCacheImpl.cs         |   95 +-
 src/Lucene.Net/Search/FieldCacheRangeFilter.cs  |  976 ++---------------
 .../Search/FieldCacheRewriteMethod.cs           |    9 +-
 src/Lucene.Net/Search/FieldCacheTermsFilter.cs  |   40 +-
 src/Lucene.Net/Search/FieldComparator.cs        |  391 +++----
 src/Lucene.Net/Search/FieldComparatorSource.cs  |    9 +-
 src/Lucene.Net/Search/FieldDoc.cs               |   26 +-
 src/Lucene.Net/Search/FieldValueFilter.cs       |   22 +-
 src/Lucene.Net/Search/FieldValueHitQueue.cs     |   28 +-
 src/Lucene.Net/Search/Filter.cs                 |   28 +-
 src/Lucene.Net/Search/FilteredDocIdSet.cs       |   24 +-
 .../Search/FilteredDocIdSetIterator.cs          |   12 +-
 src/Lucene.Net/Search/FilteredQuery.cs          |  150 +--
 src/Lucene.Net/Search/FuzzyQuery.cs             |   63 +-
 src/Lucene.Net/Search/FuzzyTermsEnum.cs         |   45 +-
 src/Lucene.Net/Search/HitQueue.cs               |   44 +-
 .../Search/IMaxNonCompetitiveBoostAttribute.cs  |   46 -
 src/Lucene.Net/Search/ITopTermsRewrite.cs       |   24 -
 src/Lucene.Net/Search/IndexSearcher.cs          |  337 +++---
 src/Lucene.Net/Search/LiveFieldValues.cs        |   35 +-
 src/Lucene.Net/Search/MatchAllDocsQuery.cs      |    1 -
 .../Search/MaxNonCompetitiveBoostAttribute.cs   |   68 +-
 .../MaxNonCompetitiveBoostAttributeImpl.cs      |   76 ++
 .../Search/MinShouldMatchSumScorer.cs           |   49 +-
 src/Lucene.Net/Search/MultiCollector.cs         |   30 +-
 src/Lucene.Net/Search/MultiPhraseQuery.cs       |   30 +-
 src/Lucene.Net/Search/MultiTermQuery.cs         |  198 ++--
 .../Search/MultiTermQueryWrapperFilter.cs       |   26 +-
 src/Lucene.Net/Search/NGramPhraseQuery.cs       |    9 +-
 src/Lucene.Net/Search/NumericRangeFilter.cs     |   95 +-
 src/Lucene.Net/Search/NumericRangeQuery.cs      |  232 ++--
 src/Lucene.Net/Search/PhrasePositions.cs        |    8 +-
 src/Lucene.Net/Search/PhraseQuery.cs            |   31 +-
 .../Search/PositiveScoresOnlyCollector.cs       |    4 +-
 src/Lucene.Net/Search/PrefixFilter.cs           |    2 +-
 src/Lucene.Net/Search/PrefixQuery.cs            |   10 +-
 src/Lucene.Net/Search/PrefixTermsEnum.cs        |    8 +-
 src/Lucene.Net/Search/Query.cs                  |   52 +-
 src/Lucene.Net/Search/QueryRescorer.cs          |   16 +-
 src/Lucene.Net/Search/QueryWrapperFilter.cs     |   10 +-
 src/Lucene.Net/Search/ReferenceManager.cs       |  137 +--
 src/Lucene.Net/Search/RegexpQuery.cs            |   53 +-
 src/Lucene.Net/Search/ReqExclScorer.cs          |   32 +-
 src/Lucene.Net/Search/ReqOptSumScorer.cs        |   18 +-
 src/Lucene.Net/Search/Rescorer.cs               |   16 +-
 .../Search/ScoreCachingWrappingScorer.cs        |   13 +-
 src/Lucene.Net/Search/ScoreDoc.cs               |   14 +-
 src/Lucene.Net/Search/Scorer.cs                 |   48 +-
 src/Lucene.Net/Search/ScoringRewrite.cs         |   46 +-
 src/Lucene.Net/Search/SearcherFactory.cs        |   35 +-
 .../Search/SearcherLifetimeManager.cs           |  163 +--
 src/Lucene.Net/Search/SearcherManager.cs        |   85 +-
 src/Lucene.Net/Search/SloppyPhraseScorer.cs     |   94 +-
 src/Lucene.Net/Search/Sort.cs                   |   87 +-
 src/Lucene.Net/Search/SortField.cs              |  151 +--
 src/Lucene.Net/Search/SortRescorer.cs           |    2 +-
 src/Lucene.Net/Search/TermCollectingRewrite.cs  |    6 +-
 src/Lucene.Net/Search/TermQuery.cs              |   24 +-
 src/Lucene.Net/Search/TermRangeFilter.cs        |   33 +-
 src/Lucene.Net/Search/TermRangeQuery.cs         |   37 +-
 src/Lucene.Net/Search/TermRangeTermsEnum.cs     |   18 +-
 src/Lucene.Net/Search/TermScorer.cs             |   23 +-
 src/Lucene.Net/Search/TermStatistics.cs         |   14 +-
 src/Lucene.Net/Search/TimeLimitingCollector.cs  |   93 +-
 src/Lucene.Net/Search/TopDocs.cs                |   33 +-
 src/Lucene.Net/Search/TopDocsCollector.cs       |  123 ++-
 src/Lucene.Net/Search/TopFieldCollector.cs      |  212 ++--
 src/Lucene.Net/Search/TopFieldDocs.cs           |    4 +-
 src/Lucene.Net/Search/TopScoreDocCollector.cs   |   26 +-
 src/Lucene.Net/Search/TopTermsRewrite.cs        |   19 +-
 src/Lucene.Net/Search/Weight.cs                 |  127 +--
 src/Lucene.Net/Search/WildcardQuery.cs          |   17 +-
 107 files changed, 3070 insertions(+), 4054 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 45f77ee..ca36869 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -52,9 +52,8 @@ helpers to help with that, see for examples see our [Java style methods to avoid
 
 1. Lucene.Net.Core (project)
    1. Codecs (namespace)
-   2. Search (namespace) (Except for Search.Payloads, Search.Similarities, and Search.Spans)
-   3. Support (namespace)
-   4. Util (namespace) (Except for Util.Fst)
+   2. Support (namespace)
+   3. Util (namespace) (Except for Util.Fst)
 2. Lucene.Net.Codecs (project)
 
 See [Documenting Lucene.Net](https://cwiki.apache.org/confluence/display/LUCENENET/Documenting+Lucene.Net) for instructions. 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net.Tests/Search/TestFieldCache.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Search/TestFieldCache.cs b/src/Lucene.Net.Tests/Search/TestFieldCache.cs
index b360751..be8a5e4 100644
--- a/src/Lucene.Net.Tests/Search/TestFieldCache.cs
+++ b/src/Lucene.Net.Tests/Search/TestFieldCache.cs
@@ -204,6 +204,7 @@ namespace Lucene.Net.Search
         [Test]
         public virtual void Test()
         {
+#pragma warning disable 612, 618
             IFieldCache cache = FieldCache.DEFAULT;
             FieldCache.Doubles doubles = cache.GetDoubles(Reader, "theDouble", Random().NextBoolean());
             Assert.AreSame(doubles, cache.GetDoubles(Reader, "theDouble", Random().NextBoolean()), "Second request to cache return same array");
@@ -221,7 +222,6 @@ namespace Lucene.Net.Search
                 Assert.IsTrue(longs.Get(i) == (long.MaxValue - i), longs.Get(i) + " does not equal: " + (long.MaxValue - i) + " i=" + i);
             }
 
-#pragma warning disable 612, 618
             FieldCache.Bytes bytes = cache.GetBytes(Reader, "theByte", Random().NextBoolean());
             Assert.AreSame(bytes, cache.GetBytes(Reader, "theByte", Random().NextBoolean()), "Second request to cache return same array");
             Assert.AreSame(bytes, cache.GetBytes(Reader, "theByte", FieldCache.DEFAULT_BYTE_PARSER, Random().NextBoolean()), "Second request with explicit parser return same array");
@@ -237,7 +237,6 @@ namespace Lucene.Net.Search
             {
                 Assert.IsTrue(shorts.Get(i) == (short)(short.MaxValue - i), shorts.Get(i) + " does not equal: " + (short.MaxValue - i));
             }
-#pragma warning restore 612, 618
 
             FieldCache.Int32s ints = cache.GetInt32s(Reader, "theInt", Random().NextBoolean());
             Assert.AreSame(ints, cache.GetInt32s(Reader, "theInt", Random().NextBoolean()), "Second request to cache return same array");
@@ -254,6 +253,7 @@ namespace Lucene.Net.Search
             {
                 Assert.IsTrue(floats.Get(i) == (float.MaxValue - i), floats.Get(i) + " does not equal: " + (float.MaxValue - i));
             }
+#pragma warning restore 612, 618
 
             IBits docsWithField = cache.GetDocsWithField(Reader, "theLong");
             Assert.AreSame(docsWithField, cache.GetDocsWithField(Reader, "theLong"), "Second request to cache return same array");

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net.Tests/Search/TestSort.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Search/TestSort.cs b/src/Lucene.Net.Tests/Search/TestSort.cs
index f911682..c98bffa 100644
--- a/src/Lucene.Net.Tests/Search/TestSort.cs
+++ b/src/Lucene.Net.Tests/Search/TestSort.cs
@@ -1639,7 +1639,9 @@ namespace Lucene.Net.Search
             dir.Dispose();
         }
 
+#pragma warning disable 612, 618
         private class ByteParserAnonymousInnerClassHelper : FieldCache.IByteParser
+#pragma warning restore 612, 618
         {
             private readonly TestSort OuterInstance;
 
@@ -1697,7 +1699,9 @@ namespace Lucene.Net.Search
             dir.Dispose();
         }
 
+#pragma warning disable 612, 618
         private class ShortParserAnonymousInnerClassHelper : FieldCache.IInt16Parser
+#pragma warning restore 612, 618
         {
             private readonly TestSort OuterInstance;
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net.Tests/Util/TestFieldCacheSanityChecker.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests/Util/TestFieldCacheSanityChecker.cs b/src/Lucene.Net.Tests/Util/TestFieldCacheSanityChecker.cs
index 63243db..880d6a4 100644
--- a/src/Lucene.Net.Tests/Util/TestFieldCacheSanityChecker.cs
+++ b/src/Lucene.Net.Tests/Util/TestFieldCacheSanityChecker.cs
@@ -122,12 +122,14 @@ namespace Lucene.Net.Util
             cache.PurgeAllCaches();
 
             cache.GetDoubles(ReaderA, "theDouble", false);
+#pragma warning disable 612, 618
             cache.GetDoubles(ReaderA, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER, false);
             cache.GetDoubles(ReaderAclone, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER, false);
             cache.GetDoubles(ReaderB, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER, false);
 
             cache.GetInt32s(ReaderX, "theInt", false);
             cache.GetInt32s(ReaderX, "theInt", FieldCache.DEFAULT_INT32_PARSER, false);
+#pragma warning restore 612, 618
 
             // // //
 
@@ -148,9 +150,9 @@ namespace Lucene.Net.Util
             IFieldCache cache = FieldCache.DEFAULT;
             cache.PurgeAllCaches();
 
+#pragma warning disable 612, 618
             cache.GetInt32s(ReaderX, "theInt", FieldCache.DEFAULT_INT32_PARSER, false);
             cache.GetTerms(ReaderX, "theInt", false);
-#pragma warning disable 612, 618
             cache.GetBytes(ReaderX, "theByte", false);
 #pragma warning restore 612, 618
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Lucene.Net.csproj
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Lucene.Net.csproj b/src/Lucene.Net/Lucene.Net.csproj
index eb166ac..3352bf3 100644
--- a/src/Lucene.Net/Lucene.Net.csproj
+++ b/src/Lucene.Net/Lucene.Net.csproj
@@ -445,11 +445,10 @@
     <Compile Include="Search\FuzzyTermsEnum.cs" />
     <Compile Include="Search\HitQueue.cs" />
     <Compile Include="Search\IndexSearcher.cs" />
-    <Compile Include="Search\ITopTermsRewrite.cs" />
     <Compile Include="Search\LiveFieldValues.cs" />
     <Compile Include="Search\MatchAllDocsQuery.cs" />
-    <Compile Include="Search\IMaxNonCompetitiveBoostAttribute.cs" />
     <Compile Include="Search\MaxNonCompetitiveBoostAttribute.cs" />
+    <Compile Include="Search\MaxNonCompetitiveBoostAttributeImpl.cs" />
     <Compile Include="Search\MinShouldMatchSumScorer.cs" />
     <Compile Include="Search\MultiCollector.cs" />
     <Compile Include="Search\MultiPhraseQuery.cs" />

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/AutomatonQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/AutomatonQuery.cs b/src/Lucene.Net/Search/AutomatonQuery.cs
index fc85e4f..9d2ad73 100644
--- a/src/Lucene.Net/Search/AutomatonQuery.cs
+++ b/src/Lucene.Net/Search/AutomatonQuery.cs
@@ -29,21 +29,21 @@ namespace Lucene.Net.Search
     using ToStringUtils = Lucene.Net.Util.ToStringUtils;
 
     /// <summary>
-    /// A <seealso cref="Query"/> that will match terms against a finite-state machine.
-    /// <p>
-    /// this query will match documents that contain terms accepted by a given
+    /// A <see cref="Query"/> that will match terms against a finite-state machine.
+    /// <para>
+    /// This query will match documents that contain terms accepted by a given
     /// finite-state machine. The automaton can be constructed with the
-    /// <seealso cref="Lucene.Net.Util.Automaton"/> API. Alternatively, it can be
-    /// created from a regular expression with <seealso cref="RegexpQuery"/> or from
-    /// the standard Lucene wildcard syntax with <seealso cref="WildcardQuery"/>.
-    /// </p>
-    /// <p>
+    /// <see cref="Lucene.Net.Util.Automaton"/> API. Alternatively, it can be
+    /// created from a regular expression with <see cref="RegexpQuery"/> or from
+    /// the standard Lucene wildcard syntax with <see cref="WildcardQuery"/>.
+    /// </para>
+    /// <para>
     /// When the query is executed, it will create an equivalent DFA of the
     /// finite-state machine, and will enumerate the term dictionary in an
     /// intelligent way to reduce the number of comparisons. For example: the regular
-    /// expression of <code>[dl]og?</code> will make approximately four comparisons:
+    /// expression of <c>[dl]og?</c> will make approximately four comparisons:
     /// do, dog, lo, and log.
-    /// </p>
+    /// </para>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -52,21 +52,21 @@ namespace Lucene.Net.Search
     public class AutomatonQuery : MultiTermQuery
     {
         /// <summary>
-        /// the automaton to match index terms against </summary>
+        /// The automaton to match index terms against </summary>
         protected readonly Automaton m_automaton;
 
         protected readonly CompiledAutomaton m_compiled;
 
         /// <summary>
-        /// term containing the field, and possibly some pattern structure </summary>
+        /// Term containing the field, and possibly some pattern structure </summary>
         protected readonly Term m_term;
 
         /// <summary>
-        /// Create a new AutomatonQuery from an <seealso cref="Automaton"/>.
+        /// Create a new AutomatonQuery from an <see cref="Automaton"/>.
         /// </summary>
-        /// <param name="term"> Term containing field and possibly some pattern structure. The
+        /// <param name="term"> <see cref="Term"/> containing field and possibly some pattern structure. The
         ///        term text is ignored. </param>
-        /// <param name="automaton"> Automaton to run, terms that are accepted are considered a
+        /// <param name="automaton"> <see cref="Automaton"/> to run, terms that are accepted are considered a
         ///        match. </param>
         public AutomatonQuery(Term term, Automaton automaton)
             : base(term.Field)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/BitsFilteredDocIdSet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/BitsFilteredDocIdSet.cs b/src/Lucene.Net/Search/BitsFilteredDocIdSet.cs
index ba03be0..5de931c 100644
--- a/src/Lucene.Net/Search/BitsFilteredDocIdSet.cs
+++ b/src/Lucene.Net/Search/BitsFilteredDocIdSet.cs
@@ -22,13 +22,13 @@ namespace Lucene.Net.Search
     using IBits = Lucene.Net.Util.IBits;
 
     /// <summary>
-    /// this implementation supplies a filtered DocIdSet, that excludes all
-    /// docids which are not in a Bits instance. this is especially useful in
-    /// <seealso cref="Lucene.Net.Search.Filter"/> to apply the {@code acceptDocs}
-    /// passed to {@code getDocIdSet()} before returning the final DocIdSet.
+    /// This implementation supplies a filtered <see cref="DocIdSet"/>, that excludes all
+    /// docids which are not in a <see cref="IBits"/> instance. This is especially useful in
+    /// <see cref="Lucene.Net.Search.Filter"/> to apply the <c>acceptDocs</c>
+    /// passed to <see cref="Filter.GetDocIdSet(Index.AtomicReaderContext, IBits)"/> before returning the final <see cref="DocIdSet"/>.
     /// </summary>
-    /// <seealso cref= DocIdSet </seealso>
-    /// <seealso cref= Lucene.Net.Search.Filter </seealso>
+    /// <seealso cref="DocIdSet"/>
+    /// <seealso cref="Lucene.Net.Search.Filter"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -37,10 +37,10 @@ namespace Lucene.Net.Search
         private readonly IBits acceptDocs;
 
         /// <summary>
-        /// Convenience wrapper method: If {@code acceptDocs == null} it returns the original set without wrapping. </summary>
-        /// <param name="set"> Underlying DocIdSet. If {@code null}, this method returns {@code null} </param>
-        /// <param name="acceptDocs"> Allowed docs, all docids not in this set will not be returned by this DocIdSet.
-        /// If {@code null}, this method returns the original set without wrapping. </param>
+        /// Convenience wrapper method: If <c>acceptDocs == null</c> it returns the original set without wrapping. </summary>
+        /// <param name="set"> Underlying DocIdSet. If <c>null</c>, this method returns <c>null</c> </param>
+        /// <param name="acceptDocs"> Allowed docs, all docids not in this set will not be returned by this <see cref="DocIdSet"/>.
+        /// If <c>null</c>, this method returns the original set without wrapping. </param>
         public static DocIdSet Wrap(DocIdSet set, IBits acceptDocs)
         {
             return (set == null || acceptDocs == null) ? set : new BitsFilteredDocIdSet(set, acceptDocs);
@@ -48,14 +48,14 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Constructor. </summary>
-        /// <param name="innerSet"> Underlying DocIdSet </param>
-        /// <param name="acceptDocs"> Allowed docs, all docids not in this set will not be returned by this DocIdSet </param>
+        /// <param name="innerSet"> Underlying <see cref="DocIdSet"/> </param>
+        /// <param name="acceptDocs"> Allowed docs, all docids not in this set will not be returned by this <see cref="DocIdSet"/> </param>
         public BitsFilteredDocIdSet(DocIdSet innerSet, IBits acceptDocs)
             : base(innerSet)
         {
             if (acceptDocs == null)
             {
-                throw new System.NullReferenceException("acceptDocs is null");
+                throw new System.NullReferenceException("acceptDocs is null"); // LUCENENET TODO: API throw ArgumentNullException ?
             }
             this.acceptDocs = acceptDocs;
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/BooleanClause.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/BooleanClause.cs b/src/Lucene.Net/Search/BooleanClause.cs
index e7bd463..cd81e97 100644
--- a/src/Lucene.Net/Search/BooleanClause.cs
+++ b/src/Lucene.Net/Search/BooleanClause.cs
@@ -20,7 +20,7 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// A clause in a BooleanQuery. </summary>
+    /// A clause in a <see cref="BooleanQuery"/>. </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -55,7 +55,7 @@ namespace Lucene.Net.Search
         private Occur occur;
 
         /// <summary>
-        /// Constructs a BooleanClause.
+        /// Constructs a <see cref="BooleanClause"/>.
         /// </summary>
         public BooleanClause(Query query, Occur occur)
         {
@@ -104,7 +104,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns true if <code>o</code> is equal to this. </summary>
+        /// Returns <c>true</c> if <paramref name="o"/> is equal to this. </summary>
         public override bool Equals(object o)
         {
             BooleanClause bc = o as BooleanClause;
@@ -126,7 +126,7 @@ namespace Lucene.Net.Search
             bool success = true;
             if (object.ReferenceEquals(null, other))
             {
-                return object.ReferenceEquals(null, this);
+                return object.ReferenceEquals(null, this); // LUCENENET TODO: This can never happen - revert to original code
             }
             if (query == null)
             {
@@ -152,16 +152,16 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Use this operator for clauses that <i>should</i> appear in the
-        /// matching documents. For a BooleanQuery with no <code>MUST</code>
-        /// clauses one or more <code>SHOULD</code> clauses must match a document
-        /// for the BooleanQuery to match. </summary>
-        /// <seealso cref= BooleanQuery#setMinimumNumberShouldMatch</seealso>
+        /// matching documents. For a <see cref="BooleanQuery"/> with no <see cref="MUST"/>
+        /// clauses one or more <see cref="SHOULD"/> clauses must match a document
+        /// for the <see cref="BooleanQuery"/> to match. </summary>
+        /// <seealso cref="BooleanQuery.MinimumNumberShouldMatch"/>
         SHOULD,
 
         /// <summary>
         /// Use this operator for clauses that <i>must not</i> appear in the matching documents.
         /// Note that it is not possible to search for queries that only consist
-        /// of a <code>MUST_NOT</code> clause.
+        /// of a <see cref="MUST_NOT"/> clause.
         /// </summary>
         MUST_NOT
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/BooleanQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/BooleanQuery.cs b/src/Lucene.Net/Search/BooleanQuery.cs
index b28ed5f..b144393 100644
--- a/src/Lucene.Net/Search/BooleanQuery.cs
+++ b/src/Lucene.Net/Search/BooleanQuery.cs
@@ -36,9 +36,9 @@ namespace Lucene.Net.Search
     using ToStringUtils = Lucene.Net.Util.ToStringUtils;
 
     /// <summary>
-    /// A Query that matches documents matching boolean combinations of other
-    /// queries, e.g. <seealso cref="TermQuery"/>s, <seealso cref="PhraseQuery"/>s or other
-    /// BooleanQuerys.
+    /// A <see cref="Query"/> that matches documents matching boolean combinations of other
+    /// queries, e.g. <see cref="TermQuery"/>s, <see cref="PhraseQuery"/>s or other
+    /// <see cref="BooleanQuery"/>s.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -48,9 +48,9 @@ namespace Lucene.Net.Search
         private static int maxClauseCount = 1024;
 
         /// <summary>
-        /// Thrown when an attempt is made to add more than {@link
-        /// #getMaxClauseCount()} clauses. this typically happens if
-        /// a PrefixQuery, FuzzyQuery, WildcardQuery, or TermRangeQuery
+        /// Thrown when an attempt is made to add more than 
+        /// <see cref="MaxClauseCount"/> clauses. This typically happens if
+        /// a <see cref="PrefixQuery"/>, <see cref="FuzzyQuery"/>, <see cref="WildcardQuery"/>, or <see cref="TermRangeQuery"/>
         /// is expanded to many terms during search.
         /// </summary>
         // LUCENENET: All exeption classes should be marked serializable
@@ -79,9 +79,8 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Return the maximum number of clauses permitted, 1024 by default.
-        /// Attempts to add more than the permitted number of clauses cause {@link
-        /// TooManyClauses} to be thrown. </summary>
-        /// <seealso cref= #setMaxClauseCount(int) </seealso>
+        /// Attempts to add more than the permitted number of clauses cause 
+        /// <see cref="TooManyClausesException"/> to be thrown. </summary>
         public static int MaxClauseCount
         {
             get
@@ -110,23 +109,23 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Constructs an empty boolean query.
-        ///
-        /// <seealso cref="Similarity#coord(int,int)"/> may be disabled in scoring, as
+        /// <para/>
+        /// <see cref="Similarity.Coord(int,int)"/> may be disabled in scoring, as
         /// appropriate. For example, this score factor does not make sense for most
-        /// automatically generated queries, like <seealso cref="WildcardQuery"/> and {@link
-        /// FuzzyQuery}.
+        /// automatically generated queries, like <see cref="WildcardQuery"/> and 
+        /// <see cref="FuzzyQuery"/>.
         /// </summary>
-        /// <param name="disableCoord"> disables <seealso cref="Similarity#coord(int,int)"/> in scoring. </param>
+        /// <param name="disableCoord"> Disables <see cref="Similarity.Coord(int,int)"/> in scoring. </param>
         public BooleanQuery(bool disableCoord)
         {
             this.disableCoord = disableCoord;
         }
 
         /// <summary>
+        /// Returns <c>true</c> if <see cref="Similarity.Coord(int,int)"/> is disabled in
+        /// Returns true if <see cref="Similarity.Coord(int,int)"/> is disabled in
         /// scoring for this query instance. </summary>
-        /// <seealso cref= #BooleanQuery(boolean) </seealso>
-        public virtual bool CoordDisabled // LUCENENET TODO: Change to CoordEnabled? Per MSDN, properties should be in the affirmative.
+        /// <seealso cref="BooleanQuery(bool)"/>
+        public virtual bool CoordDisabled // LUCENENET TODO: API Change to CoordEnabled? Per MSDN, properties should be in the affirmative.
         {
             get
             {
@@ -135,21 +134,21 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Specifies a minimum number of the optional BooleanClauses
+        /// Specifies a minimum number of the optional <see cref="BooleanClause"/>s
         /// which must be satisfied.
         ///
-        /// <p>
+        /// <para>
         /// By default no optional clauses are necessary for a match
         /// (unless there are no required clauses).  If this method is used,
         /// then the specified number of clauses is required.
-        /// </p>
-        /// <p>
+        /// </para>
+        /// <para>
         /// Use of this method is totally independent of specifying that
-        /// any specific clauses are required (or prohibited).  this number will
+        /// any specific clauses are required (or prohibited).  This number will
         /// only be compared against the number of matching optional clauses.
-        /// </p>
+        /// </para>
         /// </summary>
-        /// <param name="min"> the number of optional clauses that must match </param>
+        /// <param name="value"> The number of optional clauses that must match </param>
         public virtual int MinimumNumberShouldMatch
         {
             set
@@ -167,8 +166,8 @@ namespace Lucene.Net.Search
         /// <summary>
         /// Adds a clause to a boolean query.
         /// </summary>
-        /// <exception cref="TooManyClausesException"> if the new number of clauses exceeds the maximum clause number </exception>
-        /// <seealso cref= #getMaxClauseCount() </seealso>
+        /// <exception cref="TooManyClausesException"> If the new number of clauses exceeds the maximum clause number </exception>
+        /// <seealso cref="MaxClauseCount"/>
         public virtual void Add(Query query, Occur occur)
         {
             Add(new BooleanClause(query, occur));
@@ -176,8 +175,8 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Adds a clause to a boolean query. </summary>
-        /// <exception cref="TooManyClausesException"> if the new number of clauses exceeds the maximum clause number </exception>
-        /// <seealso cref= #getMaxClauseCount() </seealso>
+        /// <exception cref="TooManyClausesException"> If the new number of clauses exceeds the maximum clause number </exception>
+        /// <seealso cref="MaxClauseCount"/>
         public virtual void Add(BooleanClause clause)
         {
             if (clauses.Count >= maxClauseCount)
@@ -203,9 +202,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns an iterator on the clauses in this query. It implements the <seealso cref="Iterable"/> interface to
+        /// Returns an iterator on the clauses in this query. It implements the <see cref="T:IEnumerable{BooleanClause}"/> interface to
         /// make it possible to do:
-        /// <pre class="prettyprint">for (BooleanClause clause : booleanQuery) {}</pre>
+        /// <code>foreach (BooleanClause clause in booleanQuery) {}</code>
         /// </summary>
         public IEnumerator<BooleanClause> GetEnumerator()
         {
@@ -218,9 +217,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Expert: the Weight for BooleanQuery, used to
+        /// Expert: the <see cref="Weight"/> for <see cref="BooleanQuery"/>, used to
         /// normalize, score and explain these queries.
-        ///
+        /// <para/>
         /// @lucene.experimental
         /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -231,7 +230,7 @@ namespace Lucene.Net.Search
             private readonly BooleanQuery outerInstance;
 
             /// <summary>
-            /// The Similarity implementation. </summary>
+            /// The <see cref="Similarities.Similarity"/> implementation. </summary>
             protected Similarity m_similarity;
 
             protected List<Weight> m_weights;
@@ -678,7 +677,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns true iff <code>o</code> is equal to this. </summary>
+        /// Returns <c>true</c> if <paramref name="o"/> is equal to this. </summary>
         public override bool Equals(object o)
         {
             if (!(o is BooleanQuery))

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/BooleanScorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/BooleanScorer.cs b/src/Lucene.Net/Search/BooleanScorer.cs
index 55137e2..44a6986 100644
--- a/src/Lucene.Net/Search/BooleanScorer.cs
+++ b/src/Lucene.Net/Search/BooleanScorer.cs
@@ -24,38 +24,40 @@ namespace Lucene.Net.Search
     using AtomicReaderContext = Lucene.Net.Index.AtomicReaderContext;
     using BooleanWeight = Lucene.Net.Search.BooleanQuery.BooleanWeight;
 
-    /* Description from Doug Cutting (excerpted from
-     * LUCENE-1483):
-     *
-     * BooleanScorer uses an array to score windows of
-     * 2K docs. So it scores docs 0-2K first, then docs 2K-4K,
-     * etc. For each window it iterates through all query terms
-     * and accumulates a score in table[doc%2K]. It also stores
-     * in the table a bitmask representing which terms
-     * contributed to the score. Non-zero scores are chained in
-     * a linked list. At the end of scoring each window it then
-     * iterates through the linked list and, if the bitmask
-     * matches the boolean constraints, collects a hit. For
-     * boolean queries with lots of frequent terms this can be
-     * much faster, since it does not need to update a priority
-     * queue for each posting, instead performing constant-time
-     * operations per posting. The only downside is that it
-     * results in hits being delivered out-of-order within the
-     * window, which means it cannot be nested within other
-     * scorers. But it works well as a top-level scorer.
-     *
-     * The new BooleanScorer2 implementation instead works by
-     * merging priority queues of postings, albeit with some
-     * clever tricks. For example, a pure conjunction (all terms
-     * required) does not require a priority queue. Instead it
-     * sorts the posting streams at the start, then repeatedly
-     * skips the first to to the last. If the first ever equals
-     * the last, then there's a hit. When some terms are
-     * required and some terms are optional, the conjunction can
-     * be evaluated first, then the optional terms can all skip
-     * to the match and be added to the score. Thus the
-     * conjunction can reduce the number of priority queue
-     * updates for the optional terms. */
+    /// <summary>
+    /// Description from Doug Cutting (excerpted from
+    /// LUCENE-1483):
+    /// <para/>
+    /// <see cref="BooleanScorer"/> uses an array to score windows of
+    /// 2K docs. So it scores docs 0-2K first, then docs 2K-4K,
+    /// etc. For each window it iterates through all query terms
+    /// and accumulates a score in table[doc%2K]. It also stores
+    /// in the table a bitmask representing which terms
+    /// contributed to the score. Non-zero scores are chained in
+    /// a linked list. At the end of scoring each window it then
+    /// iterates through the linked list and, if the bitmask
+    /// matches the boolean constraints, collects a hit. For
+    /// boolean queries with lots of frequent terms this can be
+    /// much faster, since it does not need to update a priority
+    /// queue for each posting, instead performing constant-time
+    /// operations per posting. The only downside is that it
+    /// results in hits being delivered out-of-order within the
+    /// window, which means it cannot be nested within other
+    /// scorers. But it works well as a top-level scorer.
+    /// <para/>
+    /// The new BooleanScorer2 implementation instead works by
+    /// merging priority queues of postings, albeit with some
+    /// clever tricks. For example, a pure conjunction (all terms
+    /// required) does not require a priority queue. Instead it
+    /// sorts the posting streams at the start, then repeatedly
+    /// skips the first to the last. If the first ever equals
+    /// the last, then there's a hit. When some terms are
+    /// required and some terms are optional, the conjunction can
+    /// be evaluated first, then the optional terms can all skip
+    /// to the match and be added to the score. Thus the
+    /// conjunction can reduce the number of priority queue
+    /// updates for the optional terms.
+    /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/BooleanScorer2.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/BooleanScorer2.cs b/src/Lucene.Net/Search/BooleanScorer2.cs
index 9cf27ae..b7e56de 100644
--- a/src/Lucene.Net/Search/BooleanScorer2.cs
+++ b/src/Lucene.Net/Search/BooleanScorer2.cs
@@ -23,14 +23,14 @@ namespace Lucene.Net.Search
 
     using BooleanWeight = Lucene.Net.Search.BooleanQuery.BooleanWeight;
 
-    /* See the description in BooleanScorer.java, comparing
-     * BooleanScorer & BooleanScorer2 */
-
     /// <summary>
-    /// An alternative to BooleanScorer that also allows a minimum number
+    /// See the description in <see cref="BooleanScorer"/> comparing
+    /// <see cref="BooleanScorer"/> &amp; <see cref="BooleanScorer2"/>.
+    /// <para/>
+    /// An alternative to <see cref="BooleanScorer"/> that also allows a minimum number
     /// of optional scorers that should match.
-    /// <br>Implements skipTo(), and has no limitations on the numbers of added scorers.
-    /// <br>Uses ConjunctionScorer, DisjunctionScorer, ReqOptScorer and ReqExclScorer.
+    /// <para/>Implements SkipTo(), and has no limitations on the number of added scorers.
+    /// <para/>Uses <see cref="ConjunctionScorer"/>, <see cref="DisjunctionScorer"/>, <see cref="ReqOptSumScorer"/> and <see cref="ReqExclScorer"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -78,25 +78,27 @@ namespace Lucene.Net.Search
         private int doc = -1;
 
         /// <summary>
-        /// Creates a <seealso cref="Scorer"/> with the given similarity and lists of required,
+        /// Creates a <see cref="Scorer"/> with the given similarity and lists of required,
         /// prohibited and optional scorers. In no required scorers are added, at least
         /// one of the optional scorers will have to match during the search.
         /// </summary>
         /// <param name="weight">
-        ///          The BooleanWeight to be used. </param>
+        ///          The <see cref="BooleanWeight"/> to be used. </param>
         /// <param name="disableCoord">
-        ///          If this parameter is true, coordination level matching
-        ///          (<seealso cref="Similarity#coord(int, int)"/>) is not used. </param>
+        ///          If this parameter is <c>true</c>, coordination level matching
+        ///          (<see cref="Similarities.Similarity.Coord(int, int)"/>) is not used. </param>
         /// <param name="minNrShouldMatch">
         ///          The minimum number of optional added scorers that should match
         ///          during the search. In case no required scorers are added, at least
         ///          one of the optional scorers will have to match during the search. </param>
         /// <param name="required">
-        ///          the list of required scorers. </param>
+        ///          The list of required scorers. </param>
         /// <param name="prohibited">
-        ///          the list of prohibited scorers. </param>
+        ///          The list of prohibited scorers. </param>
         /// <param name="optional">
-        ///          the list of optional scorers. </param>
+        ///          The list of optional scorers. </param>
+        /// <param name="maxCoord">
+        ///          The max coord. </param>
         public BooleanScorer2(BooleanWeight weight, bool disableCoord, int minNrShouldMatch, IList<Scorer> required, IList<Scorer> prohibited, IList<Scorer> optional, int maxCoord)
             : base(weight)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/BoostAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/BoostAttribute.cs b/src/Lucene.Net/Search/BoostAttribute.cs
index 971c70e..a0e798c 100644
--- a/src/Lucene.Net/Search/BoostAttribute.cs
+++ b/src/Lucene.Net/Search/BoostAttribute.cs
@@ -20,17 +20,21 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// Add this <seealso cref="Attribute"/> to a <seealso cref="TermsEnum"/> returned by <seealso cref="MultiTermQuery#getTermsEnum(Terms,AttributeSource)"/>
-    /// and update the boost on each returned term. this enables to control the boost factor
-    /// for each matching term in <seealso cref="MultiTermQuery#SCORING_BOOLEAN_QUERY_REWRITE"/> or
-    /// <seealso cref="TopTermsRewrite"/> mode.
-    /// <seealso cref="FuzzyQuery"/> is using this to take the edit distance into account.
-    /// <p><b>Please note:</b> this attribute is intended to be added only by the TermsEnum
-    /// to itself in its constructor and consumed by the <seealso cref="MultiTermQuery.RewriteMethod"/>.
+    /// Add this <see cref="IAttribute"/> to a <see cref="Index.TermsEnum"/> returned by <see cref="MultiTermQuery.GetTermsEnum(Index.Terms, AttributeSource)"/>
+    /// and update the boost on each returned term. This makes it possible to control the boost factor
+    /// for each matching term in <see cref="MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE"/> or
+    /// <see cref="TopTermsRewrite{Q}"/> mode.
+    /// <see cref="FuzzyQuery"/> is using this to take the edit distance into account.
+    /// <para/><b>Please note:</b> this attribute is intended to be added only by the <see cref="Index.TermsEnum"/>
+    /// to itself in its constructor and consumed by the <see cref="MultiTermQuery.RewriteMethod"/>.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public interface IBoostAttribute : IAttribute
     {
+        /// <summary>
+        /// Gets or Sets the boost in this attribute. Default is <c>1.0f</c>.
+        /// </summary>
         float Boost { get; set; }
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/BoostAttributeImpl.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/BoostAttributeImpl.cs b/src/Lucene.Net/Search/BoostAttributeImpl.cs
index 0fabe03..6a8be3e 100644
--- a/src/Lucene.Net/Search/BoostAttributeImpl.cs
+++ b/src/Lucene.Net/Search/BoostAttributeImpl.cs
@@ -23,7 +23,8 @@ namespace Lucene.Net.Search
     using IAttribute = Lucene.Net.Util.IAttribute;
 
     /// <summary>
-    /// Implementation class for <seealso cref="IBoostAttribute"/>.
+    /// Implementation class for <see cref="IBoostAttribute"/>.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -31,10 +32,11 @@ namespace Lucene.Net.Search
 #endif
     public sealed class BoostAttribute : Attribute, IBoostAttribute
     {
-        /// <summary>
-        /// Sets the boost in this attribute </summary>
         private float boost = 1.0f;
 
+        /// <summary>
+        /// Gets or Sets the boost in this attribute. Default is <c>1.0f</c>.
+        /// </summary>
         public float Boost
         {
             get { return boost; }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/BulkScorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/BulkScorer.cs b/src/Lucene.Net/Search/BulkScorer.cs
index ae1f160..3649ccc 100644
--- a/src/Lucene.Net/Search/BulkScorer.cs
+++ b/src/Lucene.Net/Search/BulkScorer.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// this class is used to score a range of documents at
-    ///  once, and is returned by <seealso cref="Weight#bulkScorer"/>.  Only
-    ///  queries that have a more optimized means of scoring
-    ///  across a range of documents need to override this.
-    ///  Otherwise, a default implementation is wrapped around
-    ///  the <seealso cref="Scorer"/> returned by <seealso cref="Weight#scorer"/>.
+    /// This class is used to score a range of documents at
+    /// once, and is returned by <see cref="Weight.GetBulkScorer(Index.AtomicReaderContext, bool, Util.IBits)"/>.  Only
+    /// queries that have a more optimized means of scoring
+    /// across a range of documents need to override this.
+    /// Otherwise, a default implementation is wrapped around
+    /// the <see cref="Scorer"/> returned by <see cref="Weight.GetScorer(Index.AtomicReaderContext, Util.IBits)"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -45,7 +45,7 @@ namespace Lucene.Net.Search
         /// </summary>
         /// <param name="collector"> The collector to which all matching documents are passed. </param>
         /// <param name="max"> Score up to, but not including, this doc </param>
-        /// <returns> true if more matching documents may remain. </returns>
+        /// <returns> <c>true</c> if more matching documents may remain. </returns>
         public abstract bool Score(ICollector collector, int max);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/CachingCollector.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/CachingCollector.cs b/src/Lucene.Net/Search/CachingCollector.cs
index 4997efc..043d3be 100644
--- a/src/Lucene.Net/Search/CachingCollector.cs
+++ b/src/Lucene.Net/Search/CachingCollector.cs
@@ -27,22 +27,22 @@ namespace Lucene.Net.Search
     /// Caches all docs, and optionally also scores, coming from
     /// a search, and is then able to replay them to another
     /// collector.  You specify the max RAM this class may use.
-    /// Once the collection is done, call <seealso cref="#isCached"/>. If
-    /// this returns true, you can use <seealso cref="#replay(Collector)"/>
-    /// against a new collector.  If it returns false, this means
+    /// Once the collection is done, call <see cref="IsCached"/>. If
+    /// this returns <c>true</c>, you can use <see cref="Replay(ICollector)"/>
+    /// against a new collector.  If it returns <c>false</c>, this means
     /// too much RAM was required and you must instead re-run the
     /// original search.
     ///
-    /// <p><b>NOTE</b>: this class consumes 4 (or 8 bytes, if
+    /// <para/><b>NOTE</b>: this class consumes 4 (or 8 bytes, if
     /// scoring is cached) per collected document.  If the result
     /// set is large this can easily be a very substantial amount
     /// of RAM!
     ///
-    /// <p><b>NOTE</b>: this class caches at least 128 documents
+    /// <para/><b>NOTE</b>: this class caches at least 128 documents
     /// before checking RAM limits.
     ///
-    /// <p>See the Lucene <tt>modules/grouping</tt> module for more
-    /// details including a full code example.</p>
+    /// <para>See the Lucene <c>modules/grouping</c> module for more
+    /// details including a full code example.</para>
     ///
     /// @lucene.experimental
     /// </summary>
@@ -125,7 +125,9 @@ namespace Lucene.Net.Search
             }
         }
 
-        // A CachingCollector which caches scores
+        /// <summary>
+        /// A <see cref="CachingCollector"/> which caches scores
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -259,7 +261,9 @@ namespace Lucene.Net.Search
             }
         }
 
-        // A CachingCollector which does not cache scores
+        /// <summary>
+        /// A <see cref="CachingCollector"/> which does not cache scores
+        /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -383,9 +387,8 @@ namespace Lucene.Net.Search
         protected int m_lastDocBase;
 
         /// <summary>
-        /// Creates a <seealso cref="CachingCollector"/> which does not wrap another collector.
-        /// The cached documents and scores can later be {@link #replay(Collector)
-        /// replayed}.
+        /// Creates a <see cref="CachingCollector"/> which does not wrap another collector.
+        /// The cached documents and scores can later be replayed (<see cref="Replay(ICollector)"/>).
         /// </summary>
         /// <param name="acceptDocsOutOfOrder">
         ///          whether documents are allowed to be collected out-of-order </param>
@@ -426,16 +429,16 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Create a new <seealso cref="CachingCollector"/> that wraps the given collector and
+        /// Create a new <see cref="CachingCollector"/> that wraps the given collector and
         /// caches documents and scores up to the specified RAM threshold.
         /// </summary>
         /// <param name="other">
-        ///          the Collector to wrap and delegate calls to. </param>
+        ///          The <see cref="ICollector"/> to wrap and delegate calls to. </param>
         /// <param name="cacheScores">
-        ///          whether to cache scores in addition to document IDs. Note that
-        ///          this increases the RAM consumed per doc </param>
+        ///          Whether to cache scores in addition to document IDs. Note that
+        ///          this increases the RAM consumed per doc. </param>
         /// <param name="maxRAMMB">
-        ///          the maximum RAM in MB to consume for caching the documents and
+        ///          The maximum RAM in MB to consume for caching the documents and
         ///          scores. If the collector exceeds the threshold, no documents and
         ///          scores are cached. </param>
         public static CachingCollector Create(ICollector other, bool cacheScores, double maxRAMMB)
@@ -444,16 +447,16 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Create a new <seealso cref="CachingCollector"/> that wraps the given collector and
+        /// Create a new <see cref="CachingCollector"/> that wraps the given collector and
         /// caches documents and scores up to the specified max docs threshold.
         /// </summary>
         /// <param name="other">
-        ///          the Collector to wrap and delegate calls to. </param>
+        ///          The <see cref="ICollector"/> to wrap and delegate calls to. </param>
         /// <param name="cacheScores">
-        ///          whether to cache scores in addition to document IDs. Note that
-        ///          this increases the RAM consumed per doc </param>
+        ///          Whether to cache scores in addition to document IDs. Note that
+        ///          this increases the RAM consumed per doc. </param>
         /// <param name="maxDocsToCache">
-        ///          the maximum number of documents for caching the documents and
+        ///          The maximum number of documents for caching the documents and
         ///          possible the scores. If the collector exceeds the threshold,
         ///          no documents and scores are cached. </param>
         public static CachingCollector Create(ICollector other, bool cacheScores, int maxDocsToCache)
@@ -517,7 +520,7 @@ namespace Lucene.Net.Search
         /// Called before successive calls to <see cref="Collect(int)"/>. Implementations
         /// that need the score of the current document (passed-in to
        /// <see cref="Collect(int)"/>), should save the passed-in <see cref="Scorer"/> and call
-        /// scorer.Score() when needed.
+        /// <see cref="Scorer.GetScore()"/> when needed.
         /// </summary>
         public abstract void SetScorer(Scorer scorer);
 
@@ -559,15 +562,15 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Replays the cached doc IDs (and scores) to the given Collector. If this
-        /// instance does not cache scores, then Scorer is not set on
-        /// {@code other.setScorer} as well as scores are not replayed.
+        /// Replays the cached doc IDs (and scores) to the given <see cref="ICollector"/>. If this
+        /// instance does not cache scores, then <see cref="Scorer"/> is not set on
+        /// <c>other.SetScorer(Scorer)</c> as well as scores are not replayed.
         /// </summary>
         /// <exception cref="InvalidOperationException">
-        ///           if this collector is not cached (i.e., if the RAM limits were too
+        ///           If this collector is not cached (i.e., if the RAM limits were too
         ///           low for the number of documents + scores to cache). </exception>
-        /// <exception cref="IllegalArgumentException">
-        ///           if the given Collect's does not support out-of-order collection,
+        /// <exception cref="ArgumentException">
+        ///           If the given Collector does not support out-of-order collection,
         ///           while the collector passed to the ctor does. </exception>
         public abstract void Replay(ICollector other);
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/CachingWrapperFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/CachingWrapperFilter.cs b/src/Lucene.Net/Search/CachingWrapperFilter.cs
index c086373..a35526e 100644
--- a/src/Lucene.Net/Search/CachingWrapperFilter.cs
+++ b/src/Lucene.Net/Search/CachingWrapperFilter.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Search
     using WAH8DocIdSet = Lucene.Net.Util.WAH8DocIdSet;
 
     /// <summary>
-    /// Wraps another <seealso cref="Filter"/>'s result and caches it.  The purpose is to allow
+    /// Wraps another <see cref="Search.Filter"/>'s result and caches it.  The purpose is to allow
     /// filters to simply filter, and then wrap with this class
     /// to add caching.
     /// </summary>
@@ -64,13 +64,14 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        ///  Provide the DocIdSet to be cached, using the DocIdSet provided
-        ///  by the wrapped Filter. <p>this implementation returns the given <seealso cref="DocIdSet"/>,
-        ///  if <seealso cref="DocIdSet#isCacheable"/> returns <code>true</code>, else it calls
-        ///  <seealso cref="#cacheImpl(DocIdSetIterator,AtomicReader)"/>
-        ///  <p>Note: this method returns <seealso cref="#EMPTY_DOCIDSET"/> if the given docIdSet
-        ///  is <code>null</code> or if <seealso cref="DocIdSet#iterator()"/> return <code>null</code>. The empty
-        ///  instance is use as a placeholder in the cache instead of the <code>null</code> value.
+        /// Provide the <see cref="DocIdSet"/> to be cached, using the <see cref="DocIdSet"/> provided
+        /// by the wrapped Filter. 
+        /// <para/>This implementation returns the given <see cref="DocIdSet"/>,
+        /// if <see cref="DocIdSet.IsCacheable"/> returns <c>true</c>, else it calls
+        /// <see cref="CacheImpl(DocIdSetIterator, AtomicReader)"/>
+        /// <para/>Note: this method returns <see cref="EMPTY_DOCIDSET"/> if the given <paramref name="docIdSet"/>
+        /// is <c>null</c> or if <see cref="DocIdSet.GetIterator()"/> returns <c>null</c>. The empty
+        /// instance is used as a placeholder in the cache instead of the <c>null</c> value.
         /// </summary>
         protected virtual DocIdSet DocIdSetToCache(DocIdSet docIdSet, AtomicReader reader)
         {
@@ -101,7 +102,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Default cache implementation: uses <seealso cref="WAH8DocIdSet"/>.
+        /// Default cache implementation: uses <see cref="WAH8DocIdSet"/>.
         /// </summary>
         protected virtual DocIdSet CacheImpl(DocIdSetIterator iterator, AtomicReader reader)
         {
@@ -155,7 +156,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// An empty {@code DocIdSet} instance </summary>
+        /// An empty <see cref="DocIdSet"/> instance </summary>
         protected static readonly DocIdSet EMPTY_DOCIDSET = new DocIdSetAnonymousInnerClassHelper();
 
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/CollectionStatistics.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/CollectionStatistics.cs b/src/Lucene.Net/Search/CollectionStatistics.cs
index 6f08080..c65ae88 100644
--- a/src/Lucene.Net/Search/CollectionStatistics.cs
+++ b/src/Lucene.Net/Search/CollectionStatistics.cs
@@ -22,6 +22,7 @@ namespace Lucene.Net.Search
 
     /// <summary>
     /// Contains statistics for a collection (field)
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -35,6 +36,9 @@ namespace Lucene.Net.Search
         private readonly long sumTotalTermFreq;
         private readonly long sumDocFreq;
 
+        /// <summary>
+        /// Sole constructor.
+        /// </summary>
         public CollectionStatistics(string field, long maxDoc, long docCount, long sumTotalTermFreq, long sumDocFreq)
         {
             Debug.Assert(maxDoc >= 0);
@@ -49,41 +53,41 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// returns the field name </summary>
+        /// Returns the field name </summary>
         public string Field
         {
             get { return field; }
         }
 
         /// <summary>
-        /// returns the total number of documents, regardless of
+        /// Returns the total number of documents, regardless of
         /// whether they all contain values for this field. </summary>
-        /// <seealso cref= IndexReader#maxDoc()  </seealso>
+        /// <seealso cref="Index.IndexReader.MaxDoc"/>
         public long MaxDoc
         {
             get { return maxDoc; }
         }
 
         /// <summary>
-        /// returns the total number of documents that
+        /// Returns the total number of documents that
         /// have at least one term for this field. </summary>
-        /// <seealso cref= Terms#getDocCount()  </seealso>
+        /// <seealso cref="Index.Terms.DocCount"/>
         public long DocCount
         {
             get { return docCount; }
         }
 
         /// <summary>
-        /// returns the total number of tokens for this field </summary>
-        /// <seealso cref= Terms#getSumTotalTermFreq()  </seealso>
+        /// Returns the total number of tokens for this field </summary>
+        /// <seealso cref="Index.Terms.SumTotalTermFreq"/>
         public long SumTotalTermFreq
         {
             get { return sumTotalTermFreq; }
         }
 
         /// <summary>
-        /// returns the total number of postings for this field </summary>
-        /// <seealso cref= Terms#getSumDocFreq()  </seealso>
+        /// Returns the total number of postings for this field </summary>
+        /// <seealso cref="Index.Terms.SumDocFreq"/>
         public long SumDocFreq
         {
             get { return sumDocFreq; }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/CollectionTerminatedException.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/CollectionTerminatedException.cs b/src/Lucene.Net/Search/CollectionTerminatedException.cs
index 7e837c7..a03c181 100644
--- a/src/Lucene.Net/Search/CollectionTerminatedException.cs
+++ b/src/Lucene.Net/Search/CollectionTerminatedException.cs
@@ -23,12 +23,12 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// Throw this exception in <seealso cref="ICollector#collect(int)"/> to prematurely
-    ///  terminate collection of the current leaf.
-    ///  <p>Note: IndexSearcher swallows this exception and never re-throws it.
-    ///  As a consequence, you should not catch it when calling
-    ///  <seealso cref="IndexSearcher#search"/> as it is unnecessary and might hide misuse
-    ///  of this exception.
+    /// Throw this exception in <see cref="ICollector.Collect(int)"/> to prematurely
+    /// terminate collection of the current leaf.
+    /// <para/>Note: <see cref="IndexSearcher"/> swallows this exception and never re-throws it.
+    /// As a consequence, you should not catch it when calling any overload of
+    /// <see cref="IndexSearcher.Search(Weight, FieldDoc, int, Sort, bool, bool, bool)"/> as it is unnecessary and might hide misuse
+    /// of this exception.
     /// </summary>
     // LUCENENET: All exeption classes should be marked serializable
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/Collector.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Collector.cs b/src/Lucene.Net/Search/Collector.cs
index 0174bb7..f3f9c31 100644
--- a/src/Lucene.Net/Search/Collector.cs
+++ b/src/Lucene.Net/Search/Collector.cs
@@ -20,102 +20,117 @@ namespace Lucene.Net.Search
     using AtomicReaderContext = Lucene.Net.Index.AtomicReaderContext;
 
     /// <summary>
-    /// <p>Expert: Collectors are primarily meant to be used to
+    /// <para>Expert: Collectors are primarily meant to be used to
     /// gather raw results from a search, and implement sorting
-    /// or custom result filtering, collation, etc. </p>
+    /// or custom result filtering, collation, etc. </para>
     ///
-    /// <p>Lucene's core collectors are derived from Collector.
+    /// <para>Lucene's core collectors are derived from Collector.
     /// Likely your application can use one of these classes, or
-    /// subclass <seealso cref="TopDocsCollector"/>, instead of
-    /// implementing Collector directly:
+    /// subclass <see cref="TopDocsCollector{T}"/>, instead of
+    /// implementing <see cref="ICollector"/> directly:
     ///
-    /// <ul>
+    /// <list type="bullet">
     ///
-    ///   <li><seealso cref="TopDocsCollector"/> is an abstract base class
+    ///   <item><description><see cref="TopDocsCollector{T}"/> is an abstract base class
     ///   that assumes you will retrieve the top N docs,
     ///   according to some criteria, after collection is
-    ///   done.  </li>
-    ///
-    ///   <li><seealso cref="TopScoreDocCollector"/> is a concrete subclass
-    ///   <seealso cref="TopDocsCollector"/> and sorts according to score +
-    ///   docID.  this is used internally by the {@link
-    ///   IndexSearcher} search methods that do not take an
-    ///   explicit <seealso cref="Sort"/>. It is likely the most frequently
-    ///   used collector.</li>
-    ///
-    ///   <li><seealso cref="TopFieldCollector"/> subclasses {@link
-    ///   TopDocsCollector} and sorts according to a specified
-    ///   <seealso cref="Sort"/> object (sort by field).  this is used
-    ///   internally by the <seealso cref="IndexSearcher"/> search methods
-    ///   that take an explicit <seealso cref="Sort"/>.
-    ///
-    ///   <li><seealso cref="TimeLimitingCollector"/>, which wraps any other
+    ///   done.  </description></item>
+    ///
+    ///   <item><description><see cref="TopScoreDocCollector"/> is a concrete subclass of
+    ///   <see cref="TopDocsCollector{T}"/> and sorts according to score +
+    ///   docID.  This is used internally by the 
+    ///   <see cref="IndexSearcher"/> search methods that do not take an
+    ///   explicit <see cref="Sort"/>. It is likely the most frequently
+    ///   used collector.</description></item>
+    ///
+    ///   <item><description><see cref="TopFieldCollector"/> subclasses 
+    ///   <see cref="TopDocsCollector{T}"/> and sorts according to a specified
+    ///   <see cref="Sort"/> object (sort by field).  This is used
+    ///   internally by the <see cref="IndexSearcher"/> search methods
+    ///   that take an explicit <see cref="Sort"/>.</description></item>
+    ///
+    ///   <item><description><see cref="TimeLimitingCollector"/>, which wraps any other
     ///   Collector and aborts the search if it's taken too much
-    ///   time.</li>
+    ///   time.</description></item>
     ///
-    ///   <li><seealso cref="PositiveScoresOnlyCollector"/> wraps any other
-    ///   Collector and prevents collection of hits whose score
-    ///   is &lt;= 0.0</li>
+    ///   <item><description><see cref="PositiveScoresOnlyCollector"/> wraps any other
+    ///   <see cref="ICollector"/> and prevents collection of hits whose score
+    ///   is &lt;= 0.0</description></item>
     ///
-    /// </ul>
+    /// </list>
+    /// </para>
     ///
-    /// <p>Collector decouples the score from the collected doc:
+    /// <para><see cref="ICollector"/> decouples the score from the collected doc:
     /// the score computation is skipped entirely if it's not
     /// needed.  Collectors that do need the score should
-    /// implement the <seealso cref="#setScorer"/> method, to hold onto the
-    /// passed <seealso cref="Scorer"/> instance, and call {@link
-    /// Scorer#score()} within the collect method to compute the
+    /// implement the <see cref="SetScorer(Scorer)"/> method, to hold onto the
+    /// passed <see cref="Scorer"/> instance, and call 
+    /// <see cref="Scorer.GetScore()"/> within the collect method to compute the
     /// current hit's score.  If your collector may request the
     /// score for a single hit multiple times, you should use
-    /// <seealso cref="ScoreCachingWrappingScorer"/>. </p>
+    /// <see cref="ScoreCachingWrappingScorer"/>. </para>
     ///
-    /// <p><b>NOTE:</b> The doc that is passed to the collect
+    /// <para><b>NOTE:</b> The doc that is passed to the collect
     /// method is relative to the current reader. If your
     /// collector needs to resolve this to the docID space of the
     /// Multi*Reader, you must re-base it by recording the
-    /// docBase from the most recent setNextReader call.  Here's
-    /// a simple example showing how to collect docIDs into a
-    /// BitSet:</p>
-    ///
-    /// <pre class="prettyprint">
+    /// docBase from the most recent <see cref="SetNextReader(AtomicReaderContext)"/> call.  Here's
+    /// a simple example showing how to collect docIDs into an
+    /// <see cref="Util.OpenBitSet"/>:</para>
+    ///
+    /// <code>
+    /// private class MySearchCollector : ICollector
+    /// {
+    ///     private readonly OpenBitSet bits;
+    ///     private int docBase;
+    /// 
+    ///     public MySearchCollector(OpenBitSet bits)
+    ///     {
+    ///         if (bits == null) throw new ArgumentNullException("bits");
+    ///         this.bits = bits;
+    ///     }
+    /// 
+    ///     // ignore scorer
+    ///     public void SetScorer(Scorer scorer)
+    ///     { 
+    ///     }
+    ///     
+    ///     // accept docs out of order (for a BitSet it doesn't matter)
+    ///     public bool AcceptDocsOutOfOrder
+    ///     {
+    ///         get { return true; }
+    ///     }
+    ///     
+    ///     public void Collect(int doc)
+    ///     {
+    ///         bits.Set(doc + docBase);
+    ///     }
+    ///     
+    ///     public void SetNextReader(AtomicReaderContext context)
+    ///     {
+    ///         this.docBase = context.DocBase;
+    ///     }
+    /// }
+    /// 
     /// IndexSearcher searcher = new IndexSearcher(indexReader);
-    /// final BitSet bits = new BitSet(indexReader.maxDoc());
-    /// searcher.search(query, new Collector() {
-    ///   private int docBase;
-    ///
-    ///   <em>// ignore scorer</em>
-    ///   public void setScorer(Scorer scorer) {
-    ///   }
-    ///
-    ///   <em>// accept docs out of order (for a BitSet it doesn't matter)</em>
-    ///   public boolean acceptsDocsOutOfOrder() {
-    ///     return true;
-    ///   }
+    /// OpenBitSet bits = new OpenBitSet(indexReader.MaxDoc);
+    /// searcher.Search(query, new MySearchCollector(bits));
+    /// </code>
     ///
-    ///   public void collect(int doc) {
-    ///     bits.set(doc + docBase);
-    ///   }
-    ///
-    ///   public void setNextReader(AtomicReaderContext context) {
-    ///     this.docBase = context.docBase;
-    ///   }
-    /// });
-    /// </pre>
-    ///
-    /// <p>Not all collectors will need to rebase the docID.  For
+    /// <para>Not all collectors will need to rebase the docID.  For
     /// example, a collector that simply counts the total number
-    /// of hits would skip it.</p>
+    /// of hits would skip it.</para>
     ///
-    /// <p><b>NOTE:</b> Prior to 2.9, Lucene silently filtered
-    /// out hits with score &lt;= 0.  As of 2.9, the core Collectors
+    /// <para><b>NOTE:</b> Prior to 2.9, Lucene silently filtered
+    /// out hits with score &lt;= 0.  As of 2.9, the core <see cref="ICollector"/>s
     /// no longer do that.  It's very unusual to have such hits
     /// (a negative query boost, or function query returning
     /// negative custom scores, could cause it to happen).  If
-    /// you need that behavior, use {@link
-    /// PositiveScoresOnlyCollector}.</p>
+    /// you need that behavior, use 
+    /// <see cref="PositiveScoresOnlyCollector"/>.</para>
     ///
     /// @lucene.experimental
-    ///
+    /// <para/>
     /// @since 2.9
     /// </summary>
     public interface ICollector // LUCENENET NOTE: This was an abstract class in Lucene, but made into an interface since we need one for Grouping's covariance
@@ -123,8 +138,8 @@ namespace Lucene.Net.Search
         /// <summary>
         /// Called before successive calls to <see cref="Collect(int)"/>. Implementations
         /// that need the score of the current document (passed-in to
-        /// <also cref="Collect(int)"/>), should save the passed-in <see cref="Scorer"/> and call
-        /// scorer.Score() when needed.
+        /// <see cref="Collect(int)"/>), should save the passed-in <see cref="Scorer"/> and call
+        /// <c>scorer.GetScore()</c> when needed.
         /// </summary>
         void SetScorer(Scorer scorer);
 
@@ -146,8 +161,8 @@ namespace Lucene.Net.Search
         /// <summary>
         /// Called before collecting from each <see cref="AtomicReaderContext"/>. All doc ids in
         /// <see cref="Collect(int)"/> will correspond to <see cref="Index.IndexReaderContext.Reader"/>.
-        ///
-        /// Add <see cref="AtomicReaderContext#docBase"/> to the current <see cref="Index.IndexReaderContext.Reader"/>'s
+        /// <para/>
+        /// Add <see cref="AtomicReaderContext.DocBase"/> to the current <see cref="Index.IndexReaderContext.Reader"/>'s
         /// internal document id to re-base ids in <see cref="Collect(int)"/>.
         /// </summary>
         /// <param name="context">next atomic reader context </param>
@@ -171,4 +186,6 @@ namespace Lucene.Net.Search
         /// </summary>
         bool AcceptsDocsOutOfOrder { get; }
     }
-}
\ No newline at end of file
+}
+
+// LUCENENET TODO: API: Create Collector.NewAnonymous() static delegate method to allow creation of collectors inline.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/ComplexExplanation.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/ComplexExplanation.cs b/src/Lucene.Net/Search/ComplexExplanation.cs
index 92a5cc6..ae8be43 100644
--- a/src/Lucene.Net/Search/ComplexExplanation.cs
+++ b/src/Lucene.Net/Search/ComplexExplanation.cs
@@ -44,8 +44,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// The match status of this explanation node. </summary>
-        /// <returns> May be null if match status is unknown </returns>
+        /// Gets or sets the match status assigned to this explanation node. 
+        /// May be <c>null</c> if match status is unknown.
+        /// </summary>
         public virtual bool? Match
         {
             get
@@ -59,13 +60,13 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Indicates whether or not this Explanation models a good match.
+        /// Indicates whether or not this <see cref="Explanation"/> models a good match.
         ///
-        /// <p>
+        /// <para>
         /// If the match status is explicitly set (i.e.: not null) this method
         /// uses it; otherwise it defers to the superclass.
-        /// </p> </summary>
-        /// <seealso cref= #getMatch </seealso>
+        /// </para> </summary>
+        /// <seealso cref="Match"/>
         public override bool IsMatch
         {
             get

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/ConstantScoreAutoRewrite.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/ConstantScoreAutoRewrite.cs b/src/Lucene.Net/Search/ConstantScoreAutoRewrite.cs
index a1c0cff..7c4f2a4 100644
--- a/src/Lucene.Net/Search/ConstantScoreAutoRewrite.cs
+++ b/src/Lucene.Net/Search/ConstantScoreAutoRewrite.cs
@@ -40,13 +40,17 @@ namespace Lucene.Net.Search
         // LUCENENET specific - making constructor internal since the class was meant to be internal
         internal ConstantScoreAutoRewrite() { }
 
-        // Defaults derived from rough tests with a 20.0 million
-        // doc Wikipedia index.  With more than 350 terms in the
-        // query, the filter method is fastest:
+        /// <summary>
+        /// Defaults derived from rough tests with a 20.0 million
+        /// doc Wikipedia index.  With more than 350 terms in the
+        /// query, the filter method is fastest:
+        /// </summary>
         public static int DEFAULT_TERM_COUNT_CUTOFF = 350;
 
-        // If the query will hit more than 1 in 1000 of the docs
-        // in the index (0.1%), the filter method is fastest:
+        /// <summary>
+        /// If the query will hit more than 1 in 1000 of the docs
+        /// in the index (0.1%), the filter method is fastest:
+        /// </summary>
         public static double DEFAULT_DOC_COUNT_PERCENT = 0.1;
 
         private int termCountCutoff = DEFAULT_TERM_COUNT_CUTOFF;
@@ -54,8 +58,8 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// If the number of terms in this query is equal to or
-        ///  larger than this setting then {@link
-        ///  MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE} is used.
+        /// larger than this setting then 
+        /// <see cref="MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE"/> is used.
         /// </summary>
         public virtual int TermCountCutoff
         {
@@ -71,10 +75,11 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// If the number of documents to be visited in the
-        ///  postings exceeds this specified percentage of the
-        ///  maxDoc() for the index, then {@link
-        ///  MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE} is used. </summary>
-        ///  <param name="percent"> 0.0 to 100.0  </param>
+        /// postings exceeds this specified percentage of the
+        /// <see cref="Index.IndexReader.MaxDoc"/> for the index, then
+        /// <see cref="MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE"/> is used. 
+        /// Value may be 0.0 to 100.0.
+        /// </summary>
         public virtual double DocCountPercent
         {
             set
@@ -226,7 +231,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Special implementation of BytesStartArray that keeps parallel arrays for <seealso cref="TermContext"/> </summary>
+        /// Special implementation of <see cref="BytesRefHash.BytesStartArray"/> that keeps parallel arrays for <see cref="TermContext"/> </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/ConstantScoreQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/ConstantScoreQuery.cs b/src/Lucene.Net/Search/ConstantScoreQuery.cs
index 7ff56c2..254d267 100644
--- a/src/Lucene.Net/Search/ConstantScoreQuery.cs
+++ b/src/Lucene.Net/Search/ConstantScoreQuery.cs
@@ -43,7 +43,7 @@ namespace Lucene.Net.Search
         protected readonly Query m_query;
 
         /// <summary>
-        /// Strips off scores from the passed in Query. The hits will get a constant score
+        /// Strips off scores from the passed in <see cref="Search.Query"/>. The hits will get a constant score
         /// dependent on the boost factor of this query.
         /// </summary>
         public ConstantScoreQuery(Query query)
@@ -57,11 +57,11 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Wraps a Filter as a Query. The hits will get a constant score
+        /// Wraps a <see cref="Search.Filter"/> as a <see cref="Search.Query"/>. The hits will get a constant score
         /// dependent on the boost factor of this query.
-        /// If you simply want to strip off scores from a Query, no longer use
-        /// {@code new ConstantScoreQuery(new QueryWrapperFilter(query))}, instead
-        /// use <seealso cref="#ConstantScoreQuery(Query)"/>!
+        /// If you simply want to strip off scores from a <see cref="Search.Query"/>, no longer use
+        /// <c>new ConstantScoreQuery(new QueryWrapperFilter(query))</c>, instead
+        /// use <see cref="ConstantScoreQuery(Query)"/>!
         /// </summary>
         public ConstantScoreQuery(Filter filter)
         {
@@ -74,7 +74,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns the encapsulated filter, returns {@code null} if a query is wrapped. </summary>
+        /// Returns the encapsulated filter, returns <c>null</c> if a query is wrapped. </summary>
         public virtual Filter Filter
         {
             get
@@ -84,7 +84,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns the encapsulated query, returns {@code null} if a filter is wrapped. </summary>
+        /// Returns the encapsulated query, returns <c>null</c> if a filter is wrapped. </summary>
         public virtual Query Query
         {
             get
@@ -162,9 +162,9 @@ namespace Lucene.Net.Search
             public override float GetValueForNormalization()
             {
                 // we calculate sumOfSquaredWeights of the inner weight, but ignore it (just to initialize everything)
-                /*if (InnerWeight != null)
+                /*if (InnerWeight != null) // LUCENENET TODO: BUG This code was in the original
                 {
-                    InnerWeight.ValueForNormalization;
+                    return innerWeight.GetValueForNormalization();
                 }*/
                 queryWeight = outerInstance.Boost;
                 return queryWeight * queryWeight;
@@ -257,10 +257,10 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// We return this as our <seealso cref="bulkScorer"/> so that if the CSQ
-        ///  wraps a query with its own optimized top-level
-        ///  scorer (e.g. BooleanScorer) we can use that
-        ///  top-level scorer.
+        /// We return this as our <see cref="BulkScorer"/> so that if the CSQ
+        /// wraps a query with its own optimized top-level
+        /// scorer (e.g. <see cref="BooleanScorer"/>) we can use that
+        /// top-level scorer.
         /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/ControlledRealTimeReopenThread.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/ControlledRealTimeReopenThread.cs b/src/Lucene.Net/Search/ControlledRealTimeReopenThread.cs
index 73a361b..9028c15 100644
--- a/src/Lucene.Net/Search/ControlledRealTimeReopenThread.cs
+++ b/src/Lucene.Net/Search/ControlledRealTimeReopenThread.cs
@@ -147,7 +147,7 @@ namespace Lucene.Net.Search
         /// until the searcher is reopened, by another via
         /// <see cref="ReferenceManager{T}.MaybeRefresh()"/> or until the <see cref="ReferenceManager{T}"/> is closed.
         /// </summary>
-        /// <param name="targetGen"> the generation to wait for </param>
+        /// <param name="targetGen"> The generation to wait for </param>
         public virtual void WaitForGeneration(long targetGen)
         {
             WaitForGeneration(targetGen, -1);
@@ -160,16 +160,16 @@ namespace Lucene.Net.Search
         /// generation, this method will block until the
         /// searcher has been reopened by another thread via
         /// <see cref="ReferenceManager{T}.MaybeRefresh()"/>, the given waiting time has elapsed, or until
-        /// the <seealso cref="ReferenceManager{T}"/> is closed.
+        /// the <see cref="ReferenceManager{T}"/> is closed.
         /// <para/>
         /// NOTE: if the waiting time elapses before the requested target generation is
         /// available the current <see cref="SearcherManager"/> is returned instead.
         /// </summary>
         /// <param name="targetGen">
-        ///          the generation to wait for </param>
+        ///          The generation to wait for </param>
         /// <param name="maxMS">
-        ///          maximum milliseconds to wait, or -1 to wait indefinitely </param>
-        /// <returns> true if the <paramref name="targetGen"/> is now available,
+        ///          Maximum milliseconds to wait, or -1 to wait indefinitely </param>
+        /// <returns> <c>true</c> if the <paramref name="targetGen"/> is now available,
         ///         or false if <paramref name="maxMS"/> wait time was exceeded </returns>
         public virtual bool WaitForGeneration(long targetGen, int maxMS)
         {


[08/48] lucenenet git commit: Lucene.Net.Search: Fixed up documentation comments

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/FieldComparator.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/FieldComparator.cs b/src/Lucene.Net/Search/FieldComparator.cs
index dd7b64e..1ff68b3 100644
--- a/src/Lucene.Net/Search/FieldComparator.cs
+++ b/src/Lucene.Net/Search/FieldComparator.cs
@@ -30,66 +30,63 @@ namespace Lucene.Net.Search
     using SortedDocValues = Lucene.Net.Index.SortedDocValues;
 
     /// <summary>
-    /// Expert: a FieldComparer compares hits so as to determine their
-    /// sort order when collecting the top results with {@link
-    /// TopFieldCollector}.  The concrete public FieldComparer
-    /// classes here correspond to the SortField types.
+    /// Expert: a <see cref="FieldComparer"/> compares hits so as to determine their
+    /// sort order when collecting the top results with
+    /// <see cref="TopFieldCollector"/>.  The concrete public <see cref="FieldComparer"/>
+    /// classes here correspond to the <see cref="SortField"/> types.
     ///
-    /// <p>this API is designed to achieve high performance
-    /// sorting, by exposing a tight interaction with {@link
-    /// FieldValueHitQueue} as it visits hits.  Whenever a hit is
+    /// <para>This API is designed to achieve high performance
+    /// sorting, by exposing a tight interaction with 
+    /// <see cref="FieldValueHitQueue"/> as it visits hits.  Whenever a hit is
     /// competitive, it's enrolled into a virtual slot, which is
-    /// an int ranging from 0 to numHits-1.  The {@link
-    /// FieldComparer} is made aware of segment transitions
+    /// an <see cref="int"/> ranging from 0 to numHits-1.  The 
+    /// <see cref="FieldComparer"/> is made aware of segment transitions
     /// during searching in case any internal state it's tracking
-    /// needs to be recomputed during these transitions.</p>
+    /// needs to be recomputed during these transitions.</para>
     ///
-    /// <p>A comparer must define these functions:</p>
+    /// <para>A comparer must define these functions:</para>
     ///
-    /// <ul>
+    /// <list type="bullet">
     ///
-    ///  <li> <seealso cref="#compare"/> Compare a hit at 'slot a'
-    ///       with hit 'slot b'.
+    ///  <item><term><see cref="Compare(int, int)"/></term> <description> Compare a hit at 'slot a'
+    ///       with hit 'slot b'.</description></item>
     ///
-    ///  <li> <seealso cref="#setBottom"/> this method is called by
-    ///       <seealso cref="FieldValueHitQueue"/> to notify the
-    ///       FieldComparer of the current weakest ("bottom")
+    ///  <item><term><see cref="SetBottom(int)"/></term> <description>This method is called by
+    ///       <see cref="FieldValueHitQueue"/> to notify the
+    ///       <see cref="FieldComparer"/> of the current weakest ("bottom")
     ///       slot.  Note that this slot may not hold the weakest
     ///       value according to your comparer, in cases where
     ///       your comparer is not the primary one (ie, is only
-    ///       used to break ties from the comparers before it).
+    ///       used to break ties from the comparers before it).</description></item>
     ///
-    ///  <li> <seealso cref="#compareBottom"/> Compare a new hit (docID)
-    ///       against the "weakest" (bottom) entry in the queue.
+    ///  <item><term><see cref="CompareBottom(int)"/></term> <description>Compare a new hit (docID)
+    ///       against the "weakest" (bottom) entry in the queue.</description></item>
     ///
-    ///  <li> <seealso cref="#setTopValue"/> this method is called by
-    ///       <seealso cref="TopFieldCollector"/> to notify the
-    ///       FieldComparer of the top most value, which is
-    ///       used by future calls to <seealso cref="#compareTop"/>.
+    ///  <item><term><see cref="SetTopValue(object)"/></term> <description>This method is called by
+    ///       <see cref="TopFieldCollector"/> to notify the
+    ///       <see cref="FieldComparer"/> of the top most value, which is
+    ///       used by future calls to <see cref="CompareTop(int)"/>.</description></item>
     ///
-    ///  <li> <seealso cref="#compareBottom"/> Compare a new hit (docID)
-    ///       against the "weakest" (bottom) entry in the queue.
-    ///
-    ///  <li> <seealso cref="#compareTop"/> Compare a new hit (docID)
+    ///  <item><term><see cref="CompareTop(int)"/></term> <description>Compare a new hit (docID)
     ///       against the top value previously set by a call to
-    ///       <seealso cref="#setTopValue"/>.
+    ///       <see cref="SetTopValue(object)"/>.</description></item>
     ///
-    ///  <li> <seealso cref="#copy"/> Installs a new hit into the
-    ///       priority queue.  The <seealso cref="FieldValueHitQueue"/>
-    ///       calls this method when a new hit is competitive.
+    ///  <item><term><see cref="Copy(int, int)"/></term> <description>Installs a new hit into the
+    ///       priority queue.  The <see cref="FieldValueHitQueue"/>
+    ///       calls this method when a new hit is competitive.</description></item>
     ///
-    ///  <li> <seealso cref="#setNextReader(AtomicReaderContext)"/> Invoked
+    ///  <item><term><see cref="SetNextReader(AtomicReaderContext)"/></term> <description>Invoked
     ///       when the search is switching to the next segment.
     ///       You may need to update internal state of the
     ///       comparer, for example retrieving new values from
-    ///       the <seealso cref="IFieldCache"/>.
-    ///
-    ///  <li> <seealso cref="#value"/> Return the sort value stored in
-    ///       the specified slot.  this is only called at the end
-    ///       of the search, in order to populate {@link
-    ///       FieldDoc#fields} when returning the top results.
-    /// </ul>
+    ///       the <see cref="IFieldCache"/>.</description></item>
     ///
+    ///  <item><term><see cref="FieldComparer.this[int]"/></term> <description>Return the sort value stored in
+    ///       the specified slot.  This is only called at the end
+    ///       of the search, in order to populate
+    ///       <see cref="FieldDoc.Fields"/> when returning the top results.</description></item>
+    /// </list>
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -98,93 +95,93 @@ namespace Lucene.Net.Search
     public abstract class FieldComparer<T> : FieldComparer
     {
         /// <summary>
-        /// Compare hit at slot1 with hit at slot2.
+        /// Compare hit at <paramref name="slot1"/> with hit at <paramref name="slot2"/>.
         /// </summary>
         /// <param name="slot1"> first slot to compare </param>
         /// <param name="slot2"> second slot to compare </param>
-        /// <returns> any N < 0 if slot2's value is sorted after
-        /// slot1, any N > 0 if the slot2's value is sorted before
-        /// slot1 and 0 if they are equal </returns>
+        /// <returns> any N &lt; 0 if <paramref name="slot2"/>'s value is sorted after
+        /// <paramref name="slot1"/>, any N &gt; 0 if the <paramref name="slot2"/>'s value is sorted before
+        /// <paramref name="slot1"/> and 0 if they are equal </returns>
         public abstract override int Compare(int slot1, int slot2);
 
         /// <summary>
         /// Set the bottom slot, ie the "weakest" (sorted last)
-        /// entry in the queue.  When <seealso cref="#compareBottom"/> is
-        /// called, you should compare against this slot.  this
-        /// will always be called before <seealso cref="#compareBottom"/>.
+        /// entry in the queue.  When <see cref="CompareBottom(int)"/> is
+        /// called, you should compare against this slot.  This
+        /// will always be called before <see cref="CompareBottom(int)"/>.
         /// </summary>
         /// <param name="slot"> the currently weakest (sorted last) slot in the queue </param>
         public abstract override void SetBottom(int slot);
 
         /// <summary>
-        /// Record the top value, for future calls to {@link
-        /// #compareTop}.  this is only called for searches that
-        /// use searchAfter (deep paging), and is called before any
-        /// calls to <seealso cref="#setNextReader"/>.
+        /// Record the top value, for future calls to 
+        /// <see cref="CompareTop(int)"/>.  This is only called for searches that
+        /// use SearchAfter (deep paging), and is called before any
+        /// calls to <see cref="SetNextReader(AtomicReaderContext)"/>.
         /// </summary>
         public abstract override void SetTopValue(object value);
 
         /// <summary>
-        /// Compare the bottom of the queue with this doc.  this will
-        /// only invoked after setBottom has been called.  this
-        /// should return the same result as {@link
-        /// #compare(int,int)}} as if bottom were slot1 and the new
+        /// Compare the bottom of the queue with this doc.  This will
+        /// only be invoked after <see cref="SetBottom(int)"/> has been called.  This
+        /// should return the same result as 
+        /// <see cref="Compare(int, int)"/> as if bottom were slot1 and the new
         /// document were slot 2.
         ///
-        /// <p>For a search that hits many results, this method
+        /// <para>For a search that hits many results, this method
         /// will be the hotspot (invoked by far the most
-        /// frequently).</p>
+        /// frequently).</para>
         /// </summary>
-        /// <param name="doc"> that was hit </param>
-        /// <returns> any N < 0 if the doc's value is sorted after
-        /// the bottom entry (not competitive), any N > 0 if the
+        /// <param name="doc"> Doc that was hit </param>
+        /// <returns> Any N &lt; 0 if the doc's value is sorted after
+        /// the bottom entry (not competitive), any N &gt; 0 if the
         /// doc's value is sorted before the bottom entry and 0 if
         /// they are equal. </returns>
         public abstract override int CompareBottom(int doc);
 
         /// <summary>
-        /// Compare the top value with this doc.  this will
-        /// only invoked after setTopValue has been called.  this
-        /// should return the same result as {@link
-        /// #compare(int,int)}} as if topValue were slot1 and the new
-        /// document were slot 2.  this is only called for searches that
-        /// use searchAfter (deep paging).
+        /// Compare the top value with this doc.  This will
+        /// only be invoked after <see cref="SetTopValue(object)"/> has been called.  This
+        /// should return the same result as 
+        /// <see cref="Compare(int, int)"/> as if topValue were slot1 and the new
+        /// document were slot 2.  This is only called for searches that
+        /// use SearchAfter (deep paging).
         /// </summary>
-        /// <param name="doc"> that was hit </param>
-        /// <returns> any N < 0 if the doc's value is sorted after
-        /// the bottom entry (not competitive), any N > 0 if the
+        /// <param name="doc"> Doc that was hit </param>
+        /// <returns> Any N &lt; 0 if the doc's value is sorted after
+        /// the bottom entry (not competitive), any N &gt; 0 if the
         /// doc's value is sorted before the bottom entry and 0 if
         /// they are equal. </returns>
         public abstract override int CompareTop(int doc);
 
         /// <summary>
-        /// this method is called when a new hit is competitive.
+        /// This method is called when a new hit is competitive.
         /// You should copy any state associated with this document
         /// that will be required for future comparisons, into the
         /// specified slot.
         /// </summary>
-        /// <param name="slot"> which slot to copy the hit to </param>
-        /// <param name="doc"> docID relative to current reader </param>
+        /// <param name="slot"> Which slot to copy the hit to </param>
+        /// <param name="doc"> DocID relative to current reader </param>
         public abstract override void Copy(int slot, int doc);
 
         /// <summary>
-        /// Set a new <seealso cref="AtomicReaderContext"/>. All subsequent docIDs are relative to
+        /// Set a new <see cref="AtomicReaderContext"/>. All subsequent docIDs are relative to
         /// the current reader (you must add docBase if you need to
         /// map it to a top-level docID).
         /// </summary>
-        /// <param name="context"> current reader context </param>
-        /// <returns> the comparer to use for this segment; most
+        /// <param name="context"> Current reader context </param>
+        /// <returns> The comparer to use for this segment; most
         ///   comparers can just return "this" to reuse the same
         ///   comparer across segments </returns>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
+        /// <exception cref="System.IO.IOException"> If there is a low-level IO error </exception>
         public abstract override FieldComparer SetNextReader(AtomicReaderContext context);
 
         /// <summary>
         /// Returns -1 if first is less than second.  Default
-        ///  impl to assume the type implements Comparable and
-        ///  invoke .compareTo; be sure to override this method if
-        ///  your FieldComparer's type isn't a Comparable or
-        ///  if your values may sometimes be null
+        /// impl to assume the type implements <see cref="IComparable{T}"/> and
+        /// invoke <see cref="IComparable{T}.CompareTo(T)"/>; be sure to override this method if
+        /// your FieldComparer's type isn't a <see cref="IComparable{T}"/> or
+        /// if your values may sometimes be <c>null</c>
         /// </summary>
         public virtual int CompareValues(T first, T second)
         {
@@ -204,6 +201,13 @@ namespace Lucene.Net.Search
             }
         }
 
+        /// <summary>
+        /// Returns -1 if first is less than second.  Default
+        /// impl to assume the type implements <see cref="IComparable{T}"/> and
+        /// invoke <see cref="IComparable{T}.CompareTo(T)"/>; be sure to override this method if
+        /// your FieldComparer's type isn't a <see cref="IComparable{T}"/> or
+        /// if your values may sometimes be <c>null</c>
+        /// </summary>
         public override int CompareValues(object first, object second)
         {
             return CompareValues((T)first, (T)second);
@@ -221,92 +225,92 @@ namespace Lucene.Net.Search
 
         //Set up abstract methods
         /// <summary>
-        /// Compare hit at slot1 with hit at slot2.
+        /// Compare hit at <paramref name="slot1"/> with hit at <paramref name="slot2"/>.
         /// </summary>
         /// <param name="slot1"> first slot to compare </param>
         /// <param name="slot2"> second slot to compare </param>
-        /// <returns> any N < 0 if slot2's value is sorted after
-        /// slot1, any N > 0 if the slot2's value is sorted before
-        /// slot1 and 0 if they are equal </returns>
+        /// <returns> any N &lt; 0 if <paramref name="slot2"/>'s value is sorted after
+        /// <paramref name="slot1"/>, any N &gt; 0 if the <paramref name="slot2"/>'s value is sorted before
+        /// <paramref name="slot1"/> and 0 if they are equal </returns>
         public abstract int Compare(int slot1, int slot2);
 
         /// <summary>
         /// Set the bottom slot, ie the "weakest" (sorted last)
-        /// entry in the queue.  When <seealso cref="#compareBottom"/> is
-        /// called, you should compare against this slot.  this
-        /// will always be called before <seealso cref="#compareBottom"/>.
+        /// entry in the queue.  When <see cref="CompareBottom(int)"/> is
+        /// called, you should compare against this slot.  This
+        /// will always be called before <see cref="CompareBottom(int)"/>.
         /// </summary>
-        /// <param name="slot"> the currently weakest (sorted last) slot in the queue </param>
+        /// <param name="slot"> The currently weakest (sorted last) slot in the queue </param>
         public abstract void SetBottom(int slot);
 
         /// <summary>
-        /// Record the top value, for future calls to {@link
-        /// #compareTop}.  this is only called for searches that
-        /// use searchAfter (deep paging), and is called before any
-        /// calls to <seealso cref="#setNextReader"/>.
+        /// Record the top value, for future calls to 
+        /// <see cref="CompareTop(int)"/>.  This is only called for searches that
+        /// use SearchAfter (deep paging), and is called before any
+        /// calls to <see cref="SetNextReader(AtomicReaderContext)"/>.
         /// </summary>
         public abstract void SetTopValue(object value);
 
         /// <summary>
-        /// Compare the bottom of the queue with this doc.  this will
-        /// only invoked after setBottom has been called.  this
-        /// should return the same result as {@link
-        /// #compare(int,int)}} as if bottom were slot1 and the new
+        /// Compare the bottom of the queue with this doc.  This will
+        /// only invoked after setBottom has been called.  This
+        /// should return the same result as 
+        /// <see cref="Compare(int, int)"/> as if bottom were slot1 and the new
         /// document were slot 2.
         ///
-        /// <p>For a search that hits many results, this method
+        /// <para>For a search that hits many results, this method
         /// will be the hotspot (invoked by far the most
-        /// frequently).</p>
+        /// frequently).</para>
         /// </summary>
-        /// <param name="doc"> that was hit </param>
-        /// <returns> any N < 0 if the doc's value is sorted after
-        /// the bottom entry (not competitive), any N > 0 if the
+        /// <param name="doc"> Doc that was hit </param>
+        /// <returns> Any N &lt; 0 if the doc's value is sorted after
+        /// the bottom entry (not competitive), any N &gt; 0 if the
         /// doc's value is sorted before the bottom entry and 0 if
         /// they are equal. </returns>
         public abstract int CompareBottom(int doc);
 
         /// <summary>
-        /// Compare the top value with this doc.  this will
-        /// only invoked after setTopValue has been called.  this
-        /// should return the same result as {@link
-        /// #compare(int,int)}} as if topValue were slot1 and the new
-        /// document were slot 2.  this is only called for searches that
-        /// use searchAfter (deep paging).
+        /// Compare the top value with this doc.  This will
+        /// only be invoked after <see cref="SetTopValue(object)"/> has been called.  This
+        /// should return the same result as 
+        /// <see cref="Compare(int, int)"/> as if topValue were slot1 and the new
+        /// document were slot 2.  This is only called for searches that
+        /// use SearchAfter (deep paging).
         /// </summary>
-        /// <param name="doc"> that was hit </param>
-        /// <returns> any N < 0 if the doc's value is sorted after
-        /// the bottom entry (not competitive), any N > 0 if the
+        /// <param name="doc"> Doc that was hit </param>
+        /// <returns> Any N &lt; 0 if the doc's value is sorted after
+        /// the bottom entry (not competitive), any N &gt; 0 if the
         /// doc's value is sorted before the bottom entry and 0 if
         /// they are equal. </returns>
         public abstract int CompareTop(int doc);
 
         /// <summary>
-        /// this method is called when a new hit is competitive.
+        /// This method is called when a new hit is competitive.
         /// You should copy any state associated with this document
         /// that will be required for future comparisons, into the
         /// specified slot.
         /// </summary>
-        /// <param name="slot"> which slot to copy the hit to </param>
-        /// <param name="doc"> docID relative to current reader </param>
+        /// <param name="slot"> Which slot to copy the hit to </param>
+        /// <param name="doc"> DocID relative to current reader </param>
         public abstract void Copy(int slot, int doc);
 
         /// <summary>
-        /// Set a new <seealso cref="AtomicReaderContext"/>. All subsequent docIDs are relative to
+        /// Set a new <see cref="AtomicReaderContext"/>. All subsequent docIDs are relative to
         /// the current reader (you must add docBase if you need to
         /// map it to a top-level docID).
         /// </summary>
-        /// <param name="context"> current reader context </param>
-        /// <returns> the comparer to use for this segment; most
+        /// <param name="context"> Current reader context </param>
+        /// <returns> The comparer to use for this segment; most
         ///   comparers can just return "this" to reuse the same
         ///   comparer across segments </returns>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
+        /// <exception cref="System.IO.IOException"> If there is a low-level IO error </exception>
         public abstract FieldComparer SetNextReader(AtomicReaderContext context);
 
         /// <summary>
-        /// Sets the Scorer to use in case a document's score is
-        ///  needed.
+        /// Sets the <see cref="Scorer"/> to use in case a document's score is
+        /// needed.
         /// </summary>
-        /// <param name="scorer"> Scorer instance that you should use to
+        /// <param name="scorer"> <see cref="Scorer"/> instance that you should use to
         /// obtain the current hit's score, if necessary.  </param>
         public virtual void SetScorer(Scorer scorer)
         {
@@ -318,15 +322,13 @@ namespace Lucene.Net.Search
         /// Return the actual value in the slot.
         /// LUCENENET NOTE: This was value(int) in Lucene.
         /// </summary>
-        /// <param name="slot"> the value </param>
-        /// <returns> value in this slot </returns>
+        /// <param name="slot"> The value </param>
+        /// <returns> Value in this slot </returns>
         public abstract IComparable this[int slot] { get; }
 
 
         internal static readonly IComparer<double> SIGNED_ZERO_COMPARER = new SignedZeroComparer();
 
-
-
         /// <summary>
         /// Base FieldComparer class for numeric types
         /// </summary>
@@ -366,8 +368,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Parses field's values as byte (using {@link
-        ///  FieldCache#getBytes} and sorts by ascending value
+        /// Parses field's values as <see cref="byte"/> (using 
+        /// <see cref="IFieldCache.GetBytes(Index.AtomicReader, string, FieldCache.IByteParser, bool)"/> and sorts by ascending value
         /// </summary>
         [Obsolete, CLSCompliant(false)] // LUCENENET NOTE: marking non-CLS compliant because of sbyte - it is obsolete, anyway
 #if FEATURE_SERIALIZABLE
@@ -457,8 +459,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Parses field's values as double (using {@link
-        ///  FieldCache#getDoubles} and sorts by ascending value
+        /// Parses field's values as <see cref="double"/> (using 
+        /// <see cref="IFieldCache.GetDoubles(Index.AtomicReader, string, FieldCache.IDoubleParser, bool)"/> and sorts by ascending value
         /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
@@ -559,8 +561,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Parses field's values as float (using {@link
-        ///  FieldCache#getFloats} and sorts by ascending value
+        /// Parses field's values as <see cref="float"/> (using 
+        /// <see cref="IFieldCache.GetSingles(Index.AtomicReader, string, FieldCache.ISingleParser, bool)"/> and sorts by ascending value
         /// <para/>
         /// NOTE: This was FloatComparator in Lucene
         /// </summary>
@@ -664,8 +666,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Parses field's values as short (using {@link
-        /// FieldCache#getShorts} and sorts by ascending value
+        /// Parses field's values as <see cref="short"/> (using 
+        /// <see cref="IFieldCache.GetInt16s(Index.AtomicReader, string, FieldCache.IInt16Parser, bool)"/> and sorts by ascending value
         /// <para/>
         /// NOTE: This was ShortComparator in Lucene
         /// </summary>
@@ -758,8 +760,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Parses field's values as int (using {@link
-        /// FieldCache#getInts} and sorts by ascending value
+        /// Parses field's values as <see cref="int"/> (using 
+        /// <see cref="IFieldCache.GetInt32s(Index.AtomicReader, string, FieldCache.IInt32Parser, bool)"/> and sorts by ascending value
         /// <para/>
         /// NOTE: This was IntComparator in Lucene
         /// </summary>
@@ -848,8 +850,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Parses field's values as long (using {@link
-        /// FieldCache#getLongs} and sorts by ascending value
+        /// Parses field's values as <see cref="long"/> (using
+        /// <see cref="IFieldCache.GetInt64s(Index.AtomicReader, string, FieldCache.IInt64Parser, bool)"/> and sorts by ascending value
         /// <para/>
         /// NOTE: This was LongComparator in Lucene
         /// </summary>
@@ -944,11 +946,11 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Sorts by descending relevance.  NOTE: if you are
-        ///  sorting only by descending relevance and then
-        ///  secondarily by ascending docID, performance is faster
-        ///  using <seealso cref="TopScoreDocCollector"/> directly (which {@link
-        ///  IndexSearcher#search} uses when no <seealso cref="Sort"/> is
-        ///  specified).
+        /// sorting only by descending relevance and then
+        /// secondarily by ascending docID, performance is faster
+        /// using <see cref="TopScoreDocCollector"/> directly (which all overloads of
+        /// <see cref="IndexSearcher.Search(Query, int)"/> use when no <see cref="Sort"/> is
+        /// specified).
         /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
@@ -1101,63 +1103,90 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Sorts by field's natural Term sort order, using
-        ///  ordinals.  this is functionally equivalent to {@link
-        ///  Lucene.Net.Search.FieldComparer.TermValComparer}, but it first resolves the string
-        ///  to their relative ordinal positions (using the index
-        ///  returned by <seealso cref="IFieldCache#getTermsIndex"/>), and
-        ///  does most comparisons using the ordinals.  For medium
-        ///  to large results, this comparer will be much faster
-        ///  than <seealso cref="Lucene.Net.Search.FieldComparer.TermValComparer"/>.  For very small
-        ///  result sets it may be slower.
+        /// Sorts by field's natural <see cref="Index.Term"/> sort order, using
+        /// ordinals.  This is functionally equivalent to 
+        /// <see cref="Lucene.Net.Search.FieldComparer.TermValComparer"/>, but it first resolves the string
+        /// to their relative ordinal positions (using the index
+        /// returned by <see cref="IFieldCache.GetTermsIndex(Index.AtomicReader, string, float)"/>), and
+        /// does most comparisons using the ordinals.  For medium
+        /// to large results, this comparer will be much faster
+        /// than <see cref="Lucene.Net.Search.FieldComparer.TermValComparer"/>.  For very small
+        /// result sets it may be slower.
         /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
         public class TermOrdValComparer : FieldComparer<BytesRef>
         {
-            /* Ords for each slot.
-	            @lucene.internal */
+            /// <summary>
+            /// Ords for each slot.
+            /// <para/>
+            /// @lucene.internal
+            /// </summary>
             internal readonly int[] ords;
 
-            /* Values for each slot.
-	            @lucene.internal */
+            /// <summary>
+            /// Values for each slot.
+            /// <para/>
+            /// @lucene.internal
+            /// </summary>
             internal readonly BytesRef[] values;
 
-            /* Which reader last copied a value into the slot. When
-	            we compare two slots, we just compare-by-ord if the
-	            readerGen is the same; else we must compare the
-	            values (slower).
-	            @lucene.internal */
+            /// <summary>
+            /// Which reader last copied a value into the slot. When
+            /// we compare two slots, we just compare-by-ord if the
+            /// readerGen is the same; else we must compare the
+            /// values (slower).
+            /// <para/>
+            /// @lucene.internal
+            /// </summary>
             internal readonly int[] readerGen;
 
-            /* Gen of current reader we are on.
-	            @lucene.internal */
+            /// <summary>
+            /// Gen of current reader we are on.
+            /// <para/>
+            /// @lucene.internal
+            /// </summary>
             internal int currentReaderGen = -1;
 
-            /* Current reader's doc ord/values.
-	            @lucene.internal */
+            /// <summary>
+            /// Current reader's doc ord/values.
+            /// <para/>
+            /// @lucene.internal
+            /// </summary>
             internal SortedDocValues termsIndex;
 
             internal readonly string field;
 
-            /* Bottom slot, or -1 if queue isn't full yet
-	            @lucene.internal */
+            /// <summary>
+            /// Bottom slot, or -1 if queue isn't full yet
+            /// <para/>
+            /// @lucene.internal
+            /// </summary>
             internal int bottomSlot = -1;
 
-            /* Bottom ord (same as ords[bottomSlot] once bottomSlot
-	            is set).  Cached for faster compares.
-	            @lucene.internal */
+            /// <summary>
+            /// Bottom ord (same as ords[bottomSlot] once bottomSlot
+            /// is set).  Cached for faster compares.
+            /// <para/>
+            /// @lucene.internal
+            /// </summary>
             internal int bottomOrd;
 
-            /* True if current bottom slot matches the current
-	            reader.
-	            @lucene.internal */
+            /// <summary>
+            /// True if current bottom slot matches the current
+            /// reader.
+            /// <para/>
+            /// @lucene.internal
+            /// </summary>
             internal bool bottomSameReader;
 
-            /* Bottom value (same as values[bottomSlot] once
-	            bottomSlot is set).  Cached for faster compares.
-	            @lucene.internal */
+            /// <summary>
+            /// Bottom value (same as values[bottomSlot] once
+            /// bottomSlot is set).  Cached for faster compares.
+            /// <para/>
+            /// @lucene.internal
+            /// </summary>
             internal BytesRef bottomValue;
 
             /// <summary>
@@ -1188,8 +1217,8 @@ namespace Lucene.Net.Search
 
             /// <summary>
             /// Creates this, with control over how missing values
-            ///  are sorted.  Pass sortMissingLast=true to put
-            ///  missing values at the end.
+            /// are sorted.  Pass true for <paramref name="sortMissingLast"/> to put
+            /// missing values at the end.
             /// </summary>
             public TermOrdValComparer(int numHits, string field, bool sortMissingLast)
             {
@@ -1281,7 +1310,7 @@ namespace Lucene.Net.Search
             }
 
             /// <summary>
-            /// Retrieves the SortedDocValues for the field in this segment </summary>
+            /// Retrieves the <see cref="SortedDocValues"/> for the field in this segment </summary>
             protected virtual SortedDocValues GetSortedDocValues(AtomicReaderContext context, string field)
             {
                 return FieldCache.DEFAULT.GetTermsIndex((context.AtomicReader), field);
@@ -1422,10 +1451,10 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Sorts by field's natural Term sort order.  All
-        ///  comparisons are done using BytesRef.compareTo, which is
-        ///  slow for medium to large result sets but possibly
-        ///  very fast for very small results sets.
+        /// Sorts by field's natural <see cref="Index.Term"/> sort order.  All
+        /// comparisons are done using <see cref="BytesRef.CompareTo(BytesRef)"/>, which is
+        /// slow for medium to large result sets but possibly
+        /// very fast for very small results sets.
         /// </summary>
         // TODO: should we remove this?  who really uses it?
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/FieldComparatorSource.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/FieldComparatorSource.cs b/src/Lucene.Net/Search/FieldComparatorSource.cs
index a4b864b..2fb448b 100644
--- a/src/Lucene.Net/Search/FieldComparatorSource.cs
+++ b/src/Lucene.Net/Search/FieldComparatorSource.cs
@@ -20,10 +20,9 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// Provides a <seealso cref="FieldComparer"/> for custom field sorting.
-    ///
+    /// Provides a <see cref="FieldComparer"/> for custom field sorting.
+    /// <para/>
     /// @lucene.experimental
-    ///
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -35,8 +34,8 @@ namespace Lucene.Net.Search
         /// </summary>
         /// <param name="fieldname">
         ///          Name of the field to create comparer for. </param>
-        /// <returns> FieldComparer. </returns>
-        /// <exception cref="IOException">
+        /// <returns> <see cref="FieldComparer"/>. </returns>
+        /// <exception cref="System.IO.IOException">
         ///           If an error occurs reading the index. </exception>
         public abstract FieldComparer NewComparer(string fieldname, int numHits, int sortPos, bool reversed);
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/FieldDoc.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/FieldDoc.cs b/src/Lucene.Net/Search/FieldDoc.cs
index d4c3f55..9b6b7e3 100644
--- a/src/Lucene.Net/Search/FieldDoc.cs
+++ b/src/Lucene.Net/Search/FieldDoc.cs
@@ -23,23 +23,23 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// Expert: A ScoreDoc which also contains information about
+    /// Expert: A <see cref="ScoreDoc"/> which also contains information about
     /// how to sort the referenced document.  In addition to the
     /// document number and score, this object contains an array
     /// of values for the document from the field(s) used to sort.
     /// For example, if the sort criteria was to sort by fields
-    /// "a", "b" then "c", the <code>fields</code> object array
+    /// "a", "b" then "c", the <c>fields</c> object array
     /// will have three elements, corresponding respectively to
     /// the term values for the document in fields "a", "b" and "c".
     /// The class of each element in the array will be either
-    /// Integer, Float or String depending on the type of values
+    /// <see cref="int"/>, <see cref="float"/> or <see cref="string"/> depending on the type of values
     /// in the terms of each field.
     ///
-    /// <p>Created: Feb 11, 2004 1:23:38 PM
-    ///
+    /// <para/>Created: Feb 11, 2004 1:23:38 PM
+    /// <para/>
     /// @since   lucene 1.4 </summary>
-    /// <seealso cref= ScoreDoc </seealso>
-    /// <seealso cref= TopFieldDocs </seealso>
+    /// <seealso cref="ScoreDoc"/>
+    /// <seealso cref="TopFieldDocs"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -48,11 +48,11 @@ namespace Lucene.Net.Search
         /// <summary>
         /// Expert: The values which are used to sort the referenced document.
         /// The order of these will match the original sort criteria given by a
-        /// Sort object.  Each Object will have been returned from
-        /// the <code>value</code> method corresponding
+        /// <see cref="Sort"/> object.  Each Object will have been returned from
+        /// the <see cref="FieldComparer.this[int]"/> method corresponding
         /// FieldComparer used to sort this field. </summary>
-        /// <seealso cref= Sort </seealso>
-        /// <seealso cref= IndexSearcher#search(Query,Filter,int,Sort) </seealso>
+        /// <seealso cref="Sort"/>
+        /// <seealso cref="IndexSearcher.Search(Query,Filter,int,Sort)"/>
         [WritableArray]
         [SuppressMessage("Microsoft.Performance", "CA1819", Justification = "Lucene's design requires some writable array properties")]
         public object[] Fields
@@ -85,7 +85,9 @@ namespace Lucene.Net.Search
             this.fields = fields;
         }
 
-        // A convenience method for debugging.
+        /// <summary>
+        /// A convenience method for debugging.
+        /// </summary>
         public override string ToString()
         {
             // super.toString returns the doc and score information, so just add the

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/FieldValueFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/FieldValueFilter.cs b/src/Lucene.Net/Search/FieldValueFilter.cs
index 906720a..90abf55 100644
--- a/src/Lucene.Net/Search/FieldValueFilter.cs
+++ b/src/Lucene.Net/Search/FieldValueFilter.cs
@@ -26,9 +26,9 @@ namespace Lucene.Net.Search
     using MatchNoBits = Lucene.Net.Util.Bits.MatchNoBits;
 
     /// <summary>
-    /// A <seealso cref="Filter"/> that accepts all documents that have one or more values in a
-    /// given field. this <seealso cref="Filter"/> request <seealso cref="IBits"/> from the
-    /// <seealso cref="IFieldCache"/> and build the bits if not present.
+    /// A <see cref="Filter"/> that accepts all documents that have one or more values in a
+    /// given field. This <see cref="Filter"/> requests <see cref="IBits"/> from the
+    /// <see cref="IFieldCache"/> and builds the bits if not present.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -39,22 +39,22 @@ namespace Lucene.Net.Search
         private readonly bool negate;
 
         /// <summary>
-        /// Creates a new <seealso cref="FieldValueFilter"/>
+        /// Creates a new <see cref="FieldValueFilter"/>
         /// </summary>
         /// <param name="field">
-        ///          the field to filter </param>
+        ///          The field to filter </param>
         public FieldValueFilter(string field)
             : this(field, false)
         {
         }
 
         /// <summary>
-        /// Creates a new <seealso cref="FieldValueFilter"/>
+        /// Creates a new <see cref="FieldValueFilter"/>
         /// </summary>
         /// <param name="field">
-        ///          the field to filter </param>
+        ///          The field to filter </param>
         /// <param name="negate">
-        ///          iff <code>true</code> all documents with no value in the given
+        ///          If <c>true</c> all documents with no value in the given
         ///          field are accepted.
         ///  </param>
         public FieldValueFilter(string field, bool negate)
@@ -65,15 +65,15 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Returns the field this filter is applied on. </summary>
-        /// <returns> the field this filter is applied on. </returns>
+        /// <returns> The field this filter is applied on. </returns>
         public virtual string Field
         {
             get { return field; }
         }
 
         /// <summary>
-        /// Returns <code>true</code> iff this filter is negated, otherwise <code>false</code> </summary>
-        /// <returns> <code>true</code> iff this filter is negated, otherwise <code>false</code> </returns>
+        /// Returns <c>true</c> if this filter is negated, otherwise <c>false</c> </summary>
+        /// <returns> <c>true</c> if this filter is negated, otherwise <c>false</c> </returns>
         public virtual bool Negate
         {
             get { return negate; }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/FieldValueHitQueue.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/FieldValueHitQueue.cs b/src/Lucene.Net/Search/FieldValueHitQueue.cs
index 439a755..b0d29ac 100644
--- a/src/Lucene.Net/Search/FieldValueHitQueue.cs
+++ b/src/Lucene.Net/Search/FieldValueHitQueue.cs
@@ -135,17 +135,15 @@ namespace Lucene.Net.Search
         }
 
         /// <summary> Creates a hit queue sorted by the given list of fields.
-        ///
-        /// <p/><b>NOTE</b>: The instances returned by this method
+        /// <para/><b>NOTE</b>: The instances returned by this method
         /// pre-allocate a full array of length <c>numHits</c>.
-        ///
         /// </summary>
-        /// <param name="fields">SortField array we are sorting by in priority order (highest
+        /// <param name="fields"><see cref="SortField"/> array we are sorting by in priority order (highest
         /// priority first); cannot be <c>null</c> or empty
         /// </param>
         /// <param name="size">The number of hits to retain. Must be greater than zero.
         /// </param>
-        /// <throws>  IOException </throws>
+        /// <exception cref="System.IO.IOException">If there is a low-level IO error</exception>
         public static FieldValueHitQueue<T> Create<T>(SortField[] fields, int size)
             where T : FieldValueHitQueue.Entry
         {
@@ -167,13 +165,13 @@ namespace Lucene.Net.Search
 
     /// <summary>
     /// Expert: A hit queue for sorting by hits by terms in more than one field.
-    /// Uses <code>FieldCache.DEFAULT</code> for maintaining
+    /// Uses <c>FieldCache.DEFAULT</c> for maintaining
     /// internal term lookup tables.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// @since 2.9 </summary>
-    /// <seealso cref= IndexSearcher#search(Query,Filter,int,Sort) </seealso>
-    /// <seealso cref= FieldCache </seealso>
+    /// <seealso cref="IndexSearcher.Search(Query,Filter,int,Sort)"/>
+    /// <seealso cref="FieldCache"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -237,15 +235,15 @@ namespace Lucene.Net.Search
         //public abstract bool LessThan(FieldValueHitQueue.Entry a, FieldValueHitQueue.Entry b);
 
         /// <summary>
-        /// Given a queue Entry, creates a corresponding FieldDoc
+        /// Given a queue <see cref="FieldValueHitQueue.Entry"/>, creates a corresponding <see cref="FieldDoc"/>
         /// that contains the values used to sort the given document.
         /// These values are not the raw values out of the index, but the internal
-        /// representation of them. this is so the given search hit can be collated by
+        /// representation of them. This is so the given search hit can be collated by
         /// a MultiSearcher with other search hits.
         /// </summary>
-        /// <param name="entry"> The Entry used to create a FieldDoc </param>
-        /// <returns> The newly created FieldDoc </returns>
-        /// <seealso cref= IndexSearcher#search(Query,Filter,int,Sort) </seealso>
+        /// <param name="entry"> The <see cref="FieldValueHitQueue.Entry"/> used to create a <see cref="FieldDoc"/> </param>
+        /// <returns> The newly created <see cref="FieldDoc"/> </returns>
+        /// <seealso cref="IndexSearcher.Search(Query,Filter,int,Sort)"/>
         internal virtual FieldDoc FillFields(FieldValueHitQueue.Entry entry)
         {
             int n = m_comparers.Length;
@@ -259,7 +257,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns the SortFields being used by this hit queue. </summary>
+        /// Returns the <see cref="SortField"/>s being used by this hit queue. </summary>
         [WritableArray]
         [SuppressMessage("Microsoft.Performance", "CA1819", Justification = "Lucene's design requires some writable array properties")]
         internal virtual SortField[] Fields

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/Filter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Filter.cs b/src/Lucene.Net/Search/Filter.cs
index b2d7aa6..ad0d726 100644
--- a/src/Lucene.Net/Search/Filter.cs
+++ b/src/Lucene.Net/Search/Filter.cs
@@ -24,40 +24,40 @@ namespace Lucene.Net.Search
     using IBits = Lucene.Net.Util.IBits;
 
     /// <summary>
-    ///  Abstract base class for restricting which documents may
-    ///  be returned during searching.
+    /// Abstract base class for restricting which documents may
+    /// be returned during searching.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
-    public abstract class Filter
+    public abstract class Filter // LUCENENET TODO: API - Make static NewAnonymous() factory method with delegate method for GetDocIdSet()
     {
         /// <summary>
-        /// Creates a <seealso cref="DocIdSet"/> enumerating the documents that should be
-        /// permitted in search results. <b>NOTE:</b> null can be
-        /// returned if no documents are accepted by this Filter.
-        /// <p>
+        /// Creates a <see cref="DocIdSet"/> enumerating the documents that should be
+        /// permitted in search results. <b>NOTE:</b> <c>null</c> can be
+        /// returned if no documents are accepted by this <see cref="Filter"/>.
+        /// <para/>
         /// Note: this method will be called once per segment in
-        /// the index during searching.  The returned <seealso cref="DocIdSet"/>
+        /// the index during searching.  The returned <see cref="DocIdSet"/>
         /// must refer to document IDs for that segment, not for
         /// the top-level reader.
         /// </summary>
-        /// <param name="context"> a <seealso cref="AtomicReaderContext"/> instance opened on the index currently
+        /// <param name="context"> an <see cref="AtomicReaderContext"/> instance opened on the index currently
         ///         searched on. Note, it is likely that the provided reader info does not
         ///         represent the whole underlying index i.e. if the index has more than
         ///         one segment the given reader only represents a single segment.
         ///         The provided context is always an atomic context, so you can call
-        ///         <seealso cref="AtomicReader#fields()"/>
+        ///         <see cref="AtomicReader.Fields"/>
         ///         on the context's reader, for example.
         /// </param>
         /// <param name="acceptDocs">
-        ///          Bits that represent the allowable docs to match (typically deleted docs
+        ///          <see cref="IBits"/> that represent the allowable docs to match (typically deleted docs
         ///          but possibly filtering other documents)
         /// </param>
-        /// <returns> a DocIdSet that provides the documents which should be permitted or
-        ///         prohibited in search results. <b>NOTE:</b> <code>null</code> should be returned if
+        /// <returns> A <see cref="DocIdSet"/> that provides the documents which should be permitted or
+        ///         prohibited in search results. <b>NOTE:</b> <c>null</c> should be returned if
         ///         the filter doesn't accept any documents otherwise internal optimization might not apply
-        ///         in the case an <i>empty</i> <seealso cref="DocIdSet"/> is returned. </returns>
+        ///         in the case an <i>empty</i> <see cref="DocIdSet"/> is returned. </returns>
         public abstract DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/FilteredDocIdSet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/FilteredDocIdSet.cs b/src/Lucene.Net/Search/FilteredDocIdSet.cs
index a528232..53e1a98 100644
--- a/src/Lucene.Net/Search/FilteredDocIdSet.cs
+++ b/src/Lucene.Net/Search/FilteredDocIdSet.cs
@@ -22,22 +22,22 @@ namespace Lucene.Net.Search
     using IBits = Lucene.Net.Util.IBits;
 
     /// <summary>
-    /// Abstract decorator class for a DocIdSet implementation
+    /// Abstract decorator class for a <see cref="DocIdSet"/> implementation
     /// that provides on-demand filtering/validation
-    /// mechanism on a given DocIdSet.
+    /// mechanism on a given <see cref="DocIdSet"/>.
     ///
-    /// <p/>
+    /// <para/>
     ///
     /// Technically, this same functionality could be achieved
     /// with ChainedFilter (under queries/), however the
     /// benefit of this class is it never materializes the full
-    /// bitset for the filter.  Instead, the <seealso cref="#match"/>
+    /// bitset for the filter.  Instead, the <see cref="Match(int)"/>
     /// method is invoked on-demand, per docID visited during
     /// searching.  If you know few docIDs will be visited, and
-    /// the logic behind <seealso cref="#match"/> is relatively costly,
+    /// the logic behind <see cref="Match(int)"/> is relatively costly,
     /// this may be a better way to filter than ChainedFilter.
     /// </summary>
-    /// <seealso cref= DocIdSet </seealso>
+    /// <seealso cref="DocIdSet"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -47,14 +47,14 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Constructor. </summary>
-        /// <param name="innerSet"> Underlying DocIdSet </param>
+        /// <param name="innerSet"> Underlying <see cref="DocIdSet"/> </param>
         public FilteredDocIdSet(DocIdSet innerSet)
         {
             this.innerSet = innerSet;
         }
 
         /// <summary>
-        /// this DocIdSet implementation is cacheable if the inner set is cacheable. </summary>
+        /// This <see cref="DocIdSet"/> implementation is cacheable if the inner set is cacheable. </summary>
         public override bool IsCacheable
         {
             get
@@ -101,13 +101,13 @@ namespace Lucene.Net.Search
         /// <summary>
         /// Validation method to determine whether a docid should be in the result set. </summary>
         /// <param name="docid"> docid to be tested </param>
-        /// <returns> true if input docid should be in the result set, false otherwise. </returns>
+        /// <returns> <c>true</c> if input docid should be in the result set, <c>false</c> otherwise. </returns>
         protected abstract bool Match(int docid);
 
         /// <summary>
-        /// Implementation of the contract to build a DocIdSetIterator. </summary>
-        /// <seealso cref= DocIdSetIterator </seealso>
-        /// <seealso cref= FilteredDocIdSetIterator </seealso>
+        /// Implementation of the contract to build a <see cref="DocIdSetIterator"/>. </summary>
+        /// <seealso cref="DocIdSetIterator"/>
+        /// <seealso cref="FilteredDocIdSetIterator"/>
         public override DocIdSetIterator GetIterator()
         {
             DocIdSetIterator iterator = innerSet.GetIterator();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/FilteredDocIdSetIterator.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/FilteredDocIdSetIterator.cs b/src/Lucene.Net/Search/FilteredDocIdSetIterator.cs
index 8f4881b..eb2f2bf 100644
--- a/src/Lucene.Net/Search/FilteredDocIdSetIterator.cs
+++ b/src/Lucene.Net/Search/FilteredDocIdSetIterator.cs
@@ -20,10 +20,10 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// Abstract decorator class of a DocIdSetIterator
+    /// Abstract decorator class of a <see cref="DocIdSetIterator"/>
     /// implementation that provides on-demand filter/validation
-    /// mechanism on an underlying DocIdSetIterator.  See {@link
-    /// FilteredDocIdSet}.
+    /// mechanism on an underlying <see cref="DocIdSetIterator"/>.  See 
+    /// <see cref="FilteredDocIdSet"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -35,7 +35,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Constructor. </summary>
-        /// <param name="innerIter"> Underlying DocIdSetIterator. </param>
+        /// <param name="innerIter"> Underlying <see cref="DocIdSetIterator"/>. </param>
         public FilteredDocIdSetIterator(DocIdSetIterator innerIter)
         {
             if (innerIter == null)
@@ -49,8 +49,8 @@ namespace Lucene.Net.Search
         /// <summary>
         /// Validation method to determine whether a docid should be in the result set. </summary>
         /// <param name="doc"> docid to be tested </param>
-        /// <returns> true if input docid should be in the result set, false otherwise. </returns>
-        /// <seealso cref= #FilteredDocIdSetIterator(DocIdSetIterator) </seealso>
+        /// <returns> <c>true</c> if input docid should be in the result set, <c>false</c> otherwise. </returns>
+        /// <seealso cref="FilteredDocIdSetIterator(DocIdSetIterator)"/>
         protected abstract bool Match(int doc);
 
         public override int DocID

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/FilteredQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/FilteredQuery.cs b/src/Lucene.Net/Search/FilteredQuery.cs
index 654cc72..ebae4de 100644
--- a/src/Lucene.Net/Search/FilteredQuery.cs
+++ b/src/Lucene.Net/Search/FilteredQuery.cs
@@ -31,11 +31,12 @@ namespace Lucene.Net.Search
     /// <summary>
     /// A query that applies a filter to the results of another query.
     ///
-    /// <p>Note: the bits are retrieved from the filter each time this
-    /// query is used in a search - use a CachingWrapperFilter to avoid
+    /// <para/>Note: the bits are retrieved from the filter each time this
+    /// query is used in a search - use a <see cref="CachingWrapperFilter"/> to avoid
     /// regenerating the bits every time.
+    /// <para/>
     /// @since   1.4 </summary>
-    /// <seealso cref=     CachingWrapperFilter </seealso>
+    /// <seealso cref="CachingWrapperFilter"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -47,9 +48,9 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Constructs a new query which applies a filter to the results of the original query.
-        /// <seealso cref="Filter#getDocIdSet"/> will be called every time this query is used in a search. </summary>
-        /// <param name="query">  Query to be filtered, cannot be <code>null</code>. </param>
-        /// <param name="filter"> Filter to apply to query results, cannot be <code>null</code>. </param>
+        /// <see cref="Filter.GetDocIdSet(AtomicReaderContext, IBits)"/> will be called every time this query is used in a search. </summary>
+        /// <param name="query">  Query to be filtered, cannot be <c>null</c>. </param>
+        /// <param name="filter"> Filter to apply to query results, cannot be <c>null</c>. </param>
         public FilteredQuery(Query query, Filter filter)
             : this(query, filter, RANDOM_ACCESS_FILTER_STRATEGY)
         {
@@ -57,12 +58,12 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Expert: Constructs a new query which applies a filter to the results of the original query.
-        /// <seealso cref="Filter#getDocIdSet"/> will be called every time this query is used in a search. </summary>
-        /// <param name="query">  Query to be filtered, cannot be <code>null</code>. </param>
-        /// <param name="filter"> Filter to apply to query results, cannot be <code>null</code>. </param>
-        /// <param name="strategy"> a filter strategy used to create a filtered scorer.
+        /// <see cref="Filter.GetDocIdSet(AtomicReaderContext, IBits)"/> will be called every time this query is used in a search. </summary>
+        /// <param name="query">  Query to be filtered, cannot be <c>null</c>. </param>
+        /// <param name="filter"> Filter to apply to query results, cannot be <c>null</c>. </param>
+        /// <param name="strategy"> A filter strategy used to create a filtered scorer.
         /// </param>
-        /// <seealso cref= FilterStrategy </seealso>
+        /// <seealso cref="FilterStrategy"/>
         public FilteredQuery(Query query, Filter filter, FilterStrategy strategy)
         {
             if (query == null || filter == null)
@@ -79,8 +80,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns a Weight that applies the filter to the enclosed query's Weight.
-        /// this is accomplished by overriding the Scorer returned by the Weight.
+        /// Returns a <see cref="Weight"/> that applies the filter to the enclosed query's <see cref="Weight"/>.
+        /// This is accomplished by overriding the <see cref="Scorer"/> returned by the <see cref="Weight"/>.
         /// </summary>
         public override Weight CreateWeight(IndexSearcher searcher)
         {
@@ -181,8 +182,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// A scorer that consults the filter iff a document was matched by the
-        /// delegate scorer. this is useful if the filter computation is more expensive
+        /// A scorer that consults the filter if a document was matched by the
+        /// delegate scorer. This is useful if the filter computation is more expensive
         /// than document scoring or if the filter has a linear running time to compute
         /// the next matching doc like exact geo distances.
         /// </summary>
@@ -299,7 +300,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// A Scorer that uses a "leap-frog" approach (also called "zig-zag join"). The scorer and the filter
+        /// A <see cref="Scorer"/> that uses a "leap-frog" approach (also called "zig-zag join"). The scorer and the filter
         /// take turns trying to advance to each other's next matching document, often
         /// jumping past the target document. When both land on the same document, it's
         /// collected.
@@ -418,8 +419,8 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Rewrites the query. If the wrapped is an instance of
-        /// <seealso cref="MatchAllDocsQuery"/> it returns a <seealso cref="ConstantScoreQuery"/>. Otherwise
-        /// it returns a new {@code FilteredQuery} wrapping the rewritten query.
+        /// <see cref="MatchAllDocsQuery"/> it returns a <see cref="ConstantScoreQuery"/>. Otherwise
+        /// it returns a new <see cref="FilteredQuery"/> wrapping the rewritten query.
         /// </summary>
         public override Query Rewrite(IndexReader reader)
         {
@@ -440,7 +441,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns this FilteredQuery's (unfiltered) Query </summary>
+        /// Returns this <see cref="FilteredQuery"/>'s (unfiltered) <see cref="Query"/> </summary>
         public Query Query
         {
             get
@@ -450,7 +451,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns this FilteredQuery's filter </summary>
+        /// Returns this <see cref="FilteredQuery"/>'s filter </summary>
         public Filter Filter
         {
             get
@@ -460,7 +461,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns this FilteredQuery's <seealso cref="FilterStrategy"/> </summary>
+        /// Returns this <see cref="FilteredQuery"/>'s <see cref="FilterStrategy"/> </summary>
         public virtual FilterStrategy Strategy
         {
             get
@@ -469,7 +470,11 @@ namespace Lucene.Net.Search
             }
         }
 
-        // inherit javadoc
+        /// <summary>
+        /// Expert: adds all terms occurring in this query to the terms set. Only
+        /// works if this query is in its rewritten (<see cref="Rewrite(IndexReader)"/>) form.
+        /// </summary>
+        /// <exception cref="InvalidOperationException"> If this query is not yet rewritten </exception>
         public override void ExtractTerms(ISet<Term> terms)
         {
             Query.ExtractTerms(terms);
@@ -489,7 +494,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns true iff <code>o</code> is equal to this. </summary>
+        /// Returns <c>true</c> if <paramref name="o"/> is equal to this. </summary>
         public override bool Equals(object o)
         {
             if (o == this)
@@ -517,16 +522,16 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// A <seealso cref="FilterStrategy"/> that conditionally uses a random access filter if
-        /// the given <seealso cref="DocIdSet"/> supports random access (returns a non-null value
-        /// from <seealso cref="DocIdSet#bits()"/>) and
-        /// <seealso cref="RandomAccessFilterStrategy#useRandomAccess(Bits, int)"/> returns
-        /// <code>true</code>. Otherwise this strategy falls back to a "zig-zag join" (
-        /// <seealso cref="FilteredQuery#LEAP_FROG_FILTER_FIRST_STRATEGY"/>) strategy.
+        /// A <see cref="FilterStrategy"/> that conditionally uses a random access filter if
+        /// the given <see cref="DocIdSet"/> supports random access (returns a non-null value
+        /// from <see cref="DocIdSet.Bits"/>) and
+        /// <see cref="RandomAccessFilterStrategy.UseRandomAccess(IBits, int)"/> returns
+        /// <c>true</c>. Otherwise this strategy falls back to a "zig-zag join" (
+        /// <see cref="FilteredQuery.LEAP_FROG_FILTER_FIRST_STRATEGY"/>) strategy.
         ///
-        /// <p>
-        /// Note: this strategy is the default strategy in <seealso cref="FilteredQuery"/>
-        /// </p>
+        /// <para>
+        /// Note: this strategy is the default strategy in <see cref="FilteredQuery"/>
+        /// </para>
         /// </summary>
         public static readonly FilterStrategy RANDOM_ACCESS_FILTER_STRATEGY = new RandomAccessFilterStrategy();
 
@@ -536,9 +541,9 @@ namespace Lucene.Net.Search
         /// take turns trying to advance to each other's next matching document, often
         /// jumping past the target document. When both land on the same document, it's
         /// collected.
-        /// <p>
+        /// <para>
         /// Note: this strategy uses the filter to lead the iteration.
-        /// </p>
+        /// </para>
         /// </summary>
         public static readonly FilterStrategy LEAP_FROG_FILTER_FIRST_STRATEGY = new LeapFrogFilterStrategy(false);
 
@@ -548,55 +553,56 @@ namespace Lucene.Net.Search
         /// take turns trying to advance to each other's next matching document, often
         /// jumping past the target document. When both land on the same document, it's
         /// collected.
-        /// <p>
+        /// <para>
         /// Note: this strategy uses the query to lead the iteration.
-        /// </p>
+        /// </para>
         /// </summary>
         public static readonly FilterStrategy LEAP_FROG_QUERY_FIRST_STRATEGY = new LeapFrogFilterStrategy(true);
 
         /// <summary>
-        /// A filter strategy that advances the Query or rather its <seealso cref="Scorer"/> first and consults the
-        /// filter <seealso cref="DocIdSet"/> for each matched document.
-        /// <p>
-        /// Note: this strategy requires a <seealso cref="DocIdSet#bits()"/> to return a non-null value. Otherwise
-        /// this strategy falls back to <seealso cref="FilteredQuery#LEAP_FROG_QUERY_FIRST_STRATEGY"/>
-        /// </p>
-        /// <p>
+        /// A filter strategy that advances the <see cref="Search.Query"/> or rather its <see cref="Scorer"/> first and consults the
+        /// filter <see cref="DocIdSet"/> for each matched document.
+        /// <para>
+        /// Note: this strategy requires a <see cref="DocIdSet.Bits"/> to return a non-null value. Otherwise
+        /// this strategy falls back to <see cref="FilteredQuery.LEAP_FROG_QUERY_FIRST_STRATEGY"/>
+        /// </para>
+        /// <para>
         /// Use this strategy if the filter computation is more expensive than document
         /// scoring or if the filter has a linear running time to compute the next
         /// matching doc like exact geo distances.
-        /// </p>
+        /// </para>
         /// </summary>
         public static readonly FilterStrategy QUERY_FIRST_FILTER_STRATEGY = new QueryFirstFilterStrategy();
 
         /// <summary>
-        /// Abstract class that defines how the filter (<seealso cref="DocIdSet"/>) applied during document collection. </summary>
+        /// Abstract class that defines how the filter (<see cref="DocIdSet"/>) applied during document collection. </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
         public abstract class FilterStrategy
         {
             /// <summary>
-            /// Returns a filtered <seealso cref="Scorer"/> based on this strategy.
+            /// Returns a filtered <see cref="Scorer"/> based on this strategy.
             /// </summary>
             /// <param name="context">
-            ///          the <seealso cref="AtomicReaderContext"/> for which to return the <seealso cref="Scorer"/>. </param>
-            /// <param name="weight"> the <seealso cref="FilteredQuery"/> <seealso cref="Weight"/> to create the filtered scorer. </param>
-            /// <param name="docIdSet"> the filter <seealso cref="DocIdSet"/> to apply </param>
+            ///          the <see cref="AtomicReaderContext"/> for which to return the <see cref="Scorer"/>. </param>
+            /// <param name="weight"> the <see cref="FilteredQuery"/> <see cref="Weight"/> to create the filtered scorer. </param>
+            /// <param name="docIdSet"> the filter <see cref="DocIdSet"/> to apply </param>
             /// <returns> a filtered scorer
             /// </returns>
-            /// <exception cref="IOException"> if an <seealso cref="IOException"/> occurs </exception>
+            /// <exception cref="System.IO.IOException"> if an <see cref="System.IO.IOException"/> occurs </exception>
             public abstract Scorer FilteredScorer(AtomicReaderContext context, Weight weight, DocIdSet docIdSet);
 
             /// <summary>
-            /// Returns a filtered <seealso cref="BulkScorer"/> based on this
+            /// Returns a filtered <see cref="BulkScorer"/> based on this
            /// strategy.  This is an optional method: the default
-            /// implementation just calls <seealso cref="#filteredScorer"/> and
-            /// wraps that into a BulkScorer.
+            /// implementation just calls <see cref="FilteredScorer(AtomicReaderContext, Weight, DocIdSet)"/> and
+            /// wraps that into a <see cref="BulkScorer"/>.
             /// </summary>
             /// <param name="context">
             ///          the <seealso cref="AtomicReaderContext"/> for which to return the <seealso cref="Scorer"/>. </param>
             /// <param name="weight"> the <seealso cref="FilteredQuery"/> <seealso cref="Weight"/> to create the filtered scorer. </param>
+            /// <param name="scoreDocsInOrder"> <c>true</c> to score docs in order </param>
             /// <param name="docIdSet"> the filter <seealso cref="DocIdSet"/> to apply </param>
             /// <returns> a filtered top scorer </returns>
             public virtual BulkScorer FilteredBulkScorer(AtomicReaderContext context, Weight weight, bool scoreDocsInOrder, DocIdSet docIdSet)
@@ -613,12 +619,12 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// A <seealso cref="FilterStrategy"/> that conditionally uses a random access filter if
-        /// the given <seealso cref="DocIdSet"/> supports random access (returns a non-null value
-        /// from <seealso cref="DocIdSet#bits()"/>) and
-        /// <seealso cref="RandomAccessFilterStrategy#useRandomAccess(Bits, int)"/> returns
+        /// A <see cref="FilterStrategy"/> that conditionally uses a random access filter if
+        /// the given <see cref="DocIdSet"/> supports random access (returns a non-null value
+        /// from <see cref="DocIdSet.Bits"/>) and
+        /// <see cref="RandomAccessFilterStrategy.UseRandomAccess(IBits, int)"/> returns
         /// <code>true</code>. Otherwise this strategy falls back to a "zig-zag join" (
-        /// <seealso cref="FilteredQuery#LEAP_FROG_FILTER_FIRST_STRATEGY"/>) strategy .
+        /// <see cref="FilteredQuery.LEAP_FROG_FILTER_FIRST_STRATEGY"/>) strategy.
         /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
@@ -661,14 +667,14 @@ namespace Lucene.Net.Search
 
             /// <summary>
             /// Expert: decides if a filter should be executed as "random-access" or not.
-            /// random-access means the filter "filters" in a similar way as deleted docs are filtered
-            /// in Lucene. this is faster when the filter accepts many documents.
+            /// Random-access means the filter "filters" in a similar way as deleted docs are filtered
+            /// in Lucene. This is faster when the filter accepts many documents.
             /// However, when the filter is very sparse, it can be faster to execute the query+filter
             /// as a conjunction in some cases.
-            ///
-            /// The default implementation returns <code>true</code> if the first document accepted by the
-            /// filter is < 100.
-            ///
+            /// <para/>
+            /// The default implementation returns <c>true</c> if the first document accepted by the
+            /// filter is &lt; 100.
+            /// <para/>
             /// @lucene.internal
             /// </summary>
             protected virtual bool UseRandomAccess(IBits bits, int firstFilterDoc)
@@ -717,17 +723,17 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// A filter strategy that advances the <seealso cref="Scorer"/> first and consults the
-        /// <seealso cref="DocIdSet"/> for each matched document.
-        /// <p>
-        /// Note: this strategy requires a <seealso cref="DocIdSet#bits()"/> to return a non-null value. Otherwise
-        /// this strategy falls back to <seealso cref="FilteredQuery#LEAP_FROG_QUERY_FIRST_STRATEGY"/>
-        /// </p>
-        /// <p>
+        /// A filter strategy that advances the <see cref="Scorer"/> first and consults the
+        /// <see cref="DocIdSet"/> for each matched document.
+        /// <para>
+        /// Note: this strategy requires a <see cref="DocIdSet.Bits"/> to return a non-null value. Otherwise
+        /// this strategy falls back to <see cref="FilteredQuery.LEAP_FROG_QUERY_FIRST_STRATEGY"/>
+        /// </para>
+        /// <para>
         /// Use this strategy if the filter computation is more expensive than document
         /// scoring or if the filter has a linear running time to compute the next
         /// matching doc like exact geo distances.
-        /// </p>
+        /// </para>
         /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/FuzzyQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/FuzzyQuery.cs b/src/Lucene.Net/Search/FuzzyQuery.cs
index 9da7082..fa11a18 100644
--- a/src/Lucene.Net/Search/FuzzyQuery.cs
+++ b/src/Lucene.Net/Search/FuzzyQuery.cs
@@ -31,26 +31,26 @@ namespace Lucene.Net.Search
     /// <summary>
     /// Implements the fuzzy search query. The similarity measurement
     /// is based on the Damerau-Levenshtein (optimal string alignment) algorithm,
-    /// though you can explicitly choose classic Levenshtein by passing <code>false</code>
-    /// to the <code>transpositions</code> parameter.
+    /// though you can explicitly choose classic Levenshtein by passing <c>false</c>
+    /// to the <c>transpositions</c> parameter.
     ///
-    /// <p>this query uses <seealso cref="MultiTermQuery.TopTermsScoringBooleanQueryRewrite"/>
+    /// <para/>this query uses <see cref="MultiTermQuery.TopTermsScoringBooleanQueryRewrite"/>
     /// as default. So terms will be collected and scored according to their
-    /// edit distance. Only the top terms are used for building the <seealso cref="BooleanQuery"/>.
+    /// edit distance. Only the top terms are used for building the <see cref="BooleanQuery"/>.
     /// It is not recommended to change the rewrite mode for fuzzy queries.
     ///
-    /// <p>At most, this query will match terms up to
-    /// {@value Lucene.Net.Util.Automaton.LevenshteinAutomata#MAXIMUM_SUPPORTED_DISTANCE} edits.
+    /// <para/>At most, this query will match terms up to
+    /// <see cref="Lucene.Net.Util.Automaton.LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE"/> edits.
     /// Higher distances (especially with transpositions enabled), are generally not useful and
     /// will match a significant amount of the term dictionary. If you really want this, consider
     /// using an n-gram indexing technique (such as the SpellChecker in the
     /// <a href="{@docRoot}/../suggest/overview-summary.html">suggest module</a>) instead.
     ///
-    /// <p>NOTE: terms of length 1 or 2 will sometimes not match because of how the scaled
+    /// <para/>NOTE: terms of length 1 or 2 will sometimes not match because of how the scaled
     /// distance between two terms is computed.  For a term to match, the edit distance between
     /// the terms must be less than the minimum length term (either the input term, or
-    /// the candidate term).  For example, FuzzyQuery on term "abcd" with maxEdits=2 will
-    /// not match an indexed term "ab", and FuzzyQuery on term "a" with maxEdits=2 will not
+    /// the candidate term).  For example, <see cref="FuzzyQuery"/> on term "abcd" with maxEdits=2 will
+    /// not match an indexed term "ab", and <see cref="FuzzyQuery"/> on term "a" with maxEdits=2 will not
     /// match an indexed term "abc".
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -70,19 +70,19 @@ namespace Lucene.Net.Search
         private readonly Term term;
 
         /// <summary>
-        /// Create a new FuzzyQuery that will match terms with an edit distance
-        /// of at most <code>maxEdits</code> to <code>term</code>.
-        /// If a <code>prefixLength</code> &gt; 0 is specified, a common prefix
+        /// Create a new <see cref="FuzzyQuery"/> that will match terms with an edit distance
+        /// of at most <paramref name="maxEdits"/> to <paramref name="term"/>.
+        /// If a <paramref name="prefixLength"/> &gt; 0 is specified, a common prefix
         /// of that length is also required.
         /// </summary>
-        /// <param name="term"> the term to search for </param>
-        /// <param name="maxEdits"> must be >= 0 and <= <seealso cref="LevenshteinAutomata#MAXIMUM_SUPPORTED_DISTANCE"/>. </param>
-        /// <param name="prefixLength"> length of common (non-fuzzy) prefix </param>
-        /// <param name="maxExpansions"> the maximum number of terms to match. If this number is
-        ///  greater than <seealso cref="BooleanQuery#getMaxClauseCount"/> when the query is rewritten,
-        ///  then the maxClauseCount will be used instead. </param>
-        /// <param name="transpositions"> true if transpositions should be treated as a primitive
-        ///        edit operation. If this is false, comparisons will implement the classic
+        /// <param name="term"> The term to search for </param>
+        /// <param name="maxEdits"> Must be &gt;= 0 and &lt;= <see cref="LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE"/>. </param>
+        /// <param name="prefixLength"> Length of common (non-fuzzy) prefix </param>
+        /// <param name="maxExpansions"> The maximum number of terms to match. If this number is
+        /// greater than <see cref="BooleanQuery.MaxClauseCount"/> when the query is rewritten,
+        /// then the maxClauseCount will be used instead. </param>
+        /// <param name="transpositions"> <c>true</c> if transpositions should be treated as a primitive
+        ///        edit operation. If this is <c>false</c>, comparisons will implement the classic
         ///        Levenshtein algorithm. </param>
         public FuzzyQuery(Term term, int maxEdits, int prefixLength, int maxExpansions, bool transpositions)
             : base(term.Field)
@@ -109,8 +109,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Calls {@link #FuzzyQuery(Term, int, int, int, boolean)
-        /// FuzzyQuery(term, maxEdits, prefixLength, defaultMaxExpansions, defaultTranspositions)}.
+        /// Calls <see cref="FuzzyQuery.FuzzyQuery(Term, int, int, int, bool)">
+        /// FuzzyQuery(term, maxEdits, prefixLength, defaultMaxExpansions, defaultTranspositions)</see>.
         /// </summary>
         public FuzzyQuery(Term term, int maxEdits, int prefixLength)
             : this(term, maxEdits, prefixLength, DefaultMaxExpansions, DefaultTranspositions)
@@ -118,7 +118,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Calls <seealso cref="#FuzzyQuery(Term, int, int) FuzzyQuery(term, maxEdits, defaultPrefixLength)"/>.
+        /// Calls <see cref="FuzzyQuery(Term, int, int)">FuzzyQuery(term, maxEdits, defaultPrefixLength)</see>.
         /// </summary>
         public FuzzyQuery(Term term, int maxEdits)
             : this(term, maxEdits, DefaultPrefixLength)
@@ -126,14 +126,14 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Calls <seealso cref="#FuzzyQuery(Term, int) FuzzyQuery(term, defaultMaxEdits)"/>.
+        /// Calls <see cref="FuzzyQuery(Term, int)">FuzzyQuery(term, defaultMaxEdits)</see>.
         /// </summary>
         public FuzzyQuery(Term term)
             : this(term, DefaultMaxEdits)
         {
         }
 
-        /// <returns> the maximum number of edit distances allowed for this query to match. </returns>
+        /// <returns> The maximum number of edit distances allowed for this query to match. </returns>
         public virtual int MaxEdits
         {
             get
@@ -143,7 +143,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns the non-fuzzy prefix length. this is the number of characters at the start
+        /// Returns the non-fuzzy prefix length. This is the number of characters at the start
         /// of a term that must be identical (not fuzzy) to the query term if the query
         /// is to match that term.
         /// </summary>
@@ -156,8 +156,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns true if transpositions should be treated as a primitive edit operation.
-        /// If this is false, comparisons will implement the classic Levenshtein algorithm.
+        /// Returns <c>true</c> if transpositions should be treated as a primitive edit operation.
+        /// If this is <c>false</c>, comparisons will implement the classic Levenshtein algorithm.
         /// </summary>
         public virtual bool Transpositions
         {
@@ -269,10 +269,9 @@ namespace Lucene.Net.Search
         /// <para/>
         /// NOTE: this was floatToEdits() in Lucene
         /// </summary>
-        /// <param name="minimumSimilarity"> scaled similarity </param>
-        /// <param name="termLen"> length (in unicode codepoints) of the term. </param>
-        /// <returns> equivalent number of maxEdits </returns>
-        /// @deprecated pass integer edit distances instead.
+        /// <param name="minimumSimilarity"> Scaled similarity </param>
+        /// <param name="termLen"> Length (in unicode codepoints) of the term. </param>
+        /// <returns> Equivalent number of maxEdits </returns>
         [Obsolete("pass integer edit distances instead.")]
         public static int SingleToEdits(float minimumSimilarity, int termLen)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/FuzzyTermsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/FuzzyTermsEnum.cs b/src/Lucene.Net/Search/FuzzyTermsEnum.cs
index f4bbd5d..e29aa59 100644
--- a/src/Lucene.Net/Search/FuzzyTermsEnum.cs
+++ b/src/Lucene.Net/Search/FuzzyTermsEnum.cs
@@ -45,12 +45,12 @@ namespace Lucene.Net.Search
     using UnicodeUtil = Lucene.Net.Util.UnicodeUtil;
 
     /// <summary>
-    /// Subclass of TermsEnum for enumerating all terms that are similar
+    /// Subclass of <see cref="TermsEnum"/> for enumerating all terms that are similar
     /// to the specified filter term.
     ///
-    /// <p>Term enumerations are always ordered by
-    /// <seealso cref="#getComparer"/>.  Each term in the enumeration is
-    /// greater than all that precede it.</p>
+    /// <para>Term enumerations are always ordered by
+    /// <see cref="Comparer"/>.  Each term in the enumeration is
+    /// greater than all that precede it.</para>
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -92,22 +92,23 @@ namespace Lucene.Net.Search
         private readonly bool transpositions;
 
         /// <summary>
-        /// Constructor for enumeration of all terms from specified <code>reader</code> which share a prefix of
-        /// length <code>prefixLength</code> with <code>term</code> and which have a fuzzy similarity &gt;
-        /// <code>minSimilarity</code>.
-        /// <p>
+        /// Constructor for enumeration of all terms from specified <c>reader</c> which share a prefix of
+        /// length <paramref name="prefixLength"/> with <paramref name="term"/> and which have a fuzzy similarity &gt;
+        /// <paramref name="minSimilarity"/>.
+        /// <para/>
         /// After calling the constructor the enumeration is already pointing to the first
         /// valid term if such a term exists.
         /// </summary>
         /// <param name="terms"> Delivers terms. </param>
-        /// <param name="atts"> <seealso cref="AttributeSource"/> created by the rewrite method of <seealso cref="MultiTermQuery"/>
+        /// <param name="atts"> <see cref="AttributeSource"/> created by the rewrite method of <see cref="MultiTermQuery"/>
         /// thats contains information about competitive boosts during rewrite. It is also used
         /// to cache DFAs between segment transitions. </param>
         /// <param name="term"> Pattern term. </param>
         /// <param name="minSimilarity"> Minimum required similarity for terms from the reader. Pass an integer value
         ///        representing edit distance. Passing a fraction is deprecated. </param>
         /// <param name="prefixLength"> Length of required common prefix. Default value is 0. </param>
-        /// <exception cref="IOException"> if there is a low-level IO error </exception>
+        /// <param name="transpositions"> Transpositions </param>
+        /// <exception cref="System.IO.IOException"> if there is a low-level IO error </exception>
         public FuzzyTermsEnum(Terms terms, AttributeSource atts, Term term, float minSimilarity, int prefixLength, bool transpositions)
         {
             InitializeInstanceFields();
@@ -167,8 +168,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// return an automata-based enum for matching up to editDistance from
-        /// lastTerm, if possible
+        /// Return an automata-based enum for matching up to <paramref name="editDistance"/> from
+        /// <paramref name="lastTerm"/>, if possible
         /// </summary>
         protected virtual TermsEnum GetAutomatonEnum(int editDistance, BytesRef lastTerm)
         {
@@ -186,7 +187,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// initialize levenshtein DFAs up to maxDistance, if possible </summary>
+        /// Initialize levenshtein DFAs up to maxDistance, if possible </summary>
         private IList<CompiledAutomaton> InitAutomata(int maxDistance)
         {
             IList<CompiledAutomaton> runAutomata = dfaAtt.Automata;
@@ -212,7 +213,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// swap in a new actual enum to proxy to </summary>
+        /// Swap in a new actual enum to proxy to </summary>
         protected virtual void SetEnum(TermsEnum actualEnum)
         {
             this.actualEnum = actualEnum;
@@ -220,7 +221,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// fired when the max non-competitive boost has changed. this is the hook to
+        /// Fired when the max non-competitive boost has changed. This is the hook to
         /// swap in a smarter actualEnum
         /// </summary>
         private void BottomChanged(BytesRef lastTerm, bool init)
@@ -361,9 +362,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Implement fuzzy enumeration with Terms.intersect.
-        /// <p>
-        /// this is the fastest method as opposed to LinearFuzzyTermsEnum:
+        /// Implement fuzzy enumeration with <see cref="Terms.Intersect(CompiledAutomaton, BytesRef)"/>.
+        /// <para/>
+        /// This is the fastest method as opposed to LinearFuzzyTermsEnum:
         /// as enumeration is logarithmic to the number of terms (instead of linear)
         /// and comparison is linear to length of the term (rather than quadratic)
         /// </summary>
@@ -400,7 +401,7 @@ namespace Lucene.Net.Search
             }
 
             /// <summary>
-            /// finds the smallest Lev(n) DFA that accepts the term. </summary>
+            /// Finds the smallest Lev(n) DFA that accepts the term. </summary>
             protected override AcceptStatus Accept(BytesRef term)
             {
                 //System.out.println("AFTE.accept term=" + term);
@@ -447,7 +448,7 @@ namespace Lucene.Net.Search
             }
 
             /// <summary>
-            /// returns true if term is within k edits of the query term </summary>
+            /// Returns <c>true</c> if <paramref name="term"/> is within <paramref name="k"/> edits of the query term </summary>
             internal bool Matches(BytesRef term, int k)
             {
                 return k == 0 ? term.Equals(termRef) : matchers[k].Run(term.Bytes, term.Offset, term.Length);
@@ -475,8 +476,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// reuses compiled automata across different segments,
+        /// Reuses compiled automata across different segments,
         /// because they are independent of the index
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public interface ILevenshteinAutomataAttribute : IAttribute
@@ -486,6 +488,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Stores compiled automata as a list (indexed by edit distance)
+        /// <para/>
         /// @lucene.internal
         /// </summary>
 #if FEATURE_SERIALIZABLE


[46/48] lucenenet git commit: Lucene.Net.Codecs: Fixed XML documentation warnings

Posted by ni...@apache.org.
Lucene.Net.Codecs: Fixed XML documentation warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/666de32b
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/666de32b
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/666de32b

Branch: refs/heads/master
Commit: 666de32b0fbe555f5d635c3c867c500f25d0cbdf
Parents: 6f22b5a
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Tue Jun 6 06:40:12 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Tue Jun 6 06:58:43 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |  21 +---
 .../Appending/AppendingCodec.cs                 |   4 +-
 .../Appending/AppendingPostingsFormat.cs        |   2 +-
 .../Appending/AppendingTermsReader.cs           |   2 +-
 .../BlockTerms/BlockTermsReader.cs              |  12 +-
 .../BlockTerms/BlockTermsWriter.cs              |  15 ++-
 .../BlockTerms/FixedGapTermsIndexReader.cs      |  11 +-
 .../BlockTerms/FixedGapTermsIndexWriter.cs      |  10 +-
 .../BlockTerms/TermsIndexReaderBase.cs          |  34 +++---
 .../BlockTerms/TermsIndexWriterBase.cs          |  10 +-
 .../BlockTerms/VariableGapTermsIndexReader.cs   |   8 +-
 .../BlockTerms/VariableGapTermsIndexWriter.cs   |  30 ++---
 .../Bloom/BloomFilterFactory.cs                 |  32 +++--
 .../Bloom/BloomFilteringPostingsFormat.cs       |  66 +++++-----
 .../Bloom/DefaultBloomFilterFactory.cs          |   4 +-
 src/Lucene.Net.Codecs/Bloom/FuzzySet.cs         |  66 +++++-----
 src/Lucene.Net.Codecs/Bloom/HashFunction.cs     |  10 +-
 src/Lucene.Net.Codecs/Bloom/MurmurHash2.cs      |  18 +--
 .../DiskDV/DiskDocValuesFormat.cs               |   2 +-
 src/Lucene.Net.Codecs/DiskDV/DiskNormsFormat.cs |   2 +-
 .../IntBlock/FixedIntBlockIndexInput.cs         |  19 +--
 .../IntBlock/FixedIntBlockIndexOutput.cs        |  22 ++--
 .../IntBlock/VariableIntBlockIndexInput.cs      |  17 +--
 .../IntBlock/VariableIntBlockIndexOutput.cs     |  30 +++--
 .../Memory/DirectDocValuesConsumer.cs           |   2 +-
 .../Memory/DirectDocValuesFormat.cs             |  36 +++---
 .../Memory/DirectDocValuesProducer.cs           |   2 +-
 .../Memory/DirectPostingsFormat.cs              |  45 ++++---
 .../Memory/FSTOrdPulsing41PostingsFormat.cs     |   3 +-
 .../Memory/FSTOrdTermsReader.cs                 |  28 ++---
 .../Memory/FSTOrdTermsWriter.cs                 | 120 ++++++++++---------
 .../Memory/FSTPulsing41PostingsFormat.cs        |   5 +-
 src/Lucene.Net.Codecs/Memory/FSTTermOutputs.cs  |  17 ++-
 src/Lucene.Net.Codecs/Memory/FSTTermsReader.cs  |  46 +++----
 src/Lucene.Net.Codecs/Memory/FSTTermsWriter.cs  |  85 ++++++-------
 .../Memory/MemoryDocValuesConsumer.cs           |   2 +-
 .../Memory/MemoryDocValuesFormat.cs             |  19 +--
 .../Memory/MemoryDocValuesProducer.cs           |   2 +-
 .../Memory/MemoryPostingsFormat.cs              |  13 +-
 .../Pulsing/Pulsing41PostingsFormat.cs          |   8 +-
 .../Pulsing/PulsingPostingsFormat.cs            |   3 +-
 .../Pulsing/PulsingPostingsReader.cs            |  29 +++--
 .../Pulsing/PulsingPostingsWriter.cs            |  38 +++---
 src/Lucene.Net.Codecs/Sep/IntIndexInput.cs      |  13 +-
 src/Lucene.Net.Codecs/Sep/IntIndexOutput.cs     |  27 ++---
 src/Lucene.Net.Codecs/Sep/IntStreamFactory.cs   |   8 +-
 src/Lucene.Net.Codecs/Sep/SepPostingsReader.cs  |  11 +-
 src/Lucene.Net.Codecs/Sep/SepPostingsWriter.cs  |  16 +--
 src/Lucene.Net.Codecs/Sep/SepSkipListReader.cs  |   4 +-
 src/Lucene.Net.Codecs/Sep/SepSkipListWriter.cs  |  13 +-
 .../SimpleText/SimpleTextCodec.cs               |   6 +-
 .../SimpleText/SimpleTextDocValuesFormat.cs     |  67 ++++++-----
 .../SimpleText/SimpleTextDocValuesWriter.cs     |   2 +-
 .../SimpleText/SimpleTextFieldInfosFormat.cs    |   6 +-
 .../SimpleText/SimpleTextFieldInfosReader.cs    |   6 +-
 .../SimpleText/SimpleTextFieldInfosWriter.cs    |   8 +-
 .../SimpleText/SimpleTextFieldsReader.cs        |   2 +-
 .../SimpleText/SimpleTextLiveDocsFormat.cs      |   8 +-
 .../SimpleText/SimpleTextNormsFormat.cs         |  18 ++-
 .../SimpleText/SimpleTextPostingsFormat.cs      |  13 +-
 .../SimpleText/SimpleTextSegmentInfoFormat.cs   |   6 +-
 .../SimpleText/SimpleTextSegmentInfoReader.cs   |   6 +-
 .../SimpleText/SimpleTextSegmentInfoWriter.cs   |   6 +-
 .../SimpleText/SimpleTextStoredFieldsFormat.cs  |   6 +-
 .../SimpleText/SimpleTextStoredFieldsReader.cs  |  12 +-
 .../SimpleText/SimpleTextStoredFieldsWriter.cs  |   4 +-
 .../SimpleText/SimpleTextTermVectorsFormat.cs   |   6 +-
 .../SimpleText/SimpleTextTermVectorsReader.cs   |   6 +-
 .../SimpleText/SimpleTextTermVectorsWriter.cs   |   4 +-
 69 files changed, 606 insertions(+), 628 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5736674..eda09e5 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -48,23 +48,6 @@ with minimal cleaning up. We are working on tools and code
 helpers to help with that, see for examples see our [Java style methods to avoid many search-replace in porting tests](https://github.com/apache/lucenenet/tree/master/src/Lucene.Net.TestFramework/JavaCompatibility), and a
 [R# plugin that will help making some stuff auto-port when pasting](https://resharper-plugins.jetbrains.com/packages/ReSharper.ExJava/).
 
-### Documentation Comments == up for grabs:
-
-1. Lucene.Net.Codecs (project)
-   1. Appending (namespace)
-   2. BlockTerms (namespace)
-   3. Bloom (namespace)
-   4. DiskDV (namespace)
-   5. IntBlock (namespace)
-   6. Memory (namespace)
-   7. Pulsing (namespace)
-   8. Sep (namespace)
-   9. SimpleText (namespace)
-
-See [Documenting Lucene.Net](https://cwiki.apache.org/confluence/display/LUCENENET/Documenting+Lucene.Net) for instructions. 
-
-> While it is assumed that the documentation comments for the other projects are finished, they could probably all use a review. Also be sure to check the comments against [Lucene 4.8.0](https://github.com/apache/lucene-solr/tree/releases/lucene-solr/4.8.0/lucene) to ensure they are correct and complete!
-
 ### Code that is currently pending being ported from scratch (+ tests) == up for grabs:
 
 * [Lucene.Net.Demo](https://github.com/apache/lucene-solr/tree/releases/lucene-solr/4.8.0/lucene/demo) (might be a good learning experience)
@@ -92,7 +75,7 @@ probably may also want to set a constant seed for working locally. See
 and
 <https://github.com/apache/lucenenet/blob/master/src/Lucene.Net.TestFramework/Util/LuceneTestCase.cs#L610>
 
-* Note that tests should be run both on .NET Framework and .NET Core. Currently, we have 2 different solutions (Lucene.Net.sln for .NET Framework and Lucene.Net.Portable.sln for .NET Core) that only run in Visual Studio 2015 and onwards. We are setup to use NUnit 3.x and you will need the appropriate [test adapter](https://marketplace.visualstudio.com/items?itemName=NUnitDevelopers.NUnit3TestAdapter) for Visual Studio to detect the tests. Tests can also be run from the command line using the [dotnet test]() command
+* Note that tests should be run both on .NET Framework and .NET Core. Currently, we have 2 different solutions (Lucene.Net.sln for .NET Framework and Lucene.Net.Portable.sln for .NET Core) that only run in Visual Studio 2015 and onwards. We are setup to use NUnit 3.x and you will need the appropriate [test adapter](https://marketplace.visualstudio.com/items?itemName=NUnitDevelopers.NUnit3TestAdapter) for Visual Studio to detect the tests. Tests can also be run from the command line using the [dotnet test](https://docs.microsoft.com/en-us/dotnet/core/tools/dotnet-test) command
 
 * Run, debug, iterate. When you think you fixed a bug or a test, please
 send a PR as fast as possible. There are multiple people working in this
@@ -105,7 +88,7 @@ you will receive notifications also via this list.
 
 ## Other types of help
 
-We will definitely need more help (like optimizing code, normalizing tabs/spaces, license headers, automating stuff, etc) but we are not there yet!
+We will definitely need more help (like optimizing code, normalizing tabs/spaces, writing tutorials, helping with API documentation, automating stuff, etc) but we are not there yet!
 
 Also, check out the [JIRA issue tracker](https://issues.apache.org/jira/browse/LUCENENET-586?jql=project%20%3D%20LUCENENET%20AND%20status%20%3D%20Open%20AND%20assignee%20in%20(EMPTY)) for any other issues that you might be interested in helping with. You can signup for a JIRA account [here](https://cwiki.apache.org/confluence/signup.action) (it just takes a minute).
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Appending/AppendingCodec.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Appending/AppendingCodec.cs b/src/Lucene.Net.Codecs/Appending/AppendingCodec.cs
index f074237..96d2f0f 100644
--- a/src/Lucene.Net.Codecs/Appending/AppendingCodec.cs
+++ b/src/Lucene.Net.Codecs/Appending/AppendingCodec.cs
@@ -21,10 +21,10 @@ namespace Lucene.Net.Codecs.Appending
      */
 
     /// <summary>
-    /// This codec uses an index format that is very similar to Lucene40Codec 
+    /// This codec uses an index format that is very similar to <see cref="Lucene40Codec"/> 
     /// but works on append-only outputs, such as plain output streams and 
     /// append-only filesystems.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     [Obsolete(

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Appending/AppendingPostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Appending/AppendingPostingsFormat.cs b/src/Lucene.Net.Codecs/Appending/AppendingPostingsFormat.cs
index 252c6e1..0eb57dd 100644
--- a/src/Lucene.Net.Codecs/Appending/AppendingPostingsFormat.cs
+++ b/src/Lucene.Net.Codecs/Appending/AppendingPostingsFormat.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Codecs.Appending
      */
 
     /// <summary>
-    /// Appending Postigns Implementation
+    /// Appending Postings Implementation
     /// </summary>
     [PostingsFormatName("Appending")] // LUCENENET specific - using PostingsFormatName attribute to ensure the default name passed from subclasses is the same as this class name
     internal class AppendingPostingsFormat : PostingsFormat

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Appending/AppendingTermsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Appending/AppendingTermsReader.cs b/src/Lucene.Net.Codecs/Appending/AppendingTermsReader.cs
index ae95ef3..cdde1a4 100644
--- a/src/Lucene.Net.Codecs/Appending/AppendingTermsReader.cs
+++ b/src/Lucene.Net.Codecs/Appending/AppendingTermsReader.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Codecs.Appending
 
     /// <summary>
     /// Reads append-only terms from AppendingTermsWriter.
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     [Obsolete("Only for reading old Appending segments")]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/BlockTerms/BlockTermsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/BlockTerms/BlockTermsReader.cs b/src/Lucene.Net.Codecs/BlockTerms/BlockTermsReader.cs
index 631349c..fc848b5 100644
--- a/src/Lucene.Net.Codecs/BlockTerms/BlockTermsReader.cs
+++ b/src/Lucene.Net.Codecs/BlockTerms/BlockTermsReader.cs
@@ -27,16 +27,16 @@ namespace Lucene.Net.Codecs.BlockTerms
 
     /// <summary>
     /// Handles a terms dict, but decouples all details of
-    /// doc/freqs/positions reading to an instance of {@link
-    /// PostingsReaderBase}.  This class is reusable for
+    /// doc/freqs/positions reading to an instance of 
+    /// <see cref="PostingsReaderBase"/>.  This class is reusable for
     /// codecs that use a different format for
     /// docs/freqs/positions (though codecs are also free to
     /// make their own terms dict impl).
-    ///
-    /// This class also interacts with an instance of {@link
-    /// TermsIndexReaderBase}, to abstract away the specific
+    /// <para/>
+    /// This class also interacts with an instance of
+    /// <see cref="TermsIndexReaderBase"/>, to abstract away the specific
     /// implementation of the terms dict index. 
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class BlockTermsReader : FieldsProducer

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/BlockTerms/BlockTermsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/BlockTerms/BlockTermsWriter.cs b/src/Lucene.Net.Codecs/BlockTerms/BlockTermsWriter.cs
index 7c25d01..149a8f7 100644
--- a/src/Lucene.Net.Codecs/BlockTerms/BlockTermsWriter.cs
+++ b/src/Lucene.Net.Codecs/BlockTerms/BlockTermsWriter.cs
@@ -25,16 +25,15 @@ namespace Lucene.Net.Codecs.BlockTerms
      * limitations under the License.
      */
 
+    // TODO: Currently we encode all terms between two indexed terms as a block
+    // But we could decouple the two, ie allow several blocks in between two indexed terms
+
     /// <summary>
     /// Writes terms dict, block-encoding (column stride) each term's metadata 
-    /// for each set of terms between two index terms
-    /// 
-    /// lucene.experimental
+    /// for each set of terms between two index terms.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// <remarks>
-    /// TODO Currently we encode all terms between two indexed terms as a block
-    /// But we could decouple the two, ie allow several blocks in between two indexed terms
-    /// </remarks>
     public class BlockTermsWriter : FieldsConsumer
     {
         internal readonly static string CODEC_NAME = "BLOCK_TERMS_DICT";
@@ -64,7 +63,7 @@ namespace Lucene.Net.Codecs.BlockTerms
             public long SumDocFreq { get; private set; }
             public int DocCount { get; private set; }
             /// <summary>
-            /// NOTE: This was longsSize (field) in Lucene
+            /// NOTE: This was longsSize (field) in Lucene.
             /// </summary>
             public int Int64sSize { get; private set; }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexReader.cs b/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexReader.cs
index 92f6a95..1265661 100644
--- a/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexReader.cs
+++ b/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexReader.cs
@@ -28,12 +28,11 @@ namespace Lucene.Net.Codecs.BlockTerms
      */
 
     /// <summary>
-    /// TermsIndexReader for simple every Nth terms indexes
-    /// 
-    /// See FixedGapTermsIndexWriter
-    /// 
-    /// lucene.experimental
+    /// <see cref="TermsIndexReaderBase"/> for simple every Nth terms indexes.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
+    /// <seealso cref="FixedGapTermsIndexWriter"/>
     public class FixedGapTermsIndexReader : TermsIndexReaderBase
     {
         // NOTE: long is overkill here, since this number is 128
@@ -438,7 +437,7 @@ namespace Lucene.Net.Codecs.BlockTerms
                     }
                 }
 
-                /// <summary>Returns approximate RAM bytes Used</summary>
+                /// <summary>Returns approximate RAM bytes used.</summary>
                 public long RamBytesUsed()
                 {
                     return ((termOffsets != null) ? termOffsets.RamBytesUsed() : 0) +

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexWriter.cs b/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexWriter.cs
index b27938e..41ed974 100644
--- a/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexWriter.cs
+++ b/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexWriter.cs
@@ -28,11 +28,11 @@ namespace Lucene.Net.Codecs.BlockTerms
     /// <summary>
     /// Selects every Nth term as and index term, and hold term
     /// bytes (mostly) fully expanded in memory.  This terms index
-    /// supports seeking by ord.  See {@link
-    /// VariableGapTermsIndexWriter} for a more memory efficient
+    /// supports seeking by ord.  See 
+    /// <see cref="VariableGapTermsIndexWriter"/> for a more memory efficient
     /// terms index that does not support seeking by ord.
-    ///
-    /// @lucene.experimental */    
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
     public class FixedGapTermsIndexWriter : TermsIndexWriterBase
     {
@@ -90,7 +90,7 @@ namespace Lucene.Net.Codecs.BlockTerms
         /// <summary>
         /// NOTE: if your codec does not sort in unicode code
         /// point order, you must override this method, to simply
-        /// return indexedTerm.Length.
+        /// return <c>indexedTerm.Length</c>.
         /// </summary>
         protected virtual int IndexedTermPrefixLength(BytesRef priorTerm, BytesRef indexedTerm)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/BlockTerms/TermsIndexReaderBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/BlockTerms/TermsIndexReaderBase.cs b/src/Lucene.Net.Codecs/BlockTerms/TermsIndexReaderBase.cs
index d7b905f..766385b 100644
--- a/src/Lucene.Net.Codecs/BlockTerms/TermsIndexReaderBase.cs
+++ b/src/Lucene.Net.Codecs/BlockTerms/TermsIndexReaderBase.cs
@@ -21,20 +21,20 @@ namespace Lucene.Net.Codecs.BlockTerms
      * limitations under the License.
      */
 
+    // TODO
+    //   - allow for non-regular index intervals?  eg with a
+    //     long string of rare terms, you don't need such
+    //     frequent indexing
+
     /// <summary>
-    /// TODO
-    ///   - allow for non-regular index intervals?  eg with a
-    ///     long string of rare terms, you don't need such
-    ///     frequent indexing
-    /// 
-    /// {@link BlockTermsReader} interacts with an instance of this class
+    /// <see cref="BlockTermsReader"/> interacts with an instance of this class
     /// to manage its terms index.  The writer must accept
-    /// indexed terms (many pairs of BytesRef text + long
+    /// indexed terms (many pairs of <see cref="BytesRef"/> text + long
     /// fileOffset), and then this reader must be able to
     /// retrieve the nearest index term to a provided term
     /// text. 
-    ///
-    ///  @lucene.experimental */
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
     public abstract class TermsIndexReaderBase : IDisposable
     {
@@ -54,7 +54,7 @@ namespace Lucene.Net.Codecs.BlockTerms
         public abstract int Divisor { get; }
 
         /// <summary>
-        /// Similar to TermsEnum, except, the only "metadata" it
+        /// Similar to <see cref="TermsEnum"/>, except, the only "metadata" it
         /// reports for a given indexed term is the long fileOffset
         /// into the main terms dictionary file.
         /// </summary>
@@ -63,29 +63,29 @@ namespace Lucene.Net.Codecs.BlockTerms
             /// <summary> 
             /// Seeks to "largest" indexed term that's less than or equal
             /// to term; returns file pointer index (into the main
-            /// terms index file) for that term 
+            /// terms index file) for that term.
             /// </summary>
             public abstract long Seek(BytesRef term);
 
-            /// <summary>Returns -1 at end</summary>
+            /// <summary>Returns -1 at end.</summary>
             public abstract long Next();
 
             public abstract BytesRef Term { get; }
 
             /// <summary></summary>
-            /// <remarks>Only implemented if {@link TermsIndexReaderBase.supportsOrd()} 
-            /// returns true</remarks>
+            /// <remarks>Only implemented if <see cref="TermsIndexReaderBase.SupportsOrd"/>
+            /// returns <c>true</c></remarks>
             /// <returns></returns>
             public abstract long Seek(long ord);
 
             /// <summary></summary>
-            /// <remarks>Only implemented if {@link TermsIndexReaderBase.supportsOrd()} 
-            /// returns true</remarks>
+            /// <remarks>Only implemented if <see cref="TermsIndexReaderBase.SupportsOrd"/> 
+            /// returns <c>true</c></remarks>
             /// <returns></returns>
             public abstract long Ord { get; }
         }
 
-        /// <summary>Returns approximate RAM bytes used</summary>
+        /// <summary>Returns approximate RAM bytes used.</summary>
         public abstract long RamBytesUsed();
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/BlockTerms/TermsIndexWriterBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/BlockTerms/TermsIndexWriterBase.cs b/src/Lucene.Net.Codecs/BlockTerms/TermsIndexWriterBase.cs
index 5568252..4ac3745 100644
--- a/src/Lucene.Net.Codecs/BlockTerms/TermsIndexWriterBase.cs
+++ b/src/Lucene.Net.Codecs/BlockTerms/TermsIndexWriterBase.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Codecs.BlockTerms
      */
 
     /// <summary>
-    ///  Base class for terms index implementations to plug
-    /// into {@link BlockTermsWriter}.
-    /// 
-    /// @see TermsIndexReaderBase
+    /// Base class for terms index implementations to plug
+    /// into <see cref="BlockTermsWriter"/>.
+    /// <para/>
     /// @lucene.experimental 
     /// </summary>
+    /// <seealso cref="TermsIndexReaderBase"/>
     public abstract class TermsIndexWriterBase : IDisposable
     {
-        /// <summary>Terms index API for a single field</summary>
+        /// <summary>Terms index API for a single field.</summary>
         public abstract class FieldWriter
         {
             public abstract bool CheckIndexTerm(BytesRef text, TermStats stats);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexReader.cs b/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexReader.cs
index ca773c0..a9e1199 100644
--- a/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexReader.cs
+++ b/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexReader.cs
@@ -27,9 +27,9 @@ namespace Lucene.Net.Codecs.BlockTerms
      */
 
     /// <summary>
-    /// See VariableGapTermsIndexWriter
-    /// 
-    /// lucene.experimental
+    /// See <see cref="VariableGapTermsIndexWriter"/>.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
     public class VariableGapTermsIndexReader : TermsIndexReaderBase
     {
@@ -258,7 +258,7 @@ namespace Lucene.Net.Codecs.BlockTerms
                 }
             }
 
-            /// <summary>Returns approximate RAM bytes used</summary>
+            /// <summary>Returns approximate RAM bytes used.</summary>
             public virtual long RamBytesUsed()
             {
                 return fst == null ? 0 : fst.GetSizeInBytes();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexWriter.cs b/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexWriter.cs
index 9e05531..47a46cc 100644
--- a/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexWriter.cs
+++ b/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexWriter.cs
@@ -27,18 +27,18 @@ namespace Lucene.Net.Codecs.BlockTerms
 
     /// <summary>
     /// Selects index terms according to provided pluggable
-    /// {@link IndexTermSelector}, and stores them in a prefix trie that's
-    /// loaded entirely in RAM stored as an FST.  This terms
+    /// <see cref="IndexTermSelector"/>, and stores them in a prefix trie that's
+    /// loaded entirely in RAM stored as an <see cref="FST{T}"/>.  This terms
     /// index only supports unsigned byte term sort order
     /// (unicode codepoint order when the bytes are UTF8).
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class VariableGapTermsIndexWriter : TermsIndexWriterBase
     {
         protected IndexOutput m_output;
 
-        /// <summary>Extension of terms index file</summary>
+        /// <summary>Extension of terms index file.</summary>
         internal readonly static string TERMS_INDEX_EXTENSION = "tiv";
 
         internal readonly static string CODEC_NAME = "VARIABLE_GAP_TERMS_INDEX";
@@ -53,27 +53,27 @@ namespace Lucene.Net.Codecs.BlockTerms
         private readonly IndexTermSelector policy;
 
         /// <summary>
-        /// Hook for selecting which terms should be placed in the terms index
-        /// 
-        /// IsIndexTerm for each term in that field
-        /// NewField is called at the start of each new field
-        /// 
+        /// Hook for selecting which terms should be placed in the terms index.
+        /// <para/>
+        /// <see cref="NewField(FieldInfo)"/> is called at the start of each new field, and
+        /// <see cref="IsIndexTerm(BytesRef, TermStats)"/> for each term in that field.
+        /// <para/>
         /// @lucene.experimental
         /// </summary>
         public abstract class IndexTermSelector
         {
             /// <summary>
-            /// Called sequentially on every term being written
-            /// returning true if this term should be indexed
+            /// Called sequentially on every term being written,
+            /// returning <c>true</c> if this term should be indexed.
             /// </summary>
             public abstract bool IsIndexTerm(BytesRef term, TermStats stats);
 
-            /// <summary>Called when a new field is started</summary>
+            /// <summary>Called when a new field is started.</summary>
             public abstract void NewField(FieldInfo fieldInfo);
         }
 
         /// <remarks>
-        /// Same policy as {@link FixedGapTermsIndexWriter}
+        /// Same policy as <see cref="FixedGapTermsIndexWriter"/>
         /// </remarks>
         public sealed class EveryNTermSelector : IndexTermSelector
         {
@@ -216,8 +216,8 @@ namespace Lucene.Net.Codecs.BlockTerms
         }
 
         /// <remarks>
-        /// Note: If your codec does not sort in unicode code point order,
-        /// you must override this method to simplly return IndexedTerm.Length
+        /// NOTE: If your codec does not sort in unicode code point order,
+        /// you must override this method to simply return <c>indexedTerm.Length</c>.
         /// </remarks>
         protected virtual int IndexedTermPrefixLength(BytesRef priorTerm, BytesRef indexedTerm)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Bloom/BloomFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Bloom/BloomFilterFactory.cs b/src/Lucene.Net.Codecs/Bloom/BloomFilterFactory.cs
index 377806c..3bfe3ac 100644
--- a/src/Lucene.Net.Codecs/Bloom/BloomFilterFactory.cs
+++ b/src/Lucene.Net.Codecs/Bloom/BloomFilterFactory.cs
@@ -20,27 +20,25 @@ namespace Lucene.Net.Codecs.Bloom
      */
 
     /// <summary>
-    /// Class used to create index-time {@link FuzzySet} appropriately configured for
+    /// Class used to create index-time <see cref="FuzzySet"/> appropriately configured for
     /// each field. Also called to right-size bitsets for serialization.
-    ///
-    ///  @lucene.experimental
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
     public abstract class BloomFilterFactory
     {
-        /// <summary>
-        /// 
-        /// </summary>
-        /// <param name="state">The content to be indexed</param>
-        /// <param name="info">The field requiring a BloomFilter</param>
-        /// <returns>An appropriately sized set or null if no BloomFiltering required</returns>
+
+        /// <param name="state">The content to be indexed.</param>
+        /// <param name="info">The field requiring a BloomFilter.</param>
+        /// <returns>An appropriately sized set or <c>null</c> if no BloomFiltering required.</returns>
         public abstract FuzzySet GetSetForField(SegmentWriteState state, FieldInfo info);
 
         /// <summary>
-        /// Called when downsizing bitsets for serialization
+        /// Called when downsizing bitsets for serialization.
         /// </summary>
-        /// <param name="fieldInfo">The field with sparse set bits</param>
-        /// <param name="initialSet">The bits accumulated</param>
-        /// <returns> null or a hopefully more densely packed, smaller bitset</returns>
+        /// <param name="fieldInfo">The field with sparse set bits.</param>
+        /// <param name="initialSet">The bits accumulated.</param>
+        /// <returns> <c>null</c> or a hopefully more densely packed, smaller bitset.</returns>
         public virtual FuzzySet Downsize(FieldInfo fieldInfo, FuzzySet initialSet)
         {
             // Aim for a bitset size that would have 10% of bits set (so 90% of searches
@@ -50,11 +48,11 @@ namespace Lucene.Net.Codecs.Bloom
         }
 
         /// <summary>
-        /// Used to determine if the given filter has reached saturation and should be retired i.e. not saved any more
+        /// Used to determine if the given filter has reached saturation and should be retired i.e. not saved any more.
         /// </summary>
-        /// <param name="bloomFilter">The bloomFilter being tested</param>
-        /// <param name="fieldInfo">The field with which this filter is associated</param>
-        /// <returns>true if the set has reached saturation and should be retired</returns>
+        /// <param name="bloomFilter">The bloomFilter being tested.</param>
+        /// <param name="fieldInfo">The field with which this filter is associated.</param>
+        /// <returns><c>true</c> if the set has reached saturation and should be retired.</returns>
         public abstract bool IsSaturated(FuzzySet bloomFilter, FieldInfo fieldInfo);
     }
 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Bloom/BloomFilteringPostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Bloom/BloomFilteringPostingsFormat.cs b/src/Lucene.Net.Codecs/Bloom/BloomFilteringPostingsFormat.cs
index 8ab4952..cf0ca01 100644
--- a/src/Lucene.Net.Codecs/Bloom/BloomFilteringPostingsFormat.cs
+++ b/src/Lucene.Net.Codecs/Bloom/BloomFilteringPostingsFormat.cs
@@ -29,33 +29,33 @@ namespace Lucene.Net.Codecs.Bloom
 
     /// <summary>
     /// 
-    /// A {@link PostingsFormat} useful for low doc-frequency fields such as primary
+    /// A <see cref="PostingsFormat"/> useful for low doc-frequency fields such as primary
     /// keys. Bloom filters are maintained in a ".blm" file which offers "fast-fail"
     /// for reads in segments known to have no record of the key. A choice of
-    /// delegate PostingsFormat is used to record all other Postings data.
-    /// 
-    /// A choice of {@link BloomFilterFactory} can be passed to tailor Bloom Filter
+    /// delegate <see cref="PostingsFormat"/> is used to record all other Postings data.
+    /// <para/>
+    /// A choice of <see cref="BloomFilterFactory"/> can be passed to tailor Bloom Filter
     /// settings on a per-field basis. The default configuration is
-    /// {@link DefaultBloomFilterFactory} which allocates a ~8mb bitset and hashes
-    /// values using {@link MurmurHash2}. This should be suitable for most purposes.
-    ///
+    /// <see cref="DefaultBloomFilterFactory"/> which allocates a ~8mb bitset and hashes
+    /// values using <see cref="MurmurHash2"/>. This should be suitable for most purposes.
+    /// <para/>
     /// The format of the blm file is as follows:
     ///
-    /// <ul>
-    /// <li>BloomFilter (.blm) --&gt; Header, DelegatePostingsFormatName,
-    /// NumFilteredFields, Filter<sup>NumFilteredFields</sup>, Footer</li>
-    /// <li>Filter --&gt; FieldNumber, FuzzySet</li>
-    /// <li>FuzzySet --&gt;See {@link FuzzySet#serialize(DataOutput)}</li>
-    /// <li>Header --&gt; {@link CodecUtil#writeHeader CodecHeader}</li>
-    /// <li>DelegatePostingsFormatName --&gt; {@link DataOutput#writeString(String)
-    /// String} The name of a ServiceProvider registered {@link PostingsFormat}</li>
-    /// <li>NumFilteredFields --&gt; {@link DataOutput#writeInt Uint32}</li>
-    /// <li>FieldNumber --&gt; {@link DataOutput#writeInt Uint32} The number of the
-    /// field in this segment</li>
-    /// <li>Footer --&gt; {@link CodecUtil#writeFooter CodecFooter}</li>
-    /// </ul>
-    ///
-    ///  @lucene.experimental
+    /// <list type="bullet">
+    ///     <item><description>BloomFilter (.blm) --&gt; Header, DelegatePostingsFormatName,
+    ///         NumFilteredFields, Filter<sup>NumFilteredFields</sup>, Footer</description></item>
+    ///     <item><description>Filter --&gt; FieldNumber, FuzzySet</description></item>
+    ///     <item><description>FuzzySet --&gt;See <see cref="FuzzySet.Serialize(DataOutput)"/></description></item>
+    ///     <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(DataOutput, string, int)"/>) </description></item>
+    ///     <item><description>DelegatePostingsFormatName --&gt; String (<see cref="DataOutput.WriteString(string)"/>)
+    ///         The name of a ServiceProvider registered <see cref="PostingsFormat"/></description></item>
+    ///     <item><description>NumFilteredFields --&gt; Uint32 (<see cref="DataOutput.WriteInt32(int)"/>) </description></item>
+    ///     <item><description>FieldNumber --&gt; Uint32 (<see cref="DataOutput.WriteInt32(int)"/>) The number of the
+    ///         field in this segment</description></item>
+    ///     <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(IndexOutput)"/>) </description></item>
+    /// </list>
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
     [PostingsFormatName("BloomFilter")] // LUCENENET specific - using PostingsFormatName attribute to ensure the default name passed from subclasses is the same as this class name
     public sealed class BloomFilteringPostingsFormat : PostingsFormat
@@ -66,20 +66,20 @@ namespace Lucene.Net.Codecs.Bloom
         public static readonly int VERSION_CHECKSUM = 2;
         public static readonly int VERSION_CURRENT = VERSION_CHECKSUM;
 
-        /// <summary>Extension of Bloom Filters file</summary>
+        /// <summary>Extension of Bloom Filters file.</summary>
         private const string BLOOM_EXTENSION = "blm";
 
         private readonly BloomFilterFactory _bloomFilterFactory = new DefaultBloomFilterFactory();
         private readonly PostingsFormat _delegatePostingsFormat;
-        
+
         /// <summary>
-        ///  Creates Bloom filters for a selection of fields created in the index. This
+        /// Creates Bloom filters for a selection of fields created in the index. This
         /// is recorded as a set of Bitsets held as a segment summary in an additional
-        /// "blm" file. This PostingsFormat delegates to a choice of delegate
-        /// PostingsFormat for encoding all other postings data.
+        /// "blm" file. This <see cref="PostingsFormat"/> delegates to a choice of delegate
+        /// <see cref="PostingsFormat"/> for encoding all other postings data.
         /// </summary>
-        /// <param name="delegatePostingsFormat">The PostingsFormat that records all the non-bloom filter data i.e. postings info.</param>
-        /// <param name="bloomFilterFactory">The {@link BloomFilterFactory} responsible for sizing BloomFilters appropriately</param>
+        /// <param name="delegatePostingsFormat">The <see cref="PostingsFormat"/> that records all the non-bloom filter data i.e. postings info.</param>
+        /// <param name="bloomFilterFactory">The <see cref="BloomFilterFactory"/> responsible for sizing BloomFilters appropriately.</param>
         public BloomFilteringPostingsFormat(PostingsFormat delegatePostingsFormat,
             BloomFilterFactory bloomFilterFactory) : base()
         {
@@ -90,12 +90,12 @@ namespace Lucene.Net.Codecs.Bloom
         /// <summary>
         /// Creates Bloom filters for a selection of fields created in the index. This
         /// is recorded as a set of Bitsets held as a segment summary in an additional
-        /// "blm" file. This PostingsFormat delegates to a choice of delegate
-        /// PostingsFormat for encoding all other postings data. This choice of
-        /// constructor defaults to the {@link DefaultBloomFilterFactory} for
+        /// "blm" file. This <see cref="PostingsFormat"/> delegates to a choice of delegate
+        /// <see cref="PostingsFormat"/> for encoding all other postings data. This choice of
+        /// constructor defaults to the <see cref="DefaultBloomFilterFactory"/> for
         /// configuring per-field BloomFilters.
         /// </summary>
-        /// <param name="delegatePostingsFormat">The PostingsFormat that records all the non-bloom filter data i.e. postings info.</param>
+        /// <param name="delegatePostingsFormat">The <see cref="PostingsFormat"/> that records all the non-bloom filter data i.e. postings info.</param>
         public BloomFilteringPostingsFormat(PostingsFormat delegatePostingsFormat)
             : this(delegatePostingsFormat, new DefaultBloomFilterFactory())
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Bloom/DefaultBloomFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Bloom/DefaultBloomFilterFactory.cs b/src/Lucene.Net.Codecs/Bloom/DefaultBloomFilterFactory.cs
index 0b2e1ad..5d51999 100644
--- a/src/Lucene.Net.Codecs/Bloom/DefaultBloomFilterFactory.cs
+++ b/src/Lucene.Net.Codecs/Bloom/DefaultBloomFilterFactory.cs
@@ -21,8 +21,8 @@ namespace Lucene.Net.Codecs.Bloom
 
     /// <summary>
     /// Default policy is to allocate a bitset with 10% saturation given a unique term per document.
-    /// Bits are set via MurmurHash2 hashing function.
-    ///
+    /// Bits are set via <see cref="MurmurHash2"/> hashing function.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class DefaultBloomFilterFactory : BloomFilterFactory

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Bloom/FuzzySet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Bloom/FuzzySet.cs b/src/Lucene.Net.Codecs/Bloom/FuzzySet.cs
index 282c790..36a6f29 100644
--- a/src/Lucene.Net.Codecs/Bloom/FuzzySet.cs
+++ b/src/Lucene.Net.Codecs/Bloom/FuzzySet.cs
@@ -26,20 +26,20 @@ namespace Lucene.Net.Codecs.Bloom
     /// <summary>
     /// A class used to represent a set of many, potentially large, values (e.g. many
     /// long strings such as URLs), using a significantly smaller amount of memory.
-    ///
+    /// <para/>
     /// The set is "lossy" in that it cannot definitively state that is does contain
     /// a value but it <em>can</em> definitively say if a value is <em>not</em> in
     /// the set. It can therefore be used as a Bloom Filter.
-    /// 
+    /// <para/>
     /// Another application of the set is that it can be used to perform fuzzy counting because
     /// it can estimate reasonably accurately how many unique values are contained in the set. 
-    ///
+    /// <para/>
     /// This class is NOT threadsafe.
-    ///
+    /// <para/>
     /// Internally a Bitset is used to record values and once a client has finished recording
-    /// a stream of values the {@link #downsize(float)} method can be used to create a suitably smaller set that
+    /// a stream of values the <see cref="Downsize(float)"/> method can be used to create a suitably smaller set that
     /// is sized appropriately for the number of values recorded and desired saturation levels. 
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class FuzzySet
@@ -62,7 +62,7 @@ namespace Lucene.Net.Codecs.Bloom
         }
 
         /// <remarks>
-        /// Result from {@link FuzzySet#contains(BytesRef)}:
+        /// Result from <see cref="FuzzySet.Contains(BytesRef)"/>:
         /// can never return definitively YES (always MAYBE), 
         /// but can sometimes definitely return NO.
         /// </remarks>
@@ -99,7 +99,7 @@ namespace Lucene.Net.Codecs.Bloom
         }
 
         /// <summary>
-        /// Rounds down required maxNumberOfBits to the nearest number that is made up
+        /// Rounds down required <paramref name="maxNumberOfBits"/> to the nearest number that is made up
         /// of all ones as a binary number.  
         /// Use this method where controlling memory use is paramount.
         /// </summary>
@@ -118,8 +118,8 @@ namespace Lucene.Net.Codecs.Bloom
         /// than deciding how much memory to throw at the problem.
         /// </summary>
         /// <param name="maxNumberOfValuesExpected"></param>
-        /// <param name="desiredSaturation">A number between 0 and 1 expressing the % of bits set once all values have been recorded</param>
-        /// <returns>The size of the set nearest to the required size</returns>
+        /// <param name="desiredSaturation">A number between 0 and 1 expressing the % of bits set once all values have been recorded.</param>
+        /// <returns>The size of the set nearest to the required size.</returns>
         public static int GetNearestSetSize(int maxNumberOfValuesExpected,
             float desiredSaturation)
         {
@@ -156,9 +156,10 @@ namespace Lucene.Net.Codecs.Bloom
 
         /// <summary>
         /// The main method required for a Bloom filter which, given a value determines set membership.
-        /// Unlike a conventional set, the fuzzy set returns NO or MAYBE rather than true or false.
+        /// Unlike a conventional set, the fuzzy set returns <see cref="ContainsResult.NO"/> or 
+        /// <see cref="ContainsResult.MAYBE"/> rather than <c>true</c> or <c>false</c>.
         /// </summary>
-        /// <returns>NO or MAYBE</returns>
+        /// <returns><see cref="ContainsResult.NO"/> or <see cref="ContainsResult.MAYBE"/></returns>
         public virtual ContainsResult Contains(BytesRef value)
         {
             var hash = _hashFunction.Hash(value);
@@ -170,23 +171,23 @@ namespace Lucene.Net.Codecs.Bloom
         }
 
         /// <summary>
-        ///  Serializes the data set to file using the following format:
-        ///  <ul>
-        ///   <li>FuzzySet --&gt;FuzzySetVersion,HashFunctionName,BloomSize,
-        ///  NumBitSetWords,BitSetWord<sup>NumBitSetWords</sup></li> 
-        ///  <li>HashFunctionName --&gt; {@link DataOutput#writeString(String) String} The
-        ///  name of a ServiceProvider registered {@link HashFunction}</li>
-        ///  <li>FuzzySetVersion --&gt; {@link DataOutput#writeInt Uint32} The version number of the {@link FuzzySet} class</li>
-        ///  <li>BloomSize --&gt; {@link DataOutput#writeInt Uint32} The modulo value used
-        ///  to project hashes into the field's Bitset</li>
-        ///  <li>NumBitSetWords --&gt; {@link DataOutput#writeInt Uint32} The number of
-        ///  longs (as returned from {@link FixedBitSet#getBits})</li>
-        ///  <li>BitSetWord --&gt; {@link DataOutput#writeLong Long} A long from the array
-        ///  returned by {@link FixedBitSet#getBits}</li>
-        ///  </ul>
-        ///  @param out Data output stream
-        ///  @ If there is a low-level I/O error
+        /// Serializes the data set to file using the following format:
+        /// <list type="bullet">
+        ///     <item><description>FuzzySet --&gt;FuzzySetVersion,HashFunctionName,BloomSize,
+        ///         NumBitSetWords,BitSetWord<sup>NumBitSetWords</sup></description></item> 
+        ///     <item><description>HashFunctionName --&gt; String (<see cref="DataOutput.WriteString(string)"/>) The
+        ///         name of a ServiceProvider registered <see cref="HashFunction"/></description></item>
+        ///     <item><description>FuzzySetVersion --&gt; Uint32 (<see cref="DataOutput.WriteInt32(int)"/>) The version number of the <see cref="FuzzySet"/> class</description></item>
+        ///     <item><description>BloomSize --&gt; Uint32 (<see cref="DataOutput.WriteInt32(int)"/>) The modulo value used
+        ///         to project hashes into the field's Bitset</description></item>
+        ///     <item><description>NumBitSetWords --&gt; Uint32 (<see cref="DataOutput.WriteInt32(int)"/>) The number of
+        ///         longs (as returned from <see cref="FixedBitSet.GetBits()"/>)</description></item>
+        ///     <item><description>BitSetWord --&gt; Long (<see cref="DataOutput.WriteInt64(long)"/>) A long from the array
+        ///         returned by <see cref="FixedBitSet.GetBits()"/></description></item>
+        /// </list>
         /// </summary>
+        /// <param name="output">Data output stream.</param>
+        /// <exception cref="System.IO.IOException">If there is a low-level I/O error.</exception>
         public virtual void Serialize(DataOutput output)
         {
             output.WriteInt32(VERSION_CURRENT);
@@ -232,7 +233,8 @@ namespace Lucene.Net.Codecs.Bloom
         /// Records a value in the set. The referenced bytes are hashed and then modulo n'd where n is the
         /// chosen size of the internal bitset.
         /// </summary>
-        /// <param name="value">The Key value to be hashed</param>
+        /// <param name="value">The Key value to be hashed.</param>
+        /// <exception cref="System.IO.IOException">If there is a low-level I/O error.</exception>
         public virtual void AddValue(BytesRef value)
         {
             var hash = _hashFunction.Hash(value);
@@ -249,7 +251,7 @@ namespace Lucene.Net.Codecs.Bloom
         /// A number between 0 and 1 describing the % of bits that would ideally be set in the result. 
         /// Lower values have better accuracy but require more space.
         /// </param>
-        /// <return>A smaller FuzzySet or null if the current set is already over-saturated</return>
+        /// <returns>A smaller <see cref="FuzzySet"/> or <c>null</c> if the current set is already over-saturated.</returns>
         public virtual FuzzySet Downsize(float targetMaxSaturation)
         {
             var numBitsSet = _filter.Cardinality();
@@ -295,7 +297,9 @@ namespace Lucene.Net.Codecs.Bloom
             return GetEstimatedNumberUniqueValuesAllowingForCollisions(_bloomSize, _filter.Cardinality());
         }
 
-        // Given a set size and a the number of set bits, produces an estimate of the number of unique values recorded
+        /// <summary>
+        /// Given a <paramref name="setSize"/> and the number of set bits, produces an estimate of the number of unique values recorded.
+        /// </summary>
         public static int GetEstimatedNumberUniqueValuesAllowingForCollisions(
             int setSize, int numRecordedBits)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Bloom/HashFunction.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Bloom/HashFunction.cs b/src/Lucene.Net.Codecs/Bloom/HashFunction.cs
index 9be4889..d523d70 100644
--- a/src/Lucene.Net.Codecs/Bloom/HashFunction.cs
+++ b/src/Lucene.Net.Codecs/Bloom/HashFunction.cs
@@ -22,18 +22,18 @@ namespace Lucene.Net.Codecs.Bloom
     /// <summary>
     /// Base class for hashing functions that can be referred to by name.
     /// Subclasses are expected to provide threadsafe implementations of the hash function
-    /// on the range of bytes referenced in the provided {@link BytesRef}
-    /// 
+    /// on the range of bytes referenced in the provided <see cref="BytesRef"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public abstract class HashFunction
     {
 
         /// <summary>
-        /// Hashes the contents of the referenced bytes
-        /// @param bytes the data to be hashed
-        /// @return the hash of the bytes referenced by bytes.offset and length bytes.length
+        /// Hashes the contents of the referenced bytes.
         /// </summary>
+        /// <param name="bytes">The data to be hashed.</param>
+        /// <returns>The hash of the bytes referenced by <c>bytes.Offset</c> and length <c>bytes.Length</c>.</returns>
         public abstract int Hash(BytesRef bytes);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Bloom/MurmurHash2.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Bloom/MurmurHash2.cs b/src/Lucene.Net.Codecs/Bloom/MurmurHash2.cs
index 83504fc..5e4254e 100644
--- a/src/Lucene.Net.Codecs/Bloom/MurmurHash2.cs
+++ b/src/Lucene.Net.Codecs/Bloom/MurmurHash2.cs
@@ -22,14 +22,14 @@ namespace Lucene.Net.Codecs.Bloom
     /// <summary>
     /// This is a very fast, non-cryptographic hash suitable for general hash-based
     /// lookup. See http://murmurhash.googlepages.com/ for more details.
-    ///
+    /// <para/>
     /// The C version of MurmurHash 2.0 found at that site was ported to Java by
     /// Andrzej Bialecki (ab at getopt org).
-    ///
-    ///  The code from getopt.org was adapted by Mark Harwood in the form here as one of a pluggable choice of 
-    /// hashing functions as the core function had to be adapted to work with BytesRefs with offsets and lengths
+    /// <para/>
+    /// The code from getopt.org was adapted by Mark Harwood in the form here as one of a pluggable choice of 
+    /// hashing functions as the core function had to be adapted to work with <see cref="BytesRef"/>s with offsets and lengths
     /// rather than raw byte arrays.  
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public sealed class MurmurHash2 : HashFunction
@@ -89,10 +89,10 @@ namespace Lucene.Net.Codecs.Bloom
         /// <summary>
         /// Generates 32 bit hash from byte array with default seed value.
         /// </summary>
-        /// <param name="data">byte array to hash</param>
-        /// <param name="offset">the start position in the array to hash</param>
-        /// <param name="len">length of the array elements to hash</param>
-        /// <returns>32 bit hash of the given array</returns>
+        /// <param name="data">Byte array to hash.</param>
+        /// <param name="offset">The start position in the array to hash.</param>
+        /// <param name="len">Length of the array elements to hash.</param>
+        /// <returns>32 bit hash of the given array.</returns>
         public static int Hash32(byte[] data, int offset, int len)
         {
             return Hash(data, unchecked((int)0x9747b28c), offset, len);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/DiskDV/DiskDocValuesFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/DiskDV/DiskDocValuesFormat.cs b/src/Lucene.Net.Codecs/DiskDV/DiskDocValuesFormat.cs
index e121942..4ff48ab 100644
--- a/src/Lucene.Net.Codecs/DiskDV/DiskDocValuesFormat.cs
+++ b/src/Lucene.Net.Codecs/DiskDV/DiskDocValuesFormat.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Codecs.DiskDV
     /// <summary>
     /// DocValues format that keeps most things on disk.
     /// Only things like disk offsets are loaded into ram.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     [DocValuesFormatName("Disk")] // LUCENENET specific - using DocValuesFormatName attribute to ensure the default name passed from subclasses is the same as this class name

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/DiskDV/DiskNormsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/DiskDV/DiskNormsFormat.cs b/src/Lucene.Net.Codecs/DiskDV/DiskNormsFormat.cs
index 4ba4e5e..01a1949 100644
--- a/src/Lucene.Net.Codecs/DiskDV/DiskNormsFormat.cs
+++ b/src/Lucene.Net.Codecs/DiskDV/DiskNormsFormat.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Codecs.DiskDV
      */
 
     /// <summary>
-    /// Norms format that keeps all norms on disk
+    /// Norms format that keeps all norms on disk.
     /// </summary>
     public sealed class DiskNormsFormat : NormsFormat
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexInput.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexInput.cs b/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexInput.cs
index 0d9ec7e..ce02b7e 100644
--- a/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexInput.cs
+++ b/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexInput.cs
@@ -21,21 +21,24 @@ namespace Lucene.Net.Codecs.IntBlock
      * limitations under the License.
      */
 
-    // Naive int block API that writes vInts.  This is
-    // expected to give poor performance; it's really only for
-    // testing the pluggability.  One should typically use pfor instead. 
+
 
     /// <summary>
     /// Abstract base class that reads fixed-size blocks of ints
-    ///  from an IndexInput.  While this is a simple approach, a
-    ///  more performant approach would directly create an impl
-    ///  of IntIndexInput inside Directory.  Wrapping a generic
-    ///  IndexInput will likely cost performance.
+    /// from an <see cref="IndexInput"/>.  While this is a simple approach, a
+    /// more performant approach would directly create an impl
+    /// of <see cref="Int32IndexInput"/> inside <see cref="Directory"/>.  Wrapping a generic
+    /// <see cref="IndexInput"/> will likely cost performance.
     /// <para/>
     /// NOTE: This was FixedIntBlockIndexInput in Lucene
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
+    /// <remarks>
+    /// Naive int block API that writes vInts.  This is
+    /// expected to give poor performance; it's really only for
+    /// testing the pluggability.  One should typically use pfor instead.
+    /// </remarks>
     public abstract class FixedInt32BlockIndexInput : Int32IndexInput
     {
         private readonly IndexInput input;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexOutput.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexOutput.cs b/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexOutput.cs
index a0d7eb7..bc2a0c7 100644
--- a/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexOutput.cs
+++ b/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexOutput.cs
@@ -21,24 +21,22 @@ namespace Lucene.Net.Codecs.IntBlock
      * limitations under the License.
      */
 
-
-    // Naive int block API that writes vInts.  This is
-    //  expected to give poor performance; it's really only for
-    //  testing the pluggability.  One should typically use pfor instead. 
-
-
-
     /// <summary>
     /// Abstract base class that writes fixed-size blocks of ints
-    ///  to an IndexOutput.  While this is a simple approach, a
-    ///  more performant approach would directly create an impl
-    ///  of IntIndexOutput inside Directory.  Wrapping a generic
-    ///  IndexInput will likely cost performance.
+    /// to an <see cref="IndexOutput"/>.  While this is a simple approach, a
+    /// more performant approach would directly create an impl
+    /// of <see cref="Int32IndexOutput"/> inside <see cref="Directory"/>.  Wrapping a generic
+    /// <see cref="IndexOutput"/> will likely cost performance.
     /// <para/>
     /// NOTE: This was FixedIntBlockIndexOutput in Lucene
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
+    /// <remarks>
+    /// Naive int block API that writes vInts.  This is
+    /// expected to give poor performance; it's really only for
+    /// testing the pluggability.  One should typically use pfor instead.
+    /// </remarks>
     public abstract class FixedInt32BlockIndexOutput : Int32IndexOutput
     {
         protected readonly IndexOutput m_output;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexInput.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexInput.cs b/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexInput.cs
index 3bae6fd..acb7a1f 100644
--- a/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexInput.cs
+++ b/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexInput.cs
@@ -23,23 +23,24 @@ namespace Lucene.Net.Codecs.IntBlock
      * limitations under the License.
      */
 
-    // Naive int block API that writes vInts.  This is
-    // expected to give poor performance; it's really only for
-    // testing the pluggability.  One should typically use pfor instead. 
-
     // TODO: much of this can be shared code w/ the fixed case
 
     /// <summary>
     /// Abstract base class that reads variable-size blocks of ints
-    /// from an IndexInput.  While this is a simple approach, a
+    /// from an <see cref="IndexInput"/>.  While this is a simple approach, a
     /// more performant approach would directly create an impl
-    /// of IntIndexInput inside Directory.  Wrapping a generic
-    /// IndexInput will likely cost performance.
+    /// of <see cref="Int32IndexInput"/> inside <see cref="Directory"/>.  Wrapping a generic
+    /// <see cref="IndexInput"/> will likely cost performance.
     /// <para/>
     /// NOTE: This was VariableIntBlockIndexInput in Lucene
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
+    /// <remarks>
+    /// Naive int block API that writes vInts.  This is
+    /// expected to give poor performance; it's really only for
+    /// testing the pluggability.  One should typically use pfor instead. 
+    /// </remarks>
     public abstract class VariableInt32BlockIndexInput : Int32IndexInput
     {
         private readonly IndexInput input;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexOutput.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexOutput.cs b/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexOutput.cs
index f26a3bc..0b4eb5f 100644
--- a/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexOutput.cs
+++ b/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexOutput.cs
@@ -21,26 +21,24 @@ namespace Lucene.Net.Codecs.IntBlock
      * limitations under the License.
      */
 
-
-    // Naive int block API that writes vInts.  This is
-    // expected to give poor performance; it's really only for
-    // testing the pluggability.  One should typically use pfor instead. 
-
-
-
     // TODO: much of this can be shared code w/ the fixed case
 
     /// <summary>
     /// Abstract base class that writes variable-size blocks of ints
-    ///  to an IndexOutput.  While this is a simple approach, a
-    ///  more performant approach would directly create an impl
-    ///  of IntIndexOutput inside Directory.  Wrapping a generic
-    ///  IndexInput will likely cost performance.
+    /// to an <see cref="IndexOutput"/>.  While this is a simple approach, a
+    /// more performant approach would directly create an impl
+    /// of <see cref="Int32IndexOutput"/> inside <see cref="Directory"/>.  Wrapping a generic
+    /// <see cref="IndexOutput"/> will likely cost performance.
     /// <para/>
     /// NOTE: This was VariableIntBlockIndexOutput in Lucene
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
+    /// <remarks>
+    /// Naive int block API that writes vInts.  This is
+    /// expected to give poor performance; it's really only for
+    /// testing the pluggability.  One should typically use pfor instead. 
+    /// </remarks>
     public abstract class VariableInt32BlockIndexOutput : Int32IndexOutput
     {
         protected readonly IndexOutput m_output;
@@ -52,10 +50,10 @@ namespace Lucene.Net.Codecs.IntBlock
         // if its less than 128 we should set that as max and use byte?
 
         /// <summary>
-        /// NOTE: maxBlockSize must be the maximum block size 
-        ///  plus the max non-causal lookahead of your codec.  EG Simple9
-        ///  requires lookahead=1 because on seeing the Nth value
-        ///  it knows it must now encode the N-1 values before it. 
+        /// NOTE: <paramref name="maxBlockSize"/> must be the maximum block size 
+        /// plus the max non-causal lookahead of your codec.  EG Simple9
+        /// requires lookahead=1 because on seeing the Nth value
+        /// it knows it must now encode the N-1 values before it. 
         /// </summary>
         protected VariableInt32BlockIndexOutput(IndexOutput output, int maxBlockSize)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/DirectDocValuesConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/DirectDocValuesConsumer.cs b/src/Lucene.Net.Codecs/Memory/DirectDocValuesConsumer.cs
index 0b8f258..ca2c405 100644
--- a/src/Lucene.Net.Codecs/Memory/DirectDocValuesConsumer.cs
+++ b/src/Lucene.Net.Codecs/Memory/DirectDocValuesConsumer.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Codecs.Memory
     using SegmentWriteState = Index.SegmentWriteState;
 
     /// <summary>
-    /// Writer for <seealso cref="DirectDocValuesFormat"/>
+    /// Writer for <see cref="DirectDocValuesFormat"/>.
     /// </summary>
     internal class DirectDocValuesConsumer : DocValuesConsumer
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/DirectDocValuesFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/DirectDocValuesFormat.cs b/src/Lucene.Net.Codecs/Memory/DirectDocValuesFormat.cs
index e5a9312..959906c 100644
--- a/src/Lucene.Net.Codecs/Memory/DirectDocValuesFormat.cs
+++ b/src/Lucene.Net.Codecs/Memory/DirectDocValuesFormat.cs
@@ -23,27 +23,27 @@
 
     /// <summary>
     /// In-memory docvalues format that does no (or very little)
-    ///  compression.  Indexed values are stored on disk, but
-    ///  then at search time all values are loaded into memory as
-    ///  simple java arrays.  For numeric values, it uses
-    ///  byte[], short[], int[], long[] as necessary to fit the
-    ///  range of the values.  For binary values, there is an int
-    ///  (4 bytes) overhead per value.
+    /// compression.  Indexed values are stored on disk, but
+    /// then at search time all values are loaded into memory as
+    /// simple .NET arrays.  For numeric values, it uses
+    /// byte[], short[], int[], long[] as necessary to fit the
+    /// range of the values.  For binary values, there is an <see cref="int"/>
+    /// (4 bytes) overhead per value.
     /// 
-    ///  <para>Limitations:
-    ///  <ul>
-    ///    <li>For binary and sorted fields the total space
+    /// <para>Limitations:
+    /// <list type="bullet">
+    ///    <item><description>For binary and sorted fields the total space
     ///        required for all binary values cannot exceed about
-    ///        2.1 GB (see #MAX_TOTAL_BYTES_LENGTH).</li>
+    ///        2.1 GB (see <see cref="MAX_TOTAL_BYTES_LENGTH"/>).</description></item>
     /// 
-    ///    <li>For sorted set fields, the sum of the size of each
+    ///    <item><description>For sorted set fields, the sum of the size of each
     ///        document's set of values cannot exceed about 2.1 B
-    ///        values (see #MAX_SORTED_SET_ORDS).  For example,
+    ///        values (see <see cref="MAX_SORTED_SET_ORDS"/>).  For example,
     ///        if every document has 10 values (10 instances of
-    ///        <seealso cref="SortedSetDocValuesField"/>) added, then no
+    ///        <see cref="Documents.SortedSetDocValuesField"/>) added, then no
     ///        more than ~210 M documents can be added to one
-    ///        segment. </li>
-    ///  </ul> 
+    ///        segment. </description></item>
+    /// </list> 
     /// </para>
     /// </summary>
     [DocValuesFormatName("Direct")] // LUCENENET specific - using DocValuesFormatName attribute to ensure the default name passed from subclasses is the same as this class name
@@ -51,14 +51,14 @@
     {
         /// <summary>
         /// The sum of all byte lengths for binary field, or for
-        ///  the unique values in sorted or sorted set fields, cannot
-        ///  exceed this. 
+        /// the unique values in sorted or sorted set fields, cannot
+        /// exceed this. 
         /// </summary>
         public static readonly int MAX_TOTAL_BYTES_LENGTH = ArrayUtil.MAX_ARRAY_LENGTH;
 
         /// <summary>
         /// The sum of the number of values across all documents
-        ///  in a sorted set field cannot exceed this. 
+        /// in a sorted set field cannot exceed this. 
         /// </summary>
         public static readonly int MAX_SORTED_SET_ORDS = ArrayUtil.MAX_ARRAY_LENGTH;
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/DirectDocValuesProducer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/DirectDocValuesProducer.cs b/src/Lucene.Net.Codecs/Memory/DirectDocValuesProducer.cs
index a216109..fe4c95a 100644
--- a/src/Lucene.Net.Codecs/Memory/DirectDocValuesProducer.cs
+++ b/src/Lucene.Net.Codecs/Memory/DirectDocValuesProducer.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Codecs.Memory
 	 */
 
     /// <summary>
-    /// TextReader for <seealso cref="DirectDocValuesFormat"/>
+    /// TextReader for <see cref="DirectDocValuesFormat"/>.
     /// </summary>
     internal class DirectDocValuesProducer : DocValuesProducer
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/DirectPostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/DirectPostingsFormat.cs b/src/Lucene.Net.Codecs/Memory/DirectPostingsFormat.cs
index 95211d2..084c733 100644
--- a/src/Lucene.Net.Codecs/Memory/DirectPostingsFormat.cs
+++ b/src/Lucene.Net.Codecs/Memory/DirectPostingsFormat.cs
@@ -51,27 +51,26 @@ namespace Lucene.Net.Codecs.Memory
     //   - or: longer dense skip lists than just next byte?
 
     /// <summary>
-    /// Wraps <seealso cref="Lucene41PostingsFormat"/> format for on-disk
-    ///  storage, but then at read time loads and stores all
-    ///  terms & postings directly in RAM as byte[], int[].
+    /// Wraps <see cref="Lucene41.Lucene41PostingsFormat"/> format for on-disk
+    /// storage, but then at read time loads and stores all
+    /// terms &amp; postings directly in RAM as byte[], int[].
     /// 
-    ///  <para><b>WARNING</b>: This is
-    ///  exceptionally RAM intensive: it makes no effort to
-    ///  compress the postings data, storing terms as separate
-    ///  byte[] and postings as separate int[], but as a result it 
-    ///  gives substantial increase in search performance.
+    /// <para><b>WARNING</b>: This is
+    /// exceptionally RAM intensive: it makes no effort to
+    /// compress the postings data, storing terms as separate
+    /// byte[] and postings as separate int[], but as a result it 
+    /// gives substantial increase in search performance.
     /// 
     /// </para>
-    ///  <para>This postings format supports <seealso cref="TermsEnum#ord"/>
-    ///  and <seealso cref="TermsEnum#seekExact(long)"/>.
+    /// <para>This postings format supports <see cref="TermsEnum.Ord"/>
+    /// and <see cref="TermsEnum.SeekExact(long)"/>.
     /// 
     /// </para>
-    ///  <para>Because this holds all term bytes as a single
-    ///  byte[], you cannot have more than 2.1GB worth of term
-    ///  bytes in a single segment.
-    /// 
-    /// @lucene.experimental 
+    /// <para>Because this holds all term bytes as a single
+    /// byte[], you cannot have more than 2.1GB worth of term
+    /// bytes in a single segment.
     /// </para>
+    /// @lucene.experimental 
     /// </summary>
     [PostingsFormatName("Direct")] // LUCENENET specific - using PostingsFormatName attribute to ensure the default name passed from subclasses is the same as this class name
     public sealed class DirectPostingsFormat : PostingsFormat
@@ -90,11 +89,11 @@ namespace Lucene.Net.Codecs.Memory
         }
 
         /// <summary>
-        /// minSkipCount is how many terms in a row must have the
-        ///  same prefix before we put a skip pointer down.  Terms
-        ///  with docFreq less than or equal lowFreqCutoff will use a single int[]
-        ///  to hold all docs, freqs, position and offsets; terms
-        ///  with higher docFreq will use separate arrays. 
+        /// <paramref name="minSkipCount"/> is how many terms in a row must have the
+        /// same prefix before we put a skip pointer down.  Terms
+        /// with docFreq less than or equal <paramref name="lowFreqCutoff"/> will use a single int[]
+        /// to hold all docs, freqs, position and offsets; terms
+        /// with higher docFreq will use separate arrays. 
         /// </summary>
         public DirectPostingsFormat(int minSkipCount, int lowFreqCutoff) 
             : base()
@@ -207,7 +206,7 @@ namespace Lucene.Net.Codecs.Memory
                 private int[] skips;
 
                 /// <summary>
-                /// Returns the approximate number of RAM bytes used </summary>
+                /// Returns the approximate number of RAM bytes used. </summary>
                 public abstract long RamBytesUsed();
             }
 
@@ -362,7 +361,7 @@ namespace Lucene.Net.Codecs.Memory
             private readonly int minSkipCount;
 
             /// <summary>
-            /// NOTE: This was IntArrayWriter in Lucene
+            /// NOTE: This was IntArrayWriter in Lucene.
             /// </summary>
             private sealed class Int32ArrayWriter
             {
@@ -647,7 +646,7 @@ namespace Lucene.Net.Codecs.Memory
                 Debug.Assert(skipOffset == skipCount);
             }
 
-            /// <summary>Returns approximate RAM bytes used </summary>
+            /// <summary>Returns approximate RAM bytes used. </summary>
             public long RamBytesUsed()
             {
                 long sizeInBytes = 0;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/FSTOrdPulsing41PostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/FSTOrdPulsing41PostingsFormat.cs b/src/Lucene.Net.Codecs/Memory/FSTOrdPulsing41PostingsFormat.cs
index 9cc8776..01826bb 100644
--- a/src/Lucene.Net.Codecs/Memory/FSTOrdPulsing41PostingsFormat.cs
+++ b/src/Lucene.Net.Codecs/Memory/FSTOrdPulsing41PostingsFormat.cs
@@ -26,7 +26,8 @@
 
     /// <summary>
     /// FSTOrd + Pulsing41
-    ///  @lucene.experimental 
+    /// <para/>
+    /// @lucene.experimental 
     /// </summary>
     [PostingsFormatName("FSTOrdPulsing41")] // LUCENENET specific - using PostingsFormatName attribute to ensure the default name passed from subclasses is the same as this class name
     public class FSTOrdPulsing41PostingsFormat : PostingsFormat

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/FSTOrdTermsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/FSTOrdTermsReader.cs b/src/Lucene.Net.Codecs/Memory/FSTOrdTermsReader.cs
index 9d7e778..84ad53c 100644
--- a/src/Lucene.Net.Codecs/Memory/FSTOrdTermsReader.cs
+++ b/src/Lucene.Net.Codecs/Memory/FSTOrdTermsReader.cs
@@ -30,11 +30,11 @@ namespace Lucene.Net.Codecs.Memory
 
     /// <summary>
     /// FST-based terms dictionary reader.
-    /// 
+    /// <para/>
     /// The FST index maps each term and its ord, and during seek 
     /// the ord is used fetch metadata from a single block.
     /// The term dictionary is fully memory resident.
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class FSTOrdTermsReader : FieldsProducer
@@ -377,7 +377,7 @@ namespace Lucene.Net.Codecs.Memory
                 }
 
                 /// <summary>
-                /// Decodes stats data into term state </summary>
+                /// Decodes stats data into term state. </summary>
                 internal virtual void DecodeStats()
                 {
                     int upto = (int)ord % INTERVAL;
@@ -392,7 +392,7 @@ namespace Lucene.Net.Codecs.Memory
                 }
 
                 /// <summary>
-                /// Let PBF decode metadata </summary>
+                /// Let PBF decode metadata. </summary>
                 internal virtual void DecodeMetaData()
                 {
                     int upto = (int)ord % INTERVAL;
@@ -407,7 +407,7 @@ namespace Lucene.Net.Codecs.Memory
                 }
 
                 /// <summary>
-                /// Load current stats shard </summary>
+                /// Load current stats shard. </summary>
                 internal void RefillStats()
                 {
                     var offset = statsBlockOrd * outerInstance.numSkipInfo;
@@ -436,7 +436,7 @@ namespace Lucene.Net.Codecs.Memory
                 }
 
                 /// <summary>
-                /// Load current metadata shard </summary>
+                /// Load current metadata shard. </summary>
                 internal void RefillMetadata()
                 {
                     var offset = metaBlockOrd * outerInstance.numSkipInfo;
@@ -604,10 +604,10 @@ namespace Lucene.Net.Codecs.Memory
             {
                 private readonly FSTOrdTermsReader.TermsReader outerInstance;
 
-                /// <summary>True when current term's metadata is decoded</summary>
+                /// <summary>True when current term's metadata is decoded.</summary>
                 private bool decoded;
 
-                /// <summary>True when there is pending term when calling Next()</summary>
+                /// <summary>True when there is pending term when calling <see cref="Next()"/>.</summary>
                 private bool pending;
 
                 /// <summary>
@@ -624,7 +624,7 @@ namespace Lucene.Net.Codecs.Memory
                 private readonly FST.BytesReader fstReader;
                 private readonly Outputs<long?> fstOutputs;
 
-                /// <summary>query automaton to intersect with</summary>
+                /// <summary>Query automaton to intersect with.</summary>
                 private readonly ByteRunAutomaton fsa;
 
                 private sealed class Frame
@@ -792,7 +792,7 @@ namespace Lucene.Net.Codecs.Memory
                 }
 
                 /// <summary>
-                /// Virtual frame, never pop </summary>
+                /// Virtual frame, never pop. </summary>
                 private Frame LoadVirtualFrame(Frame frame)
                 {
                     frame.arc.Output = fstOutputs.NoOutput;
@@ -802,7 +802,7 @@ namespace Lucene.Net.Codecs.Memory
                 }
 
                 /// <summary>
-                /// Load frame for start arc(node) on fst </summary>
+                /// Load frame for start arc(node) on fst. </summary>
                 private Frame LoadFirstFrame(Frame frame)
                 {
                     frame.arc = fst.GetFirstArc(frame.arc);
@@ -811,7 +811,7 @@ namespace Lucene.Net.Codecs.Memory
                 }
 
                 /// <summary>
-                /// Load frame for target arc(node) on fst </summary>
+                /// Load frame for target arc(node) on fst. </summary>
                 private Frame LoadExpandFrame(Frame top, Frame frame)
                 {
                     if (!CanGrow(top))
@@ -829,7 +829,7 @@ namespace Lucene.Net.Codecs.Memory
                 }
 
                 /// <summary>
-                /// Load frame for sibling arc(node) on fst </summary>
+                /// Load frame for sibling arc(node) on fst. </summary>
                 private Frame LoadNextFrame(Frame top, Frame frame)
                 {
                     if (!CanRewind(frame))
@@ -855,7 +855,7 @@ namespace Lucene.Net.Codecs.Memory
 
                 /// <summary>
                 /// Load frame for target arc(node) on fst, so that 
-                ///  arc.label >= label and !fsa.reject(arc.label) 
+                /// arc.label >= label and !fsa.reject(arc.label) 
                 /// </summary>
                 private Frame LoadCeilFrame(int label, Frame top, Frame frame)
                 {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/FSTOrdTermsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/FSTOrdTermsWriter.cs b/src/Lucene.Net.Codecs/Memory/FSTOrdTermsWriter.cs
index ecd90db..888cb12 100644
--- a/src/Lucene.Net.Codecs/Memory/FSTOrdTermsWriter.cs
+++ b/src/Lucene.Net.Codecs/Memory/FSTOrdTermsWriter.cs
@@ -38,22 +38,24 @@ namespace Lucene.Net.Codecs.Memory
 
     /// <summary>
     /// FST-based term dict, using ord as FST output.
-    /// 
+    /// <para/>
     /// The FST holds the mapping between &lt;term, ord&gt;, and 
     /// term's metadata is delta encoded into a single byte block.
-    /// 
+    /// <para/>
     /// Typically the byte block consists of four parts:
-    /// 1. term statistics: docFreq, totalTermFreq;
-    /// 2. monotonic long[], e.g. the pointer to the postings list for that term;
-    /// 3. generic byte[], e.g. other information customized by postings base.
-    /// 4. single-level skip list to speed up metadata decoding by ord.
-    /// 
+    /// <list type="number">
+    ///     <item><description>term statistics: docFreq, totalTermFreq;</description></item>
+    ///     <item><description>monotonic long[], e.g. the pointer to the postings list for that term;</description></item>
+    ///     <item><description>generic byte[], e.g. other information customized by postings base.</description></item>
+    ///     <item><description>single-level skip list to speed up metadata decoding by ord.</description></item>
+    /// </list>
+    /// <para/>
     /// <para>
     /// Files:
-    /// <ul>
-    ///  <li><tt>.tix</tt>: <a href="#Termindex">Term Index</a></li>
-    ///  <li><tt>.tbk</tt>: <a href="#Termblock">Term Block</a></li>
-    /// </ul>
+    /// <list type="bullet">
+    ///  <item><description><c>.tix</c>: <a href="#Termindex">Term Index</a></description></item>
+    ///  <item><description><c>.tbk</c>: <a href="#Termblock">Term Block</a></description></item>
+    /// </list>
     /// </para>
     /// 
     /// <a name="Termindex" id="Termindex"></a>
@@ -63,76 +65,76 @@ namespace Lucene.Net.Codecs.Memory
     ///  The FST maps a term to its corresponding order in current field.
     /// </para>
     /// 
-    /// <ul>
-    ///  <li>TermIndex(.tix) --&gt; Header, TermFST<sup>NumFields</sup>, Footer</li>
-    ///  <li>TermFST --&gt; <seealso cref="FST"/></li>
-    ///  <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///  <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    /// </ul>
+    /// <list type="bullet">
+    ///  <item><description>TermIndex(.tix) --&gt; Header, TermFST<sup>NumFields</sup>, Footer</description></item>
+    ///  <item><description>TermFST --&gt; <see cref="FST{T}"/></description></item>
+    ///  <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///  <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(IndexOutput)"/>) </description></item>
+    /// </list>
     /// 
     /// <para>Notes:</para>
-    /// <ul>
-    ///  <li>
+    /// <list type="bullet">
+    ///  <item><description>
     ///  Since terms are already sorted before writing to <a href="#Termblock">Term Block</a>, 
     ///  their ords can directly used to seek term metadata from term block.
-    ///  </li>
-    /// </ul>
+    ///  </description></item>
+    /// </list>
     /// 
     /// <a name="Termblock" id="Termblock"></a>
     /// <h3>Term Block</h3>
     /// <para>
-    ///  The .tbk contains all the statistics and metadata for terms, along with field summary (e.g. 
-    ///  per-field data like number of documents in current field). For each field, there are four blocks:
-    ///  <ul>
-    ///   <li>statistics bytes block: contains term statistics; </li>
-    ///   <li>metadata longs block: delta-encodes monotonic part of metadata; </li>
-    ///   <li>metadata bytes block: encodes other parts of metadata; </li>
-    ///   <li>skip block: contains skip data, to speed up metadata seeking and decoding</li>
-    ///  </ul>
+    /// The .tbk contains all the statistics and metadata for terms, along with field summary (e.g. 
+    /// per-field data like number of documents in current field). For each field, there are four blocks:
+    /// <list type="bullet">
+    ///   <item><description>statistics bytes block: contains term statistics; </description></item>
+    ///   <item><description>metadata longs block: delta-encodes monotonic part of metadata; </description></item>
+    ///   <item><description>metadata bytes block: encodes other parts of metadata; </description></item>
+    ///   <item><description>skip block: contains skip data, to speed up metadata seeking and decoding</description></item>
+    /// </list>
     /// </para>
     /// 
     /// <para>File Format:</para>
-    /// <ul>
-    ///  <li>TermBlock(.tbk) --&gt; Header, <i>PostingsHeader</i>, FieldSummary, DirOffset</li>
-    ///  <li>FieldSummary --&gt; NumFields, &lt;FieldNumber, NumTerms, SumTotalTermFreq?, SumDocFreq,
-    ///                                         DocCount, LongsSize, DataBlock &gt; <sup>NumFields</sup>, Footer</li>
+    /// <list type="bullet">
+    ///  <item><description>TermBlock(.tbk) --&gt; Header, <i>PostingsHeader</i>, FieldSummary, DirOffset</description></item>
+    ///  <item><description>FieldSummary --&gt; NumFields, &lt;FieldNumber, NumTerms, SumTotalTermFreq?, SumDocFreq,
+    ///                                         DocCount, LongsSize, DataBlock &gt; <sup>NumFields</sup>, Footer</description></item>
     /// 
-    ///  <li>DataBlock --&gt; StatsBlockLength, MetaLongsBlockLength, MetaBytesBlockLength, 
-    ///                       SkipBlock, StatsBlock, MetaLongsBlock, MetaBytesBlock </li>
-    ///  <li>SkipBlock --&gt; &lt; StatsFPDelta, MetaLongsSkipFPDelta, MetaBytesSkipFPDelta, 
-    ///                            MetaLongsSkipDelta<sup>LongsSize</sup> &gt;<sup>NumTerms</sup></li>
-    ///  <li>StatsBlock --&gt; &lt; DocFreq[Same?], (TotalTermFreq-DocFreq) ? &gt; <sup>NumTerms</sup></li>
-    ///  <li>MetaLongsBlock --&gt; &lt; LongDelta<sup>LongsSize</sup>, BytesSize &gt; <sup>NumTerms</sup></li>
-    ///  <li>MetaBytesBlock --&gt; Byte <sup>MetaBytesBlockLength</sup></li>
-    ///  <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///  <li>DirOffset --&gt; <seealso cref="DataOutput#writeLong Uint64"/></li>
-    ///  <li>NumFields, FieldNumber, DocCount, DocFreq, LongsSize, 
-    ///        FieldNumber, DocCount --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///  <li>NumTerms, SumTotalTermFreq, SumDocFreq, StatsBlockLength, MetaLongsBlockLength, MetaBytesBlockLength,
+    ///  <item><description>DataBlock --&gt; StatsBlockLength, MetaLongsBlockLength, MetaBytesBlockLength, 
+    ///                       SkipBlock, StatsBlock, MetaLongsBlock, MetaBytesBlock </description></item>
+    ///  <item><description>SkipBlock --&gt; &lt; StatsFPDelta, MetaLongsSkipFPDelta, MetaBytesSkipFPDelta, 
+    ///                            MetaLongsSkipDelta<sup>LongsSize</sup> &gt;<sup>NumTerms</sup></description></item>
+    ///  <item><description>StatsBlock --&gt; &lt; DocFreq[Same?], (TotalTermFreq-DocFreq) ? &gt; <sup>NumTerms</sup></description></item>
+    ///  <item><description>MetaLongsBlock --&gt; &lt; LongDelta<sup>LongsSize</sup>, BytesSize &gt; <sup>NumTerms</sup></description></item>
+    ///  <item><description>MetaBytesBlock --&gt; Byte <sup>MetaBytesBlockLength</sup></description></item>
+    ///  <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///  <item><description>DirOffset --&gt; Uint64 (<see cref="Store.DataOutput.WriteInt64(long)"/>) </description></item>
+    ///  <item><description>NumFields, FieldNumber, DocCount, DocFreq, LongsSize, 
+    ///        FieldNumber, DocCount --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///  <item><description>NumTerms, SumTotalTermFreq, SumDocFreq, StatsBlockLength, MetaLongsBlockLength, MetaBytesBlockLength,
     ///        StatsFPDelta, MetaLongsSkipFPDelta, MetaBytesSkipFPDelta, MetaLongsSkipStart, TotalTermFreq, 
-    ///        LongDelta,--&gt; <seealso cref="DataOutput#writeVLong VLong"/></li>
-    ///  <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    /// </ul>
+    ///        LongDelta,--&gt; VLong (<see cref="Store.DataOutput.WriteVInt64(long)"/>) </description></item>
+    ///  <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(IndexOutput)"/>) </description></item>
+    /// </list>
     /// <para>Notes: </para>
-    /// <ul>
-    ///  <li>
+    /// <list type="bullet">
+    ///  <item><description>
     ///   The format of PostingsHeader and MetaBytes are customized by the specific postings implementation:
     ///   they contain arbitrary per-file data (such as parameters or versioning information), and per-term data 
     ///   (non-monotonic ones like pulsed postings data).
-    ///  </li>
-    ///  <li>
+    ///  </description></item>
+    ///  <item><description>
     ///   During initialization the reader will load all the blocks into memory. SkipBlock will be decoded, so that during seek
     ///   term dict can lookup file pointers directly. StatsFPDelta, MetaLongsSkipFPDelta, etc. are file offset
     ///   for every SkipInterval's term. MetaLongsSkipDelta is the difference from previous one, which indicates
     ///   the value of preceding metadata longs for every SkipInterval's term.
-    ///  </li>
-    ///  <li>
+    ///  </description></item>
+    ///  <item><description>
     ///   DocFreq is the count of documents which contain the term. TotalTermFreq is the total number of occurrences of the term. 
     ///   Usually these two values are the same for long tail terms, therefore one bit is stole from DocFreq to check this case,
     ///   so that encoding of TotalTermFreq may be omitted.
-    ///  </li>
-    /// </ul>
-    /// 
+    ///  </description></item>
+    /// </list>
+    /// <para/>
     /// @lucene.experimental 
     /// </summary>
     public class FSTOrdTermsWriter : FieldsConsumer
@@ -253,7 +255,7 @@ namespace Lucene.Net.Codecs.Memory
             public long SumDocFreq { get; set; }
             public int DocCount { get; set; }
             /// <summary>
-            /// NOTE: This was longsSize (field) in Lucene
+            /// NOTE: This was longsSize (field) in Lucene.
             /// </summary>
             public int Int64sSize { get; set; }
             public FST<long?> Dict { get; set; }
@@ -266,7 +268,7 @@ namespace Lucene.Net.Codecs.Memory
             public RAMOutputStream StatsOut { get; set; }
             // vint encode monotonic long[] and length for corresponding byte[]
             /// <summary>
-            /// NOTE: This was metaLongsOut (field) in Lucene
+            /// NOTE: This was metaLongsOut (field) in Lucene.
             /// </summary>
             public RAMOutputStream MetaInt64sOut { get; set; }
             // generic byte[]


[20/48] lucenenet git commit: Lucene.Net.Queries: Fixed XML documentation warnings

Posted by ni...@apache.org.
Lucene.Net.Queries: Fixed XML documentation warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/e4c37d39
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/e4c37d39
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/e4c37d39

Branch: refs/heads/master
Commit: e4c37d3972f7292091bf4249e1ffd379dcd6d61b
Parents: bed2088
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Sun Jun 4 04:06:13 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Sun Jun 4 04:06:13 2017 +0700

----------------------------------------------------------------------
 src/Lucene.Net.Queries/CustomScoreProvider.cs                  | 4 ++--
 src/Lucene.Net.Queries/CustomScoreQuery.cs                     | 4 ++--
 .../Function/ValueSources/ByteFieldSource.cs                   | 2 +-
 .../Function/ValueSources/DoubleFieldSource.cs                 | 2 +-
 .../Function/ValueSources/EnumFieldSource.cs                   | 2 +-
 .../Function/ValueSources/FloatFieldSource.cs                  | 2 +-
 src/Lucene.Net.Queries/Function/ValueSources/IntFieldSource.cs | 2 +-
 .../Function/ValueSources/LongFieldSource.cs                   | 2 +-
 .../Function/ValueSources/ReverseOrdFieldSource.cs             | 2 +-
 .../Function/ValueSources/ShortFieldSource.cs                  | 2 +-
 src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs                     | 6 +++---
 11 files changed, 15 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e4c37d39/src/Lucene.Net.Queries/CustomScoreProvider.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/CustomScoreProvider.cs b/src/Lucene.Net.Queries/CustomScoreProvider.cs
index bef2eb1..75619ad 100644
--- a/src/Lucene.Net.Queries/CustomScoreProvider.cs
+++ b/src/Lucene.Net.Queries/CustomScoreProvider.cs
@@ -52,7 +52,7 @@ namespace Lucene.Net.Queries
         /// Subclasses can override this method to modify the custom score.  
         /// <para/>
         /// If your custom scoring is different than the default herein you 
-        /// should override at least one of the two <see cref="CustomScore"/> methods.
+        /// should override at least one of the two <see cref="CustomScore(int, float, float)"/> methods.
         /// If the number of <see cref="Function.FunctionQuery"/>s is always &lt; 2 it is 
         /// sufficient to override the other 
         /// <see cref="CustomScore(int, float, float)"/> 
@@ -86,7 +86,7 @@ namespace Lucene.Net.Queries
         /// Subclasses can override this method to modify the custom score.
         /// <para/>
         /// If your custom scoring is different than the default herein you 
-        /// should override at least one of the two <see cref="CustomScore"/> methods.
+        /// should override at least one of the two <see cref="CustomScore(int, float, float)"/> methods.
         /// If the number of <see cref="Function.FunctionQuery"/>s is always &lt; 2 it is 
         /// sufficient to override this <see cref="CustomScore(int, float, float)"/> method, which is simpler. 
         /// <para/>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e4c37d39/src/Lucene.Net.Queries/CustomScoreQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/CustomScoreQuery.cs b/src/Lucene.Net.Queries/CustomScoreQuery.cs
index e997f3c..d7bcaba 100644
--- a/src/Lucene.Net.Queries/CustomScoreQuery.cs
+++ b/src/Lucene.Net.Queries/CustomScoreQuery.cs
@@ -29,8 +29,8 @@ namespace Lucene.Net.Queries
     /// <summary>
     /// Query that sets document score as a programmatic function of several (sub) scores:
     /// <list type="bullet">
-    ///    <item>the score of its subQuery (any query)</description></item>
-    ///    <item>(optional) the score of its <see cref="FunctionQuery"/> (or queries).</description></item>
+    ///    <item><description>the score of its subQuery (any query)</description></item>
+    ///    <item><description>(optional) the score of its <see cref="FunctionQuery"/> (or queries).</description></item>
     /// </list>
     /// Subclasses can modify the computation by overriding <see cref="GetCustomScoreProvider"/>.
     /// 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e4c37d39/src/Lucene.Net.Queries/Function/ValueSources/ByteFieldSource.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/Function/ValueSources/ByteFieldSource.cs b/src/Lucene.Net.Queries/Function/ValueSources/ByteFieldSource.cs
index e7aa7c4..290c3a9 100644
--- a/src/Lucene.Net.Queries/Function/ValueSources/ByteFieldSource.cs
+++ b/src/Lucene.Net.Queries/Function/ValueSources/ByteFieldSource.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Queries.Function.ValueSources
     
     /// <summary>
     /// Obtains <see cref="int"/> field values from the <see cref="Search.FieldCache"/>
-    /// using <see cref="IFieldCache.GetInt32s"/>
+    /// using <see cref="IFieldCache.GetInt32s(AtomicReader, string, FieldCache.IInt32Parser, bool)"/>
     /// and makes those values available as other numeric types, casting as needed. *
     /// </summary>
     [Obsolete]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e4c37d39/src/Lucene.Net.Queries/Function/ValueSources/DoubleFieldSource.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/Function/ValueSources/DoubleFieldSource.cs b/src/Lucene.Net.Queries/Function/ValueSources/DoubleFieldSource.cs
index c986142..3534f1f 100644
--- a/src/Lucene.Net.Queries/Function/ValueSources/DoubleFieldSource.cs
+++ b/src/Lucene.Net.Queries/Function/ValueSources/DoubleFieldSource.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Queries.Function.ValueSources
      */
 
     /// <summary>
-    /// Obtains <see cref="double"/> field values from <see cref="IFieldCache.GetDoubles"/> and makes
+    /// Obtains <see cref="double"/> field values from <see cref="IFieldCache.GetDoubles(AtomicReader, string, FieldCache.IDoubleParser, bool)"/> and makes
     /// those values available as other numeric types, casting as needed.
     /// </summary>
     public class DoubleFieldSource : FieldCacheSource

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e4c37d39/src/Lucene.Net.Queries/Function/ValueSources/EnumFieldSource.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/Function/ValueSources/EnumFieldSource.cs b/src/Lucene.Net.Queries/Function/ValueSources/EnumFieldSource.cs
index bb4f4d8..cddbb4c 100644
--- a/src/Lucene.Net.Queries/Function/ValueSources/EnumFieldSource.cs
+++ b/src/Lucene.Net.Queries/Function/ValueSources/EnumFieldSource.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Queries.Function.ValueSources
      */
 
     /// <summary>
-    /// Obtains <see cref="int"/> field values from <see cref="IFieldCache.GetInt32s"/> and makes
+    /// Obtains <see cref="int"/> field values from <see cref="IFieldCache.GetInt32s(AtomicReader, string, FieldCache.IInt32Parser, bool)"/> and makes
     /// those values available as other numeric types, casting as needed.
     /// StrVal of the value is not the <see cref="int"/> value, but its <see cref="string"/> (displayed) value
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e4c37d39/src/Lucene.Net.Queries/Function/ValueSources/FloatFieldSource.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/Function/ValueSources/FloatFieldSource.cs b/src/Lucene.Net.Queries/Function/ValueSources/FloatFieldSource.cs
index 26584e3..6f90cf6 100644
--- a/src/Lucene.Net.Queries/Function/ValueSources/FloatFieldSource.cs
+++ b/src/Lucene.Net.Queries/Function/ValueSources/FloatFieldSource.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Queries.Function.ValueSources
      */
 
     /// <summary>
-    /// Obtains <see cref="float"/> field values from <see cref="IFieldCache.GetFloats"/> and makes those
+    /// Obtains <see cref="float"/> field values from <see cref="IFieldCache.GetSingles(AtomicReader, string, FieldCache.ISingleParser, bool)"/> and makes those
     /// values available as other numeric types, casting as needed.
     /// <para/>
     /// NOTE: This was FloatFieldSource in Lucene

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e4c37d39/src/Lucene.Net.Queries/Function/ValueSources/IntFieldSource.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/Function/ValueSources/IntFieldSource.cs b/src/Lucene.Net.Queries/Function/ValueSources/IntFieldSource.cs
index d021256..7498cda 100644
--- a/src/Lucene.Net.Queries/Function/ValueSources/IntFieldSource.cs
+++ b/src/Lucene.Net.Queries/Function/ValueSources/IntFieldSource.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Queries.Function.ValueSources
      */
 
     /// <summary>
-    /// Obtains <see cref="int"/> field values from <see cref="IFieldCache.GetInt32s"/> and makes those
+    /// Obtains <see cref="int"/> field values from <see cref="IFieldCache.GetInt32s(AtomicReader, string, FieldCache.IInt32Parser, bool)"/> and makes those
     /// values available as other numeric types, casting as needed.
     /// <para/>
     /// NOTE: This was IntFieldSource in Lucene

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e4c37d39/src/Lucene.Net.Queries/Function/ValueSources/LongFieldSource.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/Function/ValueSources/LongFieldSource.cs b/src/Lucene.Net.Queries/Function/ValueSources/LongFieldSource.cs
index 6c48b86..959da29 100644
--- a/src/Lucene.Net.Queries/Function/ValueSources/LongFieldSource.cs
+++ b/src/Lucene.Net.Queries/Function/ValueSources/LongFieldSource.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Queries.Function.ValueSources
      */
 
     /// <summary>
-    /// Obtains <see cref="long"/> field values from <see cref="IFieldCache.GetInt64s"/> and makes those
+    /// Obtains <see cref="long"/> field values from <see cref="IFieldCache.GetInt64s(AtomicReader, string, FieldCache.IInt64Parser, bool)"/> and makes those
     /// values available as other numeric types, casting as needed.
     /// <para/>
     /// NOTE: This was LongFieldSource in Lucene

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e4c37d39/src/Lucene.Net.Queries/Function/ValueSources/ReverseOrdFieldSource.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/Function/ValueSources/ReverseOrdFieldSource.cs b/src/Lucene.Net.Queries/Function/ValueSources/ReverseOrdFieldSource.cs
index 3374c0c..e212dec 100644
--- a/src/Lucene.Net.Queries/Function/ValueSources/ReverseOrdFieldSource.cs
+++ b/src/Lucene.Net.Queries/Function/ValueSources/ReverseOrdFieldSource.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Queries.Function.ValueSources
      */
 
     /// <summary>
-    /// Obtains the ordinal of the field value from the default Lucene <see cref="FieldCache"/> using <see cref="IFieldCache.GetTermsIndex"/>
+    /// Obtains the ordinal of the field value from the default Lucene <see cref="FieldCache"/> using <see cref="IFieldCache.GetTermsIndex(AtomicReader, string, float)"/>
     /// and reverses the order.
     /// <para/>
     /// The native lucene index order is used to assign an ordinal value for each field value.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e4c37d39/src/Lucene.Net.Queries/Function/ValueSources/ShortFieldSource.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/Function/ValueSources/ShortFieldSource.cs b/src/Lucene.Net.Queries/Function/ValueSources/ShortFieldSource.cs
index dab833f..04fa3fd 100644
--- a/src/Lucene.Net.Queries/Function/ValueSources/ShortFieldSource.cs
+++ b/src/Lucene.Net.Queries/Function/ValueSources/ShortFieldSource.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Queries.Function.ValueSources
 
     /// <summary>
     /// Obtains <see cref="short"/> field values from the <see cref="FieldCache"/>
-    /// using <see cref="IFieldCache.GetInt16s"/>
+    /// using <see cref="IFieldCache.GetInt16s(AtomicReader, string, FieldCache.IInt16Parser, bool)"/>
     /// and makes those values available as other numeric types, casting as needed.
     /// <para/>
     /// NOTE: This was ShortFieldSource in Lucene

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e4c37d39/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs b/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
index f9d0312..f9effad 100644
--- a/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
+++ b/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs
@@ -89,7 +89,7 @@ namespace Lucene.Net.Queries.Mlt
     ///     <item><description>do your normal, Lucene setup for searching,</description></item>
     ///     <item><description>create a MoreLikeThis,</description></item>
     ///     <item><description>get the text of the doc you want to find similarities to</description></item>
-    ///     <item><description>then call one of the <see cref="Like"/> calls to generate a similarity query</description></item>
+    ///     <item><description>then call one of the <see cref="Like(TextReader, string)"/> calls to generate a similarity query</description></item>
     ///     <item><description>call the searcher to find the similar docs</description></item>
     /// </list>
     /// <para/>
@@ -659,13 +659,13 @@ namespace Lucene.Net.Queries.Mlt
         /// </list>
         /// This is a somewhat "advanced" routine, and in general only the 1st entry in the array is of interest.
         /// This method is exposed so that you can identify the "interesting words" in a document.
-        /// For an easier method to call see <see cref="RetrieveInterestingTerms"/>.
+        /// For an easier method to call see <see cref="RetrieveInterestingTerms(TextReader, string)"/>.
         /// </summary>
         /// <param name="r"> the reader that has the content of the document </param>
         /// <param name="fieldName"> field passed to the analyzer to use when analyzing the content </param>
         /// <returns> the most interesting words in the document ordered by score, with the highest scoring, or best entry, first </returns>
         /// <exception cref="IOException"/>
-        /// <seealso cref="RetrieveInterestingTerms"/>
+        /// <seealso cref="RetrieveInterestingTerms(TextReader, string)"/>
         public Util.PriorityQueue<object[]> RetrieveTerms(TextReader r, string fieldName)
         {
             IDictionary<string, Int32> words = new Dictionary<string, Int32>();


[40/48] lucenenet git commit: Lucene.Net.Codecs.Lucene40: Fixed XML documentation comment warnings

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40StoredFieldsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40StoredFieldsFormat.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40StoredFieldsFormat.cs
index 81fea3e..0959ff0 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40StoredFieldsFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40StoredFieldsFormat.cs
@@ -17,7 +17,6 @@ namespace Lucene.Net.Codecs.Lucene40
      * limitations under the License.
      */
 
-    // javadocs
     using Directory = Lucene.Net.Store.Directory;
     using FieldInfos = Lucene.Net.Index.FieldInfos;
     using IOContext = Lucene.Net.Store.IOContext;
@@ -25,53 +24,54 @@ namespace Lucene.Net.Codecs.Lucene40
 
     /// <summary>
     /// Lucene 4.0 Stored Fields Format.
-    /// <p>Stored fields are represented by two files:</p>
-    /// <ol>
-    /// <li><a name="field_index" id="field_index"></a>
-    /// <p>The field index, or <tt>.fdx</tt> file.</p>
-    /// <p>this is used to find the location within the field data file of the fields
-    /// of a particular document. Because it contains fixed-length data, this file may
-    /// be easily randomly accessed. The position of document <i>n</i> 's field data is
-    /// the <seealso cref="DataOutput#writeLong Uint64"/> at <i>n*8</i> in this file.</p>
-    /// <p>this contains, for each document, a pointer to its field data, as
-    /// follows:</p>
-    /// <ul>
-    /// <li>FieldIndex (.fdx) --&gt; &lt;Header&gt;, &lt;FieldValuesPosition&gt; <sup>SegSize</sup></li>
-    /// <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    /// <li>FieldValuesPosition --&gt; <seealso cref="DataOutput#writeLong Uint64"/></li>
-    /// </ul>
-    /// </li>
-    /// <li>
-    /// <p><a name="field_data" id="field_data"></a>The field data, or <tt>.fdt</tt> file.</p>
-    /// <p>this contains the stored fields of each document, as follows:</p>
-    /// <ul>
-    /// <li>FieldData (.fdt) --&gt; &lt;Header&gt;, &lt;DocFieldData&gt; <sup>SegSize</sup></li>
-    /// <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    /// <li>DocFieldData --&gt; FieldCount, &lt;FieldNum, Bits, Value&gt;
-    /// <sup>FieldCount</sup></li>
-    /// <li>FieldCount --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    /// <li>FieldNum --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    /// <li>Bits --&gt; <seealso cref="DataOutput#writeByte Byte"/></li>
-    /// <ul>
-    /// <li>low order bit reserved.</li>
-    /// <li>second bit is one for fields containing binary data</li>
-    /// <li>third bit reserved.</li>
-    /// <li>4th to 6th bit (mask: 0x7&lt;&lt;3) define the type of a numeric field:
-    /// <ul>
-    /// <li>all bits in mask are cleared if no numeric field at all</li>
-    /// <li>1&lt;&lt;3: Value is Int</li>
-    /// <li>2&lt;&lt;3: Value is Long</li>
-    /// <li>3&lt;&lt;3: Value is Int as Float (as of <seealso cref="Float#intBitsToFloat(int)"/></li>
-    /// <li>4&lt;&lt;3: Value is Long as Double (as of <seealso cref="Double#longBitsToDouble(long)"/></li>
-    /// </ul>
-    /// </li>
-    /// </ul>
-    /// <li>Value --&gt; String | BinaryValue | Int | Long (depending on Bits)</li>
-    /// <li>BinaryValue --&gt; ValueSize, &lt;<seealso cref="DataOutput#writeByte Byte"/>&gt;^ValueSize</li>
-    /// <li>ValueSize --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    /// </li>
-    /// </ul>
-    /// </ol>
+    /// <para>Stored fields are represented by two files:</para>
+    /// <list type="number">
+    ///     <item><description><a name="field_index" id="field_index"></a>
+    ///         <para>The field index, or <c>.fdx</c> file.</para>
+    ///         <para>This is used to find the location within the field data file of the fields
+    ///         of a particular document. Because it contains fixed-length data, this file may
+    ///         be easily randomly accessed. The position of document <i>n</i> 's field data is
+    ///         the Uint64 (<see cref="Store.DataOutput.WriteInt64(long)"/>) at <i>n</i>*8 in this file.</para>
+    ///         <para>This contains, for each document, a pointer to its field data, as
+    ///         follows:</para>
+    ///         <list type="bullet">
+    ///             <item><description>FieldIndex (.fdx) --&gt; &lt;Header&gt;, &lt;FieldValuesPosition&gt; <sup>SegSize</sup></description></item>
+    ///             <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///             <item><description>FieldValuesPosition --&gt; Uint64 (<see cref="Store.DataOutput.WriteInt64(long)"/>) </description></item>
+    ///         </list>
+    ///     </description></item>
+    ///     <item><description>
+    ///         <para><a name="field_data" id="field_data"></a>The field data, or <c>.fdt</c> file.</para>
+    ///         <para>This contains the stored fields of each document, as follows:</para>
+    ///         <list type="bullet">
+    ///             <item><description>FieldData (.fdt) --&gt; &lt;Header&gt;, &lt;DocFieldData&gt; <sup>SegSize</sup></description></item>
+    ///             <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///             <item><description>DocFieldData --&gt; FieldCount, &lt;FieldNum, Bits, Value&gt;
+    ///                 <sup>FieldCount</sup></description></item>
+    ///             <item><description>FieldCount --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///             <item><description>FieldNum --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///             <item><description>Bits --&gt; Byte (<see cref="Store.DataOutput.WriteByte(byte)"/>)
+    ///                 <list type="bullet">
+    ///                     <item><description>low order bit reserved.</description></item>
+    ///                     <item><description>second bit is one for fields containing binary data</description></item>
+    ///                     <item><description>third bit reserved.</description></item>
+    ///                     <item><description>4th to 6th bit (mask: 0x7&lt;&lt;3) define the type of a numeric field:
+    ///                         <list type="bullet">
+    ///                             <item><description>all bits in mask are cleared if no numeric field at all</description></item>
+    ///                             <item><description>1&lt;&lt;3: Value is Int</description></item>
+    ///                             <item><description>2&lt;&lt;3: Value is Long</description></item>
+    ///                             <item><description>3&lt;&lt;3: Value is Int as Float (as of <see cref="Support.Number.Int32BitsToSingle(int)"/>)</description></item>
+    ///                             <item><description>4&lt;&lt;3: Value is Long as Double (as of <see cref="System.BitConverter.Int64BitsToDouble(long)"/>)</description></item>
+    ///                         </list>
+    ///                     </description></item>
+    ///                 </list>
+    ///             </description></item>
+    ///             <item><description>Value --&gt; String | BinaryValue | Int | Long (depending on Bits)</description></item>
+    ///             <item><description>BinaryValue --&gt; ValueSize, &lt; Byte (<see cref="Store.DataOutput.WriteByte(byte)"/>) &gt;^ValueSize</description></item>
+    ///             <item><description>ValueSize --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///         </list>
+    ///     </description></item>
+    /// </list>
     /// @lucene.experimental
     /// </summary>
     public class Lucene40StoredFieldsFormat : StoredFieldsFormat

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40StoredFieldsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40StoredFieldsReader.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40StoredFieldsReader.cs
index f3064fe..2ce0261 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40StoredFieldsReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40StoredFieldsReader.cs
@@ -35,11 +35,12 @@ namespace Lucene.Net.Codecs.Lucene40
 
     /// <summary>
     /// Class responsible for access to stored document fields.
-    /// <p/>
+    /// <para/>
     /// It uses &lt;segment&gt;.fdt and &lt;segment&gt;.fdx; files.
+    /// <para/>
+    /// @lucene.internal
     /// </summary>
-    /// <seealso cref= Lucene40StoredFieldsFormat
-    /// @lucene.internal </seealso>
+    /// <seealso cref="Lucene40StoredFieldsFormat"/>
     public sealed class Lucene40StoredFieldsReader : StoredFieldsReader, IDisposable
     {
         private readonly FieldInfos fieldInfos;
@@ -51,10 +52,10 @@ namespace Lucene.Net.Codecs.Lucene40
 
         /// <summary>
         /// Returns a cloned FieldsReader that shares open
-        ///  IndexInputs with the original one.  It is the caller's
-        ///  job not to close the original FieldsReader until all
-        ///  clones are called (eg, currently SegmentReader manages
-        ///  this logic).
+        /// <see cref="IndexInput"/>s with the original one.  It is the caller's
+        /// job not to dispose the original FieldsReader until all
+        /// clones are called (eg, currently <see cref="Index.SegmentReader"/> manages
+        /// this logic).
         /// </summary>
         public override object Clone()
         {
@@ -120,7 +121,7 @@ namespace Lucene.Net.Codecs.Lucene40
             }
         }
 
-        /// <exception cref="ObjectDisposedException"> if this FieldsReader is closed </exception>
+        /// <exception cref="ObjectDisposedException"> if this FieldsReader is disposed. </exception>
         private void EnsureOpen()
         {
             if (closed)
@@ -130,10 +131,10 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// Closes the underlying <seealso cref="Lucene.Net.Store.IndexInput"/> streams.
-        /// this means that the Fields values will not be accessible.
+        /// Closes the underlying <see cref="Lucene.Net.Store.IndexInput"/> streams.
+        /// This means that the <see cref="Index.Fields"/> values will not be accessible.
         /// </summary>
-        /// <exception cref="IOException"> If an I/O error occurs </exception>
+        /// <exception cref="System.IO.IOException"> If an I/O error occurs. </exception>
         protected override void Dispose(bool disposing)
         {
             if (disposing)
@@ -148,6 +149,7 @@ namespace Lucene.Net.Codecs.Lucene40
 
         /// <summary>
         /// Returns number of documents.
+        /// <para/>
         /// NOTE: This was size() in Lucene.
         /// </summary>
         public int Count
@@ -265,9 +267,9 @@ namespace Lucene.Net.Codecs.Lucene40
 
         /// <summary>
         /// Returns the length in bytes of each raw document in a
-        ///  contiguous range of length numDocs starting with
-        ///  startDocID.  Returns the IndexInput (the fieldStream),
-        ///  already seeked to the starting point for startDocID.
+        /// contiguous range of length <paramref name="numDocs"/> starting with
+        /// <paramref name="startDocID"/>.  Returns the <see cref="IndexInput"/> (the fieldStream),
+        /// already seeked to the starting point for <paramref name="startDocID"/>.
         /// </summary>
         public IndexInput RawDocs(int[] lengths, int startDocID, int numDocs)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40StoredFieldsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40StoredFieldsWriter.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40StoredFieldsWriter.cs
index b33e43f..77eeced 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40StoredFieldsWriter.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40StoredFieldsWriter.cs
@@ -39,11 +39,12 @@ namespace Lucene.Net.Codecs.Lucene40
 
     /// <summary>
     /// Class responsible for writing stored document fields.
-    /// <p/>
+    /// <para/>
     /// It uses &lt;segment&gt;.fdt and &lt;segment&gt;.fdx; files.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// <seealso cref= Lucene40StoredFieldsFormat
-    /// @lucene.experimental  </seealso>
+    /// <seealso cref="Lucene40StoredFieldsFormat"/>
     public sealed class Lucene40StoredFieldsWriter : StoredFieldsWriter
     {
         // NOTE: bit 0 is free here!  You can steal it!
@@ -71,11 +72,11 @@ namespace Lucene.Net.Codecs.Lucene40
         internal static readonly long HEADER_LENGTH_DAT = CodecUtil.HeaderLength(CODEC_NAME_DAT);
 
         /// <summary>
-        /// Extension of stored fields file </summary>
+        /// Extension of stored fields file. </summary>
         public const string FIELDS_EXTENSION = "fdt";
 
         /// <summary>
-        /// Extension of stored fields index file </summary>
+        /// Extension of stored fields index file. </summary>
         public const string FIELDS_INDEX_EXTENSION = "fdx";
 
         private readonly Directory directory;
@@ -242,10 +243,10 @@ namespace Lucene.Net.Codecs.Lucene40
 
         /// <summary>
         /// Bulk write a contiguous series of documents.  The
-        ///  lengths array is the length (in bytes) of each raw
-        ///  document.  The stream IndexInput is the
-        ///  fieldsStream from which we should bulk-copy all
-        ///  bytes.
+        /// <paramref name="lengths"/> array is the length (in bytes) of each raw
+        /// document.  The <paramref name="stream"/> <see cref="IndexInput"/> is the
+        /// fieldsStream from which we should bulk-copy all
+        /// bytes.
         /// </summary>
         public void AddRawDocuments(IndexInput stream, int[] lengths, int numDocs)
         {
@@ -309,7 +310,7 @@ namespace Lucene.Net.Codecs.Lucene40
 
         /// <summary>
         /// Maximum number of contiguous documents to bulk-copy
-        ///    when merging stored fields
+        /// when merging stored fields.
         /// </summary>
         private const int MAX_RAW_MERGE_DOCS = 4192;
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsFormat.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsFormat.cs
index ce91826..269e0f0 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsFormat.cs
@@ -17,7 +17,6 @@ namespace Lucene.Net.Codecs.Lucene40
      * limitations under the License.
      */
 
-    // javadocs
     using Directory = Lucene.Net.Store.Directory;
     using FieldInfos = Lucene.Net.Index.FieldInfos;
     using IOContext = Lucene.Net.Store.IOContext;
@@ -25,87 +24,87 @@ namespace Lucene.Net.Codecs.Lucene40
 
     /// <summary>
     /// Lucene 4.0 Term Vectors format.
-    /// <p>Term Vector support is an optional on a field by field basis. It consists of
-    /// 3 files.</p>
-    /// <ol>
-    /// <li><a name="tvx" id="tvx"></a>
-    /// <p>The Document Index or .tvx file.</p>
-    /// <p>For each document, this stores the offset into the document data (.tvd) and
-    /// field data (.tvf) files.</p>
-    /// <p>DocumentIndex (.tvx) --&gt; Header,&lt;DocumentPosition,FieldPosition&gt;
-    /// <sup>NumDocs</sup></p>
-    /// <ul>
-    ///   <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///   <li>DocumentPosition --&gt; <seealso cref="DataOutput#writeLong UInt64"/> (offset in the .tvd file)</li>
-    ///   <li>FieldPosition --&gt; <seealso cref="DataOutput#writeLong UInt64"/> (offset in the .tvf file)</li>
-    /// </ul>
-    /// </li>
-    /// <li><a name="tvd" id="tvd"></a>
-    /// <p>The Document or .tvd file.</p>
-    /// <p>this contains, for each document, the number of fields, a list of the fields
+    /// <para>Term Vector support is an optional on a field by field basis. It consists of
+    /// 3 files.</para>
+    /// <list type="number">
+    /// <item><description><a name="tvx" id="tvx"></a>
+    /// <para>The Document Index or .tvx file.</para>
+    /// <para>For each document, this stores the offset into the document data (.tvd) and
+    /// field data (.tvf) files.</para>
+    /// <para>DocumentIndex (.tvx) --&gt; Header,&lt;DocumentPosition,FieldPosition&gt;
+    /// <sup>NumDocs</sup></para>
+    /// <list type="bullet">
+    ///   <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///   <item><description>DocumentPosition --&gt; UInt64 (<see cref="Store.DataOutput.WriteInt64(long)"/>)  (offset in the .tvd file)</description></item>
+    ///   <item><description>FieldPosition --&gt; UInt64 (<see cref="Store.DataOutput.WriteInt64(long)"/>)  (offset in the .tvf file)</description></item>
+    /// </list>
+    /// </description></item>
+    /// <item><description><a name="tvd" id="tvd"></a>
+    /// <para>The Document or .tvd file.</para>
+    /// <para>This contains, for each document, the number of fields, a list of the fields
     /// with term vector info and finally a list of pointers to the field information
-    /// in the .tvf (Term Vector Fields) file.</p>
-    /// <p>The .tvd file is used to map out the fields that have term vectors stored
-    /// and where the field information is in the .tvf file.</p>
-    /// <p>Document (.tvd) --&gt; Header,&lt;NumFields, FieldNums,
-    /// FieldPositions&gt; <sup>NumDocs</sup></p>
-    /// <ul>
-    ///   <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///   <li>NumFields --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>FieldNums --&gt; &lt;FieldNumDelta&gt; <sup>NumFields</sup></li>
-    ///   <li>FieldNumDelta --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>FieldPositions --&gt; &lt;FieldPositionDelta&gt; <sup>NumFields-1</sup></li>
-    ///   <li>FieldPositionDelta --&gt; <seealso cref="DataOutput#writeVLong VLong"/></li>
-    /// </ul>
-    /// </li>
-    /// <li><a name="tvf" id="tvf"></a>
-    /// <p>The Field or .tvf file.</p>
-    /// <p>this file contains, for each field that has a term vector stored, a list of
+    /// in the .tvf (Term Vector Fields) file.</para>
+    /// <para>The .tvd file is used to map out the fields that have term vectors stored
+    /// and where the field information is in the .tvf file.</para>
+    /// <para>Document (.tvd) --&gt; Header,&lt;NumFields, FieldNums,
+    /// FieldPositions&gt; <sup>NumDocs</sup></para>
+    /// <list type="bullet">
+    ///   <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///   <item><description>NumFields --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///   <item><description>FieldNums --&gt; &lt;FieldNumDelta&gt; <sup>NumFields</sup></description></item>
+    ///   <item><description>FieldNumDelta --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///   <item><description>FieldPositions --&gt; &lt;FieldPositionDelta&gt; <sup>NumFields-1</sup></description></item>
+    ///   <item><description>FieldPositionDelta --&gt; VLong (<see cref="Store.DataOutput.WriteVInt64(long)"/>) </description></item>
+    /// </list>
+    /// </description></item>
+    /// <item><description><a name="tvf" id="tvf"></a>
+    /// <para>The Field or .tvf file.</para>
+    /// <para>This file contains, for each field that has a term vector stored, a list of
     /// the terms, their frequencies and, optionally, position, offset, and payload
-    /// information.</p>
-    /// <p>Field (.tvf) --&gt; Header,&lt;NumTerms, Flags, TermFreqs&gt;
-    /// <sup>NumFields</sup></p>
-    /// <ul>
-    ///   <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///   <li>NumTerms --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>Flags --&gt; <seealso cref="DataOutput#writeByte Byte"/></li>
-    ///   <li>TermFreqs --&gt; &lt;TermText, TermFreq, Positions?, PayloadData?, Offsets?&gt;
-    ///       <sup>NumTerms</sup></li>
-    ///   <li>TermText --&gt; &lt;PrefixLength, Suffix&gt;</li>
-    ///   <li>PrefixLength --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>Suffix --&gt; <seealso cref="DataOutput#writeString String"/></li>
-    ///   <li>TermFreq --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>Positions --&gt; &lt;PositionDelta PayloadLength?&gt;<sup>TermFreq</sup></li>
-    ///   <li>PositionDelta --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>PayloadLength --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>PayloadData --&gt; <seealso cref="DataOutput#writeByte Byte"/><sup>NumPayloadBytes</sup></li>
-    ///   <li>Offsets --&gt; &lt;<seealso cref="DataOutput#writeVInt VInt"/>, <seealso cref="DataOutput#writeVInt VInt"/>&gt;<sup>TermFreq</sup></li>
-    /// </ul>
-    /// <p>Notes:</p>
-    /// <ul>
-    /// <li>Flags byte stores whether this term vector has position, offset, payload.
-    /// information stored.</li>
-    /// <li>Term byte prefixes are shared. The PrefixLength is the number of initial
+    /// information.</para>
+    /// <para>Field (.tvf) --&gt; Header,&lt;NumTerms, Flags, TermFreqs&gt;
+    /// <sup>NumFields</sup></para>
+    /// <list type="bullet">
+    ///   <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///   <item><description>NumTerms --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///   <item><description>Flags --&gt; Byte (<see cref="Store.DataOutput.WriteByte(byte)"/>) </description></item>
+    ///   <item><description>TermFreqs --&gt; &lt;TermText, TermFreq, Positions?, PayloadData?, Offsets?&gt;
+    ///       <sup>NumTerms</sup></description></item>
+    ///   <item><description>TermText --&gt; &lt;PrefixLength, Suffix&gt;</description></item>
+    ///   <item><description>PrefixLength --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///   <item><description>Suffix --&gt; String (<see cref="Store.DataOutput.WriteString(string)"/>) </description></item>
+    ///   <item><description>TermFreq --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///   <item><description>Positions --&gt; &lt;PositionDelta PayloadLength?&gt;<sup>TermFreq</sup></description></item>
+    ///   <item><description>PositionDelta --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///   <item><description>PayloadLength --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///   <item><description>PayloadData --&gt; Byte (<see cref="Store.DataOutput.WriteByte(byte)"/>) <sup>NumPayloadBytes</sup></description></item>
+    ///   <item><description>Offsets --&gt; &lt;VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>), VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) &gt;<sup>TermFreq</sup></description></item>
+    /// </list>
+    /// <para>Notes:</para>
+    /// <list type="bullet">
+    /// <item><description>Flags byte stores whether this term vector has position, offset, payload.
+    /// information stored.</description></item>
+    /// <item><description>Term byte prefixes are shared. The PrefixLength is the number of initial
     /// bytes from the previous term which must be pre-pended to a term's suffix
     /// in order to form the term's bytes. Thus, if the previous term's text was "bone"
-    /// and the term is "boy", the PrefixLength is two and the suffix is "y".</li>
-    /// <li>PositionDelta is, if payloads are disabled for the term's field, the
+    /// and the term is "boy", the PrefixLength is two and the suffix is "y".</description></item>
+    /// <item><description>PositionDelta is, if payloads are disabled for the term's field, the
     /// difference between the position of the current occurrence in the document and
     /// the previous occurrence (or zero, if this is the first occurrence in this
     /// document). If payloads are enabled for the term's field, then PositionDelta/2
     /// is the difference between the current and the previous position. If payloads
     /// are enabled and PositionDelta is odd, then PayloadLength is stored, indicating
-    /// the length of the payload at the current term position.</li>
-    /// <li>PayloadData is metadata associated with a term position. If
+    /// the length of the payload at the current term position.</description></item>
+    /// <item><description>PayloadData is metadata associated with a term position. If
     /// PayloadLength is stored at the current position, then it indicates the length
     /// of this payload. If PayloadLength is not stored, then this payload has the same
     /// length as the payload at the previous position. PayloadData encodes the
-    /// concatenated bytes for all of a terms occurrences.</li>
-    /// <li>Offsets are stored as delta encoded VInts. The first VInt is the
-    /// startOffset, the second is the endOffset.</li>
-    /// </ul>
-    /// </li>
-    /// </ol>
+    /// concatenated bytes for all of a terms occurrences.</description></item>
+    /// <item><description>Offsets are stored as delta encoded VInts. The first VInt is the
+    /// startOffset, the second is the endOffset.</description></item>
+    /// </list>
+    /// </description></item>
+    /// </list>
     /// </summary>
     public class Lucene40TermVectorsFormat : TermVectorsFormat
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsReader.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsReader.cs
index a0de655..0d44dec 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsReader.cs
@@ -41,10 +41,10 @@ namespace Lucene.Net.Codecs.Lucene40
 
     /// <summary>
     /// Lucene 4.0 Term Vectors reader.
-    /// <p>
+    /// <para/>
     /// It reads .tvd, .tvf, and .tvx files.
     /// </summary>
-    /// <seealso cref= Lucene40TermVectorsFormat </seealso>
+    /// <seealso cref="Lucene40TermVectorsFormat"/>
     public class Lucene40TermVectorsReader : TermVectorsReader, IDisposable
     {
         internal const sbyte STORE_POSITIONS_WITH_TERMVECTOR = 0x1;
@@ -54,15 +54,15 @@ namespace Lucene.Net.Codecs.Lucene40
         internal const sbyte STORE_PAYLOAD_WITH_TERMVECTOR = 0x4;
 
         /// <summary>
-        /// Extension of vectors fields file </summary>
+        /// Extension of vectors fields file. </summary>
         internal const string VECTORS_FIELDS_EXTENSION = "tvf";
 
         /// <summary>
-        /// Extension of vectors documents file </summary>
+        /// Extension of vectors documents file. </summary>
         internal const string VECTORS_DOCUMENTS_EXTENSION = "tvd";
 
         /// <summary>
-        /// Extension of vectors index file </summary>
+        /// Extension of vectors index file. </summary>
         internal const string VECTORS_INDEX_EXTENSION = "tvx";
 
         internal const string CODEC_NAME_FIELDS = "Lucene40TermVectorsFields";
@@ -181,11 +181,11 @@ namespace Lucene.Net.Codecs.Lucene40
 
         /// <summary>
         /// Retrieve the length (in bytes) of the tvd and tvf
-        ///  entries for the next numDocs starting with
-        ///  startDocID.  this is used for bulk copying when
-        ///  merging segments, if the field numbers are
-        ///  congruent.  Once this returns, the tvf & tvd streams
-        ///  are seeked to the startDocID.
+        /// entries for the next <paramref name="numDocs"/> starting with
+        /// <paramref name="startDocID"/>.  This is used for bulk copying when
+        /// merging segments, if the field numbers are
+        /// congruent.  Once this returns, the tvf &amp; tvd streams
+        /// are seeked to the <paramref name="startDocID"/>.
         /// </summary>
         internal void RawDocs(int[] tvdLengths, int[] tvfLengths, int startDocID, int numDocs)
         {
@@ -238,7 +238,8 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// The number of documents in the reader 
+        /// The number of documents in the reader.
+        /// <para/>
         /// NOTE: This was size() in Lucene.
         /// </summary>
         internal virtual int Count

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsWriter.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsWriter.cs
index 41bc759..5a8063e 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsWriter.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40TermVectorsWriter.cs
@@ -50,10 +50,10 @@ namespace Lucene.Net.Codecs.Lucene40
 
     /// <summary>
     /// Lucene 4.0 Term Vectors writer.
-    /// <p>
+    /// <para/>
     /// It writes .tvd, .tvf, and .tvx files.
     /// </summary>
-    /// <seealso cref= Lucene40TermVectorsFormat </seealso>
+    /// <seealso cref="Lucene40TermVectorsFormat"/>
     public sealed class Lucene40TermVectorsWriter : TermVectorsWriter
     {
         private readonly Directory directory;
@@ -335,7 +335,7 @@ namespace Lucene.Net.Codecs.Lucene40
 
         /// <summary>
         /// Do a bulk copy of numDocs documents from reader to our
-        /// streams.  this is used to expedite merging, if the
+        /// streams.  This is used to expedite merging, if the
         /// field numbers are congruent.
         /// </summary>
         private void AddRawDocuments(Lucene40TermVectorsReader reader, int[] tvdLengths, int[] tvfLengths, int numDocs)
@@ -395,7 +395,7 @@ namespace Lucene.Net.Codecs.Lucene40
 
         /// <summary>
         /// Maximum number of contiguous documents to bulk-copy
-        ///    when merging term vectors
+        /// when merging term vectors.
         /// </summary>
         private const int MAX_RAW_MERGE_DOCS = 4192;
 


[16/48] lucenenet git commit: Lucene.Net.Util: Fixed up documentation comments, types beginning with A-G

Posted by ni...@apache.org.
Lucene.Net.Util: Fixed up documentation comments, types beginning with A-G


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/d7cb70c4
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/d7cb70c4
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/d7cb70c4

Branch: refs/heads/master
Commit: d7cb70c465b8320816f068b7f9604de21fffbac8
Parents: ef2d090
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Sun Jun 4 03:08:38 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Sun Jun 4 03:22:58 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |   2 +-
 src/Lucene.Net/Lucene.Net.csproj                |   2 +-
 src/Lucene.Net/Util/Accountable.cs              |   2 +-
 src/Lucene.Net/Util/ArrayInPlaceMergeSorter.cs  |   5 +-
 src/Lucene.Net/Util/ArrayIntroSorter.cs         |   5 +-
 src/Lucene.Net/Util/ArrayTimSorter.cs           |   5 +-
 src/Lucene.Net/Util/ArrayUtil.cs                | 115 ++++++++-------
 src/Lucene.Net/Util/AttributeImpl.cs            |  52 +++++--
 src/Lucene.Net/Util/AttributeReflector.cs       |  42 ++++++
 src/Lucene.Net/Util/AttributeSource.cs          | 144 ++++++++++---------
 src/Lucene.Net/Util/BitUtil.cs                  |  45 +++---
 src/Lucene.Net/Util/Bits.cs                     |   7 +-
 src/Lucene.Net/Util/BroadWord.cs                |  54 +++----
 src/Lucene.Net/Util/ByteBlockPool.cs            |  58 ++++----
 src/Lucene.Net/Util/BytesRef.cs                 |  88 ++++++------
 src/Lucene.Net/Util/BytesRefArray.cs            |  52 +++----
 src/Lucene.Net/Util/BytesRefHash.cs             | 139 +++++++++---------
 src/Lucene.Net/Util/BytesRefIterator.cs         |  23 +--
 src/Lucene.Net/Util/CharsRef.cs                 |  48 ++++---
 src/Lucene.Net/Util/CloseableThreadLocal.cs     |  47 +++---
 src/Lucene.Net/Util/CollectionUtil.cs           |   8 +-
 src/Lucene.Net/Util/CommandLineUtil.cs          |  39 +++--
 src/Lucene.Net/Util/Constants.cs                |  18 ++-
 src/Lucene.Net/Util/Counter.cs                  |  16 +--
 src/Lucene.Net/Util/DocIdBitSet.cs              |   6 +-
 src/Lucene.Net/Util/DoubleBarrelLRUCache.cs     |  10 +-
 src/Lucene.Net/Util/FieldCacheSanityChecker.cs  |  86 +++++------
 src/Lucene.Net/Util/FilterIterator.cs           |   6 +-
 src/Lucene.Net/Util/FixedBitSet.cs              |  62 ++++----
 .../Util/GrowableByteArrayDataOutput.cs         |   5 +-
 src/Lucene.Net/Util/IAttributeReflector.cs      |  29 ----
 31 files changed, 643 insertions(+), 577 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ca36869..cec7578 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -53,7 +53,7 @@ helpers to help with that, see for examples see our [Java style methods to avoid
 1. Lucene.Net.Core (project)
    1. Codecs (namespace)
    2. Support (namespace)
-   3. Util (namespace) (Except for Util.Fst)
+   3. Util (namespace) Types starting with I-Z, Util.Automaton, 			Util.Mutable, and Util.Packed.
 2. Lucene.Net.Codecs (project)
 
 See [Documenting Lucene.Net](https://cwiki.apache.org/confluence/display/LUCENENET/Documenting+Lucene.Net) for instructions. 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Lucene.Net.csproj
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Lucene.Net.csproj b/src/Lucene.Net/Lucene.Net.csproj
index 3352bf3..2a6b11d 100644
--- a/src/Lucene.Net/Lucene.Net.csproj
+++ b/src/Lucene.Net/Lucene.Net.csproj
@@ -733,6 +733,7 @@
     <Compile Include="Util\ArrayUtil.cs" />
     <Compile Include="Util\Attribute.cs" />
     <Compile Include="Util\AttributeImpl.cs" />
+    <Compile Include="Util\AttributeReflector.cs" />
     <Compile Include="Util\AttributeSource.cs" />
     <Compile Include="Util\Automaton\Automaton.cs" />
     <Compile Include="Util\Automaton\AutomatonProvider.cs" />
@@ -793,7 +794,6 @@
     <Compile Include="Util\Fst\ReverseBytesReader.cs" />
     <Compile Include="Util\Fst\Util.cs" />
     <Compile Include="Util\GrowableByteArrayDataOutput.cs" />
-    <Compile Include="Util\IAttributeReflector.cs" />
     <Compile Include="Util\IndexableBinaryStringTools.cs" />
     <Compile Include="Util\InfoStream.cs" />
     <Compile Include="Util\InPlaceMergeSorter.cs" />

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/Accountable.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Accountable.cs b/src/Lucene.Net/Util/Accountable.cs
index 9dda797..561669b 100644
--- a/src/Lucene.Net/Util/Accountable.cs
+++ b/src/Lucene.Net/Util/Accountable.cs
@@ -19,7 +19,7 @@
 
 	/// <summary>
 	/// An object whose RAM usage can be computed.
-	/// 
+	/// <para/>
 	/// @lucene.internal
 	/// </summary>
 	public interface IAccountable // LUCENENET NOTE: This interface wasn't added until Lucene 4.9

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/ArrayInPlaceMergeSorter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/ArrayInPlaceMergeSorter.cs b/src/Lucene.Net/Util/ArrayInPlaceMergeSorter.cs
index 3ce6826..174099a 100644
--- a/src/Lucene.Net/Util/ArrayInPlaceMergeSorter.cs
+++ b/src/Lucene.Net/Util/ArrayInPlaceMergeSorter.cs
@@ -20,7 +20,8 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// An <seealso cref="InPlaceMergeSorter"/> for object arrays.
+    /// An <see cref="InPlaceMergeSorter"/> for object arrays.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     internal sealed class ArrayInPlaceMergeSorter<T> : InPlaceMergeSorter
@@ -29,7 +30,7 @@ namespace Lucene.Net.Util
         private readonly IComparer<T> comparer;
 
         /// <summary>
-        /// Create a new <seealso cref="ArrayInPlaceMergeSorter"/>. </summary>
+        /// Create a new <see cref="ArrayInPlaceMergeSorter{T}"/>. </summary>
         public ArrayInPlaceMergeSorter(T[] arr, IComparer<T> comparer)
         {
             this.arr = arr;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/ArrayIntroSorter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/ArrayIntroSorter.cs b/src/Lucene.Net/Util/ArrayIntroSorter.cs
index 7c7c1da..5f71168 100644
--- a/src/Lucene.Net/Util/ArrayIntroSorter.cs
+++ b/src/Lucene.Net/Util/ArrayIntroSorter.cs
@@ -20,7 +20,8 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// An <seealso cref="IntroSorter"/> for object arrays.
+    /// An <see cref="IntroSorter"/> for object arrays.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     internal sealed class ArrayIntroSorter<T> : IntroSorter
@@ -30,7 +31,7 @@ namespace Lucene.Net.Util
         private T pivot;
 
         /// <summary>
-        /// Create a new <seealso cref="ArrayInPlaceMergeSorter"/>. </summary>
+        /// Create a new <see cref="ArrayIntroSorter{T}"/>. </summary>
         public ArrayIntroSorter(T[] arr, IComparer<T> comparer)
         {
             this.arr = arr;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/ArrayTimSorter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/ArrayTimSorter.cs b/src/Lucene.Net/Util/ArrayTimSorter.cs
index 79b5cb2..79e1cd8 100644
--- a/src/Lucene.Net/Util/ArrayTimSorter.cs
+++ b/src/Lucene.Net/Util/ArrayTimSorter.cs
@@ -21,7 +21,8 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// A <seealso cref="TimSorter"/> for object arrays.
+    /// A <see cref="TimSorter"/> for object arrays.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     internal sealed class ArrayTimSorter<T> : TimSorter
@@ -31,7 +32,7 @@ namespace Lucene.Net.Util
         private readonly T[] tmp;
 
         /// <summary>
-        /// Create a new <seealso cref="ArrayTimSorter"/>. </summary>
+        /// Create a new <see cref="ArrayTimSorter{T}"/>. </summary>
         public ArrayTimSorter(T[] arr, IComparer<T> comparer, int maxTempSlots)
             : base(maxTempSlots)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/ArrayUtil.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/ArrayUtil.cs b/src/Lucene.Net/Util/ArrayUtil.cs
index 2e45dfc..de8b2d4 100644
--- a/src/Lucene.Net/Util/ArrayUtil.cs
+++ b/src/Lucene.Net/Util/ArrayUtil.cs
@@ -24,18 +24,18 @@ namespace Lucene.Net.Util
 
     /// <summary>
     /// Methods for manipulating arrays.
-    ///
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public sealed class ArrayUtil
     {
         /// <summary>
         /// Maximum length for an array; we set this to "a
-        ///  bit" below <see cref="int.MaxValue"/> because the exact max
-        ///  allowed byte[] is JVM dependent, so we want to avoid
-        ///  a case where a large value worked during indexing on
-        ///  one JVM but failed later at search time with a
-        ///  different JVM.
+        /// bit" below <see cref="int.MaxValue"/> because the exact max
+        /// allowed byte[] is JVM dependent, so we want to avoid
+        /// a case where a large value worked during indexing on
+        /// one JVM but failed later at search time with a
+        /// different JVM.
         /// </summary>
         public static readonly int MAX_ARRAY_LENGTH = int.MaxValue - 256;
 
@@ -51,47 +51,47 @@ namespace Lucene.Net.Util
          */
 
         /// <summary>
-        /// Parses the string argument as if it was an int value and returns the
-        /// result. Throws NumberFormatException if the string does not represent an
+        /// Parses the string argument as if it was an <see cref="int"/> value and returns the
+        /// result. Throws <see cref="FormatException"/> if the string does not represent an
         /// int quantity.
         /// <para/>
         /// NOTE: This was parseInt() in Lucene
         /// </summary>
-        /// <param name="chars"> a string representation of an int quantity. </param>
-        /// <returns> int the value represented by the argument </returns>
-        /// <exception cref="NumberFormatException"> if the argument could not be parsed as an int quantity. </exception>
+        /// <param name="chars"> A string representation of an int quantity. </param>
+        /// <returns> The value represented by the argument </returns>
+        /// <exception cref="FormatException"> If the argument could not be parsed as an int quantity. </exception>
         public static int ParseInt32(char[] chars)
         {
             return ParseInt32(chars, 0, chars.Length, 10);
         }
 
         /// <summary>
-        /// Parses a char array into an int. 
+        /// Parses a char array into an <see cref="int"/>. 
         /// <para/>
         /// NOTE: This was parseInt() in Lucene
         /// </summary>
-        /// <param name="chars"> the character array </param>
+        /// <param name="chars"> The character array </param>
         /// <param name="offset"> The offset into the array </param>
         /// <param name="len"> The length </param>
-        /// <returns> the int </returns>
-        /// <exception cref="NumberFormatException"> if it can't parse </exception>
+        /// <returns> the <see cref="int"/> </returns>
+        /// <exception cref="FormatException"> If it can't parse </exception>
         public static int ParseInt32(char[] chars, int offset, int len)
         {
             return ParseInt32(chars, offset, len, 10);
         }
 
         /// <summary>
-        /// Parses the string argument as if it was an int value and returns the
-        /// result. Throws NumberFormatException if the string does not represent an
-        /// int quantity. The second argument specifies the radix to use when parsing
+        /// Parses the string argument as if it was an <see cref="int"/> value and returns the
+        /// result. Throws <see cref="FormatException"/> if the string does not represent an
+        /// <see cref="int"/> quantity. The second argument specifies the radix to use when parsing
         /// the value.
         /// <para/>
         /// NOTE: This was parseInt() in Lucene
         /// </summary>
-        /// <param name="chars"> a string representation of an int quantity. </param>
-        /// <param name="radix"> the base to use for conversion. </param>
-        /// <returns> int the value represented by the argument </returns>
-        /// <exception cref="NumberFormatException"> if the argument could not be parsed as an int quantity. </exception>
+        /// <param name="chars"> A string representation of an int quantity. </param>
+        /// <param name="radix"> The base to use for conversion. </param>
+        /// <returns> The value represented by the argument </returns>
+        /// <exception cref="FormatException"> If the argument could not be parsed as an int quantity. </exception>
         public static int ParseInt32(char[] chars, int offset, int len, int radix)
         {
             int minRadix = 2, maxRadix = 36;
@@ -158,23 +158,22 @@ namespace Lucene.Net.Util
         */
 
         /// <summary>
-        /// Returns an array size >= minTargetSize, generally
-        ///  over-allocating exponentially to achieve amortized
-        ///  linear-time cost as the array grows.
-        ///
-        ///  NOTE: this was originally borrowed from Python 2.4.2
-        ///  listobject.c sources (attribution in LICENSE.txt), but
-        ///  has now been substantially changed based on
-        ///  discussions from java-dev thread with subject "Dynamic
-        ///  array reallocation algorithms", started on Jan 12
-        ///  2010.
+        /// Returns an array size &gt;= <paramref name="minTargetSize"/>, generally
+        /// over-allocating exponentially to achieve amortized
+        /// linear-time cost as the array grows.
+        /// <para/>
+        /// NOTE: this was originally borrowed from Python 2.4.2
+        /// listobject.c sources (attribution in LICENSE.txt), but
+        /// has now been substantially changed based on
+        /// discussions from java-dev thread with subject "Dynamic
+        /// array reallocation algorithms", started on Jan 12
+        /// 2010.
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
         /// <param name="minTargetSize"> Minimum required value to be returned. </param>
         /// <param name="bytesPerElement"> Bytes used by each element of
-        /// the array.  See constants in <seealso cref="RamUsageEstimator"/>.
-        ///
-        /// @lucene.internal </param>
-
+        /// the array.  See constants in <see cref="RamUsageEstimator"/>. </param>
         public static int Oversize(int minTargetSize, int bytesPerElement)
         {
             if (minTargetSize < 0)
@@ -661,9 +660,9 @@ namespace Lucene.Net.Util
         /// <param name="right">       The right array to compare </param>
         /// <param name="offsetRight"> the offset into the right array.  Must be positive </param>
         /// <param name="length">      The length of the section of the array to compare </param>
-        /// <returns> true if the two arrays, starting at their respective offsets, are equal
+        /// <returns> <c>true</c> if the two arrays, starting at their respective offsets, are equal
         /// </returns>
-        /// <seealso cref= java.util.Arrays#equals(char[], char[]) </seealso>
+        /// <seealso cref="Support.Arrays.Equals{T}(T[], T[])"/>
         public static bool Equals(char[] left, int offsetLeft, char[] right, int offsetRight, int length)
         {
             if ((offsetLeft + length <= left.Length) && (offsetRight + length <= right.Length))
@@ -689,9 +688,9 @@ namespace Lucene.Net.Util
         /// <param name="right">       The right array to compare </param>
         /// <param name="offsetRight"> the offset into the right array.  Must be positive </param>
         /// <param name="length">      The length of the section of the array to compare </param>
-        /// <returns> true if the two arrays, starting at their respective offsets, are equal
+        /// <returns> <c>true</c> if the two arrays, starting at their respective offsets, are equal
         /// </returns>
-        /// <seealso cref= java.util.Arrays#equals(byte[], byte[]) </seealso>
+        /// <seealso cref="Support.Arrays.Equals{T}(T[], T[])"/>
         public static bool Equals(byte[] left, int offsetLeft, byte[] right, int offsetRight, int length)
         {
             if ((offsetLeft + length <= left.Length) && (offsetRight + length <= right.Length))
@@ -746,9 +745,9 @@ namespace Lucene.Net.Util
         /// <param name="right">       The right array to compare </param>
         /// <param name="offsetRight"> the offset into the right array.  Must be positive </param>
         /// <param name="length">      The length of the section of the array to compare </param>
-        /// <returns> true if the two arrays, starting at their respective offsets, are equal
+        /// <returns> <c>true</c> if the two arrays, starting at their respective offsets, are equal
         /// </returns>
-        /// <seealso cref= java.util.Arrays#equals(char[], char[]) </seealso>
+        /// <seealso cref="Support.Arrays.Equals{T}(T[], T[])"/>
         public static bool Equals(int[] left, int offsetLeft, int[] right, int offsetRight, int length)
         {
             if ((offsetLeft + length <= left.Length) && (offsetRight + length <= right.Length))
@@ -809,7 +808,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Get the natural <seealso cref="Comparer"/> for the provided object class.
+        /// Get the natural <see cref="IComparer{T}"/> for the provided object class.
         /// <para/>
         /// The comparer returned depends on the <typeparam name="T"/> argument:
         /// <list type="number">
@@ -849,7 +848,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Swap values stored in slots <code>i</code> and <code>j</code> </summary>
+        /// Swap values stored in slots <paramref name="i"/> and <paramref name="j"/> </summary>
         public static void Swap<T>(T[] arr, int i, int j)
         {
             T tmp = arr[i];
@@ -860,10 +859,10 @@ namespace Lucene.Net.Util
         // intro-sorts
 
         /// <summary>
-        /// Sorts the given array slice using the <seealso cref="Comparer"/>. this method uses the intro sort
+        /// Sorts the given array slice using the <see cref="IComparer{T}"/>. This method uses the intro sort
         /// algorithm, but falls back to insertion sort for small arrays. </summary>
-        /// <param name="fromIndex"> start index (inclusive) </param>
-        /// <param name="toIndex"> end index (exclusive) </param>
+        /// <param name="fromIndex"> Start index (inclusive) </param>
+        /// <param name="toIndex"> End index (exclusive) </param>
         public static void IntroSort<T>(T[] a, int fromIndex, int toIndex, IComparer<T> comp)
         {
             if (toIndex - fromIndex <= 1)
@@ -874,7 +873,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Sorts the given array using the <seealso cref="Comparer"/>. this method uses the intro sort
+        /// Sorts the given array using the <see cref="IComparer{T}"/>. This method uses the intro sort
         /// algorithm, but falls back to insertion sort for small arrays.
         /// </summary>
         public static void IntroSort<T>(T[] a, IComparer<T> comp)
@@ -883,10 +882,10 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Sorts the given array slice in natural order. this method uses the intro sort
+        /// Sorts the given array slice in natural order. This method uses the intro sort
         /// algorithm, but falls back to insertion sort for small arrays. </summary>
-        /// <param name="fromIndex"> start index (inclusive) </param>
-        /// <param name="toIndex"> end index (exclusive) </param>
+        /// <param name="fromIndex"> Start index (inclusive) </param>
+        /// <param name="toIndex"> End index (exclusive) </param>
         public static void IntroSort<T>(T[] a, int fromIndex, int toIndex) //where T : IComparable<T> // LUCENENET specific: removing constraint because in .NET, it is not needed
         {
             if (toIndex - fromIndex <= 1)
@@ -897,7 +896,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Sorts the given array in natural order. this method uses the intro sort
+        /// Sorts the given array in natural order. This method uses the intro sort
         /// algorithm, but falls back to insertion sort for small arrays.
         /// </summary>
         public static void IntroSort<T>(T[] a) //where T : IComparable<T> // LUCENENET specific: removing constraint because in .NET, it is not needed
@@ -908,10 +907,10 @@ namespace Lucene.Net.Util
         // tim sorts:
 
         /// <summary>
-        /// Sorts the given array slice using the <seealso cref="Comparer"/>. this method uses the Tim sort
+        /// Sorts the given array slice using the <see cref="IComparer{T}"/>. This method uses the Tim sort
         /// algorithm, but falls back to binary sort for small arrays. </summary>
-        /// <param name="fromIndex"> start index (inclusive) </param>
-        /// <param name="toIndex"> end index (exclusive) </param>
+        /// <param name="fromIndex"> Start index (inclusive) </param>
+        /// <param name="toIndex"> End index (exclusive) </param>
         public static void TimSort<T>(T[] a, int fromIndex, int toIndex, IComparer<T> comp)
         {
             if (toIndex - fromIndex <= 1)
@@ -922,7 +921,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Sorts the given array using the <seealso cref="Comparer"/>. this method uses the Tim sort
+        /// Sorts the given array using the <see cref="IComparer{T}"/>. This method uses the Tim sort
         /// algorithm, but falls back to binary sort for small arrays.
         /// </summary>
         public static void TimSort<T>(T[] a, IComparer<T> comp)
@@ -933,8 +932,8 @@ namespace Lucene.Net.Util
         /// <summary>
         /// Sorts the given array slice in natural order. this method uses the Tim sort
         /// algorithm, but falls back to binary sort for small arrays. </summary>
-        /// <param name="fromIndex"> start index (inclusive) </param>
-        /// <param name="toIndex"> end index (exclusive) </param>
+        /// <param name="fromIndex"> Start index (inclusive) </param>
+        /// <param name="toIndex"> End index (exclusive) </param>
         public static void TimSort<T>(T[] a, int fromIndex, int toIndex) //where T : IComparable<T> // LUCENENET specific: removing constraint because in .NET, it is not needed
         {
             if (toIndex - fromIndex <= 1)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/AttributeImpl.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/AttributeImpl.cs b/src/Lucene.Net/Util/AttributeImpl.cs
index 3afa263..4259bfa 100644
--- a/src/Lucene.Net/Util/AttributeImpl.cs
+++ b/src/Lucene.Net/Util/AttributeImpl.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Util
 
     /// <summary> Base class for Attributes that can be added to a
     /// <see cref="Lucene.Net.Util.AttributeSource" />.
-    /// <p/>
+    /// <para/>
     /// Attributes are used to add data in a dynamic, yet type-safe way to a source
     /// of usually streamed objects, e. g. a <see cref="Lucene.Net.Analysis.TokenStream" />.
     /// </summary>
@@ -37,14 +37,14 @@ namespace Lucene.Net.Util
         , ICloneable
 #endif
     {
-        /// <summary> Clears the values in this Attribute and resets it to its
-        /// default value. If this implementation implements more than one Attribute interface
+        /// <summary> Clears the values in this <see cref="Attribute"/> and resets it to its
+        /// default value. If this implementation implements more than one <see cref="Attribute"/> interface
         /// it clears all.
         /// </summary>
         public abstract void Clear();
 
         /// <summary>
-        /// This is equivalent to the anonymous class in the java version of ReflectAsString
+        /// This is equivalent to the anonymous class in the Java version of ReflectAsString
         /// </summary>
         private class StringBuilderAttributeReflector : IAttributeReflector
         {
@@ -77,6 +77,15 @@ namespace Lucene.Net.Util
             }
         }
 
+        /// <summary>
+        /// This method returns the current attribute values as a string in the following format
+        /// by calling the <see cref="ReflectWith(IAttributeReflector)"/> method:
+        /// <list type="bullet">
+        ///     <item><term>if <paramref name="prependAttClass"/>=true:</term> <description> <c>"AttributeClass.Key=value,AttributeClass.Key=value"</c> </description></item>
+        ///     <item><term>if <paramref name="prependAttClass"/>=false:</term> <description> <c>"key=value,key=value"</c> </description></item>
+        /// </list>
+        /// </summary>
+        /// <seealso cref="ReflectWith(IAttributeReflector)"/>
         public string ReflectAsString(bool prependAttClass)
         {
             StringBuilder buffer = new StringBuilder();
@@ -86,6 +95,30 @@ namespace Lucene.Net.Util
             return buffer.ToString();
         }
 
+        /// <summary>
+        /// This method is for introspection of attributes, it should simply
+        /// add the key/values this attribute holds to the given <see cref="IAttributeReflector"/>.
+        /// 
+        /// <para/>The default implementation calls <see cref="IAttributeReflector.Reflect(Type, string, object)"/> for all
+        /// non-static fields from the implementing class, using the field name as key
+        /// and the field value as value. The <see cref="IAttribute"/> class is also determined by Reflection.
+        /// Please note that the default implementation can only handle single-Attribute
+        /// implementations.
+        /// 
+        /// <para/>Custom implementations look like this (e.g. for a combined attribute implementation):
+        /// <code>
+        ///     public void ReflectWith(IAttributeReflector reflector) 
+        ///     {
+        ///         reflector.Reflect(typeof(ICharTermAttribute), "term", GetTerm());
+        ///         reflector.Reflect(typeof(IPositionIncrementAttribute), "positionIncrement", GetPositionIncrement());
+        ///     }
+        /// </code>
+        /// 
+        /// <para/>If you implement this method, make sure that for each invocation, the same set of <see cref="IAttribute"/>
+        /// interfaces and keys are passed to <see cref="IAttributeReflector.Reflect(Type, string, object)"/> in the same order, but possibly
+        /// different values. So don't automatically exclude e.g. <c>null</c> properties!
+        /// </summary>
+        /// <seealso cref="ReflectAsString(bool)"/>
         public virtual void ReflectWith(IAttributeReflector reflector) // LUCENENET NOTE: This method was abstract in Lucene
         {
             Type clazz = this.GetType();
@@ -127,8 +160,9 @@ namespace Lucene.Net.Util
         /// fields of this object and prints the values in the following syntax:
         ///
         /// <code>
-        /// public String toString() {
-        /// return "start=" + startOffset + ",end=" + endOffset;
+        /// public override string ToString() 
+        /// {
+        ///     return "start=" + startOffset + ",end=" + endOffset;
         /// }
         /// </code>
         ///
@@ -163,9 +197,9 @@ namespace Lucene.Net.Util
             return buffer.ToString();
         }
 
-        /// <summary> Copies the values from this Attribute into the passed-in
-        /// target attribute. The target implementation must support all the
-        /// Attributes this implementation supports.
+        /// <summary> Copies the values from this <see cref="Attribute"/> into the passed-in
+        /// <paramref name="target"/> attribute. The <paramref name="target"/> implementation must support all the
+        /// <see cref="IAttribute"/>s this implementation supports.
         /// </summary>
         public abstract void CopyTo(IAttribute target);
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/AttributeReflector.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/AttributeReflector.cs b/src/Lucene.Net/Util/AttributeReflector.cs
new file mode 100644
index 0000000..9061886
--- /dev/null
+++ b/src/Lucene.Net/Util/AttributeReflector.cs
@@ -0,0 +1,42 @@
+using System;
+
+namespace Lucene.Net.Util
+{
+    /*
+     * Licensed to the Apache Software Foundation (ASF) under one or more
+     * contributor license agreements.  See the NOTICE file distributed with
+     * this work for additional information regarding copyright ownership.
+     * The ASF licenses this file to You under the Apache License, Version 2.0
+     * (the "License"); you may not use this file except in compliance with
+     * the License.  You may obtain a copy of the License at
+     *
+     *     http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     * See the License for the specific language governing permissions and
+     * limitations under the License.
+     */
+
+    /// <summary>
+    /// This interface is used to reflect contents of <see cref="AttributeSource"/> or <see cref="Attribute"/>.
+    /// </summary>
+    public interface IAttributeReflector
+    {
+        /// <summary>
+        /// LUCENENET specific overload to support generics.
+        /// </summary>
+        void Reflect<T>(string key, object value)
+            where T : IAttribute;
+
+        /// <summary>
+        /// This method gets called for every property in an <see cref="Attribute"/>/<see cref="AttributeSource"/>
+        /// passing the <see cref="Type"/> of the <see cref="IAttribute"/>, a <paramref name="key"/> and the actual <paramref name="value"/>.
+        /// E.g., an invocation of <see cref="Analysis.TokenAttributes.CharTermAttribute.ReflectWith(IAttributeReflector)"/>
+    /// would call this method once using <c>typeof(Analysis.TokenAttributes.ICharTermAttribute)</c>
+        /// as attribute type, <c>"term"</c> as <paramref name="key"/> and the actual <paramref name="value"/> as a <see cref="string"/>.
+        /// </summary>
+        void Reflect(Type type, string key, object value);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/AttributeSource.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/AttributeSource.cs b/src/Lucene.Net/Util/AttributeSource.cs
index 8c52d2e..ec80c40 100644
--- a/src/Lucene.Net/Util/AttributeSource.cs
+++ b/src/Lucene.Net/Util/AttributeSource.cs
@@ -27,11 +27,11 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// An AttributeSource contains a list of different <seealso cref="Attribute"/>s,
+    /// An <see cref="AttributeSource"/> contains a list of different <see cref="Attribute"/>s,
     /// and methods to add and get them. There can only be a single instance
-    /// of an attribute in the same AttributeSource instance. this is ensured
-    /// by passing in the actual type of the Attribute (Class&lt;Attribute&gt;) to
-    /// the <seealso cref="#addAttribute(Class)"/>, which then checks if an instance of
+    /// of an attribute in the same <see cref="AttributeSource"/> instance. This is ensured
+    /// by passing in the actual type of the <see cref="IAttribute"/> to
+    /// the <see cref="AddAttribute{T}"/>, which then checks if an instance of
     /// that type is already present. If yes, it returns the instance, otherwise
     /// it creates a new instance and returns it.
     /// </summary>
@@ -41,18 +41,18 @@ namespace Lucene.Net.Util
     public class AttributeSource
     {
         /// <summary>
-        /// An AttributeFactory creates instances of <seealso cref="Attribute"/>s.
+        /// An <see cref="AttributeFactory"/> creates instances of <see cref="Attribute"/>s.
         /// </summary>
         public abstract class AttributeFactory
         {
             /// <summary>
-            /// returns an <seealso cref="Attribute"/> for the supplied <seealso cref="Attribute"/> interface class.
+            /// returns an <see cref="Attribute"/> for the supplied <see cref="IAttribute"/> interface.
             /// </summary>
             public abstract Attribute CreateAttributeInstance<T>() where T : IAttribute;
 
             /// <summary>
-            /// this is the default factory that creates <seealso cref="Attribute"/>s using the
-            /// class name of the supplied <seealso cref="Attribute"/> interface class by appending <code>Impl</code> to it.
+            /// This is the default factory that creates <see cref="Attribute"/>s using the
+            /// <see cref="Type"/> of the supplied <see cref="IAttribute"/> interface by removing the <c>I</c> prefix.
             /// </summary>
             public static readonly AttributeFactory DEFAULT_ATTRIBUTE_FACTORY = new DefaultAttributeFactory();
 
@@ -101,9 +101,9 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// this class holds the state of an AttributeSource. </summary>
-        /// <seealso cref= #captureState </seealso>
-        /// <seealso cref= #restoreState </seealso>
+        /// This class holds the state of an <see cref="AttributeSource"/>. </summary>
+        /// <seealso cref="CaptureState()"/>
+        /// <seealso cref="RestoreState(State)"/>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -136,7 +136,7 @@ namespace Lucene.Net.Util
         private readonly AttributeFactory factory;
 
         /// <summary>
-        /// An AttributeSource using the default attribute factory <seealso cref="AttributeSource.AttributeFactory#DEFAULT_ATTRIBUTE_FACTORY"/>.
+        /// An <see cref="AttributeSource"/> using the default attribute factory <see cref="AttributeSource.AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY"/>.
         /// </summary>
         public AttributeSource()
             : this(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY)
@@ -144,7 +144,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// An AttributeSource that uses the same attributes as the supplied one.
+        /// An <see cref="AttributeSource"/> that uses the same attributes as the supplied one.
         /// </summary>
         public AttributeSource(AttributeSource input)
         {
@@ -159,7 +159,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// An AttributeSource using the supplied <seealso cref="AttributeFactory"/> for creating new <seealso cref="Attribute"/> instances.
+        /// An <see cref="AttributeSource"/> using the supplied <see cref="AttributeFactory"/> for creating new <see cref="IAttribute"/> instances.
         /// </summary>
         public AttributeSource(AttributeFactory factory)
         {
@@ -170,7 +170,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// returns the used AttributeFactory.
+        /// Returns the used <see cref="AttributeFactory"/>.
         /// </summary>
         public AttributeFactory GetAttributeFactory()
         {
@@ -187,9 +187,9 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns a new iterator that iterates all unique Attribute implementations.
-        /// this iterator may contain less entries that <seealso cref="#getAttributeClassesIterator"/>,
-        /// if one instance implements more than one Attribute interface.
+        /// Returns a new iterator that iterates all unique <see cref="IAttribute"/> implementations.
+        /// This iterator may contain fewer entries than <see cref="GetAttributeClassesEnumerator()"/>,
+        /// if one instance implements more than one <see cref="IAttribute"/> interface.
         /// </summary>
         public IEnumerator<Attribute> GetAttributeImplsEnumerator()
         {
@@ -260,7 +260,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// a cache that stores all interfaces for known implementation classes for performance (slow reflection) </summary>
+        /// A cache that stores all interfaces for known implementation classes for performance (slow reflection) </summary>
         private static readonly WeakIdentityMap<Type, LinkedList<WeakReference>> knownImplClasses =
             WeakIdentityMap<Type, LinkedList<WeakReference>>.NewConcurrentHashMap(false);
 
@@ -294,13 +294,13 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// <b>Expert:</b> Adds a custom Attribute instance with one or more Attribute interfaces.
-        /// <p><font color="red"><b>Please note:</b> It is not guaranteed, that <code>att</code> is added to
-        /// the <code>AttributeSource</code>, because the provided attributes may already exist.
-        /// You should always retrieve the wanted attributes using <seealso cref="#getAttribute"/> after adding
-        /// with this method and cast to your class.
-        /// The recommended way to use custom implementations is using an <seealso cref="AttributeFactory"/>.
-        /// </font></p>
+        /// <b>Expert:</b> Adds a custom <see cref="Attribute"/> instance with one or more <see cref="IAttribute"/> interfaces.
+        /// <para><font color="red"><b>Please note:</b> It is not guaranteed, that <paramref name="att"/> is added to
+        /// the <see cref="AttributeSource"/>, because the provided attributes may already exist.
+        /// You should always retrieve the wanted attributes using <see cref="GetAttribute{T}"/> after adding
+        /// with this method and cast to your <see cref="Type"/>.
+        /// The recommended way to use custom implementations is using an <see cref="AttributeFactory"/>.
+        /// </font></para>
         /// </summary>
         public void AddAttributeImpl(Attribute att)
         {
@@ -331,6 +331,12 @@ namespace Lucene.Net.Util
             }
         }
 
+        /// <summary>
+        /// The caller must pass in an interface type that extends <see cref="IAttribute"/>.
+        /// This method first checks if an instance of the corresponding class is 
+        /// already in this <see cref="AttributeSource"/> and returns it. Otherwise a
+        /// new instance is created, added to this <see cref="AttributeSource"/> and returned. 
+        /// </summary>
         public T AddAttribute<T>()
             where T : IAttribute
         {
@@ -360,15 +366,15 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns true, iff this AttributeSource has any attributes </summary>
+        /// Returns <c>true</c> if this <see cref="AttributeSource"/> has any attributes </summary>
         public bool HasAttributes
         {
             get { return this.attributes.Count > 0; }
         }
 
         /// <summary>
-        /// The caller must pass in a Class&lt;? extends Attribute&gt; value.
-        /// Returns true, iff this AttributeSource contains the passed-in Attribute.
+        /// The caller must pass in an interface type that extends <see cref="IAttribute"/>.
+        /// Returns <c>true</c>, if this <see cref="AttributeSource"/> contains the corresponding <see cref="Attribute"/>.
         /// </summary>
         public bool HasAttribute<T>() where T : IAttribute
         {
@@ -377,15 +383,15 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// The caller must pass in a Class&lt;? extends Attribute&gt; value.
-        /// Returns the instance of the passed in Attribute contained in this AttributeSource
+        /// The caller must pass in an interface type that extends <see cref="IAttribute"/>.
+        /// Returns the instance of the corresponding <see cref="Attribute"/> contained in this <see cref="AttributeSource"/>
         /// </summary>
-        /// <exception cref="IllegalArgumentException"> if this AttributeSource does not contain the
-        ///         Attribute. It is recommended to always use <seealso cref="#addAttribute"/> even in consumers
-        ///         of TokenStreams, because you cannot know if a specific TokenStream really uses
-        ///         a specific Attribute. <seealso cref="#addAttribute"/> will automatically make the attribute
+        /// <exception cref="ArgumentException"> if this <see cref="AttributeSource"/> does not contain the
+        ///         <see cref="Attribute"/>. It is recommended to always use <see cref="AddAttribute{T}()"/> even in consumers
+        ///         of <see cref="Analysis.TokenStream"/>s, because you cannot know if a specific <see cref="Analysis.TokenStream"/> really uses
+        ///         a specific <see cref="Attribute"/>. <see cref="AddAttribute{T}()"/> will automatically make the attribute
         ///         available. If you want to only use the attribute, if it is available (to optimize
-        ///         consuming), use <seealso cref="#HasAttribute"/>. </exception>
+        ///         consuming), use <see cref="HasAttribute{T}()"/>. </exception>
         public virtual T GetAttribute<T>() where T : IAttribute
         {
             var attClass = typeof(T);
@@ -417,8 +423,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Resets all Attributes in this AttributeSource by calling
-        /// <seealso cref="Attribute#clear()"/> on each Attribute implementation.
+        /// Resets all <see cref="Attribute"/>s in this <see cref="AttributeSource"/> by calling
+        /// <see cref="Attribute.Clear()"/> on each <see cref="IAttribute"/> implementation.
         /// </summary>
         public void ClearAttributes()
         {
@@ -429,8 +435,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Captures the state of all Attributes. The return value can be passed to
-        /// <seealso cref="#restoreState"/> to restore the state of this or another AttributeSource.
+        /// Captures the state of all <see cref="Attribute"/>s. The return value can be passed to
+        /// <see cref="RestoreState(State)"/> to restore the state of this or another <see cref="AttributeSource"/>.
         /// </summary>
         public virtual State CaptureState()
         {
@@ -443,15 +449,15 @@ namespace Lucene.Net.Util
         /// that this state contains into the attributes implementations of the targetStream.
         /// The targetStream must contain a corresponding instance for each argument
         /// contained in this state (e.g. it is not possible to restore the state of
-        /// an AttributeSource containing a TermAttribute into a AttributeSource using
-        /// a Token instance as implementation).
-        /// <p>
+        /// an <see cref="AttributeSource"/> containing a <see cref="Analysis.TokenAttributes.ICharTermAttribute"/> into a <see cref="AttributeSource"/> using
+        /// a <see cref="Analysis.Token"/> instance as implementation).
+        /// <para/>
         /// Note that this method does not affect attributes of the targetStream
         /// that are not contained in this state. In other words, if for example
-        /// the targetStream contains an OffsetAttribute, but this state doesn't, then
-        /// the value of the OffsetAttribute remains unchanged. It might be desirable to
+        /// the targetStream contains an <see cref="Analysis.TokenAttributes.IOffsetAttribute"/>, but this state doesn't, then
+        /// the value of the <see cref="Analysis.TokenAttributes.IOffsetAttribute"/> remains unchanged. It might be desirable to
         /// reset its value to the default, in which case the caller should first
-        /// call <seealso cref="TokenStream#ClearAttributes()"/> on the targetStream.
+        /// call <see cref="AttributeSource.ClearAttributes()"/> (<c>TokenStream.ClearAttributes()</c>) on the targetStream.
         /// </summary>
         public void RestoreState(State state)
         {
@@ -530,15 +536,15 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// this method returns the current attribute values as a string in the following format
-        /// by calling the <seealso cref="#reflectWith(AttributeReflector)"/> method:
+        /// This method returns the current attribute values as a string in the following format
+        /// by calling the <see cref="ReflectWith(IAttributeReflector)"/> method:
         ///
-        /// <ul>
-        /// <li><em>iff {@code prependAttClass=true}:</em> {@code "AttributeClass#key=value,AttributeClass#key=value"}
-        /// <li><em>iff {@code prependAttClass=false}:</em> {@code "key=value,key=value"}
-        /// </ul>
+        /// <list type="bullet">
+        ///     <item><term>if <paramref name="prependAttClass"/>=true:</term> <description> <c>"AttributeClass.Key=value,AttributeClass.Key=value"</c> </description></item>
+        ///     <item><term>if <paramref name="prependAttClass"/>=false:</term> <description> <c>"key=value,key=value"</c> </description></item>
+        /// </list>
         /// </summary>
-        /// <seealso cref= #reflectWith(AttributeReflector) </seealso>
+        /// <seealso cref="ReflectWith(IAttributeReflector)"/>
         public string ReflectAsString(bool prependAttClass)
         {
             StringBuilder buffer = new StringBuilder();
@@ -581,13 +587,13 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// this method is for introspection of attributes, it should simply
-        /// add the key/values this AttributeSource holds to the given <seealso cref="AttributeReflector"/>.
+        /// This method is for introspection of attributes, it should simply
+        /// add the key/values this <see cref="AttributeSource"/> holds to the given <see cref="IAttributeReflector"/>.
         ///
-        /// <p>this method iterates over all Attribute implementations and calls the
-        /// corresponding <seealso cref="Attribute#reflectWith"/> method.</p>
+        /// <para>This method iterates over all <see cref="IAttribute"/> implementations and calls the
+        /// corresponding <see cref="Attribute.ReflectWith(IAttributeReflector)"/> method.</para>
         /// </summary>
-        /// <seealso cref= Attribute#reflectWith </seealso>
+        /// <seealso cref="Attribute.ReflectWith(IAttributeReflector)"/>
         public void ReflectWith(IAttributeReflector reflector)
         {
             for (State state = GetCurrentState(); state != null; state = state.next)
@@ -597,10 +603,10 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Performs a clone of all <seealso cref="Attribute"/> instances returned in a new
-        /// {@code AttributeSource} instance. this method can be used to e.g. create another TokenStream
-        /// with exactly the same attributes (using <seealso cref="#AttributeSource(AttributeSource)"/>).
-        /// You can also use it as a (non-performant) replacement for <seealso cref="#captureState"/>, if you need to look
+        /// Performs a clone of all <see cref="Attribute"/> instances returned in a new
+        /// <see cref="AttributeSource"/> instance. This method can be used to e.g. create another <see cref="Analysis.TokenStream"/>
+        /// with exactly the same attributes (using <see cref="AttributeSource(AttributeSource)"/>).
+        /// You can also use it as a (non-performant) replacement for <see cref="CaptureState()"/>, if you need to look
         /// into / modify the captured state.
         /// </summary>
         public AttributeSource CloneAttributes()
@@ -632,12 +638,12 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Copies the contents of this {@code AttributeSource} to the given target {@code AttributeSource}.
-        /// The given instance has to provide all <seealso cref="Attribute"/>s this instance contains.
-        /// The actual attribute implementations must be identical in both {@code AttributeSource} instances;
-        /// ideally both AttributeSource instances should use the same <seealso cref="AttributeFactory"/>.
-        /// You can use this method as a replacement for <seealso cref="#restoreState"/>, if you use
-        /// <seealso cref="#cloneAttributes"/> instead of <seealso cref="#captureState"/>.
+        /// Copies the contents of this <see cref="AttributeSource"/> to the given target <see cref="AttributeSource"/>.
+        /// The given instance has to provide all <see cref="IAttribute"/>s this instance contains.
+        /// The actual attribute implementations must be identical in both <see cref="AttributeSource"/> instances;
+        /// ideally both <see cref="AttributeSource"/> instances should use the same <see cref="AttributeFactory"/>.
+        /// You can use this method as a replacement for <see cref="RestoreState(State)"/>, if you use
+        /// <see cref="CloneAttributes()"/> instead of <see cref="CaptureState()"/>.
         /// </summary>
         public void CopyTo(AttributeSource target)
         {
@@ -655,7 +661,7 @@ namespace Lucene.Net.Util
         /// <summary>
         /// Returns a string consisting of the class's simple name, the hex representation of the identity hash code,
         /// and the current reflection of all attributes. </summary>
-        /// <seealso cref= #reflectAsString(boolean) </seealso>
+        /// <seealso cref="ReflectAsString(bool)"/>
         public override string ToString()
         {
             return this.GetType().Name + '@' + RuntimeHelpers.GetHashCode(this).ToString("x") + " " + ReflectAsString(false);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/BitUtil.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/BitUtil.cs b/src/Lucene.Net/Util/BitUtil.cs
index 08fea44..dd1b81a 100644
--- a/src/Lucene.Net/Util/BitUtil.cs
+++ b/src/Lucene.Net/Util/BitUtil.cs
@@ -20,7 +20,8 @@ namespace Lucene.Net.Util // from org.apache.solr.util rev 555343
      */
 
     /// <summary>
-    ///  A variety of high efficiency bit twiddling routines.
+    /// A variety of high efficiency bit twiddling routines.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public sealed class BitUtil
@@ -50,19 +51,20 @@ namespace Lucene.Net.Util // from org.apache.solr.util rev 555343
         // should be faster than accessing an array for each index, and
         // the total array size is kept smaller (256*sizeof(int))=1K
         /// <summary>
-        ///*** the python code that generated bitlist
+        /// The Python code that generated <see cref="BIT_LISTS"/>:
+        /// <code>
         /// def bits2int(val):
         /// arr=0
         /// for shift in range(8,0,-1):
-        ///  if val & 0x80:
-        ///    arr = (arr << 4) | shift
-        ///  val = val << 1
+        ///  if val &amp; 0x80:
+        ///    arr = (arr &lt;&lt; 4) | shift
+        ///  val = val &lt;&lt; 1
         /// return arr
         ///
         /// def int_table():
         ///  tbl = [ hex(bits2int(val)).strip('L') for val in range(256) ]
         ///  return ','.join(tbl)
-        /// *****
+        /// </code>
         /// </summary>
         private static readonly int[] BIT_LISTS = new int[] {
             0x0, 0x1, 0x2, 0x21, 0x3, 0x31, 0x32, 0x321, 0x4, 0x41, 0x42, 0x421, 0x43,
@@ -100,21 +102,22 @@ namespace Lucene.Net.Util // from org.apache.solr.util rev 555343
         }
 
         /// <summary>
-        /// Return the number of bits sets in b. </summary>
+        /// Return the number of bits sets in <paramref name="b"/>. </summary>
         public static int BitCount(byte b)
         {
             return BYTE_COUNTS[b & 0xFF];
         }
 
         /// <summary>
-        /// Return the list of bits which are set in b encoded as followed:
-        /// <code>(i >>> (4 * n)) & 0x0F</code> is the offset of the n-th set bit of
+        /// Return the list of bits which are set in <paramref name="b"/> encoded as followed:
+        /// <code>(i >>> (4 * n)) &amp; 0x0F</code> is the offset of the n-th set bit of
         /// the given byte plus one, or 0 if there are n or less bits set in the given
-        /// byte. For example <code>bitList(12)</code> returns 0x43:<ul>
-        /// <li><code>0x43 & 0x0F</code> is 3, meaning the the first bit set is at offset 3-1 = 2,</li>
-        /// <li><code>(0x43 >>> 4) & 0x0F</code> is 4, meaning there is a second bit set at offset 4-1=3,</li>
-        /// <li><code>(0x43 >>> 8) & 0x0F</code> is 0, meaning there is no more bit set in this byte.</li>
-        /// </ul>
+        /// byte. For example <code>BitList(12)</code> returns 0x43:
+        /// <list type="bullet">
+        ///     <item><description><code>0x43 &amp; 0x0F</code> is 3, meaning the first bit set is at offset 3-1 = 2,</description></item>
+        ///     <item><description><code>(0x43 >>> 4) &amp; 0x0F</code> is 4, meaning there is a second bit set at offset 4-1=3,</description></item>
+        ///     <item><description><code>(0x43 >>> 8) &amp; 0x0F</code> is 0, meaning there is no more bit set in this byte.</description></item>
+        /// </list>
         /// </summary>
         public static int BitList(byte b)
         {
@@ -126,7 +129,7 @@ namespace Lucene.Net.Util // from org.apache.solr.util rev 555343
         // intrinsic since Java 6u18) in a naive loop, see LUCENE-2221
 
         /// <summary>
-        /// Returns the number of set bits in an array of longs. </summary>
+        /// Returns the number of set bits in an array of <see cref="long"/>s. </summary>
         public static long Pop_Array(long[] arr, int wordOffset, int numWords)
         {
             long popCount = 0;
@@ -139,7 +142,7 @@ namespace Lucene.Net.Util // from org.apache.solr.util rev 555343
 
         /// <summary>
         /// Returns the popcount or cardinality of the two sets after an intersection.
-        ///  Neither array is modified.
+        /// Neither array is modified.
         /// </summary>
         public static long Pop_Intersect(long[] arr1, long[] arr2, int wordOffset, int numWords)
         {
@@ -153,7 +156,7 @@ namespace Lucene.Net.Util // from org.apache.solr.util rev 555343
 
         /// <summary>
         /// Returns the popcount or cardinality of the union of two sets.
-        ///  Neither array is modified.
+        /// Neither array is modified.
         /// </summary>
         public static long Pop_Union(long[] arr1, long[] arr2, int wordOffset, int numWords)
         {
@@ -166,8 +169,8 @@ namespace Lucene.Net.Util // from org.apache.solr.util rev 555343
         }
 
         /// <summary>
-        /// Returns the popcount or cardinality of A & ~B.
-        ///  Neither array is modified.
+        /// Returns the popcount or cardinality of A &amp; ~B.
+        /// Neither array is modified.
         /// </summary>
         public static long Pop_AndNot(long[] arr1, long[] arr2, int wordOffset, int numWords)
         {
@@ -194,7 +197,7 @@ namespace Lucene.Net.Util // from org.apache.solr.util rev 555343
         }
 
         /// <summary>
-        /// returns the next highest power of two, or the current value if it's already a power of two or zero </summary>
+        /// Returns the next highest power of two, or the current value if it's already a power of two or zero </summary>
         public static int NextHighestPowerOfTwo(int v)
         {
             v--;
@@ -208,7 +211,7 @@ namespace Lucene.Net.Util // from org.apache.solr.util rev 555343
         }
 
         /// <summary>
-        /// returns the next highest power of two, or the current value if it's already a power of two or zero </summary>
+        /// Returns the next highest power of two, or the current value if it's already a power of two or zero </summary>
         public static long NextHighestPowerOfTwo(long v)
         {
             v--;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/Bits.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Bits.cs b/src/Lucene.Net/Util/Bits.cs
index fc7a256..d1cbcf9 100644
--- a/src/Lucene.Net/Util/Bits.cs
+++ b/src/Lucene.Net/Util/Bits.cs
@@ -19,17 +19,18 @@ namespace Lucene.Net.Util
 
     /// <summary>
     /// Interface for Bitset-like structures.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public interface IBits
     {
         /// <summary>
-        /// Returns the value of the bit with the specified <code>index</code>.
+        /// Returns the value of the bit with the specified <paramref name="index"/>.
         /// </summary>
-        /// <param name="index"> index, should be non-negative and &lt; <seealso cref="#length()"/>.
+        /// <param name="index"> Index, should be non-negative and &lt; <see cref="Length"/>.
         ///        The result of passing negative or out of bounds values is undefined
         ///        by this interface, <b>just don't do it!</b> </param>
-        /// <returns> <code>true</code> if the bit is set, <code>false</code> otherwise. </returns>
+        /// <returns> <c>true</c> if the bit is set, <c>false</c> otherwise. </returns>
         bool Get(int index);
 
         /// <summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/BroadWord.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/BroadWord.cs b/src/Lucene.Net/Util/BroadWord.cs
index 34864a4..3cc9b5c 100644
--- a/src/Lucene.Net/Util/BroadWord.cs
+++ b/src/Lucene.Net/Util/BroadWord.cs
@@ -23,14 +23,14 @@ namespace Lucene.Net.Util
     /// <summary>
     /// Methods and constants inspired by the article
     /// "Broadword Implementation of Rank/Select Queries" by Sebastiano Vigna, January 30, 2012:
-    /// <ul>
-    /// <li>algorithm 1: <seealso cref="#bitCount(long)"/>, count of set bits in a <code>long</code>
-    /// <li>algorithm 2: <seealso cref="#select(long, int)"/>, selection of a set bit in a <code>long</code>,
-    /// <li>bytewise signed smaller &lt;<sub><small>8</small></sub> operator: <seealso cref="#smallerUpTo7_8(long,long)"/>.
-    /// <li>shortwise signed smaller &lt;<sub><small>16</small></sub> operator: <seealso cref="#smallerUpto15_16(long,long)"/>.
-    /// <li>some of the Lk and Hk constants that are used by the above:
-    /// L8 <seealso cref="#L8_L"/>, H8 <seealso cref="#H8_L"/>, L9 <seealso cref="#L9_L"/>, L16 <seealso cref="#L16_L"/>and H16 <seealso cref="#H8_L"/>.
-    /// </ul>
+    /// <list type="bullet">
+    ///     <item><description>algorithm 1: <see cref="BitCount(long)"/>, count of set bits in a <see cref="long"/></description></item>
+    ///     <item><description>algorithm 2: <see cref="Select(long, int)"/>, selection of a set bit in a <see cref="long"/>,</description></item>
+    ///     <item><description>bytewise signed smaller &lt;<sub><small>8</small></sub> operator: <see cref="SmallerUpTo7_8(long,long)"/>.</description></item>
+    ///     <item><description>shortwise signed smaller &lt;<sub><small>16</small></sub> operator: <see cref="SmallerUpto15_16(long,long)"/>.</description></item>
+    ///     <item><description>some of the Lk and Hk constants that are used by the above:
+    ///         L8 <see cref="L8_L"/>, H8 <see cref="H8_L"/>, L9 <see cref="L9_L"/>, L16 <see cref="L16_L"/> and H16 <see cref="H16_L"/>.</description></item>
+    /// </list>
     /// @lucene.internal
     /// </summary>
     public sealed class BroadWord
@@ -41,9 +41,9 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Bit count of a long.
-        /// Only here to compare the implementation with <seealso cref="#select(long,int)"/>,
-        /// normally <seealso cref="Long#bitCount"/> is preferable. </summary>
+        /// Bit count of a <see cref="long"/>.
+        /// Only here to compare the implementation with <see cref="Select(long, int)"/>,
+        /// normally <see cref="Number.BitCount(long)"/> is preferable. </summary>
         /// <returns> The total number of 1 bits in x. </returns>
         internal static int BitCount(long x)
         {
@@ -58,7 +58,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Select a 1-bit from a long. </summary>
+        /// Select a 1-bit from a <see cref="long"/>. </summary>
         /// <returns> The index of the r-th 1 bit in x, or if no such bit exists, 72. </returns>
         public static int Select(long x, int r)
         {
@@ -89,9 +89,9 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// A signed bytewise smaller &lt;<sub><small>8</small></sub> operator, for operands 0L<= x, y <=0x7L.
-        /// this uses the following numbers of basic long operations: 1 or, 2 and, 2 xor, 1 minus, 1 not. </summary>
-        /// <returns> A long with bits set in the <seealso cref="#H8_L"/> positions corresponding to each input signed byte pair that compares smaller. </returns>
+        /// A signed bytewise smaller &lt;<sub><small>8</small></sub> operator, for operands 0L&lt;= x, y &lt;=0x7L.
+        /// This uses the following numbers of basic <see cref="long"/> operations: 1 or, 2 and, 2 xor, 1 minus, 1 not. </summary>
+        /// <returns> A <see cref="long"/> with bits set in the <see cref="H8_L"/> positions corresponding to each input signed byte pair that compares smaller. </returns>
         public static long SmallerUpTo7_8(long x, long y)
         {
             // See section 4, page 5, line 14 of the Vigna article:
@@ -100,8 +100,8 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// An unsigned bytewise smaller &lt;<sub><small>8</small></sub> operator.
-        /// this uses the following numbers of basic long operations: 3 or, 2 and, 2 xor, 1 minus, 1 not. </summary>
-        /// <returns> A long with bits set in the <seealso cref="#H8_L"/> positions corresponding to each input unsigned byte pair that compares smaller. </returns>
+        /// This uses the following numbers of basic <see cref="long"/> operations: 3 or, 2 and, 2 xor, 1 minus, 1 not. </summary>
+        /// <returns> A <see cref="long"/> with bits set in the <see cref="H8_L"/> positions corresponding to each input unsigned byte pair that compares smaller. </returns>
         public static long Smalleru_8(long x, long y)
         {
             // See section 4, 8th line from the bottom of the page 5, of the Vigna article:
@@ -110,8 +110,8 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// An unsigned bytewise not equals 0 operator.
-        /// this uses the following numbers of basic long operations: 2 or, 1 and, 1 minus. </summary>
-        /// <returns> A long with bits set in the <seealso cref="#H8_L"/> positions corresponding to each unsigned byte that does not equal 0. </returns>
+        /// This uses the following numbers of basic <see cref="long"/> operations: 2 or, 1 and, 1 minus. </summary>
+        /// <returns> A <see cref="long"/> with bits set in the <see cref="H8_L"/> positions corresponding to each unsigned byte that does not equal 0. </returns>
         public static long NotEquals0_8(long x)
         {
             // See section 4, line 6-8 on page 6, of the Vigna article:
@@ -120,8 +120,8 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// A bytewise smaller &lt;<sub><small>16</small></sub> operator.
-        /// this uses the following numbers of basic long operations: 1 or, 2 and, 2 xor, 1 minus, 1 not. </summary>
-        /// <returns> A long with bits set in the <seealso cref="#H16_L"/> positions corresponding to each input signed short pair that compares smaller. </returns>
+        /// This uses the following numbers of basic <see cref="long"/> operations: 1 or, 2 and, 2 xor, 1 minus, 1 not. </summary>
+        /// <returns> A <see cref="long"/> with bits set in the <see cref="H16_L"/> positions corresponding to each input signed short pair that compares smaller. </returns>
         public static long SmallerUpto15_16(long x, long y)
         {
             return (((x | H16_L) - (y & (~H16_L))) ^ x ^ ~y) & H16_L;
@@ -129,8 +129,8 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Lk denotes the constant whose ones are in position 0, k, 2k, . . .
-        ///  These contain the low bit of each group of k bits.
-        ///  The suffix _L indicates the long implementation.
+        /// These contain the low bit of each group of k bits.
+        /// The suffix _L indicates the <see cref="long"/> implementation.
         /// </summary>
         public const long L8_L = 0x0101010101010101L;
 
@@ -138,16 +138,16 @@ namespace Lucene.Net.Util
         public const long L16_L = 0x0001000100010001L;
 
         /// <summary>
-        /// Hk = Lk << (k-1) .
-        ///  These contain the high bit of each group of k bits.
-        ///  The suffix _L indicates the long implementation.
+        /// Hk = Lk &lt;&lt; (k-1) .
+        /// These contain the high bit of each group of k bits.
+        /// The suffix _L indicates the <see cref="long"/> implementation.
         /// </summary>
         public static readonly long H8_L = L8_L << 7;
 
         public static readonly long H16_L = L16_L << 15;
 
         /// <summary>
-        /// Naive implementation of <seealso cref="#select(long,int)"/>, using <seealso cref="Long#numberOfTrailingZeros"/> repetitively.
+        /// Naive implementation of <see cref="Select(long, int)"/>, using <see cref="Number.NumberOfTrailingZeros(long)"/> repetitively.
         /// Works relatively fast for low ranks. </summary>
         /// <returns> The index of the r-th 1 bit in x, or if no such bit exists, 72. </returns>
         public static int SelectNaive(long x, int r)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/ByteBlockPool.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/ByteBlockPool.cs b/src/Lucene.Net/Util/ByteBlockPool.cs
index 5a1f0ae..58c13cd8 100644
--- a/src/Lucene.Net/Util/ByteBlockPool.cs
+++ b/src/Lucene.Net/Util/ByteBlockPool.cs
@@ -26,23 +26,22 @@ namespace Lucene.Net.Util
 
     /// <summary>
     /// Class that Posting and PostingVector use to write byte
-    /// streams into shared fixed-size byte[] arrays.  The idea
-    /// is to allocate slices of increasing lengths For
+    /// streams into shared fixed-size <see cref="T:byte[]"/> arrays.  The idea
+    /// is to allocate slices of increasing lengths. For
     /// example, the first slice is 5 bytes, the next slice is
     /// 14, etc.  We start by writing our bytes into the first
     /// 5 bytes.  When we hit the end of the slice, we allocate
     /// the next slice and then write the address of the new
     /// slice into the last 4 bytes of the previous slice (the
     /// "forwarding address").
-    ///
+    /// <para/>
     /// Each slice is filled with 0's initially, and we mark
-    /// the end with a non-zero byte.  this way the methods
+    /// the end with a non-zero byte.  This way the methods
     /// that are writing into the slice don't need to record
     /// its length and instead allocate a new slice once they
     /// hit a non-zero byte.
-    ///
+    /// <para/>
     /// @lucene.internal
-    ///
     /// </summary>
     public sealed class ByteBlockPool
     {
@@ -52,7 +51,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Abstract class for allocating and freeing byte
-        ///  blocks.
+        /// blocks.
         /// </summary>
         public abstract class Allocator
         {
@@ -78,7 +77,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// A simple <seealso cref="Allocator"/> that never recycles. </summary>
+        /// A simple <see cref="Allocator"/> that never recycles. </summary>
         public sealed class DirectAllocator : Allocator
         {
             public DirectAllocator()
@@ -97,8 +96,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// A simple <seealso cref="Allocator"/> that never recycles, but
-        ///  tracks how much total RAM is in use.
+        /// A simple <see cref="Allocator"/> that never recycles, but
+        /// tracks how much total RAM is in use.
         /// </summary>
         public class DirectTrackingAllocator : Allocator
         {
@@ -132,7 +131,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// array of buffers currently used in the pool. Buffers are allocated if
+        /// Array of buffers currently used in the pool. Buffers are allocated if
         /// needed don't modify this outside of this class.
         /// </summary>
         [WritableArray]
@@ -181,9 +180,9 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Resets the pool to its initial state reusing the first buffer and fills all
-        /// buffers with <tt>0</tt> bytes before they reused or passed to
-        /// <seealso cref="Allocator#recycleByteBlocks(byte[][], int, int)"/>. Calling
-        /// <seealso cref="ByteBlockPool#nextBuffer()"/> is not needed after reset.
+        /// buffers with <c>0</c> bytes before they are reused or passed to
+        /// <see cref="Allocator.RecycleByteBlocks(byte[][], int, int)"/>. Calling
+        /// <see cref="ByteBlockPool.NextBuffer()"/> is not needed after reset.
         /// </summary>
         public void Reset()
         {
@@ -192,12 +191,12 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Expert: Resets the pool to its initial state reusing the first buffer. Calling
-        /// <seealso cref="ByteBlockPool#nextBuffer()"/> is not needed after reset. </summary>
-        /// <param name="zeroFillBuffers"> if <code>true</code> the buffers are filled with <tt>0</tt>.
-        ///        this should be set to <code>true</code> if this pool is used with slices. </param>
-        /// <param name="reuseFirst"> if <code>true</code> the first buffer will be reused and calling
-        ///        <seealso cref="ByteBlockPool#nextBuffer()"/> is not needed after reset iff the
-        ///        block pool was used before ie. <seealso cref="ByteBlockPool#nextBuffer()"/> was called before. </param>
+        /// <see cref="ByteBlockPool.NextBuffer()"/> is not needed after reset. </summary>
+        /// <param name="zeroFillBuffers"> if <c>true</c> the buffers are filled with <c>0</c>.
+        ///        This should be set to <c>true</c> if this pool is used with slices. </param>
+        /// <param name="reuseFirst"> if <c>true</c> the first buffer will be reused and calling
+        ///        <see cref="ByteBlockPool.NextBuffer()"/> is not needed after reset if the
+        ///        block pool was used before, i.e. <see cref="ByteBlockPool.NextBuffer()"/> was called before. </param>
         public void Reset(bool zeroFillBuffers, bool reuseFirst)
         {
             if (bufferUpto != -1)
@@ -209,11 +208,9 @@ namespace Lucene.Net.Util
                     for (int i = 0; i < bufferUpto; i++)
                     {
                         // Fully zero fill buffers that we fully used
-                        //Array.Clear(Buffers[i], 0, Buffers[i].Length);
                         Arrays.Fill(buffers[i], (byte)0);
                     }
                     // Partial zero fill the final buffer
-                    //Array.Clear(Buffers[BufferUpto], 0, BufferUpto);
                     Arrays.Fill(buffers[bufferUpto], 0, ByteUpto, (byte)0);
                 }
 
@@ -222,7 +219,6 @@ namespace Lucene.Net.Util
                     int offset = reuseFirst ? 1 : 0;
                     // Recycle all but the first buffer
                     allocator.RecycleByteBlocks(buffers, offset, 1 + bufferUpto);
-                    //Array.Clear(Buffers, 0, Buffers.Length);
                     Arrays.Fill(buffers, offset, 1 + bufferUpto, null);
                 }
                 if (reuseFirst)
@@ -244,9 +240,9 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Advances the pool to its next buffer. this method should be called once
+        /// Advances the pool to its next buffer. This method should be called once
         /// after the constructor to initialize the pool. In contrast to the
-        /// constructor a <seealso cref="ByteBlockPool#reset()"/> call will advance the pool to
+        /// constructor a <see cref="ByteBlockPool.Reset()"/> call will advance the pool to
         /// its first buffer immediately.
         /// </summary>
         public void NextBuffer()
@@ -266,7 +262,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Allocates a new slice with the given size.</summary>
-        /// <seealso>ByteBlockPool#FIRST_LEVEL_SIZE</seealso>
+        /// <seealso cref="ByteBlockPool.FIRST_LEVEL_SIZE"/>
         public int NewSlice(int size)
         {
             if (ByteUpto > BYTE_BLOCK_SIZE - size)
@@ -286,7 +282,7 @@ namespace Lucene.Net.Util
         // bytes, next slice is 14 bytes, etc.
 
         /// <summary>
-        /// An array holding the offset into the <seealso cref="ByteBlockPool#LEVEL_SIZE_ARRAY"/>
+        /// An array holding the offset into the <see cref="ByteBlockPool.LEVEL_SIZE_ARRAY"/>
         /// to quickly navigate to the next slice level.
         /// </summary>
         public static readonly int[] NEXT_LEVEL_ARRAY = new int[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 9 };
@@ -298,7 +294,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// The first level size for new slices </summary>
-        /// <seealso cref= ByteBlockPool#newSlice(int) </seealso>
+        /// <seealso cref="ByteBlockPool.NewSlice(int)"/>
         public static readonly int FIRST_LEVEL_SIZE = LEVEL_SIZE_ARRAY[0];
 
         /// <summary>
@@ -361,7 +357,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Appends the bytes in the provided <seealso cref="BytesRef"/> at
+        /// Appends the bytes in the provided <see cref="BytesRef"/> at
         /// the current position.
         /// </summary>
         public void Append(BytesRef bytes)
@@ -398,8 +394,8 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Reads bytes bytes out of the pool starting at the given offset with the given
-        /// length into the given byte array at offset <tt>off</tt>.
-        /// <p>Note: this method allows to copy across block boundaries.</p>
+        /// length into the given byte array at offset <c>off</c>.
+        /// <para>Note: this method allows to copy across block boundaries.</para>
         /// </summary>
         public void ReadBytes(long offset, byte[] bytes, int off, int length)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/BytesRef.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/BytesRef.cs b/src/Lucene.Net/Util/BytesRef.cs
index 2530231..624bda5 100644
--- a/src/Lucene.Net/Util/BytesRef.cs
+++ b/src/Lucene.Net/Util/BytesRef.cs
@@ -25,14 +25,14 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// Represents byte[], as a slice (offset + length) into an
-    ///  existing byte[].  The <seealso cref="#bytes"/> member should never be null;
-    ///  use <seealso cref="#EMPTY_BYTES"/> if necessary.
+    /// Represents <see cref="T:byte[]"/>, as a slice (offset + length) into an
+    /// existing <see cref="T:byte[]"/>.  The <see cref="Bytes"/> property should never be <c>null</c>;
+    /// use <see cref="EMPTY_BYTES"/> if necessary.
     ///
-    /// <p><b>Important note:</b> Unless otherwise noted, Lucene uses this class to
+    /// <para/><b>Important note:</b> Unless otherwise noted, Lucene uses this class to
     /// represent terms that are encoded as <b>UTF8</b> bytes in the index. To
-    /// convert them to a Java <seealso cref="String"/> (which is UTF16), use <seealso cref="#utf8ToString"/>.
-    /// Using code like {@code new String(bytes, offset, length)} to do this
+    /// convert them to a .NET <see cref="string"/> (which is UTF16), use <see cref="Utf8ToString()"/>.
+    /// Using code like <c>new String(bytes, offset, length)</c> to do this
     /// is <b>wrong</b>, as it does not respect the correct character set
     /// and may return wrong results (depending on the platform's defaults)!
     /// </summary>
@@ -46,7 +46,7 @@ namespace Lucene.Net.Util
         public static readonly byte[] EMPTY_BYTES = new byte[0];
 
         /// <summary>
-        /// The contents of the BytesRef. Should never be {@code null}.
+        /// The contents of the <see cref="BytesRef"/>. Should never be <c>null</c>.
         /// </summary>
         [WritableArray]
         [SuppressMessage("Microsoft.Performance", "CA1819", Justification = "Lucene's design requires some writable array properties")]
@@ -68,15 +68,15 @@ namespace Lucene.Net.Util
         public int Length { get; set; }
 
         /// <summary>
-        /// Create a BytesRef with <seealso cref="#EMPTY_BYTES"/> </summary>
+        /// Create a <see cref="BytesRef"/> with <see cref="EMPTY_BYTES"/> </summary>
         public BytesRef()
             : this(EMPTY_BYTES)
         {
         }
 
         /// <summary>
-        /// this instance will directly reference bytes w/o making a copy.
-        /// bytes should not be null.
+        /// This instance will directly reference <paramref name="bytes"/> w/o making a copy.
+        /// <paramref name="bytes"/> should not be <c>null</c>.
         /// </summary>
         public BytesRef(byte[] bytes, int offset, int length)
         {
@@ -87,8 +87,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// this instance will directly reference bytes w/o making a copy.
-        /// bytes should not be null
+        /// This instance will directly reference <paramref name="bytes"/> w/o making a copy.
+        /// <paramref name="bytes"/> should not be <c>null</c>.
         /// </summary>
         public BytesRef(byte[] bytes)
             : this(bytes, 0, bytes.Length)
@@ -96,7 +96,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Create a BytesRef pointing to a new array of size <code>capacity</code>.
+        /// Create a <see cref="BytesRef"/> pointing to a new array of size <paramref name="capacity"/>.
         /// Offset and length will both be zero.
         /// </summary>
         public BytesRef(int capacity)
@@ -105,10 +105,10 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Initialize the byte[] from the UTF8 bytes
-        /// for the provided String.
+        /// Initialize the <see cref="T:byte[]"/> from the UTF8 bytes
+        /// for the provided <see cref="ICharSequence"/>.
         /// </summary>
-        /// <param name="text"> this must be well-formed
+        /// <param name="text"> This must be well-formed
         /// unicode text, with no unpaired surrogates. </param>
         public BytesRef(ICharSequence text)
             : this()
@@ -117,10 +117,10 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Initialize the byte[] from the UTF8 bytes
-        /// for the provided String.
+        /// Initialize the <see cref="T:byte[]"/> from the UTF8 bytes
+        /// for the provided <see cref="string"/>.
         /// </summary>
-        /// <param name="text"> this must be well-formed
+        /// <param name="text"> This must be well-formed
         /// unicode text, with no unpaired surrogates. </param>
         public BytesRef(string text)
             : this()
@@ -129,7 +129,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Copies the UTF8 bytes for this string.
+        /// Copies the UTF8 bytes for this <see cref="ICharSequence"/>.
         /// </summary>
         /// <param name="text"> Must be well-formed unicode text, with no
         /// unpaired surrogates or invalid UTF16 code units. </param>
@@ -140,7 +140,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Copies the UTF8 bytes for this string.
+        /// Copies the UTF8 bytes for this <see cref="string"/>.
         /// </summary>
         /// <param name="text"> Must be well-formed unicode text, with no
         /// unpaired surrogates or invalid UTF16 code units. </param>
@@ -151,11 +151,12 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Expert: compares the bytes against another BytesRef,
-        /// returning true if the bytes are equal.
+        /// Expert: Compares the bytes against another <see cref="BytesRef"/>,
+        /// returning <c>true</c> if the bytes are equal.
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
-        /// <param name="other"> Another BytesRef, should not be null.
-        /// @lucene.internal </param>
+        /// <param name="other"> Another <see cref="BytesRef"/>, should not be <c>null</c>. </param>
         public bool BytesEquals(BytesRef other)
         {
             Debug.Assert(other != null);
@@ -184,18 +185,18 @@ namespace Lucene.Net.Util
         /// <b>not</b> copied and will be shared by both the returned object and this
         /// object.
         /// </summary>
-        /// <seealso cref= #deepCopyOf </seealso>
+        /// <seealso cref="DeepCopyOf(BytesRef)"/>
         public object Clone()
         {
             return new BytesRef(bytes, Offset, Length);
         }
 
         /// <summary>
-        /// Calculates the hash code as required by TermsHash during indexing.
-        ///  <p> this is currently implemented as MurmurHash3 (32
-        ///  bit), using the seed from {@link
-        ///  StringHelper#GOOD_FAST_HASH_SEED}, but is subject to
-        ///  change from release to release.
+        /// Calculates the hash code as required by <see cref="Index.TermsHash"/> during indexing.
+        /// <para/> This is currently implemented as MurmurHash3 (32
+        /// bit), using the seed from 
+        /// <see cref="StringHelper.GOOD_FAST_HASH_SEED"/>, but is subject to
+        /// change from release to release.
         /// </summary>
         public override int GetHashCode()
         {
@@ -217,7 +218,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Interprets stored bytes as UTF8 bytes, returning the
-        ///  resulting string
+        /// resulting <see cref="string"/>.
         /// </summary>
         public string Utf8ToString()
         {
@@ -246,8 +247,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Copies the bytes from the given <seealso cref="BytesRef"/>
-        /// <p>
+        /// Copies the bytes from the given <see cref="BytesRef"/>
+        /// <para/>
         /// NOTE: if this would exceed the array size, this method creates a
         /// new reference array.
         /// </summary>
@@ -263,8 +264,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Appends the bytes from the given <seealso cref="BytesRef"/>
-        /// <p>
+        /// Appends the bytes from the given <see cref="BytesRef"/>
+        /// <para/>
         /// NOTE: if this would exceed the array size, this method creates a
         /// new reference array.
         /// </summary>
@@ -284,8 +285,9 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Used to grow the reference array.
-        ///
+        /// <para/>
         /// In general this should not be used as it does not take the offset into account.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public void Grow(int newLength)
@@ -338,13 +340,13 @@ namespace Lucene.Net.Util
 
         // LUCENENET NOTE: De-nested Utf8SortedAsUtf16Comparer class to prevent naming conflict
 
-        
+
 
         /// <summary>
-        /// Creates a new BytesRef that points to a copy of the bytes from
-        /// <code>other</code>
-        /// <p>
-        /// The returned BytesRef will have a length of other.length
+        /// Creates a new <see cref="BytesRef"/> that points to a copy of the bytes from
+        /// <paramref name="other"/>.
+        /// <para/>
+        /// The returned <see cref="BytesRef"/> will have a length of <c>other.Length</c>
         /// and an offset of zero.
         /// </summary>
         public static BytesRef DeepCopyOf(BytesRef other)
@@ -356,7 +358,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Performs internal consistency checks.
-        /// Always returns true (or throws InvalidOperationException)
+        /// Always returns true (or throws <see cref="InvalidOperationException"/>)
         /// </summary>
         public bool IsValid()
         {


[30/48] lucenenet git commit: Lucene.Net.Util.Automaton: Fixed XML documentation comments

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/src/Lucene.Net/Util/Automaton/SpecialOperations.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Automaton/SpecialOperations.cs b/src/Lucene.Net/Util/Automaton/SpecialOperations.cs
index 9de45e5..d5f75db 100644
--- a/src/Lucene.Net/Util/Automaton/SpecialOperations.cs
+++ b/src/Lucene.Net/Util/Automaton/SpecialOperations.cs
@@ -39,7 +39,7 @@ namespace Lucene.Net.Util.Automaton
 
     /// <summary>
     /// Special automata operations.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     internal sealed class SpecialOperations
@@ -49,7 +49,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Finds the largest entry whose value is less than or equal to c, or 0 if
+        /// Finds the largest entry whose value is less than or equal to <paramref name="c"/>, or 0 if
         /// there is no such entry.
         /// </summary>
         internal static int FindIndex(int c, int[] points)
@@ -76,7 +76,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Returns true if the language of this automaton is finite.
+        /// Returns <c>true</c> if the language of this automaton is finite.
         /// </summary>
         public static bool IsFinite(Automaton a)
         {
@@ -88,7 +88,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Checks whether there is a loop containing s. (this is sufficient since
+        /// Checks whether there is a loop containing <paramref name="s"/>. (This is sufficient since
         /// there are never transitions to dead states.)
         /// </summary>
         // TODO: not great that this is recursive... in theory a
@@ -112,7 +112,7 @@ namespace Lucene.Net.Util.Automaton
         /// Returns the longest string that is a prefix of all accepted strings and
         /// visits each state at most once.
         /// </summary>
-        /// <returns> common prefix </returns>
+        /// <returns> Common prefix. </returns>
         public static string GetCommonPrefix(Automaton a)
         {
             if (a.IsSingleton)
@@ -182,7 +182,7 @@ namespace Lucene.Net.Util.Automaton
         /// Returns the longest string that is a suffix of all accepted strings and
         /// visits each state at most once.
         /// </summary>
-        /// <returns> common suffix </returns>
+        /// <returns> Common suffix. </returns>
         public static string GetCommonSuffix(Automaton a)
         {
             if (a.IsSingleton) // if singleton, the suffix is the string itself.
@@ -281,8 +281,8 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Returns the set of accepted strings, assuming that at most
-        /// <code>limit</code> strings are accepted. If more than <code>limit</code>
-        /// strings are accepted, the first limit strings found are returned. If <code>limit</code>&lt;0, then
+        /// <paramref name="limit"/> strings are accepted. If more than <paramref name="limit"/>
+        /// strings are accepted, the first <paramref name="limit"/> strings found are returned. If <paramref name="limit"/>&lt;0, then
         /// the limit is infinite.
         /// </summary>
         public static ISet<Int32sRef> GetFiniteStrings(Automaton a, int limit)
@@ -304,8 +304,8 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Returns the strings that can be produced from the given state, or
-        /// false if more than <code>limit</code> strings are found.
-        /// <code>limit</code>&lt;0 means "infinite".
+        /// <c>false</c> if more than <paramref name="limit"/> strings are found.
+        /// <paramref name="limit"/>&lt;0 means "infinite".
         /// </summary>
         private static bool GetFiniteStrings(State s, HashSet<State> pathstates, HashSet<Int32sRef> strings, Int32sRef path, int limit)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/src/Lucene.Net/Util/Automaton/State.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Automaton/State.cs b/src/Lucene.Net/Util/Automaton/State.cs
index b3df5a3..8db9cdb 100644
--- a/src/Lucene.Net/Util/Automaton/State.cs
+++ b/src/Lucene.Net/Util/Automaton/State.cs
@@ -37,8 +37,8 @@ using System.Text;
 namespace Lucene.Net.Util.Automaton
 {
     /// <summary>
-    /// <tt>Automaton</tt> state.
-    ///
+    /// <see cref="Automaton"/> state.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class State : IComparable<State>
@@ -150,7 +150,7 @@ namespace Lucene.Net.Util.Automaton
         /// Returns the set of outgoing transitions. Subsequent changes are reflected
         /// in the automaton.
         /// </summary>
-        /// <returns> transition set </returns>
+        /// <returns> Transition set. </returns>
         public virtual IEnumerable<Transition> GetTransitions()
         {
             return new TransitionsIterable(this);
@@ -170,7 +170,7 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Adds an outgoing transition.
         /// </summary>
-        /// <param name="t"> transition </param>
+        /// <param name="t"> Transition. </param>
         public virtual void AddTransition(Transition t)
         {
             if (numTransitions == transitionsArray.Length)
@@ -183,9 +183,8 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Sets acceptance for this state.
+        /// Sets acceptance for this state. If <c>true</c>, this state is an accept state.
         /// </summary>
-        /// <param name="accept"> if true, this state is an accept state </param>
         public virtual bool Accept
         {
             set
@@ -201,9 +200,9 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Performs lookup in transitions, assuming determinism.
         /// </summary>
-        /// <param name="c"> codepoint to look up </param>
-        /// <returns> destination state, null if no matching outgoing transition </returns>
-        /// <seealso cref= #step(int, Collection) </seealso>
+        /// <param name="c"> Codepoint to look up. </param>
+        /// <returns> Destination state, <c>null</c> if no matching outgoing transition. </returns>
+        /// <seealso cref="Step(int, ICollection{State})"/>
         public virtual State Step(int c)
         {
             Debug.Assert(c >= 0);
@@ -221,9 +220,9 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Performs lookup in transitions, allowing nondeterminism.
         /// </summary>
-        /// <param name="c"> codepoint to look up </param>
-        /// <param name="dest"> collection where destination states are stored </param>
-        /// <seealso cref= #step(int) </seealso>
+        /// <param name="c"> Codepoint to look up. </param>
+        /// <param name="dest"> Collection where destination states are stored. </param>
+        /// <seealso cref="Step(int)"/>
         public virtual void Step(int c, ICollection<State> dest)
         {
             for (int i = 0; i < numTransitions; i++)
@@ -238,9 +237,9 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Virtually adds an epsilon transition to the target
-        ///  {@code to} state.  this is implemented by copying all
-        ///  transitions from {@code to} to this state, and if {@code
-        ///  to} is an accept state then set accept for this state.
+        /// <paramref name="to"/> state.  This is implemented by copying all
+        /// transitions from <paramref name="to"/> to this state, and if 
+        /// <paramref name="to"/> is an accept state then set accept for this state.
         /// </summary>
         internal virtual void AddEpsilon(State to)
         {
@@ -255,7 +254,7 @@ namespace Lucene.Net.Util.Automaton
         }
 
         /// <summary>
-        /// Downsizes transitionArray to numTransitions </summary>
+        /// Downsizes transitionArray to numTransitions. </summary>
         public virtual void TrimTransitionsArray()
         {
             if (numTransitions < transitionsArray.Length)
@@ -324,9 +323,8 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Returns sorted list of outgoing transitions.
         /// </summary>
-        /// <param name="to_first"> if true, order by (to, min, reverse max); otherwise (min,
-        ///          reverse max, to) </param>
-        /// <returns> transition list </returns>
+        /// <param name="comparer"> Comparer to sort with. </param>
+        /// <returns> Transition list. </returns>
 
         /// <summary>
         /// Sorts transitions array in-place. </summary>
@@ -341,10 +339,10 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Return this state's number.
-        /// <p>
-        /// Expert: Will be useless unless <seealso cref="Automaton#getNumberedStates"/>
+        /// <para/>
+        /// Expert: Will be useless unless <see cref="Automaton.GetNumberedStates()"/>
         /// has been called first to number the states. </summary>
-        /// <returns> the number </returns>
+        /// <returns> The number. </returns>
         public virtual int Number
         {
             get
@@ -355,7 +353,7 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Returns string describing this state. Normally invoked via
-        /// <seealso cref="Automaton#toString()"/>.
+        /// <see cref="Automaton.ToString()"/>.
         /// </summary>
         public override string ToString()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/src/Lucene.Net/Util/Automaton/StatePair.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Automaton/StatePair.cs b/src/Lucene.Net/Util/Automaton/StatePair.cs
index 6b7b182..939c386 100644
--- a/src/Lucene.Net/Util/Automaton/StatePair.cs
+++ b/src/Lucene.Net/Util/Automaton/StatePair.cs
@@ -31,7 +31,7 @@ namespace Lucene.Net.Util.Automaton
 {
     /// <summary>
     /// Pair of states.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class StatePair
@@ -50,8 +50,8 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Constructs a new state pair.
         /// </summary>
-        /// <param name="s1"> first state </param>
-        /// <param name="s2"> second state </param>
+        /// <param name="s1"> First state. </param>
+        /// <param name="s2"> Second state. </param>
         public StatePair(State s1, State s2)
         {
             this.S1 = s1;
@@ -61,7 +61,7 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Returns first component of this pair.
         /// </summary>
-        /// <returns> first state </returns>
+        /// <returns> First state. </returns>
         public virtual State FirstState
         {
             get
@@ -73,7 +73,7 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Returns second component of this pair.
         /// </summary>
-        /// <returns> second state </returns>
+        /// <returns> Second state. </returns>
         public virtual State SecondState
         {
             get
@@ -85,9 +85,9 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Checks for equality.
         /// </summary>
-        /// <param name="obj"> object to compare with </param>
-        /// <returns> true if <tt>obj</tt> represents the same pair of states as this
-        ///         pair </returns>
+        /// <param name="obj"> Object to compare with. </param>
+        /// <returns> <c>true</c> if <paramref name="obj"/> represents the same pair of states as this
+        ///         pair. </returns>
         public override bool Equals(object obj)
         {
             if (obj is StatePair)
@@ -104,7 +104,7 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Returns hash code.
         /// </summary>
-        /// <returns> hash code </returns>
+        /// <returns> Hash code. </returns>
         public override int GetHashCode()
         {
             return S1.GetHashCode() + S2.GetHashCode();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/src/Lucene.Net/Util/Automaton/Transition.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Automaton/Transition.cs b/src/Lucene.Net/Util/Automaton/Transition.cs
index 4b6ffe3..545deb5 100644
--- a/src/Lucene.Net/Util/Automaton/Transition.cs
+++ b/src/Lucene.Net/Util/Automaton/Transition.cs
@@ -35,11 +35,11 @@ using System.Text;
 namespace Lucene.Net.Util.Automaton
 {
     /// <summary>
-    /// <tt>Automaton</tt> transition.
-    /// <p>
+    /// <see cref="Automaton"/> transition.
+    /// <para/>
     /// A transition, which belongs to a source state, consists of a Unicode
     /// codepoint interval and a destination state.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class Transition
@@ -55,8 +55,8 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Constructs a new singleton interval transition.
         /// </summary>
-        /// <param name="c"> transition codepoint </param>
-        /// <param name="to"> destination state </param>
+        /// <param name="c"> Transition codepoint. </param>
+        /// <param name="to"> Destination state. </param>
         public Transition(int c, State to)
         {
             Debug.Assert(c >= 0);
@@ -67,9 +67,9 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Constructs a new transition. Both end points are included in the interval.
         /// </summary>
-        /// <param name="min"> transition interval minimum </param>
-        /// <param name="max"> transition interval maximum </param>
-        /// <param name="to"> destination state </param>
+        /// <param name="min"> Transition interval minimum. </param>
+        /// <param name="max"> Transition interval maximum. </param>
+        /// <param name="to"> Destination state. </param>
         public Transition(int min, int max, State to)
         {
             Debug.Assert(min >= 0);
@@ -118,8 +118,8 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Checks for equality.
         /// </summary>
-        /// <param name="obj"> object to compare with </param>
-        /// <returns> true if <tt>obj</tt> is a transition with same character interval
+        /// <param name="obj"> Object to compare with. </param>
+        /// <returns> <c>true</c> if <paramref name="obj"/> is a transition with same character interval
         ///         and destination state as this transition. </returns>
         public override bool Equals(object obj)
         {
@@ -138,7 +138,7 @@ namespace Lucene.Net.Util.Automaton
         /// Returns hash code. The hash code is based on the character interval (not
         /// the destination state).
         /// </summary>
-        /// <returns> hash code </returns>
+        /// <returns> Hash code. </returns>
         public override int GetHashCode()
         {
             return min * 2 + max * 3;
@@ -147,7 +147,7 @@ namespace Lucene.Net.Util.Automaton
         /// <summary>
         /// Clones this transition.
         /// </summary>
-        /// <returns> clone with same character interval and destination state </returns>
+        /// <returns> Clone with same character interval and destination state. </returns>
         public virtual object Clone()
         {
             return (Transition)base.MemberwiseClone();
@@ -200,7 +200,7 @@ namespace Lucene.Net.Util.Automaton
 
         /// <summary>
         /// Returns a string describing this state. Normally invoked via
-        /// <seealso cref="Automaton#toString()"/>.
+        /// <seealso cref="Automaton.ToString()"/>.
         /// </summary>
         public override string ToString()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/9bd4dc81/src/Lucene.Net/Util/Automaton/UTF32ToUTF8.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Automaton/UTF32ToUTF8.cs b/src/Lucene.Net/Util/Automaton/UTF32ToUTF8.cs
index 2f4646a..65d08ee 100644
--- a/src/Lucene.Net/Util/Automaton/UTF32ToUTF8.cs
+++ b/src/Lucene.Net/Util/Automaton/UTF32ToUTF8.cs
@@ -28,6 +28,7 @@ namespace Lucene.Net.Util.Automaton
 
     /// <summary>
     /// Converts UTF-32 automata to the equivalent UTF-8 representation.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public sealed class UTF32ToUTF8
@@ -301,11 +302,11 @@ namespace Lucene.Net.Util.Automaton
         private int utf8StateCount;
 
         /// <summary>
-        /// Converts an incoming utf32 automaton to an equivalent
-        ///  utf8 one.  The incoming automaton need not be
-        ///  deterministic.  Note that the returned automaton will
-        ///  not in general be deterministic, so you must
-        ///  determinize it if that's needed.
+        /// Converts an incoming utf32 <see cref="Automaton"/> to an equivalent
+        /// utf8 one.  The incoming automaton need not be
+        /// deterministic.  Note that the returned automaton will
+        /// not in general be deterministic, so you must
+        /// determinize it if that's needed.
         /// </summary>
         public Automaton Convert(Automaton utf32)
         {


[35/48] lucenenet git commit: Lucene.Net.Codecs: Fixed XML documentation comments (excluding sub-namespaces)

Posted by ni...@apache.org.
Lucene.Net.Codecs: Fixed XML documentation comments (excluding sub-namespaces)


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/a08ae945
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/a08ae945
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/a08ae945

Branch: refs/heads/master
Commit: a08ae9451435e960821b6a45c9134c40ce40cc3c
Parents: d4e4498
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Mon Jun 5 05:12:52 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Tue Jun 6 06:58:39 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |  20 +-
 src/Lucene.Net/Codecs/BlockTermState.cs         |  14 +-
 src/Lucene.Net/Codecs/BlockTreeTermsReader.cs   |  92 ++--
 src/Lucene.Net/Codecs/BlockTreeTermsWriter.cs   | 172 +++---
 src/Lucene.Net/Codecs/Codec.cs                  |  28 +-
 src/Lucene.Net/Codecs/CodecUtil.cs              | 124 +++--
 src/Lucene.Net/Codecs/DocValuesConsumer.cs      | 525 ++-----------------
 src/Lucene.Net/Codecs/DocValuesFormat.cs        |  10 +-
 src/Lucene.Net/Codecs/DocValuesProducer.cs      |  49 +-
 src/Lucene.Net/Codecs/FieldInfosFormat.cs       |  13 +-
 src/Lucene.Net/Codecs/FieldInfosReader.cs       |   9 +-
 src/Lucene.Net/Codecs/FieldInfosWriter.cs       |   9 +-
 src/Lucene.Net/Codecs/FieldsConsumer.cs         |  35 +-
 src/Lucene.Net/Codecs/FieldsProducer.cs         |  18 +-
 src/Lucene.Net/Codecs/FilterCodec.cs            |  38 +-
 src/Lucene.Net/Codecs/LiveDocsFormat.cs         |  13 +-
 .../Codecs/Lucene41/Lucene41PostingsFormat.cs   |   4 +-
 .../Codecs/MappingMultiDocsAndPositionsEnum.cs  |   8 +-
 src/Lucene.Net/Codecs/MappingMultiDocsEnum.cs   |   8 +-
 .../Codecs/MultiLevelSkipListReader.cs          |  51 +-
 .../Codecs/MultiLevelSkipListWriter.cs          |  38 +-
 src/Lucene.Net/Codecs/NormsFormat.cs            |  14 +-
 src/Lucene.Net/Codecs/PostingsBaseFormat.cs     |  16 +-
 src/Lucene.Net/Codecs/PostingsConsumer.cs       |  48 +-
 src/Lucene.Net/Codecs/PostingsFormat.cs         |  16 +-
 src/Lucene.Net/Codecs/PostingsReaderBase.cs     |  44 +-
 src/Lucene.Net/Codecs/PostingsWriterBase.cs     |  55 +-
 src/Lucene.Net/Codecs/SegmentInfoFormat.cs      |  18 +-
 src/Lucene.Net/Codecs/SegmentInfoReader.cs      |  17 +-
 src/Lucene.Net/Codecs/SegmentInfoWriter.cs      |  10 +-
 src/Lucene.Net/Codecs/StoredFieldsFormat.cs     |  12 +-
 src/Lucene.Net/Codecs/StoredFieldsReader.cs     |  26 +-
 src/Lucene.Net/Codecs/StoredFieldsWriter.cs     |  67 +--
 src/Lucene.Net/Codecs/TermStats.cs              |   8 +-
 src/Lucene.Net/Codecs/TermVectorsFormat.cs      |  12 +-
 src/Lucene.Net/Codecs/TermVectorsReader.cs      |  25 +-
 src/Lucene.Net/Codecs/TermVectorsWriter.cs      | 109 ++--
 src/Lucene.Net/Codecs/TermsConsumer.cs          |  48 +-
 38 files changed, 745 insertions(+), 1078 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ccfa22a..dafe5a8 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -51,9 +51,25 @@ helpers to help with that, see for examples see our [Java style methods to avoid
 ### Documentation Comments == up for grabs:
 
 1. Lucene.Net.Core (project)
-   1. Codecs (namespace)
-   2. Util.Packed (namespace)
+   1. Codecs.Compressing (namespace)
+   2. Codecs.Lucene3x (namespace)
+   3. Codecs.Lucene40 (namespace)
+   4. Codecs.Lucene41 (namespace)
+   5. Codecs.Lucene42 (namespace)
+   6. Codecs.Lucene45 (namespace)
+   7. Codecs.Lucene46 (namespace)
+   8. Codecs.PerField (namespace)
+   9. Util.Packed (namespace)
 2. Lucene.Net.Codecs (project)
+   1. Appending (namespace)
+   2. BlockTerms (namespace)
+   3. Bloom (namespace)
+   4. DiskDV (namespace)
+   5. IntBlock (namespace)
+   6. Memory (namespace)
+   7. Pulsing (namespace)
+   8. Sep (namespace)
+   9. SimpleText (namespace)
 
 See [Documenting Lucene.Net](https://cwiki.apache.org/confluence/display/LUCENENET/Documenting+Lucene.Net) for instructions. 
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/BlockTermState.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/BlockTermState.cs b/src/Lucene.Net/Codecs/BlockTermState.cs
index acd8723..e799921 100644
--- a/src/Lucene.Net/Codecs/BlockTermState.cs
+++ b/src/Lucene.Net/Codecs/BlockTermState.cs
@@ -24,32 +24,32 @@ namespace Lucene.Net.Codecs
     using TermState = Lucene.Net.Index.TermState;
 
     /// <summary>
-    /// Holds all state required for <seealso cref="PostingsReaderBase"/>
-    /// to produce a <seealso cref="DocsEnum"/> without re-seeking the
+    /// Holds all state required for <see cref="PostingsReaderBase"/>
+    /// to produce a <see cref="DocsEnum"/> without re-seeking the
     /// terms dict.
     /// </summary>
     public class BlockTermState : OrdTermState
     {
         /// <summary>
-        /// how many docs have this term </summary>
+        /// How many docs have this term? </summary>
         public int DocFreq { get; set; }
 
         /// <summary>
-        /// total number of occurrences of this term </summary>
+        /// Total number of occurrences of this term. </summary>
         public long TotalTermFreq { get; set; }
 
         /// <summary>
-        /// the term's ord in the current block </summary>
+        /// The term's ord in the current block. </summary>
         public int TermBlockOrd { get; set; }
 
         /// <summary>
-        /// fp into the terms dict primary file (_X.tim) that holds this term </summary>
+        /// File pointer into the terms dict primary file (_X.tim) that holds this term. </summary>
         // TODO: update BTR to nuke this
         public long BlockFilePointer { get; set; }
 
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal BlockTermState()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/BlockTreeTermsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/BlockTreeTermsReader.cs b/src/Lucene.Net/Codecs/BlockTreeTermsReader.cs
index fedc3b0..b4a73bf 100644
--- a/src/Lucene.Net/Codecs/BlockTreeTermsReader.cs
+++ b/src/Lucene.Net/Codecs/BlockTreeTermsReader.cs
@@ -54,32 +54,32 @@ namespace Lucene.Net.Codecs
 
     /// <summary>
     /// A block-based terms index and dictionary that assigns
-    ///  terms to variable length blocks according to how they
-    ///  share prefixes.  The terms index is a prefix trie
-    ///  whose leaves are term blocks.  The advantage of this
-    ///  approach is that seekExact is often able to
-    ///  determine a term cannot exist without doing any IO, and
-    ///  intersection with Automata is very fast.  Note that this
-    ///  terms dictionary has it's own fixed terms index (ie, it
-    ///  does not support a pluggable terms index
-    ///  implementation).
+    /// terms to variable length blocks according to how they
+    /// share prefixes.  The terms index is a prefix trie
+    /// whose leaves are term blocks.  The advantage of this
+    /// approach is that SeekExact() is often able to
+    /// determine a term cannot exist without doing any IO, and
+    /// intersection with Automata is very fast.  Note that this
+    /// terms dictionary has its own fixed terms index (i.e., it
+    /// does not support a pluggable terms index
+    /// implementation).
     ///
-    ///  <p><b>NOTE</b>: this terms dictionary does not support
-    ///  index divisor when opening an IndexReader.  Instead, you
-    ///  can change the min/maxItemsPerBlock during indexing.</p>
+    /// <para><b>NOTE</b>: this terms dictionary does not support
+    /// index divisor when opening an IndexReader.  Instead, you
+    /// can change the min/maxItemsPerBlock during indexing.</para>
     ///
-    ///  <p>The data structure used by this implementation is very
-    ///  similar to a burst trie
-    ///  (http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.18.3499),
-    ///  but with added logic to break up too-large blocks of all
-    ///  terms sharing a given prefix into smaller ones.</p>
+    /// <para>The data structure used by this implementation is very
+    /// similar to a burst trie
+    /// (http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.18.3499),
+    /// but with added logic to break up too-large blocks of all
+    /// terms sharing a given prefix into smaller ones.</para>
     ///
-    ///  <p>Use <seealso cref="Lucene.Net.Index.CheckIndex"/> with the <code>-verbose</code>
-    ///  option to see summary statistics on the blocks in the
-    ///  dictionary.
-    ///
-    ///  See <seealso cref="BlockTreeTermsWriter"/>.
+    /// <para>Use <see cref="Lucene.Net.Index.CheckIndex"/> with the <c>-verbose</c>
+    /// option to see summary statistics on the blocks in the
+    /// dictionary.</para>
     ///
+    /// See <see cref="BlockTreeTermsWriter"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class BlockTreeTermsReader : FieldsProducer
@@ -242,7 +242,7 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Seek {@code input} to the directory offset. </summary>
+        /// Seek <paramref name="input"/> to the directory offset. </summary>
         protected internal virtual void SeekDir(IndexInput input, long dirOffset)
         {
             if (version >= BlockTreeTermsWriter.VERSION_CHECKSUM)
@@ -263,6 +263,9 @@ namespace Lucene.Net.Codecs
         //   return "0x" + Integer.toHexString(v);
         // }
 
+        /// <summary>
+        /// Disposes all resources used by this object.
+        /// </summary>
         protected override void Dispose(bool disposing)
         {
             if (disposing)
@@ -323,7 +326,7 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// BlockTree statistics for a single field
-        /// returned by <seealso cref="FieldReader#computeStats()"/>.
+        /// returned by <see cref="FieldReader.ComputeStats()"/>.
         /// </summary>
         public class Stats
         {
@@ -353,7 +356,7 @@ namespace Lucene.Net.Codecs
 
             /// <summary>
             /// The number of floor blocks (meta-blocks larger than the
-            ///  allowed {@code maxItemsPerBlock}) in the terms file.
+            ///  allowed <c>maxItemsPerBlock</c>) in the terms file.
             /// </summary>
             public int FloorBlockCount { get; set; }
 
@@ -403,14 +406,14 @@ namespace Lucene.Net.Codecs
 
             /// <summary>
             /// Total number of bytes used to store term stats (not
-            ///  including what the <seealso cref="PostingsBaseFormat"/>
-            ///  stores.
+            /// including what the <see cref="PostingsBaseFormat"/>
+            /// stores.
             /// </summary>
             public long TotalBlockStatsBytes { get; set; }
 
             /// <summary>
-            /// Total bytes stored by the <seealso cref="PostingsBaseFormat"/>,
-            ///  plus the other few vInts stored in the frame.
+            /// Total bytes stored by the <see cref="PostingsBaseFormat"/>,
+            /// plus the other few vInts stored in the frame.
             /// </summary>
             public long TotalBlockOtherBytes { get; set; }
 
@@ -538,7 +541,7 @@ namespace Lucene.Net.Codecs
         internal BytesRef NO_OUTPUT;
 
         /// <summary>
-        /// BlockTree's implementation of <seealso cref="GetTerms"/>. </summary>
+        /// BlockTree's implementation of <see cref="GetTerms(string)"/>. </summary>
         public sealed class FieldReader : Terms
         {
             private readonly BlockTreeTermsReader outerInstance;
@@ -2808,16 +2811,17 @@ namespace Lucene.Net.Codecs
                         LoadBlock();
                     }
 
-                    /* Does initial decode of next block of terms; this
-                       doesn't actually decode the docFreq, totalTermFreq,
-                       postings details (frq/prx offset, etc.) metadata;
-                       it just loads them as byte[] blobs which are then
-                       decoded on-demand if the metadata is ever requested
-                       for any term in this block.  this enables terms-only
-                       intensive consumes (eg certain MTQs, respelling) to
-                       not pay the price of decoding metadata they won't
-                       use. */
-
+                    /// <summary>
+                    /// Does initial decode of next block of terms; this
+                    /// doesn't actually decode the docFreq, totalTermFreq,
+                    /// postings details (frq/prx offset, etc.) metadata;
+                    /// it just loads them as byte[] blobs which are then
+                    /// decoded on-demand if the metadata is ever requested
+                    /// for any term in this block.  This enables terms-only
+                    /// intensive consumes (eg certain MTQs, respelling) to
+                    /// not pay the price of decoding metadata they won't
+                    /// use.
+                    /// </summary>
                     internal void LoadBlock()
                     {
                         // Clone the IndexInput lazily, so that consumers
@@ -3144,9 +3148,11 @@ namespace Lucene.Net.Codecs
                         return true;
                     }
 
-                    // Scans to sub-block that has this target fp; only
-                    // called by next(); NOTE: does not set
-                    // startBytePos/suffix as a side effect
+                    /// <summary>
+                    /// Scans to sub-block that has this target fp; only
+                    /// called by Next(); NOTE: does not set
+                    /// startBytePos/suffix as a side effect
+                    /// </summary>
                     public void ScanToSubBlock(long subFP)
                     {
                         Debug.Assert(!isLeafBlock);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/BlockTreeTermsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/BlockTreeTermsWriter.cs b/src/Lucene.Net/Codecs/BlockTreeTermsWriter.cs
index 4b5df96..a0aafe4 100644
--- a/src/Lucene.Net/Codecs/BlockTreeTermsWriter.cs
+++ b/src/Lucene.Net/Codecs/BlockTreeTermsWriter.cs
@@ -71,120 +71,121 @@ namespace Lucene.Net.Codecs
 
     /// <summary>
     /// Block-based terms index and dictionary writer.
-    /// <p>
+    /// <para/>
     /// Writes terms dict and index, block-encoding (column
     /// stride) each term's metadata for each set of terms
     /// between two index terms.
-    /// <p>
+    /// <para/>
     /// Files:
-    /// <ul>
-    ///   <li><tt>.tim</tt>: <a href="#Termdictionary">Term Dictionary</a></li>
-    ///   <li><tt>.tip</tt>: <a href="#Termindex">Term Index</a></li>
-    /// </ul>
-    /// <p>
+    /// <list type="bullet">
+    ///     <item><term>.tim:</term> <description><a href="#Termdictionary">Term Dictionary</a></description></item>
+    ///     <item><term>.tip:</term> <description><a href="#Termindex">Term Index</a></description></item>
+    /// </list>
+    /// <para/>
     /// <a name="Termdictionary" id="Termdictionary"></a>
     /// <h3>Term Dictionary</h3>
     ///
-    /// <p>The .tim file contains the list of terms in each
+    /// <para>The .tim file contains the list of terms in each
     /// field along with per-term statistics (such as docfreq)
     /// and per-term metadata (typically pointers to the postings list
     /// for that term in the inverted index).
-    /// </p>
+    /// </para>
     ///
-    /// <p>The .tim is arranged in blocks: with blocks containing
+    /// <para>The .tim is arranged in blocks: with blocks containing
     /// a variable number of entries (by default 25-48), where
     /// each entry is either a term or a reference to a
-    /// sub-block.</p>
+    /// sub-block.</para>
     ///
-    /// <p>NOTE: The term dictionary can plug into different postings implementations:
+    /// <para>NOTE: The term dictionary can plug into different postings implementations:
     /// the postings writer/reader are actually responsible for encoding
-    /// and decoding the Postings Metadata and Term Metadata sections.</p>
+    /// and decoding the Postings Metadata and Term Metadata sections.</para>
     ///
-    /// <ul>
-    ///    <li>TermsDict (.tim) --&gt; Header, <i>PostingsHeader</i>, NodeBlock<sup>NumBlocks</sup>,
-    ///                               FieldSummary, DirOffset, Footer</li>
-    ///    <li>NodeBlock --&gt; (OuterNode | InnerNode)</li>
-    ///    <li>OuterNode --&gt; EntryCount, SuffixLength, Byte<sup>SuffixLength</sup>, StatsLength, &lt; TermStats &gt;<sup>EntryCount</sup>, MetaLength, &lt;<i>TermMetadata</i>&gt;<sup>EntryCount</sup></li>
-    ///    <li>InnerNode --&gt; EntryCount, SuffixLength[,Sub?], Byte<sup>SuffixLength</sup>, StatsLength, &lt; TermStats ? &gt;<sup>EntryCount</sup>, MetaLength, &lt;<i>TermMetadata ? </i>&gt;<sup>EntryCount</sup></li>
-    ///    <li>TermStats --&gt; DocFreq, TotalTermFreq </li>
-    ///    <li>FieldSummary --&gt; NumFields, &lt;FieldNumber, NumTerms, RootCodeLength, Byte<sup>RootCodeLength</sup>,
-    ///                            SumTotalTermFreq?, SumDocFreq, DocCount&gt;<sup>NumFields</sup></li>
-    ///    <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///    <li>DirOffset --&gt; <seealso cref="DataOutput#writeLong Uint64"/></li>
-    ///    <li>EntryCount,SuffixLength,StatsLength,DocFreq,MetaLength,NumFields,
-    ///        FieldNumber,RootCodeLength,DocCount --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///    <li>TotalTermFreq,NumTerms,SumTotalTermFreq,SumDocFreq --&gt;
-    ///        <seealso cref="DataOutput#writeVLong VLong"/></li>
-    ///    <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    /// </ul>
-    /// <p>Notes:</p>
-    /// <ul>
-    ///    <li>Header is a <seealso cref="CodecUtil#writeHeader CodecHeader"/> storing the version information
-    ///        for the BlockTree implementation.</li>
-    ///    <li>DirOffset is a pointer to the FieldSummary section.</li>
-    ///    <li>DocFreq is the count of documents which contain the term.</li>
-    ///    <li>TotalTermFreq is the total number of occurrences of the term. this is encoded
-    ///        as the difference between the total number of occurrences and the DocFreq.</li>
-    ///    <li>FieldNumber is the fields number from <seealso cref="fieldInfos"/>. (.fnm)</li>
-    ///    <li>NumTerms is the number of unique terms for the field.</li>
-    ///    <li>RootCode points to the root block for the field.</li>
-    ///    <li>SumDocFreq is the total number of postings, the number of term-document pairs across
-    ///        the entire field.</li>
-    ///    <li>DocCount is the number of documents that have at least one posting for this field.</li>
-    ///    <li>PostingsHeader and TermMetadata are plugged into by the specific postings implementation:
+    /// <list type="bullet">
+    ///    <item><description>TermsDict (.tim) --&gt; Header, <i>PostingsHeader</i>, NodeBlock<sup>NumBlocks</sup>,
+    ///                               FieldSummary, DirOffset, Footer</description></item>
+    ///    <item><description>NodeBlock --&gt; (OuterNode | InnerNode)</description></item>
+    ///    <item><description>OuterNode --&gt; EntryCount, SuffixLength, Byte<sup>SuffixLength</sup>, StatsLength, &lt; TermStats &gt;<sup>EntryCount</sup>, MetaLength, &lt;<i>TermMetadata</i>&gt;<sup>EntryCount</sup></description></item>
+    ///    <item><description>InnerNode --&gt; EntryCount, SuffixLength[,Sub?], Byte<sup>SuffixLength</sup>, StatsLength, &lt; TermStats ? &gt;<sup>EntryCount</sup>, MetaLength, &lt;<i>TermMetadata ? </i>&gt;<sup>EntryCount</sup></description></item>
+    ///    <item><description>TermStats --&gt; DocFreq, TotalTermFreq </description></item>
+    ///    <item><description>FieldSummary --&gt; NumFields, &lt;FieldNumber, NumTerms, RootCodeLength, Byte<sup>RootCodeLength</sup>,
+    ///                            SumTotalTermFreq?, SumDocFreq, DocCount&gt;<sup>NumFields</sup></description></item>
+    ///    <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>)</description></item>
+    ///    <item><description>DirOffset --&gt; Uint64 (<see cref="Store.DataOutput.WriteInt64(long)"/>)</description></item>
+    ///    <item><description>EntryCount,SuffixLength,StatsLength,DocFreq,MetaLength,NumFields,
+    ///        FieldNumber,RootCodeLength,DocCount --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>)</description></item>
+    ///    <item><description>TotalTermFreq,NumTerms,SumTotalTermFreq,SumDocFreq --&gt;
+    ///        VLong (<see cref="Store.DataOutput.WriteVInt64(long)"/>)</description></item>
+    ///    <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(IndexOutput)"/>)</description></item>
+    /// </list>
+    /// <para>Notes:</para>
+    /// <list type="bullet">
+    ///    <item><description>Header is a CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) storing the version information
+    ///        for the BlockTree implementation.</description></item>
+    ///    <item><description>DirOffset is a pointer to the FieldSummary section.</description></item>
+    ///    <item><description>DocFreq is the count of documents which contain the term.</description></item>
+    ///    <item><description>TotalTermFreq is the total number of occurrences of the term. This is encoded
+    ///        as the difference between the total number of occurrences and the DocFreq.</description></item>
+    ///    <item><description>FieldNumber is the fields number from <see cref="fieldInfos"/>. (.fnm)</description></item>
+    ///    <item><description>NumTerms is the number of unique terms for the field.</description></item>
+    ///    <item><description>RootCode points to the root block for the field.</description></item>
+    ///    <item><description>SumDocFreq is the total number of postings, the number of term-document pairs across
+    ///        the entire field.</description></item>
+    ///    <item><description>DocCount is the number of documents that have at least one posting for this field.</description></item>
+    ///    <item><description>PostingsHeader and TermMetadata are plugged into by the specific postings implementation:
     ///        these contain arbitrary per-file data (such as parameters or versioning information)
-    ///        and per-term data (such as pointers to inverted files).</li>
-    ///    <li>For inner nodes of the tree, every entry will steal one bit to mark whether it points
-    ///        to child nodes(sub-block). If so, the corresponding TermStats and TermMetaData are omitted </li>
-    /// </ul>
+    ///        and per-term data (such as pointers to inverted files).</description></item>
+    ///    <item><description>For inner nodes of the tree, every entry will steal one bit to mark whether it points
+    ///        to child nodes(sub-block). If so, the corresponding <see cref="TermStats"/> and TermMetadata are omitted </description></item>
+    /// </list>
     /// <a name="Termindex" id="Termindex"></a>
     /// <h3>Term Index</h3>
-    /// <p>The .tip file contains an index into the term dictionary, so that it can be
+    /// <para>The .tip file contains an index into the term dictionary, so that it can be
     /// accessed randomly.  The index is also used to determine
-    /// when a given term cannot exist on disk (in the .tim file), saving a disk seek.</p>
-    /// <ul>
-    ///   <li>TermsIndex (.tip) --&gt; Header, FSTIndex<sup>NumFields</sup>
-    ///                                &lt;IndexStartFP&gt;<sup>NumFields</sup>, DirOffset, Footer</li>
-    ///   <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///   <li>DirOffset --&gt; <seealso cref="DataOutput#writeLong Uint64"/></li>
-    ///   <li>IndexStartFP --&gt; <seealso cref="DataOutput#writeVLong VLong"/></li>
+    /// when a given term cannot exist on disk (in the .tim file), saving a disk seek.</para>
+    /// <list type="bullet">
+    ///   <item><description>TermsIndex (.tip) --&gt; Header, FSTIndex<sup>NumFields</sup>
+    ///                                &lt;IndexStartFP&gt;<sup>NumFields</sup>, DirOffset, Footer</description></item>
+    ///   <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>)</description></item>
+    ///   <item><description>DirOffset --&gt; Uint64 (<see cref="Store.DataOutput.WriteInt64(long)"/>)</description></item>
+    ///   <item><description>IndexStartFP --&gt; VLong (<see cref="Store.DataOutput.WriteVInt64(long)"/>)</description></item>
     ///   <!-- TODO: better describe FST output here -->
-    ///   <li>FSTIndex --&gt; <seealso cref="FST FST&lt;byte[]&gt;"/></li>
-    ///   <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    /// </ul>
-    /// <p>Notes:</p>
-    /// <ul>
-    ///   <li>The .tip file contains a separate FST for each
+    ///   <item><description>FSTIndex --&gt; <see cref="T:FST{byte[]}"/></description></item>
+    ///   <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(IndexOutput)"/>)</description></item>
+    /// </list>
+    /// <para>Notes:</para>
+    /// <list type="bullet">
+    ///   <item><description>The .tip file contains a separate FST for each
     ///       field.  The FST maps a term prefix to the on-disk
     ///       block that holds all terms starting with that
     ///       prefix.  Each field's IndexStartFP points to its
-    ///       FST.</li>
-    ///   <li>DirOffset is a pointer to the start of the IndexStartFPs
-    ///       for all fields</li>
-    ///   <li>It's possible that an on-disk block would contain
+    ///       FST.</description></item>
+    ///   <item><description>DirOffset is a pointer to the start of the IndexStartFPs
+    ///       for all fields</description></item>
+    ///   <item><description>It's possible that an on-disk block would contain
     ///       too many terms (more than the allowed maximum
     ///       (default: 48)).  When this happens, the block is
     ///       sub-divided into new blocks (called "floor
     ///       blocks"), and then the output in the FST for the
     ///       block's prefix encodes the leading byte of each
-    ///       sub-block, and its file pointer.
-    /// </ul>
+    ///       sub-block, and its file pointer.</description></item>
+    /// </list>
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// <seealso cref= BlockTreeTermsReader
-    /// @lucene.experimental </seealso>
+    /// <seealso cref="BlockTreeTermsReader"/>
     public class BlockTreeTermsWriter : FieldsConsumer
     {
         /// <summary>
-        /// Suggested default value for the {@code
-        ///  minItemsInBlock} parameter to {@link
-        ///  #BlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int)}.
+        /// Suggested default value for the 
+        /// <c>minItemsInBlock</c> parameter to 
+        /// <see cref="BlockTreeTermsWriter(SegmentWriteState, PostingsWriterBase, int, int)"/>.
         /// </summary>
         public const int DEFAULT_MIN_BLOCK_SIZE = 25;
 
         /// <summary>
-        /// Suggested default value for the {@code
-        ///  maxItemsInBlock} parameter to {@link
-        ///  #BlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int)}.
+        /// Suggested default value for the 
+        /// <c>maxItemsInBlock</c> parameter to 
+        /// <see cref="BlockTreeTermsWriter(SegmentWriteState, PostingsWriterBase, int, int)"/>.
         /// </summary>
         public const int DEFAULT_MAX_BLOCK_SIZE = 48;
 
@@ -197,7 +198,7 @@ namespace Lucene.Net.Codecs
         internal const int OUTPUT_FLAG_HAS_TERMS = 0x2;
 
         /// <summary>
-        /// Extension of terms file </summary>
+        /// Extension of terms file. </summary>
         internal const string TERMS_EXTENSION = "tim";
 
         internal const string TERMS_CODEC_NAME = "BLOCK_TREE_TERMS_DICT";
@@ -211,11 +212,11 @@ namespace Lucene.Net.Codecs
         public const int VERSION_APPEND_ONLY = 1;
 
         /// <summary>
-        /// Meta data as array </summary>
+        /// Meta data as array. </summary>
         public const int VERSION_META_ARRAY = 2;
 
         /// <summary>
-        /// checksums </summary>
+        /// Checksums. </summary>
         public const int VERSION_CHECKSUM = 3;
 
         /// <summary>
@@ -223,7 +224,7 @@ namespace Lucene.Net.Codecs
         public const int VERSION_CURRENT = VERSION_CHECKSUM;
 
         /// <summary>
-        /// Extension of terms index file </summary>
+        /// Extension of terms index file. </summary>
         internal const string TERMS_INDEX_EXTENSION = "tip";
 
         internal const string TERMS_INDEX_CODEC_NAME = "BLOCK_TREE_TERMS_INDEX";
@@ -272,9 +273,9 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// Create a new writer.  The number of items (terms or
-        ///  sub-blocks) per block will aim to be between
-        ///  minItemsPerBlock and maxItemsPerBlock, though in some
-        ///  cases the blocks may be smaller than the min.
+        /// sub-blocks) per block will aim to be between
+        /// <paramref name="minItemsInBlock"/> and <paramref name="maxItemsInBlock"/>, though in some
+        /// cases the blocks may be smaller than the min.
         /// </summary>
         public BlockTreeTermsWriter(SegmentWriteState state, PostingsWriterBase postingsWriter, int minItemsInBlock, int maxItemsInBlock)
         {
@@ -1197,6 +1198,9 @@ namespace Lucene.Net.Codecs
             internal readonly RAMOutputStream bytesWriter = new RAMOutputStream();
         }
 
+        /// <summary>
+        /// Disposes all resources used by this object.
+        /// </summary>
         protected override void Dispose(bool disposing)
         {
             if (disposing)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/Codec.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Codec.cs b/src/Lucene.Net/Codecs/Codec.cs
index a9f2448..680e490 100644
--- a/src/Lucene.Net/Codecs/Codec.cs
+++ b/src/Lucene.Net/Codecs/Codec.cs
@@ -98,7 +98,7 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Returns this codec's name </summary>
+        /// Returns this codec's name. </summary>
         public string Name
         {
             get
@@ -108,46 +108,46 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Encodes/decodes postings </summary>
+        /// Encodes/decodes postings. </summary>
         public abstract PostingsFormat PostingsFormat { get; }
 
         /// <summary>
-        /// Encodes/decodes docvalues </summary>
+        /// Encodes/decodes docvalues. </summary>
         public abstract DocValuesFormat DocValuesFormat { get; }
 
         /// <summary>
-        /// Encodes/decodes stored fields </summary>
+        /// Encodes/decodes stored fields. </summary>
         public abstract StoredFieldsFormat StoredFieldsFormat { get; }
 
         /// <summary>
-        /// Encodes/decodes term vectors </summary>
+        /// Encodes/decodes term vectors. </summary>
         public abstract TermVectorsFormat TermVectorsFormat { get; }
 
         /// <summary>
-        /// Encodes/decodes field infos file </summary>
+        /// Encodes/decodes field infos file. </summary>
         public abstract FieldInfosFormat FieldInfosFormat { get; }
 
         /// <summary>
-        /// Encodes/decodes segment info file </summary>
+        /// Encodes/decodes segment info file. </summary>
         public abstract SegmentInfoFormat SegmentInfoFormat { get; }
 
         /// <summary>
-        /// Encodes/decodes document normalization values </summary>
+        /// Encodes/decodes document normalization values. </summary>
         public abstract NormsFormat NormsFormat { get; }
 
         /// <summary>
-        /// Encodes/decodes live docs </summary>
+        /// Encodes/decodes live docs. </summary>
         public abstract LiveDocsFormat LiveDocsFormat { get; }
 
         /// <summary>
-        /// looks up a codec by name </summary>
+        /// Looks up a codec by name. </summary>
         public static Codec ForName(string name)
         {
             return codecFactory.GetCodec(name);
         }
 
         /// <summary>
-        /// returns a list of all available codec names </summary>
+        /// Returns a list of all available codec names. </summary>
         public static ICollection<string> AvailableCodecs()
         {
             if (codecFactory is IServiceListable)
@@ -166,8 +166,8 @@ namespace Lucene.Net.Codecs
         private static Codec defaultCodec;
 
         /// <summary>
-        /// expert: returns the default codec used for newly created
-        ///  <seealso cref="IndexWriterConfig"/>s.
+        /// Expert: returns the default codec used for newly created
+        /// <seealso cref="Index.IndexWriterConfig"/>s.
         /// </summary>
         // TODO: should we use this, or maybe a system property is better?
         public static Codec Default
@@ -189,7 +189,7 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// returns the codec's name. Subclasses can override to provide
+        /// Returns the codec's name. Subclasses can override to provide
         /// more detail (such as parameters).
         /// </summary>
         public override string ToString()

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/CodecUtil.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/CodecUtil.cs b/src/Lucene.Net/Codecs/CodecUtil.cs
index 221fd72..a6dd3af 100644
--- a/src/Lucene.Net/Codecs/CodecUtil.cs
+++ b/src/Lucene.Net/Codecs/CodecUtil.cs
@@ -24,13 +24,12 @@ namespace Lucene.Net.Codecs
 
     /// <summary>
     /// Utility class for reading and writing versioned headers.
-    /// <p>
+    /// <para/>
     /// Writing codec headers is useful to ensure that a file is in
     /// the format you think it is.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
-
     public sealed class CodecUtil
     {
         private CodecUtil() // no instance
@@ -49,29 +48,29 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// Writes a codec header, which records both a string to
-        /// identify the file and a version number. this header can
+        /// identify the file and a version number. This header can
         /// be parsed and validated with
-        /// <seealso cref="#checkHeader(DataInput, String, int, int) checkHeader()"/>.
-        /// <p>
+        /// <see cref="CheckHeader(DataInput, string, int, int)"/>.
+        /// <para/>
         /// CodecHeader --&gt; Magic,CodecName,Version
-        /// <ul>
-        ///    <li>Magic --&gt; <seealso cref="DataOutput#writeInt Uint32"/>. this
-        ///        identifies the start of the header. It is always {@value #CODEC_MAGIC}.
-        ///    <li>CodecName --&gt; <seealso cref="DataOutput#writeString String"/>. this
-        ///        is a string to identify this file.
-        ///    <li>Version --&gt; <seealso cref="DataOutput#writeInt Uint32"/>. Records
-        ///        the version of the file.
-        /// </ul>
-        /// <p>
+        /// <list type="bullet">
+        ///    <item><description>Magic --&gt; Uint32 (<see cref="DataOutput.WriteInt32(int)"/>). This
+        ///        identifies the start of the header. It is always <see cref="CODEC_MAGIC"/>.</description></item>
+        ///    <item><description>CodecName --&gt; String (<see cref="DataOutput.WriteString(string)"/>). This
+        ///        is a string to identify this file.</description></item>
+        ///    <item><description>Version --&gt; Uint32 (<see cref="DataOutput.WriteInt32(int)"/>). Records
+        ///        the version of the file.</description></item>
+        /// </list>
+        /// <para/>
         /// Note that the length of a codec header depends only upon the
         /// name of the codec, so this length can be computed at any time
-        /// with <seealso cref="#headerLength(String)"/>.
+        /// with <see cref="HeaderLength(string)"/>.
         /// </summary>
         /// <param name="out"> Output stream </param>
         /// <param name="codec"> String to identify this file. It should be simple ASCII,
         ///              less than 128 characters in length. </param>
         /// <param name="version"> Version number </param>
-        /// <exception cref="IOException"> If there is an I/O error writing to the underlying medium. </exception>
+        /// <exception cref="System.IO.IOException"> If there is an I/O error writing to the underlying medium. </exception>
         public static void WriteHeader(DataOutput @out, string codec, int version)
         {
             BytesRef bytes = new BytesRef(codec);
@@ -88,8 +87,8 @@ namespace Lucene.Net.Codecs
         /// Computes the length of a codec header.
         /// </summary>
         /// <param name="codec"> Codec name. </param>
-        /// <returns> length of the entire codec header. </returns>
-        /// <seealso cref= #writeHeader(DataOutput, String, int) </seealso>
+        /// <returns> Length of the entire codec header. </returns>
+        /// <seealso cref="WriteHeader(DataOutput, string, int)"/>
         public static int HeaderLength(string codec)
         {
             return 9 + codec.Length;
@@ -97,10 +96,10 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// Reads and validates a header previously written with
-        /// <seealso cref="#writeHeader(DataOutput, String, int)"/>.
-        /// <p>
-        /// When reading a file, supply the expected <code>codec</code> and
-        /// an expected version range (<code>minVersion to maxVersion</code>).
+        /// <see cref="WriteHeader(DataOutput, string, int)"/>.
+        /// <para/>
+        /// When reading a file, supply the expected <paramref name="codec"/> and
+        /// an expected version range (<paramref name="minVersion"/> to <paramref name="maxVersion"/>).
         /// </summary>
         /// <param name="in"> Input stream, positioned at the point where the
         ///        header was previously written. Typically this is located
@@ -109,18 +108,18 @@ namespace Lucene.Net.Codecs
         /// <param name="minVersion"> The minimum supported expected version number. </param>
         /// <param name="maxVersion"> The maximum supported expected version number. </param>
         /// <returns> The actual version found, when a valid header is found
-        ///         that matches <code>codec</code>, with an actual version
-        ///         where <code>minVersion <= actual <= maxVersion</code>.
+        ///         that matches <paramref name="codec"/>, with an actual version
+        ///         where <c>minVersion &lt;= actual &lt;= maxVersion</c>.
         ///         Otherwise an exception is thrown. </returns>
-        /// <exception cref="CorruptIndexException"> If the first four bytes are not
-        ///         <seealso cref="#CODEC_MAGIC"/>, or if the actual codec found is
-        ///         not <code>codec</code>. </exception>
-        /// <exception cref="IndexFormatTooOldException"> If the actual version is less
-        ///         than <code>minVersion</code>. </exception>
-        /// <exception cref="IndexFormatTooNewException"> If the actual version is greater
-        ///         than <code>maxVersion</code>. </exception>
-        /// <exception cref="IOException"> If there is an I/O error reading from the underlying medium. </exception>
-        /// <seealso cref= #writeHeader(DataOutput, String, int) </seealso>
+        /// <exception cref="Index.CorruptIndexException"> If the first four bytes are not
+        ///         <see cref="CODEC_MAGIC"/>, or if the actual codec found is
+        ///         not <paramref name="codec"/>. </exception>
+        /// <exception cref="Index.IndexFormatTooOldException"> If the actual version is less
+        ///         than <paramref name="minVersion"/>. </exception>
+        /// <exception cref="Index.IndexFormatTooNewException"> If the actual version is greater
+        ///         than <paramref name="maxVersion"/>. </exception>
+        /// <exception cref="System.IO.IOException"> If there is an I/O error reading from the underlying medium. </exception>
+        /// <seealso cref="WriteHeader(DataOutput, string, int)"/>
         public static int CheckHeader(DataInput @in, string codec, int minVersion, int maxVersion)
         {
             // Safety to guard against reading a bogus string:
@@ -133,10 +132,10 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Like {@link
-        ///  #checkHeader(DataInput,String,int,int)} except this
-        ///  version assumes the first int has already been read
-        ///  and validated from the input.
+        /// Like 
+        /// <see cref="CheckHeader(DataInput,string,int,int)"/> except this
+        /// version assumes the first <see cref="int"/> has already been read
+        /// and validated from the input.
         /// </summary>
         public static int CheckHeaderNoMagic(DataInput @in, string codec, int minVersion, int maxVersion)
         {
@@ -161,24 +160,24 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// Writes a codec footer, which records both a checksum
-        /// algorithm ID and a checksum. this footer can
+        /// algorithm ID and a checksum. This footer can
         /// be parsed and validated with
-        /// <seealso cref="#checkFooter(ChecksumIndexInput) checkFooter()"/>.
-        /// <p>
+        /// <see cref="CheckFooter(ChecksumIndexInput)"/>.
+        /// <para/>
         /// CodecFooter --&gt; Magic,AlgorithmID,Checksum
-        /// <ul>
-        ///    <li>Magic --&gt; <seealso cref="DataOutput#writeInt Uint32"/>. this
-        ///        identifies the start of the footer. It is always {@value #FOOTER_MAGIC}.
-        ///    <li>AlgorithmID --&gt; <seealso cref="DataOutput#writeInt Uint32"/>. this
+        /// <list type="bullet">
+        ///    <item><description>Magic --&gt; Uint32 (<see cref="DataOutput.WriteInt32(int)"/>). This
+        ///        identifies the start of the footer. It is always <see cref="FOOTER_MAGIC"/>.</description></item>
+        ///    <item><description>AlgorithmID --&gt; Uint32 (<see cref="DataOutput.WriteInt32(int)"/>). This
         ///        indicates the checksum algorithm used. Currently this is always 0,
-        ///        for zlib-crc32.
-        ///    <li>Checksum --&gt; <seealso cref="DataOutput#writeLong Uint32"/>. The
+        ///        for zlib-crc32.</description></item>
+        ///    <item><description>Checksum --&gt; Uint64 (<see cref="DataOutput.WriteInt64(long)"/>). The
         ///        actual checksum value for all previous bytes in the stream, including
-        ///        the bytes from Magic and AlgorithmID.
-        /// </ul>
+        ///        the bytes from Magic and AlgorithmID.</description></item>
+        /// </list>
         /// </summary>
         /// <param name="out"> Output stream </param>
-        /// <exception cref="IOException"> If there is an I/O error writing to the underlying medium. </exception>
+        /// <exception cref="System.IO.IOException"> If there is an I/O error writing to the underlying medium. </exception>
         public static void WriteFooter(IndexOutput @out)
         {
             @out.WriteInt32(FOOTER_MAGIC);
@@ -189,18 +188,18 @@ namespace Lucene.Net.Codecs
         /// <summary>
         /// Computes the length of a codec footer.
         /// </summary>
-        /// <returns> length of the entire codec footer. </returns>
-        /// <seealso cref= #writeFooter(IndexOutput) </seealso>
+        /// <returns> Length of the entire codec footer. </returns>
+        /// <seealso cref="WriteFooter(IndexOutput)"/>
         public static int FooterLength()
         {
             return 16;
         }
 
         /// <summary>
-        /// Validates the codec footer previously written by <seealso cref="#writeFooter"/>. </summary>
-        /// <returns> actual checksum value </returns>
-        /// <exception cref="IOException"> if the footer is invalid, if the checksum does not match,
-        ///                     or if {@code in} is not properly positioned before the footer
+        /// Validates the codec footer previously written by <see cref="WriteFooter(IndexOutput)"/>. </summary>
+        /// <returns> Actual checksum value. </returns>
+        /// <exception cref="System.IO.IOException"> If the footer is invalid, if the checksum does not match,
+        ///                     or if <paramref name="in"/> is not properly positioned before the footer
         ///                     at the end of the stream. </exception>
         public static long CheckFooter(ChecksumIndexInput @in)
         {
@@ -219,9 +218,9 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Returns (but does not validate) the checksum previously written by <seealso cref="#checkFooter"/>. </summary>
+        /// Returns (but does not validate) the checksum previously written by <see cref="CheckFooter(ChecksumIndexInput)"/>. </summary>
         /// <returns> actual checksum value </returns>
-        /// <exception cref="IOException"> if the footer is invalid </exception>
+        /// <exception cref="System.IO.IOException"> If the footer is invalid. </exception>
         public static long RetrieveChecksum(IndexInput @in)
         {
             @in.Seek(@in.Length - FooterLength());
@@ -247,8 +246,7 @@ namespace Lucene.Net.Codecs
         /// <summary>
         /// Checks that the stream is positioned at the end, and throws exception
         /// if it is not. </summary>
-        /// @deprecated Use <seealso cref="#checkFooter"/> instead, this should only used for files without checksums
-        [Obsolete("Use CheckFooter() instead")]
+        [Obsolete("Use CheckFooter(ChecksumIndexInput) instead, this should only be used for files without checksums.")]
         public static void CheckEOF(IndexInput @in)
         {
             if (@in.GetFilePointer() != @in.Length)
@@ -258,10 +256,10 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Clones the provided input, reads all bytes from the file, and calls <seealso cref="#checkFooter"/>
-        /// <p>
+        /// Clones the provided input, reads all bytes from the file, and calls <see cref="CheckFooter(ChecksumIndexInput)"/>
+        /// <para/>
         /// Note that this method may be slow, as it must process the entire file.
-        /// If you just need to extract the checksum value, call <seealso cref="#retrieveChecksum"/>.
+        /// If you just need to extract the checksum value, call <see cref="RetrieveChecksum(IndexInput)"/>.
         /// </summary>
         public static long ChecksumEntireFile(IndexInput input)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/DocValuesConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/DocValuesConsumer.cs b/src/Lucene.Net/Codecs/DocValuesConsumer.cs
index 999d719..f8b814c 100644
--- a/src/Lucene.Net/Codecs/DocValuesConsumer.cs
+++ b/src/Lucene.Net/Codecs/DocValuesConsumer.cs
@@ -42,27 +42,28 @@ namespace Lucene.Net.Codecs
     /// sorted docvalues.  Concrete implementations of this
     /// actually do "something" with the docvalues (write it into
     /// the index in a specific format).
-    /// <p>
+    /// <para/>
     /// The lifecycle is:
-    /// <ol>
-    ///   <li>DocValuesConsumer is created by
-    ///       <seealso cref="DocValuesFormat#fieldsConsumer(SegmentWriteState)"/> or
-    ///       <seealso cref="NormsFormat#normsConsumer(SegmentWriteState)"/>.
-    ///   <li><seealso cref="#addNumericField"/>, <seealso cref="#addBinaryField"/>,
-    ///       or <seealso cref="#addSortedField"/> are called for each Numeric,
+    /// <list type="number">
+    ///   <item><description>DocValuesConsumer is created by
+    ///       <see cref="DocValuesFormat.FieldsConsumer(Index.SegmentWriteState)"/> or
+    ///       <see cref="NormsFormat.NormsConsumer(Index.SegmentWriteState)"/>.</description></item>
+    ///   <item><description><see cref="AddNumericField(FieldInfo, IEnumerable{long?})"/>, 
+    ///       <see cref="AddBinaryField(FieldInfo, IEnumerable{BytesRef})"/>,
+    ///       or <see cref="AddSortedField(FieldInfo, IEnumerable{BytesRef}, IEnumerable{long?})"/> are called for each Numeric,
     ///       Binary, or Sorted docvalues field. The API is a "pull" rather
     ///       than "push", and the implementation is free to iterate over the
-    ///       values multiple times (<seealso cref="Iterable#iterator()"/>).
-    ///   <li>After all fields are added, the consumer is <seealso cref="#close"/>d.
-    /// </ol>
-    ///
+    ///       values multiple times (<see cref="IEnumerable{T}.GetEnumerator()"/>).</description></item>
+    ///   <item><description>After all fields are added, the consumer is <see cref="Dispose()"/>d.</description></item>
+    /// </list>
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public abstract class DocValuesConsumer : IDisposable
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal DocValuesConsumer()
         {
@@ -70,44 +71,44 @@ namespace Lucene.Net.Codecs
 
         /// <summary>
         /// Writes numeric docvalues for a field. </summary>
-        /// <param name="field"> field information </param>
-        /// <param name="values"> Iterable of numeric values (one for each document). {@code null} indicates
+        /// <param name="field"> Field information. </param>
+        /// <param name="values"> <see cref="IEnumerable{T}"/> of numeric values (one for each document). <c>null</c> indicates
         ///               a missing value. </param>
-        /// <exception cref="IOException"> if an I/O error occurred. </exception>
+        /// <exception cref="System.IO.IOException"> If an I/O error occurred. </exception>
         public abstract void AddNumericField(FieldInfo field, IEnumerable<long?> values);
 
         /// <summary>
         /// Writes binary docvalues for a field. </summary>
-        /// <param name="field"> field information </param>
-        /// <param name="values"> Iterable of binary values (one for each document). {@code null} indicates
+        /// <param name="field"> Field information. </param>
+        /// <param name="values"> <see cref="IEnumerable{T}"/> of binary values (one for each document). <c>null</c> indicates
         ///               a missing value. </param>
-        /// <exception cref="IOException"> if an I/O error occurred. </exception>
+        /// <exception cref="System.IO.IOException"> If an I/O error occurred. </exception>
         public abstract void AddBinaryField(FieldInfo field, IEnumerable<BytesRef> values);
 
         /// <summary>
         /// Writes pre-sorted binary docvalues for a field. </summary>
-        /// <param name="field"> field information </param>
-        /// <param name="values"> Iterable of binary values in sorted order (deduplicated). </param>
-        /// <param name="docToOrd"> Iterable of ordinals (one for each document). {@code -1} indicates
+        /// <param name="field"> Field information. </param>
+        /// <param name="values"> <see cref="IEnumerable{T}"/> of binary values in sorted order (deduplicated). </param>
+        /// <param name="docToOrd"> <see cref="IEnumerable{T}"/> of ordinals (one for each document). <c>-1</c> indicates
         ///                 a missing value. </param>
-        /// <exception cref="IOException"> if an I/O error occurred. </exception>
+        /// <exception cref="System.IO.IOException"> If an I/O error occurred. </exception>
         public abstract void AddSortedField(FieldInfo field, IEnumerable<BytesRef> values, IEnumerable<long?> docToOrd);
 
         /// <summary>
         /// Writes pre-sorted set docvalues for a field </summary>
-        /// <param name="field"> field information </param>
-        /// <param name="values"> Iterable of binary values in sorted order (deduplicated). </param>
-        /// <param name="docToOrdCount"> Iterable of the number of values for each document. A zero ordinal
+        /// <param name="field"> Field information. </param>
+        /// <param name="values"> <see cref="IEnumerable{T}"/> of binary values in sorted order (deduplicated). </param>
+        /// <param name="docToOrdCount"> <see cref="IEnumerable{T}"/> of the number of values for each document. A zero ordinal
         ///                      count indicates a missing value. </param>
-        /// <param name="ords"> Iterable of ordinal occurrences (docToOrdCount*maxDoc total). </param>
-        /// <exception cref="IOException"> if an I/O error occurred. </exception>
+        /// <param name="ords"> <see cref="IEnumerable{T}"/> of ordinal occurrences (<paramref name="docToOrdCount"/>*maxDoc total). </param>
+        /// <exception cref="System.IO.IOException"> If an I/O error occurred. </exception>
         public abstract void AddSortedSetField(FieldInfo field, IEnumerable<BytesRef> values, IEnumerable<long?> docToOrdCount, IEnumerable<long?> ords);
 
         /// <summary>
-        /// Merges the numeric docvalues from <code>toMerge</code>.
-        /// <p>
-        /// The default implementation calls <seealso cref="#addNumericField"/>, passing
-        /// an Iterable that merges and filters deleted documents on the fly.</p>
+        /// Merges the numeric docvalues from <paramref name="toMerge"/>.
+        /// <para>
+        /// The default implementation calls <see cref="AddNumericField(FieldInfo, IEnumerable{long?})"/>, passing
+        /// an <see cref="IEnumerable{T}"/> that merges and filters deleted documents on the fly.</para>
         /// </summary>
         public virtual void MergeNumericField(FieldInfo fieldInfo, MergeState mergeState, IList<NumericDocValues> toMerge, IList<IBits> docsWithField)
         {
@@ -166,10 +167,10 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Merges the binary docvalues from <code>toMerge</code>.
-        /// <p>
-        /// The default implementation calls <seealso cref="#addBinaryField"/>, passing
-        /// an Iterable that merges and filters deleted documents on the fly.
+        /// Merges the binary docvalues from <paramref name="toMerge"/>.
+        /// <para>
+        /// The default implementation calls <see cref="AddBinaryField(FieldInfo, IEnumerable{BytesRef})"/>, passing
+        /// an <see cref="IEnumerable{T}"/> that merges and filters deleted documents on the fly.</para>
         /// </summary>
         public virtual void MergeBinaryField(FieldInfo fieldInfo, MergeState mergeState, IList<BinaryDocValues> toMerge, IList<IBits> docsWithField)
         {
@@ -229,10 +230,10 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Merges the sorted docvalues from <code>toMerge</code>.
-        /// <p>
-        /// The default implementation calls <seealso cref="#addSortedField"/>, passing
-        /// an Iterable that merges ordinals and values and filters deleted documents.</p>
+        /// Merges the sorted docvalues from <paramref name="toMerge"/>.
+        /// <para>
+        /// The default implementation calls <see cref="AddSortedField(FieldInfo, IEnumerable{BytesRef}, IEnumerable{long?})"/>, passing
+        /// an <see cref="IEnumerable{T}"/> that merges ordinals and values and filters deleted documents.</para>
         /// </summary>
         public virtual void MergeSortedField(FieldInfo fieldInfo, MergeState mergeState, IList<SortedDocValues> toMerge)
         {
@@ -331,166 +332,11 @@ namespace Lucene.Net.Codecs
             }
         }
 
-        /*
-        private class IterableAnonymousInnerClassHelper3 : IEnumerable<BytesRef>
-        {
-            private readonly DocValuesConsumer OuterInstance;
-
-            private SortedDocValues[] Dvs;
-            private OrdinalMap Map;
-
-            public IterableAnonymousInnerClassHelper3(DocValuesConsumer outerInstance, SortedDocValues[] dvs, OrdinalMap map)
-            {
-                this.OuterInstance = outerInstance;
-                this.Dvs = dvs;
-                this.Map = map;
-            }
-
-                // ord -> value
-            public virtual IEnumerator<BytesRef> GetEnumerator()
-            {
-              return new IteratorAnonymousInnerClassHelper3(this);
-            }
-
-            private class IteratorAnonymousInnerClassHelper3 : IEnumerator<BytesRef>
-            {
-                private readonly IterableAnonymousInnerClassHelper3 OuterInstance;
-
-                public IteratorAnonymousInnerClassHelper3(IterableAnonymousInnerClassHelper3 outerInstance)
-                {
-                    this.OuterInstance = outerInstance;
-                    scratch = new BytesRef();
-                }
-
-                internal readonly BytesRef scratch;
-                internal int currentOrd;
-
-                public virtual bool HasNext()
-                {
-                  return currentOrd < OuterInstance.Map.ValueCount;
-                }
-
-                public virtual BytesRef Next()
-                {
-                  if (!HasNext())
-                  {
-                    throw new Exception();
-                  }
-                  int segmentNumber = OuterInstance.Map.GetFirstSegmentNumber(currentOrd);
-                  int segmentOrd = (int)OuterInstance.Map.GetFirstSegmentOrd(currentOrd);
-                  OuterInstance.Dvs[segmentNumber].LookupOrd(segmentOrd, scratch);
-                  currentOrd++;
-                  return scratch;
-                }
-
-                public virtual void Remove()
-                {
-                  throw new System.NotSupportedException();
-                }
-            }
-        }
-
-        private class IterableAnonymousInnerClassHelper4 : IEnumerable<Number>
-        {
-            private readonly DocValuesConsumer OuterInstance;
-
-            private AtomicReader[] Readers;
-            private SortedDocValues[] Dvs;
-            private OrdinalMap Map;
-
-            public IterableAnonymousInnerClassHelper4(DocValuesConsumer outerInstance, AtomicReader[] readers, SortedDocValues[] dvs, OrdinalMap map)
-            {
-                this.OuterInstance = outerInstance;
-                this.Readers = readers;
-                this.Dvs = dvs;
-                this.Map = map;
-            }
-
-            public virtual IEnumerator<Number> GetEnumerator()
-            {
-              return new IteratorAnonymousInnerClassHelper4(this);
-            }
-
-            private class IteratorAnonymousInnerClassHelper4 : IEnumerator<Number>
-            {
-                private readonly IterableAnonymousInnerClassHelper4 OuterInstance;
-
-                public IteratorAnonymousInnerClassHelper4(IterableAnonymousInnerClassHelper4 outerInstance)
-                {
-                    this.OuterInstance = outerInstance;
-                    readerUpto = -1;
-                }
-
-                internal int readerUpto;
-                internal int docIDUpto;
-                internal int nextValue;
-                internal AtomicReader currentReader;
-                internal Bits currentLiveDocs;
-                internal bool nextIsSet;
-
-                public virtual bool HasNext()
-                {
-                  return nextIsSet || SetNext();
-                }
-
-                public virtual void Remove()
-                {
-                  throw new System.NotSupportedException();
-                }
-
-                public virtual Number Next()
-                {
-                  if (!HasNext())
-                  {
-                    throw new NoSuchElementException();
-                  }
-                  Debug.Assert(nextIsSet);
-                  nextIsSet = false;
-                  // TODO make a mutable number
-                  return nextValue;
-                }
-
-                private bool SetNext()
-                {
-                  while (true)
-                  {
-                    if (readerUpto == OuterInstance.Readers.Length)
-                    {
-                      return false;
-                    }
-
-                    if (currentReader == null || docIDUpto == currentReader.MaxDoc)
-                    {
-                      readerUpto++;
-                      if (readerUpto < OuterInstance.Readers.Length)
-                      {
-                        currentReader = OuterInstance.Readers[readerUpto];
-                        currentLiveDocs = currentReader.LiveDocs;
-                      }
-                      docIDUpto = 0;
-                      continue;
-                    }
-
-                    if (currentLiveDocs == null || currentLiveDocs.get(docIDUpto))
-                    {
-                      nextIsSet = true;
-                      int segOrd = OuterInstance.Dvs[readerUpto].GetOrd(docIDUpto);
-                      nextValue = segOrd == -1 ? - 1 : (int) OuterInstance.Map.GetGlobalOrd(readerUpto, segOrd);
-                      docIDUpto++;
-                      return true;
-                    }
-
-                    docIDUpto++;
-                  }
-                }
-            }
-        }*/
-
         /// <summary>
-        /// Merges the sortedset docvalues from <code>toMerge</code>.
-        /// <p>
-        /// The default implementation calls <seealso cref="#addSortedSetField"/>, passing
-        /// an Iterable that merges ordinals and values and filters deleted documents .
+        /// Merges the sortedset docvalues from <paramref name="toMerge"/>.
+        /// <para>
+        /// The default implementation calls <see cref="AddSortedSetField(FieldInfo, IEnumerable{BytesRef}, IEnumerable{long?}, IEnumerable{long?})"/>, passing
+        /// an <see cref="IEnumerable{T}"/> that merges ordinals and values and filters deleted documents.</para>
         /// </summary>
         public virtual void MergeSortedSetField(FieldInfo fieldInfo, MergeState mergeState, IList<SortedSetDocValues> toMerge)
         {
@@ -659,283 +505,6 @@ namespace Lucene.Net.Codecs
             }
         }
 
-        /*
-        private class IterableAnonymousInnerClassHelper5 : IEnumerable<BytesRef>
-        {
-            private readonly DocValuesConsumer OuterInstance;
-
-            private SortedSetDocValues[] Dvs;
-            private OrdinalMap Map;
-
-            public IterableAnonymousInnerClassHelper5(DocValuesConsumer outerInstance, SortedSetDocValues[] dvs, OrdinalMap map)
-            {
-                this.OuterInstance = outerInstance;
-                this.Dvs = dvs;
-                this.Map = map;
-            }
-
-                // ord -> value
-            public virtual IEnumerator<BytesRef> GetEnumerator()
-            {
-              return new IteratorAnonymousInnerClassHelper5(this);
-            }
-
-            private class IteratorAnonymousInnerClassHelper5 : IEnumerator<BytesRef>
-            {
-                private readonly IterableAnonymousInnerClassHelper5 OuterInstance;
-
-                public IteratorAnonymousInnerClassHelper5(IterableAnonymousInnerClassHelper5 outerInstance)
-                {
-                    this.OuterInstance = outerInstance;
-                    scratch = new BytesRef();
-                }
-
-                internal readonly BytesRef scratch;
-                internal long currentOrd;
-
-                public virtual bool HasNext()
-                {
-                  return currentOrd < OuterInstance.Map.ValueCount;
-                }
-
-                public virtual BytesRef Next()
-                {
-                  if (!HasNext())
-                  {
-                    throw new Exception();
-                  }
-                  int segmentNumber = OuterInstance.Map.GetFirstSegmentNumber(currentOrd);
-                  long segmentOrd = OuterInstance.Map.GetFirstSegmentOrd(currentOrd);
-                  OuterInstance.Dvs[segmentNumber].LookupOrd(segmentOrd, scratch);
-                  currentOrd++;
-                  return scratch;
-                }
-
-                public virtual void Remove()
-                {
-                  throw new System.NotSupportedException();
-                }
-            }
-        }
-
-        private class IterableAnonymousInnerClassHelper6 : IEnumerable<Number>
-        {
-            private readonly DocValuesConsumer OuterInstance;
-
-            private AtomicReader[] Readers;
-            private SortedSetDocValues[] Dvs;
-
-            public IterableAnonymousInnerClassHelper6(DocValuesConsumer outerInstance, AtomicReader[] readers, SortedSetDocValues[] dvs)
-            {
-                this.OuterInstance = outerInstance;
-                this.Readers = readers;
-                this.Dvs = dvs;
-            }
-
-            public virtual IEnumerator<Number> GetEnumerator()
-            {
-              return new IteratorAnonymousInnerClassHelper6(this);
-            }
-
-            private class IteratorAnonymousInnerClassHelper6 : IEnumerator<Number>
-            {
-                private readonly IterableAnonymousInnerClassHelper6 OuterInstance;
-
-                public IteratorAnonymousInnerClassHelper6(IterableAnonymousInnerClassHelper6 outerInstance)
-                {
-                    this.OuterInstance = outerInstance;
-                    readerUpto = -1;
-                }
-
-                internal int readerUpto;
-                internal int docIDUpto;
-                internal int nextValue;
-                internal AtomicReader currentReader;
-                internal Bits currentLiveDocs;
-                internal bool nextIsSet;
-
-                public virtual bool HasNext()
-                {
-                  return nextIsSet || SetNext();
-                }
-
-                public virtual void Remove()
-                {
-                  throw new System.NotSupportedException();
-                }
-
-                public virtual Number Next()
-                {
-                  if (!HasNext())
-                  {
-                    throw new Exception();
-                  }
-                  Debug.Assert(nextIsSet);
-                  nextIsSet = false;
-                  // TODO make a mutable number
-                  return nextValue;
-                }
-
-                private bool SetNext()
-                {
-                  while (true)
-                  {
-                    if (readerUpto == OuterInstance.Readers.Length)
-                    {
-                      return false;
-                    }
-
-                    if (currentReader == null || docIDUpto == currentReader.MaxDoc)
-                    {
-                      readerUpto++;
-                      if (readerUpto < OuterInstance.Readers.Length)
-                      {
-                        currentReader = OuterInstance.Readers[readerUpto];
-                        currentLiveDocs = currentReader.LiveDocs;
-                      }
-                      docIDUpto = 0;
-                      continue;
-                    }
-
-                    if (currentLiveDocs == null || currentLiveDocs.Get(docIDUpto))
-                    {
-                      nextIsSet = true;
-                      SortedSetDocValues dv = OuterInstance.Dvs[readerUpto];
-                      dv.Document = docIDUpto;
-                      nextValue = 0;
-                      while (dv.NextOrd() != SortedSetDocValues.NO_MORE_ORDS)
-                      {
-                        nextValue++;
-                      }
-                      docIDUpto++;
-                      return true;
-                    }
-
-                    docIDUpto++;
-                  }
-                }
-            }
-        }
-
-        private class IterableAnonymousInnerClassHelper7 : IEnumerable<Number>
-        {
-            private readonly DocValuesConsumer OuterInstance;
-
-            private AtomicReader[] Readers;
-            private SortedSetDocValues[] Dvs;
-            private OrdinalMap Map;
-
-            public IterableAnonymousInnerClassHelper7(DocValuesConsumer outerInstance, AtomicReader[] readers, SortedSetDocValues[] dvs, OrdinalMap map)
-            {
-                this.OuterInstance = outerInstance;
-                this.Readers = readers;
-                this.Dvs = dvs;
-                this.Map = map;
-            }
-
-            public virtual IEnumerator<Number> GetEnumerator()
-            {
-              return new IteratorAnonymousInnerClassHelper7(this);
-            }
-
-            private class IteratorAnonymousInnerClassHelper7 : IEnumerator<Number>
-            {
-                private readonly IterableAnonymousInnerClassHelper7 OuterInstance;
-
-                public IteratorAnonymousInnerClassHelper7(IterableAnonymousInnerClassHelper7 outerInstance)
-                {
-                    this.OuterInstance = outerInstance;
-                    readerUpto = -1;
-                    ords = new long[8];
-                }
-
-                internal int readerUpto;
-                internal int docIDUpto;
-                internal long nextValue;
-                internal AtomicReader currentReader;
-                internal Bits currentLiveDocs;
-                internal bool nextIsSet;
-                internal long[] ords;
-                internal int ordUpto;
-                internal int ordLength;
-
-                public virtual bool HasNext()
-                {
-                  return nextIsSet || SetNext();
-                }
-
-                public virtual void Remove()
-                {
-                  throw new System.NotSupportedException();
-                }
-
-                public virtual Number Next()
-                {
-                  if (!HasNext())
-                  {
-                    throw new Exception();
-                  }
-                  Debug.Assert(nextIsSet);
-                  nextIsSet = false;
-                  // TODO make a mutable number
-                  return nextValue;
-                }
-
-                private bool SetNext()
-                {
-                  while (true)
-                  {
-                    if (readerUpto == OuterInstance.Readers.Length)
-                    {
-                      return false;
-                    }
-
-                    if (ordUpto < ordLength)
-                    {
-                      nextValue = ords[ordUpto];
-                      ordUpto++;
-                      nextIsSet = true;
-                      return true;
-                    }
-
-                    if (currentReader == null || docIDUpto == currentReader.MaxDoc)
-                    {
-                      readerUpto++;
-                      if (readerUpto < OuterInstance.Readers.Length)
-                      {
-                        currentReader = OuterInstance.Readers[readerUpto];
-                        currentLiveDocs = currentReader.LiveDocs;
-                      }
-                      docIDUpto = 0;
-                      continue;
-                    }
-
-                    if (currentLiveDocs == null || currentLiveDocs.Get(docIDUpto))
-                    {
-                      Debug.Assert(docIDUpto < currentReader.MaxDoc);
-                      SortedSetDocValues dv = OuterInstance.Dvs[readerUpto];
-                      dv.Document = docIDUpto;
-                      ordUpto = ordLength = 0;
-                      long ord;
-                      while ((ord = dv.NextOrd()) != SortedSetDocValues.NO_MORE_ORDS)
-                      {
-                        if (ordLength == ords.Length)
-                        {
-                          ords = ArrayUtil.Grow(ords, ordLength + 1);
-                        }
-                        ords[ordLength] = OuterInstance.Map.GetGlobalOrd(readerUpto, ord);
-                        ordLength++;
-                      }
-                      docIDUpto++;
-                      continue;
-                    }
-
-                    docIDUpto++;
-                  }
-                }
-            }
-        }*/
-
         // TODO: seek-by-ord to nextSetBit
         internal class BitsFilteredTermsEnum : FilteredTermsEnum
         {
@@ -954,12 +523,18 @@ namespace Lucene.Net.Codecs
             }
         }
 
+        /// <summary>
+        /// Disposes all resources used by this object.
+        /// </summary>
         public void Dispose()
         {
             Dispose(true);
             GC.SuppressFinalize(this);
         }
 
+        /// <summary>
+        /// Implementations must override and should dispose all resources used by this instance.
+        /// </summary>
         protected abstract void Dispose(bool disposing);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/DocValuesFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/DocValuesFormat.cs b/src/Lucene.Net/Codecs/DocValuesFormat.cs
index 9ef0f4d..907aed9 100644
--- a/src/Lucene.Net/Codecs/DocValuesFormat.cs
+++ b/src/Lucene.Net/Codecs/DocValuesFormat.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Codecs
     /// Note, when extending this class, the name (<see cref="Name"/>) may
     /// written into the index in certain configurations. In order for the segment
     /// to be read, the name must resolve to your implementation via <see cref="ForName(string)"/>.
-    /// this method uses <see cref="IDocValuesFormatFactory.GetDocValuesFormat(string)"/> to resolve format names.
+    /// This method uses <see cref="IDocValuesFormatFactory.GetDocValuesFormat(string)"/> to resolve format names.
     /// <para/>
     /// To implement your own format:
     /// <list type="number">
@@ -123,8 +123,8 @@ namespace Lucene.Net.Codecs
         /// NOTE: by the time this call returns, it must hold open any files it will
         /// need to use; else, those files may be deleted. Additionally, required files
         /// may be deleted during the execution of this call before there is a chance
-        /// to open them. Under these circumstances an IOException should be thrown by
-        /// the implementation. IOExceptions are expected and will automatically cause
+        /// to open them. Under these circumstances an <see cref="System.IO.IOException"/> should be thrown by
+        /// the implementation. <see cref="System.IO.IOException"/>s are expected and will automatically cause
         /// a retry of the segment opening logic with the newly revised segments.
         /// </summary>
         public abstract DocValuesProducer FieldsProducer(SegmentReadState state);
@@ -147,14 +147,14 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// looks up a format by name </summary>
+        /// Looks up a format by name. </summary>
         public static DocValuesFormat ForName(string name)
         {
             return docValuesFormatFactory.GetDocValuesFormat(name);
         }
 
         /// <summary>
-        /// returns a list of all available format names </summary>
+        /// Returns a list of all available format names. </summary>
         public static ICollection<string> AvailableDocValuesFormats()
         {
             if (docValuesFormatFactory is IServiceListable)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/DocValuesProducer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/DocValuesProducer.cs b/src/Lucene.Net/Codecs/DocValuesProducer.cs
index c5f2605..900d1b3 100644
--- a/src/Lucene.Net/Codecs/DocValuesProducer.cs
+++ b/src/Lucene.Net/Codecs/DocValuesProducer.cs
@@ -29,74 +29,81 @@ namespace Lucene.Net.Codecs
     /// <summary>
     /// Abstract API that produces numeric, binary and
     /// sorted docvalues.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public abstract class DocValuesProducer : IDisposable
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal DocValuesProducer()
         {
         }
 
         /// <summary>
-        /// Returns <seealso cref="NumericDocValues"/> for this field.
-        ///  The returned instance need not be thread-safe: it will only be
-        ///  used by a single thread.
+        /// Returns <see cref="NumericDocValues"/> for this field.
+        /// The returned instance need not be thread-safe: it will only be
+        /// used by a single thread.
         /// </summary>
         public abstract NumericDocValues GetNumeric(FieldInfo field);
 
         /// <summary>
-        /// Returns <seealso cref="BinaryDocValues"/> for this field.
-        ///  The returned instance need not be thread-safe: it will only be
-        ///  used by a single thread.
+        /// Returns <see cref="BinaryDocValues"/> for this field.
+        /// The returned instance need not be thread-safe: it will only be
+        /// used by a single thread.
         /// </summary>
         public abstract BinaryDocValues GetBinary(FieldInfo field);
 
         /// <summary>
-        /// Returns <seealso cref="SortedDocValues"/> for this field.
-        ///  The returned instance need not be thread-safe: it will only be
-        ///  used by a single thread.
+        /// Returns <see cref="SortedDocValues"/> for this field.
+        /// The returned instance need not be thread-safe: it will only be
+        /// used by a single thread.
         /// </summary>
         public abstract SortedDocValues GetSorted(FieldInfo field);
 
         /// <summary>
-        /// Returns <seealso cref="SortedSetDocValues"/> for this field.
-        ///  The returned instance need not be thread-safe: it will only be
-        ///  used by a single thread.
+        /// Returns <see cref="SortedSetDocValues"/> for this field.
+        /// The returned instance need not be thread-safe: it will only be
+        /// used by a single thread.
         /// </summary>
         public abstract SortedSetDocValues GetSortedSet(FieldInfo field);
 
         /// <summary>
-        /// Returns a <seealso cref="IBits"/> at the size of <code>reader.maxDoc()</code>,
-        ///  with turned on bits for each docid that does have a value for this field.
-        ///  The returned instance need not be thread-safe: it will only be
-        ///  used by a single thread.
+        /// Returns a <see cref="IBits"/> at the size of <c>reader.MaxDoc</c>,
+        /// with turned on bits for each docid that does have a value for this field.
+        /// The returned instance need not be thread-safe: it will only be
+        /// used by a single thread.
         /// </summary>
         public abstract IBits GetDocsWithField(FieldInfo field);
 
         /// <summary>
-        /// Returns approximate RAM bytes used </summary>
+        /// Returns approximate RAM bytes used. </summary>
         public abstract long RamBytesUsed();
 
         /// <summary>
-        /// Checks consistency of this producer
-        /// <p>
+        /// Checks consistency of this producer.
+        /// <para/>
         /// Note that this may be costly in terms of I/O, e.g.
         /// may involve computing a checksum value against large data files.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public abstract void CheckIntegrity();
 
+        /// <summary>
+        /// Disposes all resources used by this object.
+        /// </summary>
         public virtual void Dispose()
         {
             Dispose(true);
             GC.SuppressFinalize(this);
         }
 
+        /// <summary>
+        /// Implementations must override and should dispose all resources used by this instance.
+        /// </summary>
         protected abstract void Dispose(bool disposing);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/FieldInfosFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/FieldInfosFormat.cs b/src/Lucene.Net/Codecs/FieldInfosFormat.cs
index a9f932c..30215f2 100644
--- a/src/Lucene.Net/Codecs/FieldInfosFormat.cs
+++ b/src/Lucene.Net/Codecs/FieldInfosFormat.cs
@@ -20,28 +20,29 @@ namespace Lucene.Net.Codecs
     using FieldInfos = Lucene.Net.Index.FieldInfos; // javadocs
 
     /// <summary>
-    /// Encodes/decodes <seealso cref="FieldInfos"/>
+    /// Encodes/decodes <see cref="FieldInfos"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public abstract class FieldInfosFormat
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal FieldInfosFormat()
         {
         }
 
         /// <summary>
-        /// Returns a <seealso cref="FieldInfosReader"/> to read field infos
-        ///  from the index
+        /// Returns a <see cref="Codecs.FieldInfosReader"/> to read field infos
+        /// from the index.
         /// </summary>
         public abstract FieldInfosReader FieldInfosReader { get; }
 
         /// <summary>
-        /// Returns a <seealso cref="FieldInfosWriter"/> to write field infos
-        ///  to the index
+        /// Returns a <see cref="Codecs.FieldInfosWriter"/> to write field infos
+        /// to the index.
         /// </summary>
         public abstract FieldInfosWriter FieldInfosWriter { get; }
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/FieldInfosReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/FieldInfosReader.cs b/src/Lucene.Net/Codecs/FieldInfosReader.cs
index 3e86d1b..8014e33 100644
--- a/src/Lucene.Net/Codecs/FieldInfosReader.cs
+++ b/src/Lucene.Net/Codecs/FieldInfosReader.cs
@@ -22,22 +22,23 @@ namespace Lucene.Net.Codecs
     using IOContext = Lucene.Net.Store.IOContext;
 
     /// <summary>
-    /// Codec API for reading <seealso cref="FieldInfos"/>.
+    /// Codec API for reading <see cref="FieldInfos"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public abstract class FieldInfosReader
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal FieldInfosReader()
         {
         }
 
         /// <summary>
-        /// Read the <seealso cref="FieldInfos"/> previously written with {@link
-        ///  FieldInfosWriter}.
+        /// Read the <see cref="FieldInfos"/> previously written with 
+        /// <see cref="FieldInfosWriter"/>.
         /// </summary>
         public abstract FieldInfos Read(Directory directory, string segmentName, string segmentSuffix, IOContext iocontext);
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/FieldInfosWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/FieldInfosWriter.cs b/src/Lucene.Net/Codecs/FieldInfosWriter.cs
index cd06e29..92bfe07 100644
--- a/src/Lucene.Net/Codecs/FieldInfosWriter.cs
+++ b/src/Lucene.Net/Codecs/FieldInfosWriter.cs
@@ -22,22 +22,23 @@ namespace Lucene.Net.Codecs
     using IOContext = Lucene.Net.Store.IOContext;
 
     /// <summary>
-    /// Codec API for writing <seealso cref="FieldInfos"/>.
+    /// Codec API for writing <see cref="FieldInfos"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public abstract class FieldInfosWriter
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal FieldInfosWriter()
         {
         }
 
         /// <summary>
-        /// Writes the provided <seealso cref="FieldInfos"/> to the
-        ///  directory.
+        /// Writes the provided <see cref="FieldInfos"/> to the
+        /// directory.
         /// </summary>
         public abstract void Write(Directory directory, string segmentName, string segmentSuffix, FieldInfos infos, IOContext context);
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/a08ae945/src/Lucene.Net/Codecs/FieldsConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/FieldsConsumer.cs b/src/Lucene.Net/Codecs/FieldsConsumer.cs
index ee6d7cd..8c29e2e 100644
--- a/src/Lucene.Net/Codecs/FieldsConsumer.cs
+++ b/src/Lucene.Net/Codecs/FieldsConsumer.cs
@@ -32,30 +32,30 @@ namespace Lucene.Net.Codecs
     /// payloads postings.  Concrete implementations of this
     /// actually do "something" with the postings (write it into
     /// the index in a specific format).
-    /// <p>
+    /// <para/>
     /// The lifecycle is:
-    /// <ol>
-    ///   <li>FieldsConsumer is created by
-    ///       <seealso cref="PostingsFormat#fieldsConsumer(SegmentWriteState)"/>.
-    ///   <li>For each field, <seealso cref="#addField(FieldInfo)"/> is called,
-    ///       returning a <seealso cref="TermsConsumer"/> for the field.
-    ///   <li>After all fields are added, the consumer is <seealso cref="#close"/>d.
-    /// </ol>
-    ///
+    /// <list type="number">
+    ///   <item><description>FieldsConsumer is created by
+    ///       <see cref="PostingsFormat.FieldsConsumer(Index.SegmentWriteState)"/>.</description></item>
+    ///   <item><description>For each field, <see cref="AddField(FieldInfo)"/> is called,
+    ///       returning a <see cref="TermsConsumer"/> for the field.</description></item>
+    ///   <item><description>After all fields are added, the consumer is <see cref="Dispose()"/>d.</description></item>
+    /// </list>
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public abstract class FieldsConsumer : IDisposable
     {
         /// <summary>
         /// Sole constructor. (For invocation by subclass
-        ///  constructors, typically implicit.)
+        /// constructors, typically implicit.)
         /// </summary>
         protected internal FieldsConsumer()
         {
         }
 
         /// <summary>
-        /// Add a new field </summary>
+        /// Add a new field. </summary>
         public abstract TermsConsumer AddField(FieldInfo field);
 
         /// <summary>
@@ -68,15 +68,16 @@ namespace Lucene.Net.Codecs
         }
 
         /// <summary>
-        /// Called when we are done adding everything. </summary>
+        /// Implementations must override and should dispose all resources used by this instance.
+        /// </summary>
         protected abstract void Dispose(bool disposing);
 
         /// <summary>
-        /// Called during merging to merge all <seealso cref="Fields"/> from
-        ///  sub-readers.  this must recurse to merge all postings
-        ///  (terms, docs, positions, etc.).  A {@link
-        ///  PostingsFormat} can override this default
-        ///  implementation to do its own merging.
+        /// Called during merging to merge all <see cref="Fields"/> from
+        /// sub-readers.  this must recurse to merge all postings
+        /// (terms, docs, positions, etc.).  A 
+        /// <see cref="PostingsFormat"/> can override this default
+        /// implementation to do its own merging.
         /// </summary>
         public virtual void Merge(MergeState mergeState, Fields fields)
         {


[21/48] lucenenet git commit: Lucene.Net.Join: Fixed XML documentation warnings

Posted by ni...@apache.org.
Lucene.Net.Join: Fixed XML documentation warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/30520703
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/30520703
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/30520703

Branch: refs/heads/master
Commit: 30520703a4298254d0da7088b849f0ae7dcde480
Parents: e4c37d3
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Sun Jun 4 04:13:15 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Sun Jun 4 04:13:15 2017 +0700

----------------------------------------------------------------------
 src/Lucene.Net.Join/ToParentBlockJoinCollector.cs | 6 ++++--
 src/Lucene.Net.Join/ToParentBlockJoinQuery.cs     | 3 ++-
 2 files changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/30520703/src/Lucene.Net.Join/ToParentBlockJoinCollector.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Join/ToParentBlockJoinCollector.cs b/src/Lucene.Net.Join/ToParentBlockJoinCollector.cs
index 7be54c8..184a368 100644
--- a/src/Lucene.Net.Join/ToParentBlockJoinCollector.cs
+++ b/src/Lucene.Net.Join/ToParentBlockJoinCollector.cs
@@ -33,8 +33,10 @@ namespace Lucene.Net.Join
     /// BlockJoinQuery clauses, sorted by the
     /// specified parent <see cref="Sort"/>.  Note that this cannot perform
     /// arbitrary joins; rather, it requires that all joined
-    /// documents are indexed as a doc block (using <see cref="IndexWriter.AddDocuments"/>
-    /// or <see cref="IndexWriter.UpdateDocuments"/>. Ie, the join is computed
+    /// documents are indexed as a doc block (using 
+    /// <see cref="IndexWriter.AddDocuments(IEnumerable{IEnumerable{IIndexableField}}, Analysis.Analyzer)"/>
+    /// or <see cref="IndexWriter.UpdateDocuments(Term, IEnumerable{IEnumerable{IIndexableField}}, Analysis.Analyzer)"/>. 
+    /// Ie, the join is computed
     /// at index time.
     /// 
     /// <para>The parent <see cref="Sort"/> must only use

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/30520703/src/Lucene.Net.Join/ToParentBlockJoinQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Join/ToParentBlockJoinQuery.cs b/src/Lucene.Net.Join/ToParentBlockJoinQuery.cs
index ec43f33..518c9d9 100644
--- a/src/Lucene.Net.Join/ToParentBlockJoinQuery.cs
+++ b/src/Lucene.Net.Join/ToParentBlockJoinQuery.cs
@@ -28,7 +28,8 @@ namespace Lucene.Net.Join
     /// <summary>
     /// This query requires that you index
     /// children and parent docs as a single block, using the
-    /// <see cref="IndexWriter.AddDocuments"/> or <see cref="IndexWriter.UpdateDocuments"/>
+    /// <see cref="IndexWriter.AddDocuments(IEnumerable{IEnumerable{IIndexableField}}, Analysis.Analyzer)"/> 
+    /// or <see cref="IndexWriter.UpdateDocuments(Term, IEnumerable{IEnumerable{IIndexableField}}, Analysis.Analyzer)"/>
     /// API.  In each block, the
     /// child documents must appear first, ending with the parent
     /// document.  At search time you provide a <see cref="Filter"/>


[38/48] lucenenet git commit: Lucene.Net.Codecs.Lucene42: Fixed XML documentation comment warnings

Posted by ni...@apache.org.
Lucene.Net.Codecs.Lucene42: Fixed XML documentation comment warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/ee52fd34
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/ee52fd34
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/ee52fd34

Branch: refs/heads/master
Commit: ee52fd34c52edf2bc67aecbed717fac22f931e8a
Parents: b27d10c
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Mon Jun 5 13:40:50 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Tue Jun 6 06:58:40 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |   3 +-
 src/Lucene.Net/Codecs/Lucene42/Lucene42Codec.cs |  23 +--
 .../Codecs/Lucene42/Lucene42DocValuesFormat.cs  | 188 +++++++++----------
 .../Lucene42/Lucene42DocValuesProducer.cs       |   2 +-
 .../Codecs/Lucene42/Lucene42FieldInfosFormat.cs | 111 ++++++-----
 .../Codecs/Lucene42/Lucene42FieldInfosReader.cs |   8 +-
 .../Codecs/Lucene42/Lucene42NormsConsumer.cs    |   2 +-
 .../Codecs/Lucene42/Lucene42NormsFormat.cs      |  35 ++--
 .../Lucene42/Lucene42TermVectorsFormat.cs       | 183 +++++++++---------
 9 files changed, 279 insertions(+), 276 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ee52fd34/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ce132b9..6886da2 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -55,8 +55,7 @@ helpers to help with that, see for examples see our [Java style methods to avoid
    2. Codecs.Lucene3x (namespace)
    3. Codecs.Lucene40 (namespace)
    4. Codecs.Lucene41 (namespace)
-   5. Codecs.Lucene42 (namespace)
-   6. Util.Packed (namespace)
+   5. Util.Packed (namespace)
 2. Lucene.Net.Codecs (project)
    1. Appending (namespace)
    2. BlockTerms (namespace)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ee52fd34/src/Lucene.Net/Codecs/Lucene42/Lucene42Codec.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene42/Lucene42Codec.cs b/src/Lucene.Net/Codecs/Lucene42/Lucene42Codec.cs
index 3972a5e..b9fe243 100644
--- a/src/Lucene.Net/Codecs/Lucene42/Lucene42Codec.cs
+++ b/src/Lucene.Net/Codecs/Lucene42/Lucene42Codec.cs
@@ -29,13 +29,14 @@ namespace Lucene.Net.Codecs.Lucene42
     /// <summary>
     /// Implements the Lucene 4.2 index format, with configurable per-field postings
     /// and docvalues formats.
-    /// <p>
+    /// <para/>
     /// If you want to reuse functionality of this codec in another codec, extend
-    /// <seealso cref="FilterCodec"/>.
+    /// <see cref="FilterCodec"/>.
+    /// <para/>
+    /// See <see cref="Lucene.Net.Codecs.Lucene42"/> package documentation for file format details.
+    /// <para/>
+    /// @lucene.experimental 
     /// </summary>
-    /// <seealso cref= Lucene.Net.Codecs.Lucene42 package documentation for file format details.
-    /// @lucene.experimental </seealso>
-    /// @deprecated Only for reading old 4.2 segments
     // NOTE: if we make largish changes in a minor release, easier to just make Lucene43Codec or whatever
     // if they are backwards compatible or smallish we can probably do the backwards in the postingsreader
     // (it writes a minor version, etc).
@@ -124,9 +125,9 @@ namespace Lucene.Net.Codecs.Lucene42
 
         /// <summary>
         /// Returns the postings format that should be used for writing
-        ///  new segments of <code>field</code>.
-        ///
-        ///  The default implementation always returns "Lucene41"
+        /// new segments of <paramref name="field"/>.
+        /// <para/>
+        /// The default implementation always returns "Lucene41"
         /// </summary>
         public virtual PostingsFormat GetPostingsFormatForField(string field)
         {
@@ -135,9 +136,9 @@ namespace Lucene.Net.Codecs.Lucene42
 
         /// <summary>
         /// Returns the docvalues format that should be used for writing
-        ///  new segments of <code>field</code>.
-        ///
-        ///  The default implementation always returns "Lucene42"
+        /// new segments of <paramref name="field"/>.
+        /// <para/>
+        /// The default implementation always returns "Lucene42"
         /// </summary>
         public virtual DocValuesFormat GetDocValuesFormatForField(string field)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ee52fd34/src/Lucene.Net/Codecs/Lucene42/Lucene42DocValuesFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene42/Lucene42DocValuesFormat.cs b/src/Lucene.Net/Codecs/Lucene42/Lucene42DocValuesFormat.cs
index 82e8c89..29419a5 100644
--- a/src/Lucene.Net/Codecs/Lucene42/Lucene42DocValuesFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene42/Lucene42DocValuesFormat.cs
@@ -25,99 +25,99 @@ namespace Lucene.Net.Codecs.Lucene42
 
     /// <summary>
     /// Lucene 4.2 DocValues format.
-    /// <p>
+    /// <para/>
     /// Encodes the four per-document value types (Numeric,Binary,Sorted,SortedSet) with seven basic strategies.
-    /// <p>
-    /// <ul>
-    ///    <li>Delta-compressed Numerics: per-document integers written in blocks of 4096. For each block
-    ///        the minimum value is encoded, and each entry is a delta from that minimum value.
-    ///    <li>Table-compressed Numerics: when the number of unique values is very small, a lookup table
-    ///        is written instead. Each per-document entry is instead the ordinal to this table.
-    ///    <li>Uncompressed Numerics: when all values would fit into a single byte, and the
-    ///        <code>acceptableOverheadRatio</code> would pack values into 8 bits per value anyway, they
-    ///        are written as absolute values (with no indirection or packing) for performance.
-    ///    <li>GCD-compressed Numerics: when all numbers share a common divisor, such as dates, the greatest
-    ///        common denominator (GCD) is computed, and quotients are stored using Delta-compressed Numerics.
-    ///    <li>Fixed-width Binary: one large concatenated byte[] is written, along with the fixed length.
-    ///        Each document's value can be addressed by maxDoc*length.
-    ///    <li>Variable-width Binary: one large concatenated byte[] is written, along with end addresses
+    /// <para/>
+    /// <list type="bullet">
+    ///    <item><description>Delta-compressed Numerics: per-document integers written in blocks of 4096. For each block
+    ///        the minimum value is encoded, and each entry is a delta from that minimum value.</description></item>
+    ///    <item><description>Table-compressed Numerics: when the number of unique values is very small, a lookup table
+    ///        is written instead. Each per-document entry is instead the ordinal to this table.</description></item>
+    ///    <item><description>Uncompressed Numerics: when all values would fit into a single byte, and the
+    ///        <c>acceptableOverheadRatio</c> would pack values into 8 bits per value anyway, they
+    ///        are written as absolute values (with no indirection or packing) for performance.</description></item>
+    ///    <item><description>GCD-compressed Numerics: when all numbers share a common divisor, such as dates, the greatest
+    ///        common denominator (GCD) is computed, and quotients are stored using Delta-compressed Numerics.</description></item>
+    ///    <item><description>Fixed-width Binary: one large concatenated byte[] is written, along with the fixed length.
+    ///        Each document's value can be addressed by <c>maxDoc*length</c>.</description></item>
+    ///    <item><description>Variable-width Binary: one large concatenated byte[] is written, along with end addresses
     ///        for each document. The addresses are written in blocks of 4096, with the current absolute
     ///        start for the block, and the average (expected) delta per entry. For each document the
-    ///        deviation from the delta (actual - expected) is written.
-    ///    <li>Sorted: an FST mapping deduplicated terms to ordinals is written, along with the per-document
-    ///        ordinals written using one of the numeric strategies above.
-    ///    <li>SortedSet: an FST mapping deduplicated terms to ordinals is written, along with the per-document
-    ///        ordinal list written using one of the binary strategies above.
-    /// </ul>
-    /// <p>
+    ///        deviation from the delta (actual - expected) is written.</description></item>
+    ///    <item><description>Sorted: an FST mapping deduplicated terms to ordinals is written, along with the per-document
+    ///        ordinals written using one of the numeric strategies above.</description></item>
+    ///    <item><description>SortedSet: an FST mapping deduplicated terms to ordinals is written, along with the per-document
+    ///        ordinal list written using one of the binary strategies above.</description></item>
+    /// </list>
+    /// <para/>
     /// Files:
-    /// <ol>
-    ///   <li><tt>.dvd</tt>: DocValues data</li>
-    ///   <li><tt>.dvm</tt>: DocValues metadata</li>
-    /// </ol>
-    /// <ol>
-    ///   <li><a name="dvm" id="dvm"></a>
-    ///   <p>The DocValues metadata or .dvm file.</p>
-    ///   <p>For DocValues field, this stores metadata, such as the offset into the
-    ///      DocValues data (.dvd)</p>
-    ///   <p>DocValues metadata (.dvm) --&gt; Header,&lt;FieldNumber,EntryType,Entry&gt;<sup>NumFields</sup>,Footer</p>
-    ///   <ul>
-    ///     <li>Entry --&gt; NumericEntry | BinaryEntry | SortedEntry</li>
-    ///     <li>NumericEntry --&gt; DataOffset,CompressionType,PackedVersion</li>
-    ///     <li>BinaryEntry --&gt; DataOffset,DataLength,MinLength,MaxLength,PackedVersion?,BlockSize?</li>
-    ///     <li>SortedEntry --&gt; DataOffset,ValueCount</li>
-    ///     <li>FieldNumber,PackedVersion,MinLength,MaxLength,BlockSize,ValueCount --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///     <li>DataOffset,DataLength --&gt; <seealso cref="DataOutput#writeLong Int64"/></li>
-    ///     <li>EntryType,CompressionType --&gt; <seealso cref="DataOutput#writeByte Byte"/></li>
-    ///     <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///     <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    ///   </ul>
-    ///   <p>Sorted fields have two entries: a SortedEntry with the FST metadata,
-    ///      and an ordinary NumericEntry for the document-to-ord metadata.</p>
-    ///   <p>SortedSet fields have two entries: a SortedEntry with the FST metadata,
-    ///      and an ordinary BinaryEntry for the document-to-ord-list metadata.</p>
-    ///   <p>FieldNumber of -1 indicates the end of metadata.</p>
-    ///   <p>EntryType is a 0 (NumericEntry), 1 (BinaryEntry, or 2 (SortedEntry)</p>
-    ///   <p>DataOffset is the pointer to the start of the data in the DocValues data (.dvd)</p>
-    ///   <p>CompressionType indicates how Numeric values will be compressed:
-    ///      <ul>
-    ///         <li>0 --&gt; delta-compressed. For each block of 4096 integers, every integer is delta-encoded
-    ///             from the minimum value within the block.
-    ///         <li>1 --&gt; table-compressed. When the number of unique numeric values is small and it would save space,
-    ///             a lookup table of unique values is written, followed by the ordinal for each document.
-    ///         <li>2 --&gt; uncompressed. When the <code>acceptableOverheadRatio</code> parameter would upgrade the number
+    /// <list type="number">
+    ///   <item><description><c>.dvd</c>: DocValues data</description></item>
+    ///   <item><description><c>.dvm</c>: DocValues metadata</description></item>
+    /// </list>
+    /// <list type="number">
+    ///   <item><description><a name="dvm" id="dvm"></a>
+    ///   <para>The DocValues metadata or .dvm file.</para>
+    ///   <para>For DocValues field, this stores metadata, such as the offset into the
+    ///      DocValues data (.dvd)</para>
+    ///   <para>DocValues metadata (.dvm) --&gt; Header,&lt;FieldNumber,EntryType,Entry&gt;<sup>NumFields</sup>,Footer</para>
+    ///   <list type="bullet">
+    ///     <item><description>Entry --&gt; NumericEntry | BinaryEntry | SortedEntry</description></item>
+    ///     <item><description>NumericEntry --&gt; DataOffset,CompressionType,PackedVersion</description></item>
+    ///     <item><description>BinaryEntry --&gt; DataOffset,DataLength,MinLength,MaxLength,PackedVersion?,BlockSize?</description></item>
+    ///     <item><description>SortedEntry --&gt; DataOffset,ValueCount</description></item>
+    ///     <item><description>FieldNumber,PackedVersion,MinLength,MaxLength,BlockSize,ValueCount --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///     <item><description>DataOffset,DataLength --&gt; Int64  (<see cref="Store.DataOutput.WriteInt64(long)"/>) </description></item>
+    ///     <item><description>EntryType,CompressionType --&gt; Byte  (<see cref="Store.DataOutput.WriteByte(byte)"/>) </description></item>
+    ///     <item><description>Header --&gt; CodecHeader  (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///     <item><description>Footer --&gt; CodecFooter  (<see cref="CodecUtil.WriteFooter(Store.IndexOutput)"/>) </description></item>
+    ///   </list>
+    ///   <para>Sorted fields have two entries: a SortedEntry with the FST metadata,
+    ///      and an ordinary NumericEntry for the document-to-ord metadata.</para>
+    ///   <para>SortedSet fields have two entries: a SortedEntry with the FST metadata,
+    ///      and an ordinary BinaryEntry for the document-to-ord-list metadata.</para>
+    ///   <para>FieldNumber of -1 indicates the end of metadata.</para>
+    ///   <para>EntryType is a 0 (NumericEntry), 1 (BinaryEntry), or 2 (SortedEntry)</para>
+    ///   <para>DataOffset is the pointer to the start of the data in the DocValues data (.dvd)</para>
+    ///   <para/>CompressionType indicates how Numeric values will be compressed:
+    ///      <list type="bullet">
+    ///         <item><description>0 --&gt; delta-compressed. For each block of 4096 integers, every integer is delta-encoded
+    ///             from the minimum value within the block.</description></item>
+    ///         <item><description>1 --&gt; table-compressed. When the number of unique numeric values is small and it would save space,
+    ///             a lookup table of unique values is written, followed by the ordinal for each document.</description></item>
+    ///         <item><description>2 --&gt; uncompressed. When the <c>acceptableOverheadRatio</c> parameter would upgrade the number
     ///             of bits required to 8, and all values fit in a byte, these are written as absolute binary values
-    ///             for performance.
-    ///         <li>3 --&gt, gcd-compressed. When all integers share a common divisor, only quotients are stored
-    ///             using blocks of delta-encoded ints.
-    ///      </ul>
-    ///   <p>MinLength and MaxLength represent the min and max byte[] value lengths for Binary values.
-    ///      If they are equal, then all values are of a fixed size, and can be addressed as DataOffset + (docID * length).
+    ///             for performance.</description></item>
+    ///         <item><description>3 --&gt; gcd-compressed. When all integers share a common divisor, only quotients are stored
+    ///             using blocks of delta-encoded ints.</description></item>
+    ///      </list>
+    ///   <para/>MinLength and MaxLength represent the min and max byte[] value lengths for Binary values.
+    ///      If they are equal, then all values are of a fixed size, and can be addressed as <c>DataOffset + (docID * length)</c>.
     ///      Otherwise, the binary values are of variable size, and packed integer metadata (PackedVersion,BlockSize)
-    ///      is written for the addresses.
-    ///   <li><a name="dvd" id="dvd"></a>
-    ///   <p>The DocValues data or .dvd file.</p>
-    ///   <p>For DocValues field, this stores the actual per-document data (the heavy-lifting)</p>
-    ///   <p>DocValues data (.dvd) --&gt; Header,&lt;NumericData | BinaryData | SortedData&gt;<sup>NumFields</sup>,Footer</p>
-    ///   <ul>
-    ///     <li>NumericData --&gt; DeltaCompressedNumerics | TableCompressedNumerics | UncompressedNumerics | GCDCompressedNumerics</li>
-    ///     <li>BinaryData --&gt;  <seealso cref="DataOutput#writeByte Byte"/><sup>DataLength</sup>,Addresses</li>
-    ///     <li>SortedData --&gt; <seealso cref="FST FST&lt;Int64&gt;"/></li>
-    ///     <li>DeltaCompressedNumerics --&gt; <seealso cref="BlockPackedWriter BlockPackedInts(blockSize=4096)"/></li>
-    ///     <li>TableCompressedNumerics --&gt; TableSize,<seealso cref="DataOutput#writeLong Int64"/><sup>TableSize</sup>,<seealso cref="PackedInt32s PackedInts"/></li>
-    ///     <li>UncompressedNumerics --&gt; <seealso cref="DataOutput#writeByte Byte"/><sup>maxdoc</sup></li>
-    ///     <li>Addresses --&gt; <seealso cref="MonotonicBlockPackedWriter MonotonicBlockPackedInts(blockSize=4096)"/></li>
-    ///     <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    ///   </ul>
-    ///   <p>SortedSet entries store the list of ordinals in their BinaryData as a
-    ///      sequences of increasing <seealso cref="DataOutput#writeVLong vLong"/>s, delta-encoded.</p>
-    /// </ol>
-    /// <p>
+    ///      is written for the addresses.</description></item>
+    ///   <item><description><a name="dvd" id="dvd"></a>
+    ///   <para>The DocValues data or .dvd file.</para>
+    ///   <para>For DocValues field, this stores the actual per-document data (the heavy-lifting)</para>
+    ///   <para>DocValues data (.dvd) --&gt; Header,&lt;NumericData | BinaryData | SortedData&gt;<sup>NumFields</sup>,Footer</para>
+    ///   <list type="bullet">
+    ///     <item><description>NumericData --&gt; DeltaCompressedNumerics | TableCompressedNumerics | UncompressedNumerics | GCDCompressedNumerics</description></item>
+    ///     <item><description>BinaryData --&gt; Byte  (<see cref="Store.DataOutput.WriteByte(byte)"/>) <sup>DataLength</sup>,Addresses</description></item>
+    ///     <item><description>SortedData --&gt; FST&lt;Int64&gt; (<see cref="Util.Fst.FST{T}"/>) </description></item>
+    ///     <item><description>DeltaCompressedNumerics --&gt; BlockPackedInts(blockSize=4096) (<see cref="Util.Packed.BlockPackedWriter"/>) </description></item>
+    ///     <item><description>TableCompressedNumerics --&gt; TableSize, Int64 (<see cref="Store.DataOutput.WriteInt64(long)"/>) <sup>TableSize</sup>, PackedInts (<see cref="PackedInt32s"/>) </description></item>
+    ///     <item><description>UncompressedNumerics --&gt; Byte (<see cref="Store.DataOutput.WriteByte(byte)"/>) <sup>maxdoc</sup></description></item>
+    ///     <item><description>Addresses --&gt; MonotonicBlockPackedInts(blockSize=4096) (<see cref="Util.Packed.MonotonicBlockPackedWriter"/>) </description></item>
+    ///     <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(Store.IndexOutput)"/></description></item>
+    ///   </list>
+    ///   <para>SortedSet entries store the list of ordinals in their BinaryData as a
+    ///      sequence of increasing vLongs (<see cref="Store.DataOutput.WriteVInt64(long)"/>), delta-encoded.</para></description></item>
+    /// </list>
+    /// <para/>
     /// Limitations:
-    /// <ul>
-    ///   <li> Binary doc values can be at most <seealso cref="#MAX_BINARY_FIELD_LENGTH"/> in length.
-    /// </ul> </summary>
-    /// @deprecated Only for reading old 4.2 segments
+    /// <list type="bullet">
+    ///   <item><description> Binary doc values can be at most <see cref="MAX_BINARY_FIELD_LENGTH"/> in length.</description></item>
+    /// </list> 
+    /// </summary>
     [Obsolete("Only for reading old 4.2 segments")]
     [DocValuesFormatName("Lucene42")] // LUCENENET specific - using DocValuesFormatName attribute to ensure the default name passed from subclasses is the same as this class name
     public class Lucene42DocValuesFormat : DocValuesFormat
@@ -129,8 +129,7 @@ namespace Lucene.Net.Codecs.Lucene42
         protected readonly float m_acceptableOverheadRatio;
 
         /// <summary>
-        /// Calls {@link #Lucene42DocValuesFormat(float)
-        /// Lucene42DocValuesFormat(PackedInts.DEFAULT)}
+        /// Calls <c>Lucene42DocValuesFormat(PackedInt32s.DEFAULT)</c> (<see cref="Lucene42DocValuesFormat(float)"/>).
         /// </summary>
         public Lucene42DocValuesFormat()
             : this(PackedInt32s.DEFAULT)
@@ -138,12 +137,13 @@ namespace Lucene.Net.Codecs.Lucene42
         }
 
         /// <summary>
-        /// Creates a new Lucene42DocValuesFormat with the specified
-        /// <code>acceptableOverheadRatio</code> for NumericDocValues. </summary>
-        /// <param name="acceptableOverheadRatio"> compression parameter for numerics.
-        ///        Currently this is only used when the number of unique values is small.
-        ///
-        /// @lucene.experimental </param>
+        /// Creates a new <see cref="Lucene42DocValuesFormat"/> with the specified
+        /// <paramref name="acceptableOverheadRatio"/> for <see cref="Index.NumericDocValues"/>. 
+        /// <para/>
+        /// @lucene.experimental
+        /// </summary>
+        /// <param name="acceptableOverheadRatio"> Compression parameter for numerics.
+        ///        Currently this is only used when the number of unique values is small.</param>
         public Lucene42DocValuesFormat(float acceptableOverheadRatio)
             : base()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ee52fd34/src/Lucene.Net/Codecs/Lucene42/Lucene42DocValuesProducer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene42/Lucene42DocValuesProducer.cs b/src/Lucene.Net/Codecs/Lucene42/Lucene42DocValuesProducer.cs
index 4503588..dc52a9e 100644
--- a/src/Lucene.Net/Codecs/Lucene42/Lucene42DocValuesProducer.cs
+++ b/src/Lucene.Net/Codecs/Lucene42/Lucene42DocValuesProducer.cs
@@ -54,7 +54,7 @@ namespace Lucene.Net.Codecs.Lucene42
     using Util = Lucene.Net.Util.Fst.Util;
 
     /// <summary>
-    /// Reader for <seealso cref="Lucene42DocValuesFormat"/>
+    /// Reader for <see cref="Lucene42DocValuesFormat"/>.
     /// </summary>
     internal class Lucene42DocValuesProducer : DocValuesProducer
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ee52fd34/src/Lucene.Net/Codecs/Lucene42/Lucene42FieldInfosFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene42/Lucene42FieldInfosFormat.cs b/src/Lucene.Net/Codecs/Lucene42/Lucene42FieldInfosFormat.cs
index 8cc7e9b..6dd6820 100644
--- a/src/Lucene.Net/Codecs/Lucene42/Lucene42FieldInfosFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene42/Lucene42FieldInfosFormat.cs
@@ -19,67 +19,64 @@ namespace Lucene.Net.Codecs.Lucene42
      * limitations under the License.
      */
 
-    // javadoc
-    // javadoc
-
     /// <summary>
     /// Lucene 4.2 Field Infos format.
-    /// <p>
-    /// <p>Field names are stored in the field info file, with suffix <tt>.fnm</tt>.</p>
-    /// <p>FieldInfos (.fnm) --&gt; Header,FieldsCount, &lt;FieldName,FieldNumber,
-    /// FieldBits,DocValuesBits,Attributes&gt; <sup>FieldsCount</sup></p>
-    /// <p>Data types:
-    /// <ul>
-    ///   <li>Header --&gt; <seealso cref="CodecUtil#checkHeader CodecHeader"/></li>
-    ///   <li>FieldsCount --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>FieldName --&gt; <seealso cref="DataOutput#writeString String"/></li>
-    ///   <li>FieldBits, DocValuesBits --&gt; <seealso cref="DataOutput#writeByte Byte"/></li>
-    ///   <li>FieldNumber --&gt; <seealso cref="DataOutput#writeInt VInt"/></li>
-    ///   <li>Attributes --&gt; <seealso cref="DataOutput#writeStringStringMap Map&lt;String,String&gt;"/></li>
-    /// </ul>
-    /// </p>
+    /// <para/>
+    /// <para>Field names are stored in the field info file, with suffix <c>.fnm</c>.</para>
+    /// <para>FieldInfos (.fnm) --&gt; Header,FieldsCount, &lt;FieldName,FieldNumber,
+    /// FieldBits,DocValuesBits,Attributes&gt; <sup>FieldsCount</sup></para>
+    /// <para>Data types:
+    /// <list type="bullet">
+    ///   <item><description>Header --&gt; CodecHeader <see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/></description></item>
+    ///   <item><description>FieldsCount --&gt; VInt <see cref="Store.DataOutput.WriteVInt32(int)"/></description></item>
+    ///   <item><description>FieldName --&gt; String <see cref="Store.DataOutput.WriteString(string)"/></description></item>
+    ///   <item><description>FieldBits, DocValuesBits --&gt; Byte <see cref="Store.DataOutput.WriteByte(byte)"/></description></item>
+    ///   <item><description>FieldNumber --&gt; VInt <see cref="Store.DataOutput.WriteInt32(int)"/></description></item>
+    ///   <item><description>Attributes --&gt; IDictionary&lt;String,String&gt; <see cref="Store.DataOutput.WriteStringStringMap(System.Collections.Generic.IDictionary{string, string})"/></description></item>
+    /// </list>
+    /// </para>
     /// Field Descriptions:
-    /// <ul>
-    ///   <li>FieldsCount: the number of fields in this file.</li>
-    ///   <li>FieldName: name of the field as a UTF-8 String.</li>
-    ///   <li>FieldNumber: the field's number. Note that unlike previous versions of
+    /// <list type="bullet">
+    ///   <item><description>FieldsCount: the number of fields in this file.</description></item>
+    ///   <item><description>FieldName: name of the field as a UTF-8 String.</description></item>
+    ///   <item><description>FieldNumber: the field's number. Note that unlike previous versions of
     ///       Lucene, the fields are not numbered implicitly by their order in the
-    ///       file, instead explicitly.</li>
-    ///   <li>FieldBits: a byte containing field options.
-    ///       <ul>
-    ///         <li>The low-order bit is one for indexed fields, and zero for non-indexed
-    ///             fields.</li>
-    ///         <li>The second lowest-order bit is one for fields that have term vectors
-    ///             stored, and zero for fields without term vectors.</li>
-    ///         <li>If the third lowest order-bit is set (0x4), offsets are stored into
-    ///             the postings list in addition to positions.</li>
-    ///         <li>Fourth bit is unused.</li>
-    ///         <li>If the fifth lowest-order bit is set (0x10), norms are omitted for the
-    ///             indexed field.</li>
-    ///         <li>If the sixth lowest-order bit is set (0x20), payloads are stored for the
-    ///             indexed field.</li>
-    ///         <li>If the seventh lowest-order bit is set (0x40), term frequencies and
-    ///             positions omitted for the indexed field.</li>
-    ///         <li>If the eighth lowest-order bit is set (0x80), positions are omitted for the
-    ///             indexed field.</li>
-    ///       </ul>
-    ///    </li>
-    ///    <li>DocValuesBits: a byte containing per-document value types. The type
+    ///       file, instead explicitly.</description></item>
+    ///   <item><description>FieldBits: a byte containing field options.
+    ///       <list type="bullet">
+    ///         <item><description>The low-order bit is one for indexed fields, and zero for non-indexed
+    ///             fields.</description></item>
+    ///         <item><description>The second lowest-order bit is one for fields that have term vectors
+    ///             stored, and zero for fields without term vectors.</description></item>
+    ///         <item><description>If the third lowest order-bit is set (0x4), offsets are stored into
+    ///             the postings list in addition to positions.</description></item>
+    ///         <item><description>Fourth bit is unused.</description></item>
+    ///         <item><description>If the fifth lowest-order bit is set (0x10), norms are omitted for the
+    ///             indexed field.</description></item>
+    ///         <item><description>If the sixth lowest-order bit is set (0x20), payloads are stored for the
+    ///             indexed field.</description></item>
+    ///         <item><description>If the seventh lowest-order bit is set (0x40), term frequencies and
+    ///             positions omitted for the indexed field.</description></item>
+    ///         <item><description>If the eighth lowest-order bit is set (0x80), positions are omitted for the
+    ///             indexed field.</description></item>
+    ///       </list>
+    ///    </description></item>
+    ///    <item><description>DocValuesBits: a byte containing per-document value types. The type
     ///        recorded as two four-bit integers, with the high-order bits representing
-    ///        <code>norms</code> options, and the low-order bits representing
-    ///        {@code DocValues} options. Each four-bit integer can be decoded as such:
-    ///        <ul>
-    ///          <li>0: no DocValues for this field.</li>
-    ///          <li>1: NumericDocValues. (<seealso cref="DocValuesType#NUMERIC"/>)</li>
-    ///          <li>2: BinaryDocValues. ({@code DocValuesType#BINARY})</li>
-    ///          <li>3: SortedDocValues. ({@code DocValuesType#SORTED})</li>
-    ///        </ul>
-    ///    </li>
-    ///    <li>Attributes: a key-value map of codec-private attributes.</li>
-    /// </ul>
-    ///
-    /// @lucene.experimental </summary>
-    /// @deprecated Only for reading old 4.2-4.5 segments
+    ///        <c>norms</c> options, and the low-order bits representing
+    ///        <see cref="Index.DocValues"/> options. Each four-bit integer can be decoded as such:
+    ///        <list type="bullet">
+    ///          <item><description>0: no DocValues for this field.</description></item>
+    ///          <item><description>1: NumericDocValues. (<see cref="Index.DocValuesType.NUMERIC"/>)</description></item>
+    ///          <item><description>2: BinaryDocValues. (<see cref="Index.DocValuesType.BINARY"/>)</description></item>
+    ///          <item><description>3: SortedDocValues. (<see cref="Index.DocValuesType.SORTED"/>)</description></item>
+    ///        </list>
+    ///    </description></item>
+    ///    <item><description>Attributes: a key-value map of codec-private attributes.</description></item>
+    /// </list>
+    /// <para/>
+    /// @lucene.experimental
+    /// </summary>
     [Obsolete("Only for reading old 4.2-4.5 segments")]
     public class Lucene42FieldInfosFormat : FieldInfosFormat
     {
@@ -108,7 +105,7 @@ namespace Lucene.Net.Codecs.Lucene42
         }
 
         /// <summary>
-        /// Extension of field infos </summary>
+        /// Extension of field infos. </summary>
         internal const string EXTENSION = "fnm";
 
         // Codec header

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ee52fd34/src/Lucene.Net/Codecs/Lucene42/Lucene42FieldInfosReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene42/Lucene42FieldInfosReader.cs b/src/Lucene.Net/Codecs/Lucene42/Lucene42FieldInfosReader.cs
index b81c62d..7c5bff8 100644
--- a/src/Lucene.Net/Codecs/Lucene42/Lucene42FieldInfosReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene42/Lucene42FieldInfosReader.cs
@@ -34,10 +34,10 @@ namespace Lucene.Net.Codecs.Lucene42
 
     /// <summary>
     /// Lucene 4.2 FieldInfos reader.
-    ///
-    /// @lucene.experimental </summary>
-    /// @deprecated Only for reading old 4.2-4.5 segments
-    /// <seealso cref= Lucene42FieldInfosFormat </seealso>
+    /// <para/>
+    /// @lucene.experimental
+    /// </summary>
+    /// <seealso cref="Lucene42FieldInfosFormat"/>
     [Obsolete("Only for reading old 4.2-4.5 segments")]
     internal sealed class Lucene42FieldInfosReader : FieldInfosReader
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ee52fd34/src/Lucene.Net/Codecs/Lucene42/Lucene42NormsConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene42/Lucene42NormsConsumer.cs b/src/Lucene.Net/Codecs/Lucene42/Lucene42NormsConsumer.cs
index fa445de..3351309 100644
--- a/src/Lucene.Net/Codecs/Lucene42/Lucene42NormsConsumer.cs
+++ b/src/Lucene.Net/Codecs/Lucene42/Lucene42NormsConsumer.cs
@@ -34,7 +34,7 @@ namespace Lucene.Net.Codecs.Lucene42
     using SegmentWriteState = Lucene.Net.Index.SegmentWriteState;
 
     /// <summary>
-    /// Writer for <seealso cref="Lucene42NormsFormat"/>
+    /// Writer for <see cref="Lucene42NormsFormat"/>.
     /// </summary>
     internal class Lucene42NormsConsumer : DocValuesConsumer
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ee52fd34/src/Lucene.Net/Codecs/Lucene42/Lucene42NormsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene42/Lucene42NormsFormat.cs b/src/Lucene.Net/Codecs/Lucene42/Lucene42NormsFormat.cs
index 66e0c3c..7884efe 100644
--- a/src/Lucene.Net/Codecs/Lucene42/Lucene42NormsFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene42/Lucene42NormsFormat.cs
@@ -23,25 +23,25 @@ namespace Lucene.Net.Codecs.Lucene42
 
     /// <summary>
     /// Lucene 4.2 score normalization format.
-    /// <p>
-    /// NOTE: this uses the same format as <seealso cref="Lucene42DocValuesFormat"/>
+    /// <para/>
+    /// NOTE: this uses the same format as <see cref="Lucene42DocValuesFormat"/>
     /// Numeric DocValues, but with different file extensions, and passing
-    /// <seealso cref="PackedInt32s#FASTEST"/> for uncompressed encoding: trading off
+    /// <see cref="PackedInt32s.FASTEST"/> for uncompressed encoding: trading off
     /// space for performance.
-    /// <p>
+    /// <para/>
     /// Files:
-    /// <ul>
-    ///   <li><tt>.nvd</tt>: DocValues data</li>
-    ///   <li><tt>.nvm</tt>: DocValues metadata</li>
-    /// </ul> </summary>
-    /// <seealso cref= Lucene42DocValuesFormat </seealso>
+    /// <list type="bullet">
+    ///   <item><description><c>.nvd</c>: DocValues data</description></item>
+    ///   <item><description><c>.nvm</c>: DocValues metadata</description></item>
+    /// </list>
+    /// </summary>
+    /// <seealso cref="Lucene42DocValuesFormat"/>
     public class Lucene42NormsFormat : NormsFormat
     {
         internal readonly float acceptableOverheadRatio;
 
         /// <summary>
-        /// Calls {@link #Lucene42NormsFormat(float)
-        /// Lucene42DocValuesFormat(PackedInts.FASTEST)}
+        /// Calls <c>Lucene42NormsFormat(PackedInt32s.FASTEST)</c> (<see cref="Lucene42NormsFormat(float)"/>).
         /// </summary>
         public Lucene42NormsFormat()
             : this(PackedInt32s.FASTEST)
@@ -50,12 +50,13 @@ namespace Lucene.Net.Codecs.Lucene42
         }
 
         /// <summary>
-        /// Creates a new Lucene42DocValuesFormat with the specified
-        /// <code>acceptableOverheadRatio</code> for NumericDocValues. </summary>
-        /// <param name="acceptableOverheadRatio"> compression parameter for numerics.
-        ///        Currently this is only used when the number of unique values is small.
-        ///
-        /// @lucene.experimental </param>
+        /// Creates a new <see cref="Lucene42NormsFormat"/> with the specified
+        /// <paramref name="acceptableOverheadRatio"/> for <see cref="Index.NumericDocValues"/>. 
+        /// <para/>
+        /// @lucene.experimental
+        /// </summary>
+        /// <param name="acceptableOverheadRatio"> Compression parameter for numerics.
+        ///        Currently this is only used when the number of unique values is small.</param>
         public Lucene42NormsFormat(float acceptableOverheadRatio)
         {
             this.acceptableOverheadRatio = acceptableOverheadRatio;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ee52fd34/src/Lucene.Net/Codecs/Lucene42/Lucene42TermVectorsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene42/Lucene42TermVectorsFormat.cs b/src/Lucene.Net/Codecs/Lucene42/Lucene42TermVectorsFormat.cs
index 27e491e..9c46c86 100644
--- a/src/Lucene.Net/Codecs/Lucene42/Lucene42TermVectorsFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene42/Lucene42TermVectorsFormat.cs
@@ -21,98 +21,103 @@ namespace Lucene.Net.Codecs.Lucene42
     using CompressionMode = Lucene.Net.Codecs.Compressing.CompressionMode;
 
     /// <summary>
-    /// Lucene 4.2 <seealso cref="TermVectorsFormat term vectors format"/>.
-    /// <p>
-    /// Very similarly to <seealso cref="Lucene41StoredFieldsFormat"/>, this format is based
+    /// Lucene 4.2 term vectors format (<see cref="TermVectorsFormat"/>).
+    /// <para/>
+    /// Very similarly to <see cref="Lucene41.Lucene41StoredFieldsFormat"/>, this format is based
     /// on compressed chunks of data, with document-level granularity so that a
     /// document can never span across distinct chunks. Moreover, data is made as
-    /// compact as possible:<ul>
-    /// <li>textual data is compressed using the very light,
-    /// <a href="http://code.google.com/p/lz4/">LZ4</a> compression algorithm,
-    /// <li>binary data is written using fixed-size blocks of
-    /// <seealso cref="PackedInts packed ints"/>.
-    /// </ul>
-    /// <p>
-    /// Term vectors are stored using two files<ul>
-    /// <li>a data file where terms, frequencies, positions, offsets and payloads
-    /// are stored,
-    /// <li>an index file, loaded into memory, used to locate specific documents in
-    /// the data file.
-    /// </ul>
+    /// compact as possible:
+    /// <list type="bullet">
+    ///     <item><description>textual data is compressed using the very light,
+    ///         <a href="http://code.google.com/p/lz4/">LZ4</a> compression algorithm,</description></item>
+    ///     <item><description>binary data is written using fixed-size blocks of
+    ///         packed <see cref="int"/>s (<see cref="Util.Packed.PackedInt32s"/>).</description></item>
+    /// </list>
+    /// <para/>
+    /// Term vectors are stored using two files
+    /// <list type="bullet">
+    ///     <item><description>a data file where terms, frequencies, positions, offsets and payloads
+    ///         are stored,</description></item>
+    ///     <item><description>an index file, loaded into memory, used to locate specific documents in
+    ///         the data file.</description></item>
+    /// </list>
     /// Looking up term vectors for any document requires at most 1 disk seek.
-    /// <p><b>File formats</b>
-    /// <ol>
-    /// <li><a name="vector_data" id="vector_data"></a>
-    /// <p>A vector data file (extension <tt>.tvd</tt>). this file stores terms,
-    /// frequencies, positions, offsets and payloads for every document. Upon writing
-    /// a new segment, it accumulates data into memory until the buffer used to store
-    /// terms and payloads grows beyond 4KB. Then it flushes all metadata, terms
-    /// and positions to disk using <a href="http://code.google.com/p/lz4/">LZ4</a>
-    /// compression for terms and payloads and
-    /// <seealso cref="BlockPackedWriter blocks of packed ints"/> for positions.</p>
-    /// <p>Here is a more detailed description of the field data file format:</p>
-    /// <ul>
-    /// <li>VectorData (.tvd) --&gt; &lt;Header&gt;, PackedIntsVersion, ChunkSize, &lt;Chunk&gt;<sup>ChunkCount</sup>, Footer</li>
-    /// <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    /// <li>PackedIntsVersion --&gt; <seealso cref="PackedInts#VERSION_CURRENT"/> as a <seealso cref="DataOutput#writeVInt VInt"/></li>
-    /// <li>ChunkSize is the number of bytes of terms to accumulate before flushing, as a <seealso cref="DataOutput#writeVInt VInt"/></li>
-    /// <li>ChunkCount is not known in advance and is the number of chunks necessary to store all document of the segment</li>
-    /// <li>Chunk --&gt; DocBase, ChunkDocs, &lt; NumFields &gt;, &lt; FieldNums &gt;, &lt; FieldNumOffs &gt;, &lt; Flags &gt;,
-    /// &lt; NumTerms &gt;, &lt; TermLengths &gt;, &lt; TermFreqs &gt;, &lt; Positions &gt;, &lt; StartOffsets &gt;, &lt; Lengths &gt;,
-    /// &lt; PayloadLengths &gt;, &lt; TermAndPayloads &gt;</li>
-    /// <li>DocBase is the ID of the first doc of the chunk as a <seealso cref="DataOutput#writeVInt VInt"/></li>
-    /// <li>ChunkDocs is the number of documents in the chunk</li>
-    /// <li>NumFields --&gt; DocNumFields<sup>ChunkDocs</sup></li>
-    /// <li>DocNumFields is the number of fields for each doc, written as a <seealso cref="DataOutput#writeVInt VInt"/> if ChunkDocs==1 and as a <seealso cref="PackedInts"/> array otherwise</li>
-    /// <li>FieldNums --&gt; FieldNumDelta<sup>TotalDistincFields</sup>, a delta-encoded list of the sorted unique field numbers present in the chunk</li>
-    /// <li>FieldNumOffs --&gt; FieldNumOff<sup>TotalFields</sup>, as a <seealso cref="PackedInts"/> array</li>
-    /// <li>FieldNumOff is the offset of the field number in FieldNums</li>
-    /// <li>TotalFields is the total number of fields (sum of the values of NumFields)</li>
-    /// <li>Flags --&gt; Bit &lt; FieldFlags &gt;</li>
-    /// <li>Bit  is a single bit which when true means that fields have the same options for every document in the chunk</li>
-    /// <li>FieldFlags --&gt; if Bit==1: Flag<sup>TotalDistinctFields</sup> else Flag<sup>TotalFields</sup></li>
-    /// <li>Flag: a 3-bits int where:<ul>
-    /// <li>the first bit means that the field has positions</li>
-    /// <li>the second bit means that the field has offsets</li>
-    /// <li>the third bit means that the field has payloads</li>
-    /// </ul></li>
-    /// <li>NumTerms --&gt; FieldNumTerms<sup>TotalFields</sup></li>
-    /// <li>FieldNumTerms: the number of terms for each field, using <seealso cref="BlockPackedWriter blocks of 64 packed ints"/></li>
-    /// <li>TermLengths --&gt; PrefixLength<sup>TotalTerms</sup> SuffixLength<sup>TotalTerms</sup></li>
-    /// <li>TotalTerms: total number of terms (sum of NumTerms)</li>
-    /// <li>PrefixLength: 0 for the first term of a field, the common prefix with the previous term otherwise using <seealso cref="BlockPackedWriter blocks of 64 packed ints"/></li>
-    /// <li>SuffixLength: length of the term minus PrefixLength for every term using <seealso cref="BlockPackedWriter blocks of 64 packed ints"/></li>
-    /// <li>TermFreqs --&gt; TermFreqMinus1<sup>TotalTerms</sup></li>
-    /// <li>TermFreqMinus1: (frequency - 1) for each term using  <seealso cref="BlockPackedWriter blocks of 64 packed ints"/></li>
-    /// <li>Positions --&gt; PositionDelta<sup>TotalPositions</sup></li>
-    /// <li>TotalPositions is the sum of frequencies of terms of all fields that have positions</li>
-    /// <li>PositionDelta: the absolute position for the first position of a term, and the difference with the previous positions for following positions using <seealso cref="BlockPackedWriter blocks of 64 packed ints"/></li>
-    /// <li>StartOffsets --&gt; (AvgCharsPerTerm<sup>TotalDistinctFields</sup>) StartOffsetDelta<sup>TotalOffsets</sup></li>
-    /// <li>TotalOffsets is the sum of frequencies of terms of all fields that have offsets</li>
-    /// <li>AvgCharsPerTerm: average number of chars per term, encoded as a float on 4 bytes. They are not present if no field has both positions and offsets enabled.</li>
-    /// <li>StartOffsetDelta: (startOffset - previousStartOffset - AvgCharsPerTerm * PositionDelta). previousStartOffset is 0 for the first offset and AvgCharsPerTerm is 0 if the field has no positions using  <seealso cref="BlockPackedWriter blocks of 64 packed ints"/></li>
-    /// <li>Lengths --&gt; LengthMinusTermLength<sup>TotalOffsets</sup></li>
-    /// <li>LengthMinusTermLength: (endOffset - startOffset - termLength) using  <seealso cref="BlockPackedWriter blocks of 64 packed ints"/></li>
-    /// <li>PayloadLengths --&gt; PayloadLength<sup>TotalPayloads</sup></li>
-    /// <li>TotalPayloads is the sum of frequencies of terms of all fields that have payloads</li>
-    /// <li>PayloadLength is the payload length encoded using  <seealso cref="BlockPackedWriter blocks of 64 packed ints"/></li>
-    /// <li>TermAndPayloads --&gt; LZ4-compressed representation of &lt; FieldTermsAndPayLoads &gt;<sup>TotalFields</sup></li>
-    /// <li>FieldTermsAndPayLoads --&gt; Terms (Payloads)</li>
-    /// <li>Terms: term bytes</li>
-    /// <li>Payloads: payload bytes (if the field has payloads)</li>
-    /// <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    /// </ul>
-    /// </li>
-    /// <li><a name="vector_index" id="vector_index"></a>
-    /// <p>An index file (extension <tt>.tvx</tt>).</p>
-    /// <ul>
-    /// <li>VectorIndex (.tvx) --&gt; &lt;Header&gt;, &lt;ChunkIndex&gt;, Footer</li>
-    /// <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    /// <li>ChunkIndex: See <seealso cref="CompressingStoredFieldsIndexWriter"/></li>
-    /// <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    /// </ul>
-    /// </li>
-    /// </ol>
+    /// <para/><b>File formats</b>
+    /// <list type="number">
+    ///     <item><description><a name="vector_data" id="vector_data"></a>
+    ///         <para>A vector data file (extension <c>.tvd</c>). This file stores terms,
+    ///         frequencies, positions, offsets and payloads for every document. Upon writing
+    ///         a new segment, it accumulates data into memory until the buffer used to store
+    ///         terms and payloads grows beyond 4KB. Then it flushes all metadata, terms
+    ///         and positions to disk using <a href="http://code.google.com/p/lz4/">LZ4</a>
+    ///         compression for terms and payloads and
+    ///         blocks of packed <see cref="int"/>s (<see cref="Util.Packed.BlockPackedWriter"/>) for positions.</para>
+    ///         <para>Here is a more detailed description of the field data file format:</para>
+    ///         <list type="bullet">
+    ///             <item><description>VectorData (.tvd) --&gt; &lt;Header&gt;, PackedIntsVersion, ChunkSize, &lt;Chunk&gt;<sup>ChunkCount</sup>, Footer</description></item>
+    ///             <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///             <item><description>PackedIntsVersion --&gt; <see cref="Util.Packed.PackedInt32s.VERSION_CURRENT"/> as a VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///             <item><description>ChunkSize is the number of bytes of terms to accumulate before flushing, as a VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///             <item><description>ChunkCount is not known in advance and is the number of chunks necessary to store all document of the segment</description></item>
+    ///             <item><description>Chunk --&gt; DocBase, ChunkDocs, &lt; NumFields &gt;, &lt; FieldNums &gt;, &lt; FieldNumOffs &gt;, &lt; Flags &gt;,
+    ///                 &lt; NumTerms &gt;, &lt; TermLengths &gt;, &lt; TermFreqs &gt;, &lt; Positions &gt;, &lt; StartOffsets &gt;, &lt; Lengths &gt;,
+    ///                 &lt; PayloadLengths &gt;, &lt; TermAndPayloads &gt;</description></item>
+    ///             <item><description>DocBase is the ID of the first doc of the chunk as a VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///             <item><description>ChunkDocs is the number of documents in the chunk</description></item>
+    ///             <item><description>NumFields --&gt; DocNumFields<sup>ChunkDocs</sup></description></item>
+    ///             <item><description>DocNumFields is the number of fields for each doc, written as a VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) if ChunkDocs==1 and as a <see cref="Util.Packed.PackedInt32s"/> array otherwise</description></item>
+    ///             <item><description>FieldNums --&gt; FieldNumDelta<sup>TotalDistinctFields</sup>, a delta-encoded list of the sorted unique field numbers present in the chunk</description></item>
+    ///             <item><description>FieldNumOffs --&gt; FieldNumOff<sup>TotalFields</sup>, as a <see cref="Util.Packed.PackedInt32s"/> array</description></item>
+    ///             <item><description>FieldNumOff is the offset of the field number in FieldNums</description></item>
+    ///             <item><description>TotalFields is the total number of fields (sum of the values of NumFields)</description></item>
+    ///             <item><description>Flags --&gt; Bit &lt; FieldFlags &gt;</description></item>
+    ///             <item><description>Bit  is a single bit which when true means that fields have the same options for every document in the chunk</description></item>
+    ///             <item><description>FieldFlags --&gt; if Bit==1: Flag<sup>TotalDistinctFields</sup> else Flag<sup>TotalFields</sup></description></item>
+    ///             <item><description>Flag: a 3-bits int where:
+    ///                 <list type="bullet">
+    ///                     <item><description>the first bit means that the field has positions</description></item>
+    ///                     <item><description>the second bit means that the field has offsets</description></item>
+    ///                     <item><description>the third bit means that the field has payloads</description></item>
+    ///                 </list>
+    ///             </description></item>
+    ///             <item><description>NumTerms --&gt; FieldNumTerms<sup>TotalFields</sup></description></item>
+    ///             <item><description>FieldNumTerms: the number of terms for each field, using blocks of 64 packed <see cref="int"/>s (<see cref="Util.Packed.BlockPackedWriter"/>) </description></item>
+    ///             <item><description>TermLengths --&gt; PrefixLength<sup>TotalTerms</sup> SuffixLength<sup>TotalTerms</sup></description></item>
+    ///             <item><description>TotalTerms: total number of terms (sum of NumTerms)</description></item>
+    ///             <item><description>PrefixLength: 0 for the first term of a field, the common prefix with the previous term otherwise using blocks of 64 packed <see cref="int"/>s (<see cref="Util.Packed.BlockPackedWriter"/>) </description></item>
+    ///             <item><description>SuffixLength: length of the term minus PrefixLength for every term using blocks of 64 packed <see cref="int"/>s (<see cref="Util.Packed.BlockPackedWriter"/>) </description></item>
+    ///             <item><description>TermFreqs --&gt; TermFreqMinus1<sup>TotalTerms</sup></description></item>
+    ///             <item><description>TermFreqMinus1: (frequency - 1) for each term using blocks of 64 packed <see cref="int"/>s (<see cref="Util.Packed.BlockPackedWriter"/>) </description></item>
+    ///             <item><description>Positions --&gt; PositionDelta<sup>TotalPositions</sup></description></item>
+    ///             <item><description>TotalPositions is the sum of frequencies of terms of all fields that have positions</description></item>
+    ///             <item><description>PositionDelta: the absolute position for the first position of a term, and the difference with the previous positions for following positions using blocks of 64 packed <see cref="int"/>s (<see cref="Util.Packed.BlockPackedWriter"/>) </description></item>
+    ///             <item><description>StartOffsets --&gt; (AvgCharsPerTerm<sup>TotalDistinctFields</sup>) StartOffsetDelta<sup>TotalOffsets</sup></description></item>
+    ///             <item><description>TotalOffsets is the sum of frequencies of terms of all fields that have offsets</description></item>
+    ///             <item><description>AvgCharsPerTerm: average number of chars per term, encoded as a float on 4 bytes. They are not present if no field has both positions and offsets enabled.</description></item>
+    ///             <item><description>StartOffsetDelta: (startOffset - previousStartOffset - AvgCharsPerTerm * PositionDelta). previousStartOffset is 0 for the first offset and AvgCharsPerTerm is 0 if the field has no positions using blocks of 64 packed <see cref="int"/>s (<see cref="Util.Packed.BlockPackedWriter"/>) </description></item>
+    ///             <item><description>Lengths --&gt; LengthMinusTermLength<sup>TotalOffsets</sup></description></item>
+    ///             <item><description>LengthMinusTermLength: (endOffset - startOffset - termLength) using blocks of 64 packed <see cref="int"/>s (<see cref="Util.Packed.BlockPackedWriter"/>) </description></item>
+    ///             <item><description>PayloadLengths --&gt; PayloadLength<sup>TotalPayloads</sup></description></item>
+    ///             <item><description>TotalPayloads is the sum of frequencies of terms of all fields that have payloads</description></item>
+    ///             <item><description>PayloadLength is the payload length encoded using blocks of 64 packed <see cref="int"/>s (<see cref="Util.Packed.BlockPackedWriter"/>) </description></item>
+    ///             <item><description>TermAndPayloads --&gt; LZ4-compressed representation of &lt; FieldTermsAndPayLoads &gt;<sup>TotalFields</sup></description></item>
+    ///             <item><description>FieldTermsAndPayLoads --&gt; Terms (Payloads)</description></item>
+    ///             <item><description>Terms: term bytes</description></item>
+    ///             <item><description>Payloads: payload bytes (if the field has payloads)</description></item>
+    ///             <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(Store.IndexOutput)"/>) </description></item>
+    ///         </list>
+    ///     </description></item>
+    ///     <item><description><a name="vector_index" id="vector_index"></a>
+    ///         <para>An index file (extension <c>.tvx</c>).</para>
+    ///         <list type="bullet">
+    ///             <item><description>VectorIndex (.tvx) --&gt; &lt;Header&gt;, &lt;ChunkIndex&gt;, Footer</description></item>
+    ///             <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///             <item><description>ChunkIndex: See <see cref="Compressing.CompressingStoredFieldsIndexWriter"/></description></item>
+    ///             <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(Store.IndexOutput)"/>) </description></item>
+    ///         </list>
+    ///     </description></item>
+    /// </list>
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public sealed class Lucene42TermVectorsFormat : CompressingTermVectorsFormat


[25/48] lucenenet git commit: Lucene.Net.Util: Fixed XML Documentation comments, types beginning with H-Z

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/WAH8DocIdSet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/WAH8DocIdSet.cs b/src/Lucene.Net/Util/WAH8DocIdSet.cs
index 2641a41..b5abda8 100644
--- a/src/Lucene.Net/Util/WAH8DocIdSet.cs
+++ b/src/Lucene.Net/Util/WAH8DocIdSet.cs
@@ -29,48 +29,48 @@ namespace Lucene.Net.Util
     using PackedInt32s = Lucene.Net.Util.Packed.PackedInt32s;
 
     /// <summary>
-    /// <seealso cref="DocIdSet"/> implementation based on word-aligned hybrid encoding on
+    /// <see cref="DocIdSet"/> implementation based on word-aligned hybrid encoding on
     /// words of 8 bits.
-    /// <p>this implementation doesn't support random-access but has a fast
-    /// <seealso cref="DocIdSetIterator"/> which can advance in logarithmic time thanks to
-    /// an index.</p>
-    /// <p>The compression scheme is simplistic and should work well with sparse and
+    /// <para>This implementation doesn't support random-access but has a fast
+    /// <see cref="DocIdSetIterator"/> which can advance in logarithmic time thanks to
+    /// an index.</para>
+    /// <para>The compression scheme is simplistic and should work well with sparse and
     /// very dense doc id sets while being only slightly larger than a
-    /// <seealso cref="FixedBitSet"/> for incompressible sets (overhead&lt;2% in the worst
-    /// case) in spite of the index.</p>
-    /// <p><b>Format</b>: The format is byte-aligned. An 8-bits word is either clean,
+    /// <see cref="FixedBitSet"/> for incompressible sets (overhead&lt;2% in the worst
+    /// case) in spite of the index.</para>
+    /// <para><b>Format</b>: The format is byte-aligned. An 8-bits word is either clean,
     /// meaning composed only of zeros or ones, or dirty, meaning that it contains
     /// between 1 and 7 bits set. The idea is to encode sequences of clean words
-    /// using run-length encoding and to leave sequences of dirty words as-is.</p>
-    /// <table>
-    ///   <tr><th>Token</th><th>Clean length+</th><th>Dirty length+</th><th>Dirty words</th></tr>
-    ///   <tr><td>1 byte</td><td>0-n bytes</td><td>0-n bytes</td><td>0-n bytes</td></tr>
-    /// </table>
-    /// <ul>
-    ///   <li><b>Token</b> encodes whether clean means full of zeros or ones in the
-    /// first bit, the number of clean words minus 2 on the next 3 bits and the
-    /// number of dirty words on the last 4 bits. The higher-order bit is a
-    /// continuation bit, meaning that the number is incomplete and needs additional
-    /// bytes to be read.</li>
-    ///   <li><b>Clean length+</b>: If clean length has its higher-order bit set,
-    /// you need to read a <seealso cref="DataInput#readVInt() vint"/>, shift it by 3 bits on
-    /// the left side and add it to the 3 bits which have been read in the token.</li>
-    ///   <li><b>Dirty length+</b> works the same way as <b>Clean length+</b> but
-    /// on 4 bits and for the length of dirty words.</li>
-    ///   <li><b>Dirty words</b> are the dirty words, there are <b>Dirty length</b>
-    /// of them.</li>
-    /// </ul>
-    /// <p>this format cannot encode sequences of less than 2 clean words and 0 dirty
+    /// using run-length encoding and to leave sequences of dirty words as-is.</para>
+    /// <list type="table">
+    ///     <listheader><term>Token</term><term>Clean length+</term><term>Dirty length+</term><term>Dirty words</term></listheader>
+    ///     <item><term>1 byte</term><term>0-n bytes</term><term>0-n bytes</term><term>0-n bytes</term></item>
+    /// </list>
+    /// <list type="bullet">
+    ///     <item><term><b>Token</b></term><description> encodes whether clean means full of zeros or ones in the
+    ///         first bit, the number of clean words minus 2 on the next 3 bits and the
+    ///         number of dirty words on the last 4 bits. The higher-order bit is a
+    ///         continuation bit, meaning that the number is incomplete and needs additional
+    ///         bytes to be read.</description></item>
+    ///     <item><term><b>Clean length+</b>:</term><description> If clean length has its higher-order bit set,
+    ///         you need to read a vint (<see cref="Store.DataInput.ReadVInt32()"/>), shift it by 3 bits on
+    ///         the left side and add it to the 3 bits which have been read in the token.</description></item>
+    ///     <item><term><b>Dirty length+</b></term><description> works the same way as <b>Clean length+</b> but
+    ///         on 4 bits and for the length of dirty words.</description></item>
+    ///     <item><term><b>Dirty words</b></term><description> are the dirty words, there are <b>Dirty length</b>
+    ///         of them.</description></item>
+    /// </list>
+    /// <para>This format cannot encode sequences of less than 2 clean words and 0 dirty
     /// word. The reason is that if you find a single clean word, you should rather
-    /// encode it as a dirty word. this takes the same space as starting a new
+    /// encode it as a dirty word. This takes the same space as starting a new
     /// sequence (since you need one byte for the token) but will be lighter to
     /// decode. There is however an exception for the first sequence. Since the first
     /// sequence may start directly with a dirty word, the clean length is encoded
-    /// directly, without subtracting 2.</p>
-    /// <p>There is an additional restriction on the format: the sequence of dirty
-    /// words is not allowed to contain two consecutive clean words. this restriction
+    /// directly, without subtracting 2.</para>
+    /// <para>There is an additional restriction on the format: the sequence of dirty
+    /// words is not allowed to contain two consecutive clean words. This restriction
     /// exists to make sure no space is wasted and to make sure iterators can read
-    /// the next doc ID by reading at most 2 dirty words.</p>
+    /// the next doc ID by reading at most 2 dirty words.</para>
     /// @lucene.experimental
     /// </summary>
     public sealed class WAH8DocIdSet : DocIdSet
@@ -110,14 +110,14 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Same as <seealso cref="#intersect(Collection, int)"/> with the default index interval. </summary>
+        /// Same as <see cref="Intersect(ICollection{WAH8DocIdSet}, int)"/> with the default index interval. </summary>
         public static WAH8DocIdSet Intersect(ICollection<WAH8DocIdSet> docIdSets)
         {
             return Intersect(docIdSets, DEFAULT_INDEX_INTERVAL);
         }
 
         /// <summary>
-        /// Compute the intersection of the provided sets. this method is much faster than
+        /// Compute the intersection of the provided sets. This method is much faster than
         /// computing the intersection manually since it operates directly at the byte level.
         /// </summary>
         public static WAH8DocIdSet Intersect(ICollection<WAH8DocIdSet> docIdSets, int indexInterval)
@@ -184,14 +184,14 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Same as <seealso cref="#union(Collection, int)"/> with the default index interval. </summary>
+        /// Same as <see cref="Union(ICollection{WAH8DocIdSet}, int)"/> with the default index interval. </summary>
         public static WAH8DocIdSet Union(ICollection<WAH8DocIdSet> docIdSets)
         {
             return Union(docIdSets, DEFAULT_INDEX_INTERVAL);
         }
 
         /// <summary>
-        /// Compute the union of the provided sets. this method is much faster than
+        /// Compute the union of the provided sets. This method is much faster than
         /// computing the union manually since it operates directly at the byte level.
         /// </summary>
         public static WAH8DocIdSet Union(ICollection<WAH8DocIdSet> docIdSets, int indexInterval)
@@ -292,12 +292,12 @@ namespace Lucene.Net.Util
 
             /// <summary>
             /// Set the index interval. Smaller index intervals improve performance of
-            ///  <seealso cref="DocIdSetIterator#advance(int)"/> but make the <seealso cref="DocIdSet"/>
-            ///  larger. An index interval <code>i</code> makes the index add an overhead
-            ///  which is at most <code>4/i</code>, but likely much less.The default index
-            ///  interval is <code>8</code>, meaning the index has an overhead of at most
-            ///  50%. To disable indexing, you can pass <see cref="int.MaxValue"/> as an
-            ///  index interval.
+            /// <see cref="DocIdSetIterator.Advance(int)"/> but make the <see cref="DocIdSet"/>
+            /// larger. An index interval <c>i</c> makes the index add an overhead
+            /// which is at most <c>4/i</c>, but likely much less. The default index
+            /// interval is <c>8</c>, meaning the index has an overhead of at most
+            /// 50%. To disable indexing, you can pass <see cref="int.MaxValue"/> as an
+            /// index interval.
             /// </summary>
             public virtual object SetIndexInterval(int indexInterval)
             {
@@ -454,7 +454,7 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Build a new <seealso cref="WAH8DocIdSet"/>. </summary>
+            /// Build a new <see cref="WAH8DocIdSet"/>. </summary>
             public virtual WAH8DocIdSet Build()
             {
                 if (cardinality == 0)
@@ -509,7 +509,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// A builder for <seealso cref="WAH8DocIdSet"/>s. </summary>
+        /// A builder for <see cref="WAH8DocIdSet"/>s. </summary>
         public sealed class Builder : WordBuilder
         {
             private int lastDocID;
@@ -554,7 +554,7 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Add the content of the provided <seealso cref="DocIdSetIterator"/>. </summary>
+            /// Add the content of the provided <see cref="DocIdSetIterator"/>. </summary>
             public Builder Add(DocIdSetIterator disi)
             {
                 for (int doc = disi.NextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = disi.NextDoc())
@@ -893,7 +893,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Return the number of documents in this <seealso cref="DocIdSet"/> in constant time. </summary>
+        /// Return the number of documents in this <see cref="DocIdSet"/> in constant time. </summary>
         public int Cardinality()
         {
             return cardinality;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/WeakIdentityMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/WeakIdentityMap.cs b/src/Lucene.Net/Util/WeakIdentityMap.cs
index 2999d94..a1ba475 100644
--- a/src/Lucene.Net/Util/WeakIdentityMap.cs
+++ b/src/Lucene.Net/Util/WeakIdentityMap.cs
@@ -24,38 +24,38 @@ namespace Lucene.Net.Util
 	 */
 
     /// <summary>
-    /// Implements a combination of <seealso cref="java.util.WeakHashMap"/> and
-    /// <seealso cref="java.util.IdentityHashMap"/>.
-    /// Useful for caches that need to key off of a {@code ==} comparison
-    /// instead of a {@code .equals}.
+    /// Implements a combination of <c>java.util.WeakHashMap</c> and
+    /// <c>java.util.IdentityHashMap</c>.
+    /// Useful for caches that need to key off of a <c>==</c> comparison
+    /// instead of a <c>.Equals(object)</c>.
     ///
-    /// <p>this class is not a general-purpose <seealso cref="java.util.Map"/>
+    /// <para/>This class is not a general-purpose <see cref="IDictionary{TKey, TValue}"/>
     /// implementation! It intentionally violates
-    /// Map's general contract, which mandates the use of the equals method
-    /// when comparing objects. this class is designed for use only in the
+    /// <see cref="IDictionary{TKey, TValue}"/>'s general contract, which mandates the use of the <see cref="object.Equals(object)"/> method
+    /// when comparing objects. This class is designed for use only in the
     /// rare cases wherein reference-equality semantics are required.
     ///
-    /// <p>this implementation was forked from <a href="http://cxf.apache.org/">Apache CXF</a>
-    /// but modified to <b>not</b> implement the <seealso cref="java.util.Map"/> interface and
+    /// <para/>This implementation was forked from <a href="http://cxf.apache.org/">Apache CXF</a>
+    /// but modified to <b>not</b> implement the <see cref="IDictionary{TKey, TValue}"/> interface and
     /// without any set views on it, as those are error-prone and inefficient,
-    /// if not implemented carefully. The map only contains <seealso cref="Iterator"/> implementations
-    /// on the values and not-GCed keys. Lucene's implementation also supports {@code null}
+    /// if not implemented carefully. The map only contains <see cref="IEnumerable{T}.GetEnumerator()"/> implementations
+    /// on the values and not-GCed keys. Lucene's implementation also supports <c>null</c>
     /// keys, but those are never weak!
     ///
-    /// <p><a name="reapInfo" />The map supports two modes of operation:
-    /// <ul>
-    ///  <li>{@code reapOnRead = true}: this behaves identical to a <seealso cref="java.util.WeakHashMap"/>
-    ///  where it also cleans up the reference queue on every read operation (<seealso cref="#get(Object)"/>,
-    ///  <seealso cref="#containsKey(Object)"/>, <seealso cref="#size()"/>, <seealso cref="#valueIterator()"/>), freeing map entries
-    ///  of already GCed keys.</li>
-    ///  <li>{@code reapOnRead = false}: this mode does not call <seealso cref="#reap()"/> on every read
-    ///  operation. In this case, the reference queue is only cleaned up on write operations
-    ///  (like <seealso cref="#put(Object, Object)"/>). this is ideal for maps with few entries where
-    ///  the keys are unlikely be garbage collected, but there are lots of <seealso cref="#get(Object)"/>
-    ///  operations. The code can still call <seealso cref="#reap()"/> to manually clean up the queue without
-    ///  doing a write operation.</li>
-    /// </ul>
-    ///
+    /// <para/><a name="reapInfo" />The map supports two modes of operation:
+    /// <list type="bullet">
+    ///     <item><term><c>reapOnRead = true</c>:</term><description> This behaves identical to a <c>java.util.WeakHashMap</c>
+    ///         where it also cleans up the reference queue on every read operation (<see cref="Get(object)"/>,
+    ///         <see cref="ContainsKey(object)"/>, <see cref="Count"/>, <see cref="GetValueEnumerator()"/>), freeing map entries
+    ///         of already GCed keys.</description></item>
+    ///     <item><term><c>reapOnRead = false</c>:</term><description> This mode does not call <see cref="Reap()"/> on every read
+    ///         operation. In this case, the reference queue is only cleaned up on write operations
+    ///         (like <see cref="Put(TKey, TValue)"/>). This is ideal for maps with few entries where
+    ///         the keys are unlikely be garbage collected, but there are lots of <see cref="Get(object)"/>
+    ///         operations. The code can still call <see cref="Reap()"/> to manually clean up the queue without
+    ///         doing a write operation.</description></item>
+    /// </list>
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public sealed class WeakIdentityMap<TKey, TValue>
@@ -66,7 +66,7 @@ namespace Lucene.Net.Util
         private readonly bool reapOnRead;
 
         /// <summary>
-        /// Creates a new {@code WeakIdentityMap} based on a non-synchronized <seealso cref="HashMap"/>.
+        /// Creates a new <see cref="WeakIdentityMap{TKey, TValue}"/> based on a non-synchronized <see cref="Dictionary{TKey, TValue}"/>.
         /// The map <a href="#reapInfo">cleans up the reference queue on every read operation</a>.
         /// </summary>
         public static WeakIdentityMap<TKey, TValue> NewHashMap()
@@ -75,7 +75,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a new {@code WeakIdentityMap} based on a non-synchronized <seealso cref="HashMap"/>. </summary>
+        /// Creates a new <see cref="WeakIdentityMap{TKey, TValue}"/> based on a non-synchronized <see cref="Dictionary{TKey, TValue}"/>. </summary>
         /// <param name="reapOnRead"> controls if the map <a href="#reapInfo">cleans up the reference queue on every read operation</a>. </param>
         public static WeakIdentityMap<TKey, TValue> NewHashMap(bool reapOnRead)
         {
@@ -83,7 +83,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a new {@code WeakIdentityMap} based on a <seealso cref="ConcurrentHashMap"/>.
+        /// Creates a new <see cref="WeakIdentityMap{TKey, TValue}"/> based on a <see cref="ConcurrentDictionary{TKey, TValue}"/>.
         /// The map <a href="#reapInfo">cleans up the reference queue on every read operation</a>.
         /// </summary>
         public static WeakIdentityMap<TKey, TValue> NewConcurrentHashMap()
@@ -92,7 +92,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a new {@code WeakIdentityMap} based on a <seealso cref="ConcurrentHashMap"/>. </summary>
+        /// Creates a new <see cref="WeakIdentityMap{TKey, TValue}"/> based on a <see cref="ConcurrentDictionary{TKey, TValue}"/>. </summary>
         /// <param name="reapOnRead"> controls if the map <a href="#reapInfo">cleans up the reference queue on every read operation</a>. </param>
         public static WeakIdentityMap<TKey, TValue> NewConcurrentHashMap(bool reapOnRead)
         {
@@ -116,7 +116,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns {@code true} if this map contains a mapping for the specified key. </summary>
+        /// Returns <c>true</c> if this map contains a mapping for the specified key. </summary>
         public bool ContainsKey(object key)
         {
             if (reapOnRead)
@@ -157,7 +157,10 @@ namespace Lucene.Net.Util
             return backingStore[new IdentityWeakReference(key)] = value;
         }
 
-        public IEnumerable<TKey> Keys
+        /// <summary>
+        /// Gets an <see cref="ICollection{TKey}"/> object containing the keys of the <see cref="WeakIdentityMap{TKey, TValue}"/>.
+        /// </summary>
+        public IEnumerable<TKey> Keys // LUCENENET TODO: API - change to ICollection<T>
         {
             get
             {
@@ -193,7 +196,10 @@ namespace Lucene.Net.Util
             }
         }
 
-        public IEnumerable<TValue> Values
+        /// <summary>
+    /// Gets an <see cref="ICollection{TValue}"/> object containing the values of the <see cref="WeakIdentityMap{TKey, TValue}"/>.
+        /// </summary>
+        public IEnumerable<TValue> Values // LUCENENET TODO: API - change to ICollection<T>
         {
             get
             {
@@ -203,7 +209,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns {@code true} if this map contains no key-value mappings. </summary>
+        /// Returns <c>true</c> if this map contains no key-value mappings. </summary>
         public bool IsEmpty
         {
             get
@@ -215,8 +221,8 @@ namespace Lucene.Net.Util
         /// <summary>
         /// Removes the mapping for a key from this weak hash map if it is present.
         /// Returns the value to which this map previously associated the key,
-        /// or {@code null} if the map contained no mapping for the key.
-        /// A return value of {@code null} does not necessarily indicate that
+        /// or <c>null</c> if the map contained no mapping for the key.
+        /// A return value of <c>null</c> does not necessarily indicate that
         /// the map contained.
         /// </summary>
         public bool Remove(object key)
@@ -226,9 +232,10 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the number of key-value mappings in this map. this result is a snapshot,
+        /// Returns the number of key-value mappings in this map. This result is a snapshot,
         /// and may not reflect unprocessed entries that will be removed before next
         /// attempted access because they are no longer referenced.
+        /// <para/>
         /// NOTE: This was size() in Lucene.
         /// </summary>
         public int Count
@@ -308,9 +315,9 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Returns an iterator over all values of this map.
-        /// this iterator may return values whose key is already
+        /// This iterator may return values whose key is already
         /// garbage collected while iterator is consumed,
-        /// especially if {@code reapOnRead} is {@code false}.
+        /// especially if <c>reapOnRead</c> is <c>false</c>.
         /// <para/>
         /// NOTE: This was valueIterator() in Lucene.
         /// </summary>
@@ -324,11 +331,12 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// this method manually cleans up the reference queue to remove all garbage
+        /// This method manually cleans up the reference queue to remove all garbage
         /// collected key/value pairs from the map. Calling this method is not needed
-        /// if {@code reapOnRead = true}. Otherwise it might be a good idea
-        /// to call this method when there is spare time (e.g. from a background thread). </summary>
-        /// <seealso cref= <a href="#reapInfo">Information about the <code>reapOnRead</code> setting</a> </seealso>			
+        /// if <c>reapOnRead = true</c>. Otherwise it might be a good idea
+        /// to call this method when there is spare time (e.g. from a background thread). 
+        /// <a href="#reapInfo">Information about the <c>reapOnRead</c> setting</a>		
+        /// </summary>
         public void Reap()
         {
             List<IdentityWeakReference> keysToRemove = null;


[07/48] lucenenet git commit: Lucene.Net.Search: Fixed up documentation comments

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/HitQueue.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/HitQueue.cs b/src/Lucene.Net/Search/HitQueue.cs
index dd67264..23c67bf 100644
--- a/src/Lucene.Net/Search/HitQueue.cs
+++ b/src/Lucene.Net/Search/HitQueue.cs
@@ -27,43 +27,45 @@ namespace Lucene.Net.Search
     internal sealed class HitQueue : PriorityQueue<ScoreDoc>
     {
         /// <summary>
-        /// Creates a new instance with <code>size</code> elements. If
-        /// <code>prePopulate</code> is set to true, the queue will pre-populate itself
-        /// with sentinel objects and set its <seealso cref="#size()"/> to <code>size</code>. In
-        /// that case, you should not rely on <seealso cref="#size()"/> to get the number of
-        /// actual elements that were added to the queue, but keep track yourself.<br>
-        /// <b>NOTE:</b> in case <code>prePopulate</code> is true, you should pop
+        /// Creates a new instance with <paramref name="size"/> elements. If
+        /// <paramref name="prePopulate"/> is set to <c>true</c>, the queue will pre-populate itself
+        /// with sentinel objects and set its <see cref="PriorityQueue{T}.Count"/> to <paramref name="size"/>. In
+        /// that case, you should not rely on <see cref="PriorityQueue{T}.Count"/> to get the number of
+        /// actual elements that were added to the queue, but keep track yourself.
+        /// <para/>
+        /// <b>NOTE:</b> in case <paramref name="prePopulate"/> is <c>true</c>, you should pop
         /// elements from the queue using the following code example:
         ///
-        /// <pre class="prettyprint">
+        /// <code>
         /// PriorityQueue&lt;ScoreDoc&gt; pq = new HitQueue(10, true); // pre-populate.
-        /// ScoreDoc top = pq.top();
+        /// ScoreDoc top = pq.Top;
         ///
         /// // Add/Update one element.
-        /// top.score = 1.0f;
-        /// top.doc = 0;
-        /// top = (ScoreDoc) pq.updateTop();
+        /// top.Score = 1.0f;
+        /// top.Doc = 0;
+        /// top = (ScoreDoc) pq.UpdateTop();
         /// int totalHits = 1;
         ///
         /// // Now pop only the elements that were *truly* inserted.
-        /// // First, pop all the sentinel elements (there are pq.size() - totalHits).
-        /// for (int i = pq.size() - totalHits; i &gt; 0; i--) pq.pop();
+        /// // First, pop all the sentinel elements (there are pq.Count - totalHits).
+        /// for (int i = pq.Count - totalHits; i &gt; 0; i--) pq.Pop();
         ///
         /// // Now pop the truly added elements.
         /// ScoreDoc[] results = new ScoreDoc[totalHits];
-        /// for (int i = totalHits - 1; i &gt;= 0; i--) {
-        ///   results[i] = (ScoreDoc) pq.pop();
+        /// for (int i = totalHits - 1; i &gt;= 0; i--) 
+        /// {
+        ///     results[i] = (ScoreDoc)pq.Pop();
         /// }
-        /// </pre>
+        /// </code>
         ///
-        /// <p><b>NOTE</b>: this class pre-allocate a full array of
-        /// length <code>size</code>.
+        /// <para/><b>NOTE</b>: this class pre-allocate a full array of
+        /// length <paramref name="size"/>.
         /// </summary>
         /// <param name="size">
-        ///          the requested size of this queue. </param>
+        ///          The requested size of this queue. </param>
         /// <param name="prePopulate">
-        ///          specifies whether to pre-populate the queue with sentinel values. </param>
-        /// <seealso cref= #getSentinelObject() </seealso>
+        ///          Specifies whether to pre-populate the queue with sentinel values. </param>
+        /// <seealso cref="GetSentinelObject()"/>
         internal HitQueue(int size, bool prePopulate)
             : base(size, prePopulate)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/IMaxNonCompetitiveBoostAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/IMaxNonCompetitiveBoostAttribute.cs b/src/Lucene.Net/Search/IMaxNonCompetitiveBoostAttribute.cs
deleted file mode 100644
index 4b2a2d0..0000000
--- a/src/Lucene.Net/Search/IMaxNonCompetitiveBoostAttribute.cs
+++ /dev/null
@@ -1,46 +0,0 @@
-using Lucene.Net.Util;
-using BytesRef = Lucene.Net.Util.BytesRef;
-
-namespace Lucene.Net.Search
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    /// <summary>
-    /// Add this <seealso cref="Attribute"/> to a fresh <seealso cref="AttributeSource"/> before calling
-    /// <seealso cref="MultiTermQuery#getTermsEnum(Terms,AttributeSource)"/>.
-    /// <seealso cref="FuzzyQuery"/> is using this to control its internal behaviour
-    /// to only return competitive terms.
-    /// <p><b>Please note:</b> this attribute is intended to be added by the <seealso cref="MultiTermQuery.RewriteMethod"/>
-    /// to an empty <seealso cref="AttributeSource"/> that is shared for all segments
-    /// during query rewrite. this attribute source is passed to all segment enums
-    /// on <seealso cref="MultiTermQuery#getTermsEnum(Terms,AttributeSource)"/>.
-    /// <seealso cref="TopTermsRewrite"/> uses this attribute to
-    /// inform all enums about the current boost, that is not competitive.
-    /// @lucene.internal
-    /// </summary>
-    public interface IMaxNonCompetitiveBoostAttribute : IAttribute
-    {
-        /// <summary>
-        /// this is the maximum boost that would not be competitive. </summary>
-        float MaxNonCompetitiveBoost { set; get; }
-
-        /// <summary>
-        /// this is the term or <code>null</code> of the term that triggered the boost change. </summary>
-        BytesRef CompetitiveTerm { set; get; }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/ITopTermsRewrite.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/ITopTermsRewrite.cs b/src/Lucene.Net/Search/ITopTermsRewrite.cs
deleted file mode 100644
index 747fc00..0000000
--- a/src/Lucene.Net/Search/ITopTermsRewrite.cs
+++ /dev/null
@@ -1,24 +0,0 @@
-namespace Lucene.Net.Search
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    internal interface ITopTermsRewrite
-    {
-        int Count { get; } // LUCENENET NOTE: This was size() in Lucene.
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/IndexSearcher.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/IndexSearcher.cs b/src/Lucene.Net/Search/IndexSearcher.cs
index 89bf357..e7acb6e 100644
--- a/src/Lucene.Net/Search/IndexSearcher.cs
+++ b/src/Lucene.Net/Search/IndexSearcher.cs
@@ -40,30 +40,30 @@ namespace Lucene.Net.Search
     using Terms = Lucene.Net.Index.Terms;
 
     /// <summary>
-    /// Implements search over a single IndexReader.
+    /// Implements search over a single <see cref="Index.IndexReader"/>.
     ///
-    /// <p>Applications usually need only call the inherited
-    /// <seealso cref="#search(Query,int)"/>
-    /// or <seealso cref="#search(Query,Filter,int)"/> methods. For
+    /// <para/>Applications usually need only call the inherited
+    /// <see cref="Search(Query,int)"/>
+    /// or <see cref="Search(Query,Filter,int)"/> methods. For
     /// performance reasons, if your index is unchanging, you
-    /// should share a single IndexSearcher instance across
+    /// should share a single <see cref="IndexSearcher"/> instance across
     /// multiple searches instead of creating a new one
     /// per-search.  If your index has changed and you wish to
     /// see the changes reflected in searching, you should
-    /// use <seealso cref="DirectoryReader#openIfChanged(DirectoryReader)"/>
+    /// use <see cref="Index.DirectoryReader.OpenIfChanged(Index.DirectoryReader)"/>
     /// to obtain a new reader and
-    /// then create a new IndexSearcher from that.  Also, for
+    /// then create a new <see cref="IndexSearcher"/> from that.  Also, for
     /// low-latency turnaround it's best to use a near-real-time
-    /// reader (<seealso cref="DirectoryReader#open(IndexWriter,boolean)"/>).
-    /// Once you have a new <seealso cref="IndexReader"/>, it's relatively
-    /// cheap to create a new IndexSearcher from it.
+    /// reader (<see cref="Index.DirectoryReader.Open(Index.IndexWriter,bool)"/>).
+    /// Once you have a new <see cref="Index.IndexReader"/>, it's relatively
+    /// cheap to create a new <see cref="IndexSearcher"/> from it.
     ///
-    /// <a name="thread-safety"></a><p><b>NOTE</b>: <code>{@link
-    /// IndexSearcher}</code> instances are completely
+    /// <para/><a name="thread-safety"></a><p><b>NOTE</b>: 
+    /// <see cref="IndexSearcher"/> instances are completely
     /// thread safe, meaning multiple threads can call any of its
     /// methods, concurrently.  If your application requires
     /// external synchronization, you should <b>not</b>
-    /// synchronize on the <code>IndexSearcher</code> instance;
+    /// synchronize on the <see cref="IndexSearcher"/> instance;
     /// use your own (non-Lucene) objects instead.</p>
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -80,7 +80,7 @@ namespace Lucene.Net.Search
         protected internal readonly IList<AtomicReaderContext> m_leafContexts;
 
         /// <summary>
-        /// used with executor - each slice holds a set of leafs executed within one thread </summary>
+        /// Used with executor - each slice holds a set of leafs executed within one thread </summary>
         protected readonly LeafSlice[] m_leafSlices;
 
         // These are only used for multi-threaded search
@@ -90,10 +90,11 @@ namespace Lucene.Net.Search
         private static readonly Similarity defaultSimilarity = new DefaultSimilarity();
 
         /// <summary>
-        /// Expert: returns a default Similarity instance.
+        /// Expert: returns a default <see cref="Similarities.Similarity"/> instance.
         /// In general, this method is only called to initialize searchers and writers.
         /// User code and query implementations should respect
-        /// <seealso cref="IndexSearcher#getSimilarity()"/>.
+        /// <see cref="IndexSearcher.Similarity"/>.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public static Similarity DefaultSimilarity
@@ -105,7 +106,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// The Similarity implementation used by this searcher. </summary>
+        /// The <see cref="Similarities.Similarity"/> implementation used by this searcher. </summary>
         private Similarity similarity = defaultSimilarity;
 
         /// <summary>
@@ -117,15 +118,10 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Runs searches for each segment separately, using the
-        ///  provided ExecutorService.  IndexSearcher will not
-        ///  shutdown/awaitTermination this ExecutorService on
-        ///  close; you must do so, eventually, on your own.  NOTE:
-        ///  if you are using <seealso cref="NIOFSDirectory"/>, do not use
-        ///  the shutdownNow method of ExecutorService as this uses
-        ///  Thread.interrupt under-the-hood which can silently
-        ///  close file descriptors (see <a
-        ///  href="https://issues.apache.org/jira/browse/LUCENE-2239">LUCENE-2239</a>).
-        ///
+        /// provided <see cref="TaskScheduler"/>.  <see cref="IndexSearcher"/> will not
+        /// shutdown/awaitTermination this <see cref="TaskScheduler"/> on
+        /// dispose; you must do so, eventually, on your own.
+        /// <para/>
         /// @lucene.experimental
         /// </summary>
         public IndexSearcher(IndexReader r, TaskScheduler executor)
@@ -134,20 +130,17 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a searcher searching the provided top-level <seealso cref="IndexReaderContext"/>.
-        /// <p>
-        /// Given a non-<code>null</code> <seealso cref="ExecutorService"/> this method runs
-        /// searches for each segment separately, using the provided ExecutorService.
-        /// IndexSearcher will not shutdown/awaitTermination this ExecutorService on
-        /// close; you must do so, eventually, on your own. NOTE: if you are using
-        /// <seealso cref="NIOFSDirectory"/>, do not use the shutdownNow method of
-        /// ExecutorService as this uses Thread.interrupt under-the-hood which can
-        /// silently close file descriptors (see <a
-        /// href="https://issues.apache.org/jira/browse/LUCENE-2239">LUCENE-2239</a>).
+        /// Creates a searcher searching the provided top-level <see cref="IndexReaderContext"/>.
+        /// <para/>
+        /// Given a non-<c>null</c> <see cref="TaskScheduler"/> this method runs
+        /// searches for each segment separately, using the provided <see cref="TaskScheduler"/>.
+        /// <see cref="IndexSearcher"/> will not shutdown/awaitTermination this <see cref="TaskScheduler"/> on
+        /// close; you must do so, eventually, on your own.
+        /// <para/>
+        /// @lucene.experimental 
         /// </summary>
-        /// <seealso cref= IndexReaderContext </seealso>
-        /// <seealso cref= IndexReader#getContext()
-        /// @lucene.experimental </seealso>
+        /// <seealso cref="IndexReaderContext"/>
+        /// <seealso cref="IndexReader.Context"/>
         public IndexSearcher(IndexReaderContext context, TaskScheduler executor)
         {
             Debug.Assert(context.IsTopLevel, "IndexSearcher's ReaderContext must be topLevel for reader" + context.Reader);
@@ -159,11 +152,12 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a searcher searching the provided top-level <seealso cref="IndexReaderContext"/>.
+        /// Creates a searcher searching the provided top-level <see cref="IndexReaderContext"/>.
+        /// <para/>
+        /// @lucene.experimental
         /// </summary>
-        /// <seealso cref= IndexReaderContext </seealso>
-        /// <seealso cref= IndexReader#getContext()
-        /// @lucene.experimental </seealso>
+        /// <seealso cref="IndexReaderContext"/>
+        /// <seealso cref="IndexReader.Context"/>
         public IndexSearcher(IndexReaderContext context)
             : this(context, null)
         {
@@ -171,8 +165,8 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Expert: Creates an array of leaf slices each holding a subset of the given leaves.
-        /// Each <seealso cref="LeafSlice"/> is executed in a single thread. By default there
-        /// will be one <seealso cref="LeafSlice"/> per leaf (<seealso cref="AtomicReaderContext"/>).
+        /// Each <see cref="LeafSlice"/> is executed in a single thread. By default there
+        /// will be one <see cref="LeafSlice"/> per leaf (<see cref="AtomicReaderContext"/>).
         /// </summary>
         protected virtual LeafSlice[] Slices(IList<AtomicReaderContext> leaves)
         {
@@ -185,7 +179,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Return the <seealso cref="IndexReader"/> this searches. </summary>
+        /// Return the <see cref="Index.IndexReader"/> this searches. </summary>
         public virtual IndexReader IndexReader
         {
             get
@@ -195,30 +189,30 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Sugar for <code>.getIndexReader().document(docID)</code> </summary>
-        /// <seealso cref= IndexReader#document(int)  </seealso>
+        /// Sugar for <code>.IndexReader.Document(docID)</code> </summary>
+        /// <seealso cref="IndexReader.Document(int)"/>
         public virtual Document Doc(int docID)
         {
             return reader.Document(docID);
         }
 
         /// <summary>
-        /// Sugar for <code>.getIndexReader().document(docID, fieldVisitor)</code> </summary>
-        /// <seealso cref= IndexReader#document(int, StoredFieldVisitor)  </seealso>
+        /// Sugar for <code>.IndexReader.Document(docID, fieldVisitor)</code> </summary>
+        /// <seealso cref="IndexReader.Document(int, StoredFieldVisitor)"/>
         public virtual void Doc(int docID, StoredFieldVisitor fieldVisitor)
         {
             reader.Document(docID, fieldVisitor);
         }
 
         /// <summary>
-        /// Sugar for <code>.getIndexReader().document(docID, fieldsToLoad)</code> </summary>
-        /// <seealso cref= IndexReader#document(int, Set)  </seealso>
+        /// Sugar for <code>.IndexReader.Document(docID, fieldsToLoad)</code> </summary>
+        /// <seealso cref="IndexReader.Document(int, ISet{string})"/>
         public virtual Document Doc(int docID, ISet<string> fieldsToLoad)
         {
             return reader.Document(docID, fieldsToLoad);
         }
 
-        /// @deprecated Use <seealso cref="#doc(int, Set)"/> instead.
+        /// @deprecated Use <see cref="Doc(int, ISet{string})"/> instead.
         [Obsolete("Use <seealso cref=#doc(int, java.util.Set)/> instead.")]
         public Document Document(int docID, ISet<string> fieldsToLoad)
         {
@@ -226,8 +220,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Expert: Set the Similarity implementation used by this IndexSearcher.
-        ///
+        /// Expert: Set the <see cref="Similarities.Similarity"/> implementation used by this IndexSearcher.
         /// </summary>
         public virtual Similarity Similarity
         {
@@ -249,54 +242,54 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Finds the top <code>n</code>
-        /// hits for <code>query</code> where all results are after a previous
-        /// result (<code>after</code>).
-        /// <p>
-        /// By passing the bottom result from a previous page as <code>after</code>,
+        /// Finds the top <paramref name="n"/>
+        /// hits for <paramref name="query"/> where all results are after a previous
+        /// result (<paramref name="after"/>).
+        /// <para/>
+        /// By passing the bottom result from a previous page as <paramref name="after"/>,
         /// this method can be used for efficient 'deep-paging' across potentially
         /// large result sets.
         /// </summary>
         /// <exception cref="BooleanQuery.TooManyClausesException"> If a query would exceed
-        ///         <seealso cref="BooleanQuery#getMaxClauseCount()"/> clauses. </exception>
+        ///         <see cref="BooleanQuery.MaxClauseCount"/> clauses. </exception>
         public virtual TopDocs SearchAfter(ScoreDoc after, Query query, int n)
         {
             return Search(CreateNormalizedWeight(query), after, n);
         }
 
         /// <summary>
-        /// Finds the top <code>n</code>
-        /// hits for <code>query</code>, applying <code>filter</code> if non-null,
-        /// where all results are after a previous result (<code>after</code>).
-        /// <p>
-        /// By passing the bottom result from a previous page as <code>after</code>,
+        /// Finds the top <paramref name="n"/>
+        /// hits for <paramref name="query"/>, applying <paramref name="filter"/> if non-null,
+        /// where all results are after a previous result (<paramref name="after"/>).
+        /// <para/>
+        /// By passing the bottom result from a previous page as <paramref name="after"/>,
         /// this method can be used for efficient 'deep-paging' across potentially
         /// large result sets.
         /// </summary>
         /// <exception cref="BooleanQuery.TooManyClausesException"> If a query would exceed
-        ///         <seealso cref="BooleanQuery#getMaxClauseCount()"/> clauses. </exception>
+        ///         <see cref="BooleanQuery.MaxClauseCount"/> clauses. </exception>
         public virtual TopDocs SearchAfter(ScoreDoc after, Query query, Filter filter, int n)
         {
             return Search(CreateNormalizedWeight(WrapFilter(query, filter)), after, n);
         }
 
         /// <summary>
-        /// Finds the top <code>n</code>
-        /// hits for <code>query</code>.
+        /// Finds the top <paramref name="n"/>
+        /// hits for <paramref name="query"/>.
         /// </summary>
         /// <exception cref="BooleanQuery.TooManyClausesException"> If a query would exceed
-        ///         <seealso cref="BooleanQuery#getMaxClauseCount()"/> clauses. </exception>
+        ///         <see cref="BooleanQuery.MaxClauseCount"/> clauses. </exception>
         public virtual TopDocs Search(Query query, int n)
         {
             return Search(query, null, n);
         }
 
         /// <summary>
-        /// Finds the top <code>n</code>
-        /// hits for <code>query</code>, applying <code>filter</code> if non-null.
+        /// Finds the top <paramref name="n"/>
+        /// hits for <paramref name="query"/>, applying <paramref name="filter"/> if non-null.
         /// </summary>
         /// <exception cref="BooleanQuery.TooManyClausesException"> If a query would exceed
-        ///         <seealso cref="BooleanQuery#getMaxClauseCount()"/> clauses. </exception>
+        ///         <see cref="BooleanQuery.MaxClauseCount"/> clauses. </exception>
         public virtual TopDocs Search(Query query, Filter filter, int n)
         {
             return Search(CreateNormalizedWeight(WrapFilter(query, filter)), null, n);
@@ -305,14 +298,14 @@ namespace Lucene.Net.Search
         /// <summary>
         /// Lower-level search API.
         ///
-        /// <p><seealso cref="ICollector#collect(int)"/> is called for every matching
+        /// <para/><see cref="ICollector.Collect(int)"/> is called for every matching
         /// document.
         /// </summary>
-        /// <param name="query"> to match documents </param>
-        /// <param name="filter"> if non-null, used to permit documents to be collected. </param>
-        /// <param name="results"> to receive hits </param>
+        /// <param name="query"> To match documents </param>
+        /// <param name="filter"> If non-null, used to permit documents to be collected. </param>
+        /// <param name="results"> To receive hits </param>
         /// <exception cref="BooleanQuery.TooManyClausesException"> If a query would exceed
-        ///         <seealso cref="BooleanQuery#getMaxClauseCount()"/> clauses. </exception>
+        ///         <see cref="BooleanQuery.MaxClauseCount"/> clauses. </exception>
         public virtual void Search(Query query, Filter filter, ICollector results)
         {
             Search(m_leafContexts, CreateNormalizedWeight(WrapFilter(query, filter)), results);
@@ -321,10 +314,10 @@ namespace Lucene.Net.Search
         /// <summary>
         /// Lower-level search API.
         ///
-        /// <p><seealso cref="ICollector#collect(int)"/> is called for every matching document.
+        /// <para/><see cref="ICollector.Collect(int)"/> is called for every matching document.
         /// </summary>
         /// <exception cref="BooleanQuery.TooManyClausesException"> If a query would exceed
-        ///         <seealso cref="BooleanQuery#getMaxClauseCount()"/> clauses. </exception>
+        ///         <see cref="BooleanQuery.MaxClauseCount"/> clauses. </exception>
         public virtual void Search(Query query, ICollector results)
         {
             Search(m_leafContexts, CreateNormalizedWeight(query), results);
@@ -332,16 +325,16 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Search implementation with arbitrary sorting.  Finds
-        /// the top <code>n</code> hits for <code>query</code>, applying
-        /// <code>filter</code> if non-null, and sorting the hits by the criteria in
-        /// <code>sort</code>.
+        /// the top <paramref name="n"/> hits for <paramref name="query"/>, applying
+        /// <paramref name="filter"/> if non-null, and sorting the hits by the criteria in
+        /// <paramref name="sort"/>.
         ///
-        /// <p>NOTE: this does not compute scores by default; use
-        /// <seealso cref="IndexSearcher#search(Query,Filter,int,Sort,boolean,boolean)"/> to
+        /// <para/>NOTE: this does not compute scores by default; use
+        /// <see cref="IndexSearcher.Search(Query,Filter,int,Sort,bool,bool)"/> to
         /// control scoring.
         /// </summary>
         /// <exception cref="BooleanQuery.TooManyClausesException"> If a query would exceed
-        ///         <seealso cref="BooleanQuery#getMaxClauseCount()"/> clauses. </exception>
+        ///         <see cref="BooleanQuery.MaxClauseCount"/> clauses. </exception>
         public virtual TopFieldDocs Search(Query query, Filter filter, int n, Sort sort)
         {
             return Search(CreateNormalizedWeight(WrapFilter(query, filter)), n, sort, false, false);
@@ -351,32 +344,32 @@ namespace Lucene.Net.Search
         /// Search implementation with arbitrary sorting, plus
         /// control over whether hit scores and max score
         /// should be computed.  Finds
-        /// the top <code>n</code> hits for <code>query</code>, applying
-        /// <code>filter</code> if non-null, and sorting the hits by the criteria in
-        /// <code>sort</code>.  If <code>doDocScores</code> is <code>true</code>
+        /// the top <paramref name="n"/> hits for <paramref name="query"/>, applying
+        /// <paramref name="filter"/> if non-null, and sorting the hits by the criteria in
+        /// <paramref name="sort"/>.  If <paramref name="doDocScores"/> is <c>true</c>
         /// then the score of each hit will be computed and
-        /// returned.  If <code>doMaxScore</code> is
-        /// <code>true</code> then the maximum score over all
+        /// returned.  If <paramref name="doMaxScore"/> is
+        /// <c>true</c> then the maximum score over all
         /// collected hits will be computed.
         /// </summary>
         /// <exception cref="BooleanQuery.TooManyClausesException"> If a query would exceed
-        ///         <seealso cref="BooleanQuery#getMaxClauseCount()"/> clauses. </exception>
+        ///         <see cref="BooleanQuery.MaxClauseCount"/> clauses. </exception>
         public virtual TopFieldDocs Search(Query query, Filter filter, int n, Sort sort, bool doDocScores, bool doMaxScore)
         {
             return Search(CreateNormalizedWeight(WrapFilter(query, filter)), n, sort, doDocScores, doMaxScore);
         }
 
         /// <summary>
-        /// Finds the top <code>n</code>
-        /// hits for <code>query</code>, applying <code>filter</code> if non-null,
-        /// where all results are after a previous result (<code>after</code>).
-        /// <p>
-        /// By passing the bottom result from a previous page as <code>after</code>,
+        /// Finds the top <paramref name="n"/>
+        /// hits for <paramref name="query"/>, applying <paramref name="filter"/> if non-null,
+        /// where all results are after a previous result (<paramref name="after"/>).
+        /// <para/>
+        /// By passing the bottom result from a previous page as <paramref name="after"/>,
         /// this method can be used for efficient 'deep-paging' across potentially
         /// large result sets.
         /// </summary>
         /// <exception cref="BooleanQuery.TooManyClausesException"> If a query would exceed
-        ///         <seealso cref="BooleanQuery#getMaxClauseCount()"/> clauses. </exception>
+        ///         <see cref="BooleanQuery.MaxClauseCount"/> clauses. </exception>
         public virtual TopDocs SearchAfter(ScoreDoc after, Query query, Filter filter, int n, Sort sort)
         {
             if (after != null && !(after is FieldDoc))
@@ -392,25 +385,25 @@ namespace Lucene.Net.Search
         /// Search implementation with arbitrary sorting and no filter. </summary>
         /// <param name="query"> The query to search for </param>
         /// <param name="n"> Return only the top n results </param>
-        /// <param name="sort"> The <seealso cref="Lucene.Net.Search.Sort"/> object </param>
-        /// <returns> The top docs, sorted according to the supplied <seealso cref="Lucene.Net.Search.Sort"/> instance </returns>
-        /// <exception cref="IOException"> if there is a low-level I/O error </exception>
+        /// <param name="sort"> The <see cref="Lucene.Net.Search.Sort"/> object </param>
+        /// <returns> The top docs, sorted according to the supplied <see cref="Lucene.Net.Search.Sort"/> instance </returns>
+        /// <exception cref="System.IO.IOException"> if there is a low-level I/O error </exception>
         public virtual TopFieldDocs Search(Query query, int n, Sort sort)
         {
             return Search(CreateNormalizedWeight(query), n, sort, false, false);
         }
 
         /// <summary>
-        /// Finds the top <code>n</code>
-        /// hits for <code>query</code> where all results are after a previous
-        /// result (<code>after</code>).
-        /// <p>
-        /// By passing the bottom result from a previous page as <code>after</code>,
+        /// Finds the top <paramref name="n"/>
+        /// hits for <paramref name="query"/> where all results are after a previous
+        /// result (<paramref name="after"/>).
+        /// <para/>
+        /// By passing the bottom result from a previous page as <paramref name="after"/>,
         /// this method can be used for efficient 'deep-paging' across potentially
         /// large result sets.
         /// </summary>
         /// <exception cref="BooleanQuery.TooManyClausesException"> If a query would exceed
-        ///         <seealso cref="BooleanQuery#getMaxClauseCount()"/> clauses. </exception>
+        ///         <see cref="BooleanQuery.MaxClauseCount"/> clauses. </exception>
         public virtual TopDocs SearchAfter(ScoreDoc after, Query query, int n, Sort sort)
         {
             if (after != null && !(after is FieldDoc))
@@ -423,21 +416,21 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Finds the top <code>n</code>
-        /// hits for <code>query</code> where all results are after a previous
-        /// result (<code>after</code>), allowing control over
+        /// Finds the top <paramref name="n"/>
+        /// hits for <paramref name="query"/> where all results are after a previous
+        /// result (<paramref name="after"/>), allowing control over
         /// whether hit scores and max score should be computed.
-        /// <p>
-        /// By passing the bottom result from a previous page as <code>after</code>,
+        /// <para/>
+        /// By passing the bottom result from a previous page as <paramref name="after"/>,
         /// this method can be used for efficient 'deep-paging' across potentially
-        /// large result sets.  If <code>doDocScores</code> is <code>true</code>
+        /// large result sets.  If <paramref name="doDocScores"/> is <c>true</c>
         /// then the score of each hit will be computed and
-        /// returned.  If <code>doMaxScore</code> is
-        /// <code>true</code> then the maximum score over all
+        /// returned.  If <paramref name="doMaxScore"/> is
+        /// <c>true</c> then the maximum score over all
         /// collected hits will be computed.
         /// </summary>
         /// <exception cref="BooleanQuery.TooManyClausesException"> If a query would exceed
-        ///         <seealso cref="BooleanQuery#getMaxClauseCount()"/> clauses. </exception>
+        ///         <see cref="BooleanQuery.MaxClauseCount"/> clauses. </exception>
         public virtual TopDocs SearchAfter(ScoreDoc after, Query query, Filter filter, int n, Sort sort, bool doDocScores, bool doMaxScore)
         {
             if (after != null && !(after is FieldDoc))
@@ -450,13 +443,13 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Expert: Low-level search implementation.  Finds the top <code>n</code>
-        /// hits for <code>query</code>, applying <code>filter</code> if non-null.
+        /// Expert: Low-level search implementation.  Finds the top <paramref name="nDocs"/>
+        /// hits for <c>query</c>, applying <c>filter</c> if non-null.
         ///
-        /// <p>Applications should usually call <seealso cref="IndexSearcher#search(Query,int)"/> or
-        /// <seealso cref="IndexSearcher#search(Query,Filter,int)"/> instead. </summary>
+        /// <para/>Applications should usually call <see cref="IndexSearcher.Search(Query,int)"/> or
+        /// <see cref="IndexSearcher.Search(Query,Filter,int)"/> instead. </summary>
         /// <exception cref="BooleanQuery.TooManyClausesException"> If a query would exceed
-        ///         <seealso cref="BooleanQuery#getMaxClauseCount()"/> clauses. </exception>
+        ///         <see cref="BooleanQuery.MaxClauseCount"/> clauses. </exception>
         protected virtual TopDocs Search(Weight weight, ScoreDoc after, int nDocs)
         {
             int limit = reader.MaxDoc;
@@ -508,12 +501,12 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Expert: Low-level search implementation.  Finds the top <code>n</code>
-        /// hits for <code>query</code>.
+        /// hits for <c>query</c>.
         ///
-        /// <p>Applications should usually call <seealso cref="IndexSearcher#search(Query,int)"/> or
-        /// <seealso cref="IndexSearcher#search(Query,Filter,int)"/> instead. </summary>
+        /// <para/>Applications should usually call <see cref="IndexSearcher.Search(Query,int)"/> or
+        /// <see cref="IndexSearcher.Search(Query,Filter,int)"/> instead. </summary>
         /// <exception cref="BooleanQuery.TooManyClausesException"> If a query would exceed
-        ///         <seealso cref="BooleanQuery#getMaxClauseCount()"/> clauses. </exception>
+        ///         <see cref="BooleanQuery.MaxClauseCount"/> clauses. </exception>
         protected virtual TopDocs Search(IList<AtomicReaderContext> leaves, Weight weight, ScoreDoc after, int nDocs)
         {
             // single thread
@@ -532,23 +525,23 @@ namespace Lucene.Net.Search
         /// Expert: Low-level search implementation with arbitrary
         /// sorting and control over whether hit scores and max
         /// score should be computed.  Finds
-        /// the top <code>n</code> hits for <code>query</code> and sorting the hits
-        /// by the criteria in <code>sort</code>.
+        /// the top <paramref name="nDocs"/> hits for <c>query</c> and sorting the hits
+        /// by the criteria in <paramref name="sort"/>.
         ///
-        /// <p>Applications should usually call {@link
-        /// IndexSearcher#search(Query,Filter,int,Sort)} instead.
+        /// <para/>Applications should usually call 
+        /// <see cref="IndexSearcher.Search(Query,Filter,int,Sort)"/> instead.
         /// </summary>
         /// <exception cref="BooleanQuery.TooManyClausesException"> If a query would exceed
-        ///         <seealso cref="BooleanQuery#getMaxClauseCount()"/> clauses. </exception>
+        ///         <see cref="BooleanQuery.MaxClauseCount"/> clauses. </exception>
         protected virtual TopFieldDocs Search(Weight weight, int nDocs, Sort sort, bool doDocScores, bool doMaxScore)
         {
             return Search(weight, null, nDocs, sort, true, doDocScores, doMaxScore);
         }
 
         /// <summary>
-        /// Just like <seealso cref="#search(Weight, int, Sort, boolean, boolean)"/>, but you choose
-        /// whether or not the fields in the returned <seealso cref="FieldDoc"/> instances should
-        /// be set by specifying fillFields.
+        /// Just like <see cref="Search(Weight, int, Sort, bool, bool)"/>, but you choose
+        /// whether or not the fields in the returned <see cref="FieldDoc"/> instances should
+        /// be set by specifying <paramref name="fillFields"/>.
         /// </summary>
         protected virtual TopFieldDocs Search(Weight weight, FieldDoc after, int nDocs, Sort sort, bool fillFields, bool doDocScores, bool doMaxScore)
         {
@@ -597,9 +590,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Just like <seealso cref="#search(Weight, int, Sort, boolean, boolean)"/>, but you choose
-        /// whether or not the fields in the returned <seealso cref="FieldDoc"/> instances should
-        /// be set by specifying fillFields.
+        /// Just like <see cref="Search(Weight, int, Sort, bool, bool)"/>, but you choose
+        /// whether or not the fields in the returned <see cref="FieldDoc"/> instances should
+        /// be set by specifying <paramref name="fillFields"/>.
         /// </summary>
         protected virtual TopFieldDocs Search(IList<AtomicReaderContext> leaves, Weight weight, FieldDoc after, int nDocs, Sort sort, bool fillFields, bool doDocScores, bool doMaxScore)
         {
@@ -619,21 +612,21 @@ namespace Lucene.Net.Search
         /// <summary>
         /// Lower-level search API.
         ///
-        /// <p>
-        /// <seealso cref="ICollector#collect(int)"/> is called for every document. <br>
+        /// <para/>
+        /// <see cref="ICollector.Collect(int)"/> is called for every document.
         ///
-        /// <p>
+        /// <para/>
         /// NOTE: this method executes the searches on all given leaves exclusively.
-        /// To search across all the searchers leaves use <seealso cref="#leafContexts"/>.
+        /// To search across all the searchers leaves use <see cref="m_leafContexts"/>.
         /// </summary>
         /// <param name="leaves">
-        ///          the searchers leaves to execute the searches on </param>
+        ///          The searchers leaves to execute the searches on </param>
         /// <param name="weight">
-        ///          to match documents </param>
+        ///          To match documents </param>
         /// <param name="collector">
-        ///          to receive hits </param>
+        ///          To receive hits </param>
         /// <exception cref="BooleanQuery.TooManyClausesException"> If a query would exceed
-        ///         <seealso cref="BooleanQuery#getMaxClauseCount()"/> clauses. </exception>
+        ///         <see cref="BooleanQuery.MaxClauseCount"/> clauses. </exception>
         protected virtual void Search(IList<AtomicReaderContext> leaves, Weight weight, ICollector collector)
         {
             // TODO: should we make this
@@ -670,7 +663,7 @@ namespace Lucene.Net.Search
         /// <summary>
         /// Expert: called to re-write queries into primitive queries. </summary>
         /// <exception cref="BooleanQuery.TooManyClausesException"> If a query would exceed
-        ///         <seealso cref="BooleanQuery#getMaxClauseCount()"/> clauses. </exception>
+        ///         <see cref="BooleanQuery.MaxClauseCount"/> clauses. </exception>
         public virtual Query Rewrite(Query original)
         {
             Query query = original;
@@ -682,10 +675,10 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns an Explanation that describes how <code>doc</code> scored against
-        /// <code>query</code>.
+        /// Returns an <see cref="Explanation"/> that describes how <paramref name="doc"/> scored against
+        /// <paramref name="query"/>.
         ///
-        /// <p>this is intended to be used in developing Similarity implementations,
+        /// <para/>This is intended to be used in developing <see cref="Similarities.Similarity"/> implementations,
         /// and, for good performance, should not be displayed with every hit.
         /// Computing an explanation is as expensive as executing the query over the
         /// entire index.
@@ -697,16 +690,16 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Expert: low-level implementation method
-        /// Returns an Explanation that describes how <code>doc</code> scored against
-        /// <code>weight</code>.
+        /// Returns an <see cref="Explanation"/> that describes how <paramref name="doc"/> scored against
+        /// <paramref name="weight"/>.
         ///
-        /// <p>this is intended to be used in developing Similarity implementations,
+        /// <para/>This is intended to be used in developing <see cref="Similarities.Similarity"/> implementations,
         /// and, for good performance, should not be displayed with every hit.
         /// Computing an explanation is as expensive as executing the query over the
         /// entire index.
-        /// <p>Applications should call <seealso cref="IndexSearcher#explain(Query, int)"/>. </summary>
+        /// <para/>Applications should call <see cref="IndexSearcher.Explain(Query, int)"/>. </summary>
         /// <exception cref="BooleanQuery.TooManyClausesException"> If a query would exceed
-        ///         <seealso cref="BooleanQuery#getMaxClauseCount()"/> clauses. </exception>
+        ///         <see cref="BooleanQuery.MaxClauseCount"/> clauses. </exception>
         protected virtual Explanation Explain(Weight weight, int doc)
         {
             int n = ReaderUtil.SubIndex(doc, m_leafContexts);
@@ -717,10 +710,11 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates a normalized weight for a top-level <seealso cref="Query"/>.
-        /// The query is rewritten by this method and <seealso cref="Query#createWeight"/> called,
-        /// afterwards the <seealso cref="Weight"/> is normalized. The returned {@code Weight}
-        /// can then directly be used to get a <seealso cref="Scorer"/>.
+        /// Creates a normalized weight for a top-level <see cref="Query"/>.
+        /// The query is rewritten by this method and <see cref="Query.CreateWeight(IndexSearcher)"/> called,
+        /// afterwards the <see cref="Weight"/> is normalized. The returned <see cref="Weight"/>
+        /// can then directly be used to get a <see cref="Scorer"/>.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public virtual Weight CreateNormalizedWeight(Query query)
@@ -738,8 +732,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns this searchers the top-level <seealso cref="IndexReaderContext"/>. </summary>
-        /// <seealso cref= IndexReader#getContext() </seealso>
+        /// Returns this searchers the top-level <see cref="IndexReaderContext"/>. </summary>
+        /// <seealso cref="IndexReader.Context"/>
         /* sugar for #getReader().getTopReaderContext() */
 
         public virtual IndexReaderContext TopReaderContext
@@ -870,11 +864,10 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// A helper class that wraps a <seealso cref="CompletionService"/> and provides an
-        /// iterable interface to the completed <seealso cref="Callable"/> instances.
+        /// A helper class that wraps a <see cref="ICompletionService{T}"/> and provides an
+        /// iterable interface to the completed <see cref="ICallable{V}"/> instances.
         /// </summary>
-        /// @param <T>
-        ///          the type of the <seealso cref="Callable"/> return value </param>
+        /// <typeparam name="T">the type of the <see cref="ICallable{V}"/> return value</typeparam>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
@@ -969,9 +962,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// A class holding a subset of the <seealso cref="IndexSearcher"/>s leaf contexts to be
+        /// A class holding a subset of the <see cref="IndexSearcher"/>s leaf contexts to be
         /// executed within a single thread.
-        ///
+        /// <para/>
         /// @lucene.experimental
         /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -993,10 +986,11 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns <seealso cref="TermStatistics"/> for a term.
-        ///
-        /// this can be overridden for example, to return a term's statistics
+        /// Returns <see cref="Search.TermStatistics"/> for a term.
+        /// <para/>
+        /// This can be overridden for example, to return a term's statistics
         /// across a distributed collection.
+        /// <para/>
         /// @lucene.experimental
         /// </summary>
         public virtual TermStatistics TermStatistics(Term term, TermContext context)
@@ -1005,10 +999,11 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns <seealso cref="CollectionStatistics"/> for a field.
-        ///
-        /// this can be overridden for example, to return a field's statistics
+        /// Returns <see cref="Search.CollectionStatistics"/> for a field.
+        /// <para/>
+        /// This can be overridden for example, to return a field's statistics
         /// across a distributed collection.
+        /// <para/>
         /// @lucene.experimental
         /// </summary>
         public virtual CollectionStatistics CollectionStatistics(string field)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/LiveFieldValues.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/LiveFieldValues.cs b/src/Lucene.Net/Search/LiveFieldValues.cs
index 0251d10..3cfa3f7 100644
--- a/src/Lucene.Net/Search/LiveFieldValues.cs
+++ b/src/Lucene.Net/Search/LiveFieldValues.cs
@@ -23,15 +23,15 @@ namespace Lucene.Net.Search
 
     /// <summary>
     /// Tracks live field values across NRT reader reopens.
-    ///  this holds a map for all updated ids since
-    ///  the last reader reopen.  Once the NRT reader is reopened,
-    ///  it prunes the map.  this means you must reopen your NRT
-    ///  reader periodically otherwise the RAM consumption of
-    ///  this class will grow unbounded!
+    /// This holds a map for all updated ids since
+    /// the last reader reopen.  Once the NRT reader is reopened,
+    /// it prunes the map.  This means you must reopen your NRT
+    /// reader periodically otherwise the RAM consumption of
+    /// this class will grow unbounded!
     ///
-    ///  <p>NOTE: you must ensure the same id is never updated at
-    ///  the same time by two threads, because in this case you
-    ///  cannot in general know which thread "won".
+    /// <para/>NOTE: you must ensure the same id is never updated at
+    /// the same time by two threads, because in this case you
+    /// cannot in general know which thread "won".
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -79,8 +79,8 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Call this after you've successfully added a document
-        ///  to the index, to record what value you just set the
-        ///  field to.
+        /// to the index, to record what value you just set the
+        /// field to.
         /// </summary>
         public virtual void Add(string id, T value)
         {
@@ -89,7 +89,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Call this after you've successfully deleted a document
-        ///  from the index.
+        /// from the index.
         /// </summary>
         public virtual void Delete(string id)
         {
@@ -99,6 +99,7 @@ namespace Lucene.Net.Search
         /// <summary>
         /// Returns the [approximate] number of id/value pairs
         /// buffered in RAM.
+        /// <para/>
         /// NOTE: This was size() in Lucene.
         /// </summary>
         public virtual int Count
@@ -107,8 +108,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns the current value for this id, or null if the
-        ///  id isn't in the index or was deleted.
+        /// Returns the current value for this id, or <c>null</c> if the
+        /// id isn't in the index or was deleted.
         /// </summary>
         public virtual T Get(string id)
         {
@@ -157,10 +158,10 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// this is called when the id/value was already flushed & opened
-        ///  in an NRT IndexSearcher.  You must implement this to
-        ///  go look up the value (eg, via doc values, field cache,
-        ///  stored fields, etc.).
+        /// This is called when the id/value was already flushed &amp; opened
+        /// in an NRT IndexSearcher.  You must implement this to
+        /// go look up the value (eg, via doc values, field cache,
+        /// stored fields, etc.).
         /// </summary>
         protected abstract T LookupFromSearcher(S s, string id);
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/MatchAllDocsQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/MatchAllDocsQuery.cs b/src/Lucene.Net/Search/MatchAllDocsQuery.cs
index 608586c..8c7bdcd 100644
--- a/src/Lucene.Net/Search/MatchAllDocsQuery.cs
+++ b/src/Lucene.Net/Search/MatchAllDocsQuery.cs
@@ -30,7 +30,6 @@ namespace Lucene.Net.Search
 
     /// <summary>
     /// A query that matches all documents.
-    ///
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/MaxNonCompetitiveBoostAttribute.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/MaxNonCompetitiveBoostAttribute.cs b/src/Lucene.Net/Search/MaxNonCompetitiveBoostAttribute.cs
index 9aa36ea..7aa3e7e 100644
--- a/src/Lucene.Net/Search/MaxNonCompetitiveBoostAttribute.cs
+++ b/src/Lucene.Net/Search/MaxNonCompetitiveBoostAttribute.cs
@@ -1,4 +1,5 @@
-using System;
+using Lucene.Net.Util;
+using BytesRef = Lucene.Net.Util.BytesRef;
 
 namespace Lucene.Net.Search
 {
@@ -19,57 +20,28 @@ namespace Lucene.Net.Search
      * limitations under the License.
      */
 
-    using Attribute = Lucene.Net.Util.Attribute;
-    using IAttribute = Lucene.Net.Util.IAttribute;
-    using BytesRef = Lucene.Net.Util.BytesRef;
-
     /// <summary>
-    /// Implementation class for <seealso cref="MaxNonCompetitiveBoostAttribute"/>.
+    /// Add this <see cref="IAttribute"/> to a fresh <see cref="AttributeSource"/> before calling
+    /// <see cref="MultiTermQuery.GetTermsEnum(Index.Terms, AttributeSource)"/>.
+    /// <see cref="FuzzyQuery"/> is using this to control its internal behaviour
+    /// to only return competitive terms.
+    /// <para/><b>Please note:</b> this attribute is intended to be added by the <see cref="MultiTermQuery.RewriteMethod"/>
+    /// to an empty <see cref="AttributeSource"/> that is shared for all segments
+    /// during query rewrite. This attribute source is passed to all segment enums
+    /// on <see cref="MultiTermQuery.GetTermsEnum(Index.Terms,AttributeSource)"/>.
+    /// <see cref="TopTermsRewrite{Q}"/> uses this attribute to
+    /// inform all enums about the current boost, that is not competitive.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
-#if FEATURE_SERIALIZABLE
-    [Serializable]
-#endif
-    public sealed class MaxNonCompetitiveBoostAttribute : Attribute, IMaxNonCompetitiveBoostAttribute
+    public interface IMaxNonCompetitiveBoostAttribute : IAttribute
     {
-        private float maxNonCompetitiveBoost = float.NegativeInfinity;
-        private BytesRef competitiveTerm = null;
-
-        public float MaxNonCompetitiveBoost
-        {
-            set
-            {
-                this.maxNonCompetitiveBoost = value;
-            }
-            get
-            {
-                return maxNonCompetitiveBoost;
-            }
-        }
-
-        public BytesRef CompetitiveTerm
-        {
-            set
-            {
-                this.competitiveTerm = value;
-            }
-            get
-            {
-                return competitiveTerm;
-            }
-        }
-
-        public override void Clear()
-        {
-            maxNonCompetitiveBoost = float.NegativeInfinity;
-            competitiveTerm = null;
-        }
+        /// <summary>
+        /// This is the maximum boost that would not be competitive. </summary>
+        float MaxNonCompetitiveBoost { set; get; }
 
-        public override void CopyTo(IAttribute target)
-        {
-            MaxNonCompetitiveBoostAttribute t = (MaxNonCompetitiveBoostAttribute)target;
-            t.MaxNonCompetitiveBoost = maxNonCompetitiveBoost;
-            t.CompetitiveTerm = competitiveTerm;
-        }
+        /// <summary>
+        /// This is the term or <c>null</c> of the term that triggered the boost change. </summary>
+        BytesRef CompetitiveTerm { set; get; }
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/MaxNonCompetitiveBoostAttributeImpl.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/MaxNonCompetitiveBoostAttributeImpl.cs b/src/Lucene.Net/Search/MaxNonCompetitiveBoostAttributeImpl.cs
new file mode 100644
index 0000000..7ce0318
--- /dev/null
+++ b/src/Lucene.Net/Search/MaxNonCompetitiveBoostAttributeImpl.cs
@@ -0,0 +1,76 @@
+using System;
+
+namespace Lucene.Net.Search
+{
+    /*
+     * Licensed to the Apache Software Foundation (ASF) under one or more
+     * contributor license agreements.  See the NOTICE file distributed with
+     * this work for additional information regarding copyright ownership.
+     * The ASF licenses this file to You under the Apache License, Version 2.0
+     * (the "License"); you may not use this file except in compliance with
+     * the License.  You may obtain a copy of the License at
+     *
+     *     http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     * See the License for the specific language governing permissions and
+     * limitations under the License.
+     */
+
+    using Attribute = Lucene.Net.Util.Attribute;
+    using IAttribute = Lucene.Net.Util.IAttribute;
+    using BytesRef = Lucene.Net.Util.BytesRef;
+
+    /// <summary>
+    /// Implementation class for <see cref="IMaxNonCompetitiveBoostAttribute"/>.
+    /// <para/>
+    /// @lucene.internal
+    /// </summary>
+#if FEATURE_SERIALIZABLE
+    [Serializable]
+#endif
+    public sealed class MaxNonCompetitiveBoostAttribute : Attribute, IMaxNonCompetitiveBoostAttribute
+    {
+        private float maxNonCompetitiveBoost = float.NegativeInfinity;
+        private BytesRef competitiveTerm = null;
+
+        public float MaxNonCompetitiveBoost
+        {
+            set
+            {
+                this.maxNonCompetitiveBoost = value;
+            }
+            get
+            {
+                return maxNonCompetitiveBoost;
+            }
+        }
+
+        public BytesRef CompetitiveTerm
+        {
+            set
+            {
+                this.competitiveTerm = value;
+            }
+            get
+            {
+                return competitiveTerm;
+            }
+        }
+
+        public override void Clear()
+        {
+            maxNonCompetitiveBoost = float.NegativeInfinity;
+            competitiveTerm = null;
+        }
+
+        public override void CopyTo(IAttribute target)
+        {
+            MaxNonCompetitiveBoostAttribute t = (MaxNonCompetitiveBoostAttribute)target;
+            t.MaxNonCompetitiveBoost = maxNonCompetitiveBoost;
+            t.CompetitiveTerm = competitiveTerm;
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/MinShouldMatchSumScorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/MinShouldMatchSumScorer.cs b/src/Lucene.Net/Search/MinShouldMatchSumScorer.cs
index b269756..c2b2760 100644
--- a/src/Lucene.Net/Search/MinShouldMatchSumScorer.cs
+++ b/src/Lucene.Net/Search/MinShouldMatchSumScorer.cs
@@ -26,12 +26,12 @@ namespace Lucene.Net.Search
     using ArrayUtil = Lucene.Net.Util.ArrayUtil;
 
     /// <summary>
-    /// A Scorer for OR like queries, counterpart of <code>ConjunctionScorer</code>.
-    /// this Scorer implements <seealso cref="Scorer#advance(int)"/> and uses advance() on the given Scorers.
-    ///
-    /// this implementation uses the minimumMatch constraint actively to efficiently
-    /// prune the number of candidates, it is hence a mixture between a pure DisjunctionScorer
-    /// and a ConjunctionScorer.
+    /// A <see cref="Scorer"/> for OR like queries, counterpart of <see cref="ConjunctionScorer"/>.
+    /// This <see cref="Scorer"/> implements <see cref="DocIdSetIterator.Advance(int)"/> and uses Advance() on the given <see cref="Scorer"/>s.
+    /// <para/>
+    /// This implementation uses the minimumMatch constraint actively to efficiently
+    /// prune the number of candidates, it is hence a mixture between a pure <see cref="DisjunctionScorer"/>
+    /// and a <see cref="ConjunctionScorer"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -59,8 +59,8 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// mmStack is supposed to contain the most costly subScorers that still did
-        ///  not run out of docs, sorted by increasing sparsity of docs returned by that subScorer.
-        ///  For now, the cost of subscorers is assumed to be inversely correlated with sparsity.
+        /// not run out of docs, sorted by increasing sparsity of docs returned by that subScorer.
+        /// For now, the cost of subscorers is assumed to be inversely correlated with sparsity.
         /// </summary>
         private readonly Scorer[] mmStack; // of size mm-1: 0..mm-2, always full
 
@@ -75,16 +75,16 @@ namespace Lucene.Net.Search
         private double score = float.NaN;
 
         /// <summary>
-        /// Construct a <code>MinShouldMatchSumScorer</code>.
+        /// Construct a <see cref="MinShouldMatchSumScorer"/>.
         /// </summary>
         /// <param name="weight"> The weight to be used. </param>
         /// <param name="subScorers"> A collection of at least two subscorers. </param>
         /// <param name="minimumNrMatchers"> The positive minimum number of subscorers that should
         /// match to match this query.
-        /// <br>When <code>minimumNrMatchers</code> is bigger than
-        /// the number of <code>subScorers</code>, no matches will be produced.
-        /// <br>When minimumNrMatchers equals the number of subScorers,
-        /// it is more efficient to use <code>ConjunctionScorer</code>. </param>
+        /// <para/>When <paramref name="minimumNrMatchers"/> is bigger than
+        /// the number of <paramref name="subScorers"/>, no matches will be produced.
+        /// <para/>When <paramref name="minimumNrMatchers"/> equals the number of <paramref name="subScorers"/>,
+        /// it is more efficient to use <see cref="ConjunctionScorer"/>. </param>
         public MinShouldMatchSumScorer(Weight weight, IList<Scorer> subScorers, int minimumNrMatchers)
             : base(weight)
         {
@@ -138,8 +138,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Construct a <code>DisjunctionScorer</code>, using one as the minimum number
-        /// of matching subscorers.
+        /// Construct a <see cref="DisjunctionScorer"/>, using one as the minimum number
+        /// of matching <paramref name="subScorers"/>.
         /// </summary>
         public MinShouldMatchSumScorer(Weight weight, IList<Scorer> subScorers)
             : this(weight, subScorers, 1)
@@ -272,7 +272,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Returns the score of the current document matching the query. Initially
-        /// invalid, until <seealso cref="#nextDoc()"/> is called the first time.
+        /// invalid, until <see cref="NextDoc()"/> is called the first time.
         /// </summary>
         public override float GetScore()
         {
@@ -291,11 +291,12 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Advances to the first match beyond the current whose document number is
-        /// greater than or equal to a given target. <br>
-        /// The implementation uses the advance() method on the subscorers.
+        /// greater than or equal to a given target.
+        /// <para/>
+        /// The implementation uses the Advance() method on the subscorers.
         /// </summary>
-        /// <param name="target"> the target document number. </param>
-        /// <returns> the document whose number is greater than or equal to the given
+        /// <param name="target"> The target document number. </param>
+        /// <returns> The document whose number is greater than or equal to the given
         ///         target, or -1 if none exist. </returns>
         public override int Advance(int target)
         {
@@ -349,7 +350,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Organize subScorers into a min heap with scorers generating the earliest document on top.
+        /// Organize <see cref="subScorers"/> into a min heap with scorers generating the earliest document on top.
         /// </summary>
         protected void MinheapHeapify()
         {
@@ -360,7 +361,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// The subtree of subScorers at root is a min heap except possibly for its root element.
+        /// The subtree of <see cref="subScorers"/> at root is a min heap except possibly for its root element.
         /// Bubble the root down as required to make the subtree a heap.
         /// </summary>
         protected void MinheapSiftDown(int root)
@@ -433,7 +434,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Remove the root Scorer from subScorers and re-establish it as a heap
+        /// Remove the root <see cref="Scorer"/> from <see cref="subScorers"/> and re-establish it as a heap
         /// </summary>
         protected void MinheapRemoveRoot()
         {
@@ -452,7 +453,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Removes a given Scorer from the heap by placing end of heap at that
+        /// Removes a given <see cref="Scorer"/> from the heap by placing end of heap at that
         /// position and bubbling it either up or down
         /// </summary>
         protected bool MinheapRemove(Scorer scorer)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/MultiCollector.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/MultiCollector.cs b/src/Lucene.Net/Search/MultiCollector.cs
index 6551bc3..00bbe2e 100644
--- a/src/Lucene.Net/Search/MultiCollector.cs
+++ b/src/Lucene.Net/Search/MultiCollector.cs
@@ -22,10 +22,10 @@ namespace Lucene.Net.Search
     using AtomicReaderContext = Lucene.Net.Index.AtomicReaderContext;
 
     /// <summary>
-    /// A <seealso cref="ICollector"/> which allows running a search with several
-    /// <seealso cref="ICollector"/>s. It offers a static <seealso cref="#wrap"/> method which accepts a
-    /// list of collectors and wraps them with <seealso cref="MultiCollector"/>, while
-    /// filtering out the <code>null</code> null ones.
+    /// A <see cref="ICollector"/> which allows running a search with several
+    /// <see cref="ICollector"/>s. It offers a static <see cref="Wrap(ICollector[])"/> method which accepts a
+    /// list of collectors and wraps them with <see cref="MultiCollector"/>, while
+    /// filtering out the <c>null</c> ones.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -33,20 +33,20 @@ namespace Lucene.Net.Search
     public class MultiCollector : ICollector
     {
         /// <summary>
-        /// Wraps a list of <seealso cref="ICollector"/>s with a <seealso cref="MultiCollector"/>. this
+        /// Wraps a list of <see cref="ICollector"/>s with a <see cref="MultiCollector"/>. This
         /// method works as follows:
-        /// <ul>
-        /// <li>Filters out the <code>null</code> collectors, so they are not used
-        /// during search time.
-        /// <li>If the input contains 1 real collector (i.e. non-<code>null</code> ),
-        /// it is returned.
-        /// <li>Otherwise the method returns a <seealso cref="MultiCollector"/> which wraps the
-        /// non-<code>null</code> ones.
-        /// </ul>
+        /// <list type="bullet">
+        /// <item><description>Filters out the <c>null</c> collectors, so they are not used
+        /// during search time.</description></item>
+        /// <item><description>If the input contains 1 real collector (i.e. non-<c>null</c> ),
+        /// it is returned.</description></item>
+        /// <item><description>Otherwise the method returns a <see cref="MultiCollector"/> which wraps the
+        /// non-<c>null</c> ones.</description></item>
+        /// </list>
         /// </summary>
-        /// <exception cref="IllegalArgumentException">
+        /// <exception cref="ArgumentException">
         ///           if either 0 collectors were input, or all collectors are
-        ///           <code>null</code>. </exception>
+        ///           <c>null</c>. </exception>
         public static ICollector Wrap(params ICollector[] collectors)
         {
             // For the user's convenience, we allow null collectors to be passed.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/MultiPhraseQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/MultiPhraseQuery.cs b/src/Lucene.Net/Search/MultiPhraseQuery.cs
index cb98931..5e2491d 100644
--- a/src/Lucene.Net/Search/MultiPhraseQuery.cs
+++ b/src/Lucene.Net/Search/MultiPhraseQuery.cs
@@ -44,13 +44,13 @@ namespace Lucene.Net.Search
     using ToStringUtils = Lucene.Net.Util.ToStringUtils;
 
     /// <summary>
-    /// MultiPhraseQuery is a generalized version of PhraseQuery, with an added
-    /// method <seealso cref="#add(Term[])"/>.
+    /// <see cref="MultiPhraseQuery"/> is a generalized version of <see cref="PhraseQuery"/>, with an added
+    /// method <see cref="Add(Term[])"/>.
+    /// <para/>
     /// To use this class, to search for the phrase "Microsoft app*" first use
-    /// add(Term) on the term "Microsoft", then find all terms that have "app" as
-    /// prefix using IndexReader.terms(Term), and use MultiPhraseQuery.add(Term[]
-    /// terms) to add them to the query.
-    ///
+    /// <see cref="Add(Term)"/> on the term "Microsoft", then find all terms that have "app" as
+    /// prefix using <c>MultiFields.GetFields(IndexReader).GetTerms(string)</c>, and use <see cref="MultiPhraseQuery.Add(Term[])"/>
+    /// to add them to the query.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -65,7 +65,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Sets the phrase slop for this query. </summary>
-        /// <seealso cref= PhraseQuery#setSlop(int) </seealso>
+        /// <seealso cref="PhraseQuery.Slop"/>
         public virtual int Slop
         {
             set
@@ -84,7 +84,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Add a single term at the next position in the phrase. </summary>
-        /// <seealso cref= PhraseQuery#add(Term) </seealso>
+        /// <seealso cref="PhraseQuery.Add(Term)"/>
         public virtual void Add(Term term)
         {
             Add(new Term[] { term });
@@ -94,7 +94,7 @@ namespace Lucene.Net.Search
         /// Add multiple terms at the next position in the phrase.  Any of the terms
         /// may match.
         /// </summary>
-        /// <seealso cref= PhraseQuery#add(Term) </seealso>
+        /// <seealso cref="PhraseQuery.Add(Term)"/>
         public virtual void Add(Term[] terms)
         {
             int position = 0;
@@ -109,7 +109,7 @@ namespace Lucene.Net.Search
         /// <summary>
         /// Allows to specify the relative position of terms within the phrase.
         /// </summary>
-        /// <seealso cref= PhraseQuery#add(Term, int) </seealso>
+        /// <seealso cref="PhraseQuery.Add(Term, int)"/>
         public virtual void Add(Term[] terms, int position)
         {
             if (termArrays.Count == 0)
@@ -151,7 +151,11 @@ namespace Lucene.Net.Search
             return result;
         }
 
-        // inherit javadoc
+        /// <summary>
+        /// Expert: adds all terms occurring in this query to the terms set. Only
+        /// works if this query is in its rewritten (<see cref="Rewrite(IndexReader)"/>) form.
+        /// </summary>
+        /// <exception cref="InvalidOperationException"> If this query is not yet rewritten </exception>
         public override void ExtractTerms(ISet<Term> terms)
         {
             foreach (Term[] arr in termArrays)
@@ -436,7 +440,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns true if <code>o</code> is equal to this. </summary>
+        /// Returns <c>true</c> if <paramref name="o"/> is equal to this. </summary>
         public override bool Equals(object o)
         {
             if (!(o is MultiPhraseQuery))
@@ -502,7 +506,7 @@ namespace Lucene.Net.Search
     }
 
     /// <summary>
-    /// Takes the logical union of multiple DocsEnum iterators.
+    /// Takes the logical union of multiple <see cref="DocsEnum"/> iterators.
     /// </summary>
 
     // TODO: if ever we allow subclassing of the *PhraseScorer

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/MultiTermQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/MultiTermQuery.cs b/src/Lucene.Net/Search/MultiTermQuery.cs
index 199e46f..8fbad5e 100644
--- a/src/Lucene.Net/Search/MultiTermQuery.cs
+++ b/src/Lucene.Net/Search/MultiTermQuery.cs
@@ -28,37 +28,37 @@ namespace Lucene.Net.Search
     using TermsEnum = Lucene.Net.Index.TermsEnum;
 
     /// <summary>
-    /// An abstract <seealso cref="Query"/> that matches documents
-    /// containing a subset of terms provided by a {@link
-    /// FilteredTermsEnum} enumeration.
+    /// An abstract <see cref="Query"/> that matches documents
+    /// containing a subset of terms provided by a 
+    /// <see cref="Index.FilteredTermsEnum"/> enumeration.
     ///
-    /// <p>this query cannot be used directly; you must subclass
-    /// it and define <seealso cref="#getTermsEnum(Terms,AttributeSource)"/> to provide a {@link
-    /// FilteredTermsEnum} that iterates through the terms to be
+    /// <para/>This query cannot be used directly; you must subclass
+    /// it and define <see cref="GetTermsEnum(Terms,AttributeSource)"/> to provide a 
+    /// <see cref="Index.FilteredTermsEnum"/> that iterates through the terms to be
     /// matched.
     ///
-    /// <p><b>NOTE</b>: if <seealso cref="#setRewriteMethod"/> is either
-    /// <seealso cref="#CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE"/> or {@link
-    /// #SCORING_BOOLEAN_QUERY_REWRITE}, you may encounter a
-    /// <seealso cref="BooleanQuery.TooManyClausesException"/> exception during
+    /// <para/><b>NOTE</b>: if <see cref="MultiTermRewriteMethod"/> is either
+    /// <see cref="CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE"/> or
+    /// <see cref="SCORING_BOOLEAN_QUERY_REWRITE"/>, you may encounter a
+    /// <see cref="BooleanQuery.TooManyClausesException"/> exception during
     /// searching, which happens when the number of terms to be
-    /// searched exceeds {@link
-    /// BooleanQuery#getMaxClauseCount()}.  Setting {@link
-    /// #setRewriteMethod} to <seealso cref="#CONSTANT_SCORE_FILTER_REWRITE"/>
+    /// searched exceeds 
+    /// <see cref="BooleanQuery.MaxClauseCount"/>.  Setting 
+    /// <see cref="MultiTermRewriteMethod"/> to <see cref="CONSTANT_SCORE_FILTER_REWRITE"/>
     /// prevents this.
     ///
-    /// <p>The recommended rewrite method is {@link
-    /// #CONSTANT_SCORE_AUTO_REWRITE_DEFAULT}: it doesn't spend CPU
+    /// <para/>The recommended rewrite method is 
+    /// <see cref="CONSTANT_SCORE_AUTO_REWRITE_DEFAULT"/>: it doesn't spend CPU
     /// computing unhelpful scores, and it tries to pick the most
     /// performant rewrite method given the query. If you
-    /// need scoring (like <seealso cref="FuzzyQuery"/>, use
-    /// <seealso cref="TopTermsScoringBooleanQueryRewrite"/> which uses
+    /// need scoring (like <see cref="FuzzyQuery"/>), use
+    /// <see cref="TopTermsScoringBooleanQueryRewrite"/> which uses
     /// a priority queue to only collect competitive terms
     /// and not hit this limitation.
     ///
-    /// Note that queryparser.classic.QueryParser produces
-    /// MultiTermQueries using {@link
-    /// #CONSTANT_SCORE_AUTO_REWRITE_DEFAULT} by default.
+    /// <para/>Note that QueryParsers.Classic.QueryParser produces
+    /// <see cref="MultiTermQuery"/>s using 
+    /// <see cref="CONSTANT_SCORE_AUTO_REWRITE_DEFAULT"/> by default.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -78,8 +78,8 @@ namespace Lucene.Net.Search
             public abstract Query Rewrite(IndexReader reader, MultiTermQuery query);
 
             /// <summary>
-            /// Returns the <seealso cref="MultiTermQuery"/>s <seealso cref="TermsEnum"/> </summary>
-            /// <seealso cref= MultiTermQuery#getTermsEnum(Terms, AttributeSource) </seealso>
+            /// Returns the <see cref="MultiTermQuery"/>s <see cref="TermsEnum"/> </summary>
+            /// <seealso cref="MultiTermQuery.GetTermsEnum(Terms, AttributeSource)"/>
             protected virtual TermsEnum GetTermsEnum(MultiTermQuery query, Terms terms, AttributeSource atts)
             {
                 return query.GetTermsEnum(terms, atts); // allow RewriteMethod subclasses to pull a TermsEnum from the MTQ
@@ -87,18 +87,18 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// A rewrite method that first creates a private Filter,
-        ///  by visiting each term in sequence and marking all docs
-        ///  for that term.  Matching documents are assigned a
-        ///  constant score equal to the query's boost.
+        /// A rewrite method that first creates a private <see cref="Filter"/>,
+        /// by visiting each term in sequence and marking all docs
+        /// for that term.  Matching documents are assigned a
+        /// constant score equal to the query's boost.
         ///
-        ///  <p> this method is faster than the BooleanQuery
-        ///  rewrite methods when the number of matched terms or
-        ///  matched documents is non-trivial. Also, it will never
-        ///  hit an errant <seealso cref="BooleanQuery.TooManyClausesException"/>
-        ///  exception.
+        /// <para/> This method is faster than the <see cref="BooleanQuery"/>
+        /// rewrite methods when the number of matched terms or
+        /// matched documents is non-trivial. Also, it will never
+        /// hit an errant <see cref="BooleanQuery.TooManyClausesException"/>
+        /// exception.
         /// </summary>
-        ///  <seealso cref= #setRewriteMethod  </seealso>
+        /// <seealso cref="MultiTermRewriteMethod"/>
         public static readonly RewriteMethod CONSTANT_SCORE_FILTER_REWRITE = new RewriteMethodAnonymousInnerClassHelper();
 
 #if FEATURE_SERIALIZABLE
@@ -120,55 +120,55 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// A rewrite method that first translates each term into
-        ///  <seealso cref="Occur#SHOULD"/> clause in a
-        ///  BooleanQuery, and keeps the scores as computed by the
-        ///  query.  Note that typically such scores are
-        ///  meaningless to the user, and require non-trivial CPU
-        ///  to compute, so it's almost always better to use {@link
-        ///  #CONSTANT_SCORE_AUTO_REWRITE_DEFAULT} instead.
+        /// <see cref="Occur.SHOULD"/> clause in a
+        /// <see cref="BooleanQuery"/>, and keeps the scores as computed by the
+        /// query.  Note that typically such scores are
+        /// meaningless to the user, and require non-trivial CPU
+        /// to compute, so it's almost always better to use 
+        /// <see cref="CONSTANT_SCORE_AUTO_REWRITE_DEFAULT"/> instead.
         ///
-        ///  <p><b>NOTE</b>: this rewrite method will hit {@link
-        ///  BooleanQuery.TooManyClauses} if the number of terms
-        ///  exceeds <seealso cref="BooleanQuery#getMaxClauseCount"/>.
+        /// <para/><b>NOTE</b>: this rewrite method will hit 
+        /// <see cref="BooleanQuery.TooManyClausesException"/> if the number of terms
+        /// exceeds <see cref="BooleanQuery.MaxClauseCount"/>.
         /// </summary>
-        ///  <seealso cref= #setRewriteMethod  </seealso>
+        /// <seealso cref="MultiTermRewriteMethod"/>
         public static readonly RewriteMethod SCORING_BOOLEAN_QUERY_REWRITE = ScoringRewrite<MultiTermQuery>.SCORING_BOOLEAN_QUERY_REWRITE;
 
         /// <summary>
-        /// Like <seealso cref="#SCORING_BOOLEAN_QUERY_REWRITE"/> except
-        ///  scores are not computed.  Instead, each matching
-        ///  document receives a constant score equal to the
-        ///  query's boost.
+        /// Like <see cref="SCORING_BOOLEAN_QUERY_REWRITE"/> except
+        /// scores are not computed.  Instead, each matching
+        /// document receives a constant score equal to the
+        /// query's boost.
         ///
-        ///  <p><b>NOTE</b>: this rewrite method will hit {@link
-        ///  BooleanQuery.TooManyClauses} if the number of terms
-        ///  exceeds <seealso cref="BooleanQuery#getMaxClauseCount"/>.
+        /// <para/><b>NOTE</b>: this rewrite method will hit 
+        /// <see cref="BooleanQuery.TooManyClausesException"/> if the number of terms
+        /// exceeds <see cref="BooleanQuery.MaxClauseCount"/>.
         /// </summary>
-        ///  <seealso cref= #setRewriteMethod  </seealso>
+        /// <seealso cref="MultiTermRewriteMethod"/>
         public static readonly RewriteMethod CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE = ScoringRewrite<MultiTermQuery>.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE;
 
         /// <summary>
         /// A rewrite method that first translates each term into
-        /// <seealso cref="Occur#SHOULD"/> clause in a BooleanQuery, and keeps the
+        /// <see cref="Occur.SHOULD"/> clause in a <see cref="BooleanQuery"/>, and keeps the
         /// scores as computed by the query.
         ///
-        /// <p>
-        /// this rewrite method only uses the top scoring terms so it will not overflow
+        /// <para/>
+        /// This rewrite method only uses the top scoring terms so it will not overflow
         /// the boolean max clause count. It is the default rewrite method for
-        /// <seealso cref="FuzzyQuery"/>.
+        /// <see cref="FuzzyQuery"/>.
         /// </summary>
-        /// <seealso cref= #setRewriteMethod </seealso>
+        /// <seealso cref="MultiTermRewriteMethod"/>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
         public sealed class TopTermsScoringBooleanQueryRewrite : TopTermsRewrite<BooleanQuery>
         {
             /// <summary>
-            /// Create a TopTermsScoringBooleanQueryRewrite for
-            /// at most <code>size</code> terms.
-            /// <p>
-            /// NOTE: if <seealso cref="BooleanQuery#getMaxClauseCount"/> is smaller than
-            /// <code>size</code>, then it will be used instead.
+            /// Create a <see cref="TopTermsScoringBooleanQueryRewrite"/> for
+            /// at most <paramref name="size"/> terms.
+            /// <para/>
+            /// NOTE: if <see cref="BooleanQuery.MaxClauseCount"/> is smaller than
+            /// <paramref name="size"/>, then it will be used instead.
             /// </summary>
             public TopTermsScoringBooleanQueryRewrite(int size)
                 : base(size)
@@ -198,24 +198,24 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// A rewrite method that first translates each term into
-        /// <seealso cref="Occur#SHOULD"/> clause in a BooleanQuery, but the scores
+        /// <see cref="Occur.SHOULD"/> clause in a <see cref="BooleanQuery"/>, but the scores
         /// are only computed as the boost.
-        /// <p>
-        /// this rewrite method only uses the top scoring terms so it will not overflow
+        /// <para/>
+        /// This rewrite method only uses the top scoring terms so it will not overflow
         /// the boolean max clause count.
         /// </summary>
-        /// <seealso cref= #setRewriteMethod </seealso>
+        /// <seealso cref="MultiTermRewriteMethod"/>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
         public sealed class TopTermsBoostOnlyBooleanQueryRewrite : TopTermsRewrite<BooleanQuery>
         {
             /// <summary>
-            /// Create a TopTermsBoostOnlyBooleanQueryRewrite for
-            /// at most <code>size</code> terms.
-            /// <p>
-            /// NOTE: if <seealso cref="BooleanQuery#getMaxClauseCount"/> is smaller than
-            /// <code>size</code>, then it will be used instead.
+            /// Create a <see cref="TopTermsBoostOnlyBooleanQueryRewrite"/> for
+            /// at most <paramref name="size"/> terms.
+            /// <para/>
+            /// NOTE: if <see cref="BooleanQuery.MaxClauseCount"/> is smaller than
+            /// <paramref name="size"/>, then it will be used instead.
             /// </summary>
             public TopTermsBoostOnlyBooleanQueryRewrite(int size)
                 : base(size)
@@ -245,33 +245,31 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// A rewrite method that tries to pick the best
-        ///  constant-score rewrite method based on term and
-        ///  document counts from the query.  If both the number of
-        ///  terms and documents is small enough, then {@link
-        ///  #CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE} is used.
-        ///  Otherwise, <seealso cref="#CONSTANT_SCORE_FILTER_REWRITE"/> is
-        ///  used.
+        /// constant-score rewrite method based on term and
+        /// document counts from the query.  If both the number of
+        /// terms and documents is small enough, then 
+        /// <see cref="CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE"/> is used.
+        /// Otherwise, <see cref="CONSTANT_SCORE_FILTER_REWRITE"/> is
+        /// used.
         /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif
-        public class ConstantScoreAutoRewrite : Lucene.Net.Search.ConstantScoreAutoRewrite
+        public class ConstantScoreAutoRewrite : Lucene.Net.Search.ConstantScoreAutoRewrite // LUCENENET TODO: API Remove duplicate type with same name (confusing)
         {
         }
 
         /// <summary>
-        /// Read-only default instance of {@link
-        ///  ConstantScoreAutoRewrite}, with {@link
-        ///  ConstantScoreAutoRewrite#setTermCountCutoff} set to
-        ///  {@link
-        ///  ConstantScoreAutoRewrite#DEFAULT_TERM_COUNT_CUTOFF}
-        ///  and {@link
-        ///  ConstantScoreAutoRewrite#setDocCountPercent} set to
-        ///  {@link
-        ///  ConstantScoreAutoRewrite#DEFAULT_DOC_COUNT_PERCENT}.
-        ///  Note that you cannot alter the configuration of this
-        ///  instance; you'll need to create a private instance
-        ///  instead.
+        /// Read-only default instance of
+        /// <see cref="ConstantScoreAutoRewrite"/>, with 
+        /// <see cref="Search.ConstantScoreAutoRewrite.TermCountCutoff"/> set to
+        /// <see cref="Search.ConstantScoreAutoRewrite.DEFAULT_TERM_COUNT_CUTOFF"/>
+        /// and 
+        /// <see cref="Search.ConstantScoreAutoRewrite.DocCountPercent"/> set to
+        /// <see cref="Search.ConstantScoreAutoRewrite.DEFAULT_DOC_COUNT_PERCENT"/>.
+        /// Note that you cannot alter the configuration of this
+        /// instance; you'll need to create a private instance
+        /// instead.
         /// </summary>
         public static readonly RewriteMethod CONSTANT_SCORE_AUTO_REWRITE_DEFAULT = new ConstantScoreAutoRewriteAnonymousInnerClassHelper();
 
@@ -311,7 +309,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Constructs a query matching terms that cannot be represented with a single
-        /// Term.
+        /// <see cref="Term"/>.
         /// </summary>
         public MultiTermQuery(string field)
         {
@@ -334,22 +332,22 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Construct the enumeration to be used, expanding the
-        ///  pattern term.  this method should only be called if
-        ///  the field exists (ie, implementations can assume the
-        ///  field does exist).  this method should not return null
-        ///  (should instead return <seealso cref="TermsEnum#EMPTY"/> if no
-        ///  terms match).  The TermsEnum must already be
-        ///  positioned to the first matching term.
-        /// The given <seealso cref="AttributeSource"/> is passed by the <seealso cref="RewriteMethod"/> to
+        /// pattern term.  This method should only be called if
+        /// the field exists (i.e., implementations can assume the
+        /// field does exist).  This method should not return <c>null</c>
+        /// (should instead return <see cref="TermsEnum.EMPTY"/> if no
+        /// terms match).  The <see cref="TermsEnum"/> must already be
+        /// positioned to the first matching term.
+        /// The given <see cref="AttributeSource"/> is passed by the <see cref="RewriteMethod"/> to
         /// provide attributes, the rewrite method uses to inform about e.g. maximum competitive boosts.
-        /// this is currently only used by <seealso cref="TopTermsRewrite"/>
+        /// This is currently only used by <see cref="TopTermsRewrite{Q}"/>.
         /// </summary>
         protected abstract TermsEnum GetTermsEnum(Terms terms, AttributeSource atts);
 
         /// <summary>
         /// Convenience method, if no attributes are needed:
         /// this simply passes empty attributes and is equal to:
-        /// <code>getTermsEnum(terms, new AttributeSource())</code>
+        /// <code>GetTermsEnum(terms, new AttributeSource())</code>
         /// </summary>
         public TermsEnum GetTermsEnum(Terms terms)
         {
@@ -358,8 +356,8 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// To rewrite to a simpler form, instead return a simpler
-        /// enum from <seealso cref="#getTermsEnum(Terms, AttributeSource)"/>.  For example,
-        /// to rewrite to a single term, return a <seealso cref="SingleTermsEnum"/>
+        /// enum from <see cref="GetTermsEnum(Terms, AttributeSource)"/>.  For example,
+        /// to rewrite to a single term, return a <see cref="Index.SingleTermsEnum"/>.
         /// </summary>
         public override sealed Query Rewrite(IndexReader reader)
         {


[18/48] lucenenet git commit: SWEEP: Updated all project.json files to suppress documentation warnings for CS1591 and CS1573

Posted by ni...@apache.org.
SWEEP: Updated all project.json files to suppress documentation warnings for CS1591 and CS1573


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/6bde1efb
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/6bde1efb
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/6bde1efb

Branch: refs/heads/master
Commit: 6bde1efbb83361b317b241d050bb340fbe179847
Parents: 864dcf7
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Sun Jun 4 03:53:40 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Sun Jun 4 03:53:40 2017 +0700

----------------------------------------------------------------------
 src/Lucene.Net.Analysis.Common/project.json  | 3 ++-
 src/Lucene.Net.Analysis.Stempel/project.json | 3 ++-
 src/Lucene.Net.Classification/project.json   | 3 ++-
 src/Lucene.Net.Codecs/project.json           | 3 ++-
 src/Lucene.Net.Expressions/project.json      | 3 ++-
 src/Lucene.Net.Facet/project.json            | 3 ++-
 src/Lucene.Net.Grouping/project.json         | 3 ++-
 src/Lucene.Net.Highlighter/project.json      | 3 ++-
 src/Lucene.Net.ICU/project.json              | 3 ++-
 src/Lucene.Net.Join/project.json             | 3 ++-
 src/Lucene.Net.Memory/project.json           | 3 ++-
 src/Lucene.Net.Misc/project.json             | 3 ++-
 src/Lucene.Net.Queries/project.json          | 3 ++-
 src/Lucene.Net.QueryParser/project.json      | 3 ++-
 src/Lucene.Net.Sandbox/project.json          | 3 ++-
 src/Lucene.Net.Spatial/project.json          | 3 ++-
 src/Lucene.Net.Suggest/project.json          | 3 ++-
 src/Lucene.Net/project.json                  | 3 ++-
 18 files changed, 36 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6bde1efb/src/Lucene.Net.Analysis.Common/project.json
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/project.json b/src/Lucene.Net.Analysis.Common/project.json
index 67e2682..e974f8f 100644
--- a/src/Lucene.Net.Analysis.Common/project.json
+++ b/src/Lucene.Net.Analysis.Common/project.json
@@ -25,7 +25,8 @@
         "Analysis/Pt/portuguese.rslp",
         "Analysis/Compound/Hyphenation/hyphenation.dtd"
       ]
-    }
+    },
+    "nowarn": [ "1591", "1573" ]
   },
   "dependencies": {
     "Lucene.Net": "4.8.0"

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6bde1efb/src/Lucene.Net.Analysis.Stempel/project.json
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Stempel/project.json b/src/Lucene.Net.Analysis.Stempel/project.json
index 4696854..2063edd 100644
--- a/src/Lucene.Net.Analysis.Stempel/project.json
+++ b/src/Lucene.Net.Analysis.Stempel/project.json
@@ -20,7 +20,8 @@
         "Pl/stemmer_20000.tbl",
         "Pl/stopwords.txt"
       ]
-    }
+    },
+    "nowarn": [ "1591", "1573" ]
   },
   "dependencies": {
     "Lucene.Net.Analysis.Common": "4.8.0"

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6bde1efb/src/Lucene.Net.Classification/project.json
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Classification/project.json b/src/Lucene.Net.Classification/project.json
index e24d80b..5f25504 100644
--- a/src/Lucene.Net.Classification/project.json
+++ b/src/Lucene.Net.Classification/project.json
@@ -14,7 +14,8 @@
   "buildOptions": {
     "compile": {
       "includeFiles": [ "../CommonAssemblyInfo.cs" ]
-    }
+    },
+    "nowarn": [ "1591", "1573" ]
   },
   "dependencies": {
     "Lucene.Net": "4.8.0",

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6bde1efb/src/Lucene.Net.Codecs/project.json
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/project.json b/src/Lucene.Net.Codecs/project.json
index c3e761e..3d440b0 100644
--- a/src/Lucene.Net.Codecs/project.json
+++ b/src/Lucene.Net.Codecs/project.json
@@ -14,7 +14,8 @@
   "buildOptions": {
     "compile": {
       "includeFiles": [ "../CommonAssemblyInfo.cs" ]
-    }
+    },
+    "nowarn": [ "1591", "1573" ]
   },
   "dependencies": {
     "Lucene.Net": "4.8.0"

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6bde1efb/src/Lucene.Net.Expressions/project.json
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Expressions/project.json b/src/Lucene.Net.Expressions/project.json
index ac5becd..5d7a172 100644
--- a/src/Lucene.Net.Expressions/project.json
+++ b/src/Lucene.Net.Expressions/project.json
@@ -14,7 +14,8 @@
   "buildOptions": {
     "compile": {
       "includeFiles": [ "../CommonAssemblyInfo.cs" ]
-    }
+    },
+    "nowarn": [ "1591", "1573" ]
   },
   "dependencies": {
     "antlr3.runtime.netcore": "3.6.0-rc2",

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6bde1efb/src/Lucene.Net.Facet/project.json
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Facet/project.json b/src/Lucene.Net.Facet/project.json
index b269cd5..eb4ca44 100644
--- a/src/Lucene.Net.Facet/project.json
+++ b/src/Lucene.Net.Facet/project.json
@@ -14,7 +14,8 @@
   "buildOptions": {
     "compile": {
       "includeFiles": [ "../CommonAssemblyInfo.cs" ]
-    }
+    },
+    "nowarn": [ "1591", "1573" ]
   },
   "dependencies": {
     "Lucene.Net.Join": "4.8.0",

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6bde1efb/src/Lucene.Net.Grouping/project.json
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Grouping/project.json b/src/Lucene.Net.Grouping/project.json
index f44b7b1..10dba33 100644
--- a/src/Lucene.Net.Grouping/project.json
+++ b/src/Lucene.Net.Grouping/project.json
@@ -14,7 +14,8 @@
   "buildOptions": {
     "compile": {
       "includeFiles": [ "../CommonAssemblyInfo.cs" ]
-    }
+    },
+    "nowarn": [ "1591", "1573" ]
   },
   "dependencies": {
     "Lucene.Net": "4.8.0",

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6bde1efb/src/Lucene.Net.Highlighter/project.json
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Highlighter/project.json b/src/Lucene.Net.Highlighter/project.json
index b94b440..e36a4e4 100644
--- a/src/Lucene.Net.Highlighter/project.json
+++ b/src/Lucene.Net.Highlighter/project.json
@@ -14,7 +14,8 @@
   "buildOptions": {
     "compile": {
       "includeFiles": [ "../CommonAssemblyInfo.cs" ]
-    }
+    },
+    "nowarn": [ "1591", "1573" ]
   },
   "dependencies": {
     "Lucene.Net.Memory": "4.8.0",

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6bde1efb/src/Lucene.Net.ICU/project.json
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.ICU/project.json b/src/Lucene.Net.ICU/project.json
index 0b7c18a..f075fc7 100644
--- a/src/Lucene.Net.ICU/project.json
+++ b/src/Lucene.Net.ICU/project.json
@@ -36,7 +36,8 @@
     },
     "embed": {
       "includeFiles": [ "Analysis/Th/stopwords.txt" ]
-    }
+    },
+    "nowarn": [ "1591", "1573" ]
   },
   "dependencies": {
     "icu.net": "54.1.1-alpha",

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6bde1efb/src/Lucene.Net.Join/project.json
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Join/project.json b/src/Lucene.Net.Join/project.json
index 63db858..7e4527b 100644
--- a/src/Lucene.Net.Join/project.json
+++ b/src/Lucene.Net.Join/project.json
@@ -14,7 +14,8 @@
   "buildOptions": {
     "compile": {
       "includeFiles": [ "../CommonAssemblyInfo.cs" ]
-    }
+    },
+    "nowarn": [ "1591", "1573" ]
   },
   "dependencies": {
     "Lucene.Net.Grouping": "4.8.0"

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6bde1efb/src/Lucene.Net.Memory/project.json
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Memory/project.json b/src/Lucene.Net.Memory/project.json
index ce84d87..9dbd59c 100644
--- a/src/Lucene.Net.Memory/project.json
+++ b/src/Lucene.Net.Memory/project.json
@@ -14,7 +14,8 @@
   "buildOptions": {
     "compile": {
       "includeFiles": [ "../CommonAssemblyInfo.cs" ]
-    }
+    },
+    "nowarn": [ "1591", "1573" ]
   },
   "dependencies": {
     "Lucene.Net": "4.8.0"

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6bde1efb/src/Lucene.Net.Misc/project.json
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Misc/project.json b/src/Lucene.Net.Misc/project.json
index d2c13ab..3b1d279 100644
--- a/src/Lucene.Net.Misc/project.json
+++ b/src/Lucene.Net.Misc/project.json
@@ -15,7 +15,8 @@
     "compile": {
       "includeFiles": [ "../CommonAssemblyInfo.cs" ],
       "exclude": [ "Store/*" ]
-    }
+    },
+    "nowarn": [ "1591", "1573" ]
   },
   "dependencies": {
     "Lucene.Net.Analysis.Common": "4.8.0"

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6bde1efb/src/Lucene.Net.Queries/project.json
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/project.json b/src/Lucene.Net.Queries/project.json
index 405e443..fbb3425 100644
--- a/src/Lucene.Net.Queries/project.json
+++ b/src/Lucene.Net.Queries/project.json
@@ -14,7 +14,8 @@
   "buildOptions": {
     "compile": {
       "includeFiles": [ "../CommonAssemblyInfo.cs" ]
-    }
+    },
+    "nowarn": [ "1591", "1573" ]
   },
   "dependencies": {
     "Lucene.Net": "4.8.0"

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6bde1efb/src/Lucene.Net.QueryParser/project.json
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.QueryParser/project.json b/src/Lucene.Net.QueryParser/project.json
index 8e97320..a49f881 100644
--- a/src/Lucene.Net.QueryParser/project.json
+++ b/src/Lucene.Net.QueryParser/project.json
@@ -14,7 +14,8 @@
   "buildOptions": {
     "compile": {
       "includeFiles": [ "../CommonAssemblyInfo.cs" ]
-    }
+    },
+    "nowarn": [ "1591", "1573" ]
   },
   "dependencies": {
     "Lucene.Net.Analysis.Common": "4.8.0",

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6bde1efb/src/Lucene.Net.Sandbox/project.json
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Sandbox/project.json b/src/Lucene.Net.Sandbox/project.json
index ec481f8..1e554ac 100644
--- a/src/Lucene.Net.Sandbox/project.json
+++ b/src/Lucene.Net.Sandbox/project.json
@@ -14,7 +14,8 @@
   "buildOptions": {
     "compile": {
       "includeFiles": [ "../CommonAssemblyInfo.cs" ]
-    }
+    },
+    "nowarn": [ "1591", "1573" ]
   },
   "dependencies": {
     "Lucene.Net": "4.8.0"

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6bde1efb/src/Lucene.Net.Spatial/project.json
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Spatial/project.json b/src/Lucene.Net.Spatial/project.json
index a012c36..ead4ac8 100644
--- a/src/Lucene.Net.Spatial/project.json
+++ b/src/Lucene.Net.Spatial/project.json
@@ -14,7 +14,8 @@
   "buildOptions": {
     "compile": {
       "includeFiles": [ "../CommonAssemblyInfo.cs" ]
-    }
+    },
+    "nowarn": [ "1591", "1573" ]
   },
   "dependencies": {
     "Lucene.Net.Queries": "4.8.0",

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6bde1efb/src/Lucene.Net.Suggest/project.json
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Suggest/project.json b/src/Lucene.Net.Suggest/project.json
index 157ade9..c177678 100644
--- a/src/Lucene.Net.Suggest/project.json
+++ b/src/Lucene.Net.Suggest/project.json
@@ -14,7 +14,8 @@
   "buildOptions": {
     "compile": {
       "includeFiles": [ "../CommonAssemblyInfo.cs" ]
-    }
+    },
+    "nowarn": [ "1591", "1573" ]
   },
   "dependencies": {
     "Lucene.Net.Analysis.Common": "4.8.0",

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6bde1efb/src/Lucene.Net/project.json
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/project.json b/src/Lucene.Net/project.json
index 1020317..9a38729 100644
--- a/src/Lucene.Net/project.json
+++ b/src/Lucene.Net/project.json
@@ -14,7 +14,8 @@
   "buildOptions": {
     "compile": {
       "includeFiles": [ "../CommonAssemblyInfo.cs" ]
-    }
+    },
+    "nowarn": [ "1591", "1573" ]
   },
   "frameworks": {
     "netstandard1.5": {


[29/48] lucenenet git commit: Lucene.Net.Support: Fixed XML documentation comment warnings (except types that are not yet documented).

Posted by ni...@apache.org.
Lucene.Net.Support: Fixed XML documentation comment warnings (except types that are not yet documented).


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/7303348a
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/7303348a
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/7303348a

Branch: refs/heads/master
Commit: 7303348a1d4b915e91ee02a1d28ec63100c61e72
Parents: 268e78d
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Sun Jun 4 23:40:19 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Mon Jun 5 06:16:25 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |  7 +++---
 src/Lucene.Net/Support/Arrays.cs                |  2 +-
 src/Lucene.Net/Support/BitArrayExtensions.cs    |  2 +-
 src/Lucene.Net/Support/Character.cs             |  6 ++---
 src/Lucene.Net/Support/EquatableList.cs         | 13 +++++-----
 src/Lucene.Net/Support/EquatableSet.cs          | 15 +++++------
 src/Lucene.Net/Support/IO/ByteBuffer.cs         | 26 ++++++++++----------
 src/Lucene.Net/Support/IO/DataInputStream.cs    |  2 +-
 src/Lucene.Net/Support/IO/DataOutputStream.cs   |  2 +-
 src/Lucene.Net/Support/IO/FileSupport.cs        |  2 +-
 .../Support/IndexWriterConfigExtensions.cs      |  2 +-
 src/Lucene.Net/Support/ListExtensions.cs        |  3 ++-
 src/Lucene.Net/Support/PriorityQueue.cs         | 10 +++-----
 src/Lucene.Net/Support/StringExtensions.cs      |  2 +-
 14 files changed, 47 insertions(+), 47 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7303348a/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 9fb8f3c..0f04d03 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -52,10 +52,9 @@ helpers to help with that, see for examples see our [Java style methods to avoid
 
 1. Lucene.Net.Core (project)
    1. Codecs (namespace)
-   2. Support (namespace)
-   3. Util.Automaton (namespace)
-   4. Util.Mutable (namespace)
-   5. Util.Packed (namespace)
+   2. Util.Automaton (namespace)
+   3. Util.Mutable (namespace)
+   4. Util.Packed (namespace)
 2. Lucene.Net.Codecs (project)
 
 See [Documenting Lucene.Net](https://cwiki.apache.org/confluence/display/LUCENENET/Documenting+Lucene.Net) for instructions. 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7303348a/src/Lucene.Net/Support/Arrays.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/Arrays.cs b/src/Lucene.Net/Support/Arrays.cs
index 01f179a..1d7a6c7 100644
--- a/src/Lucene.Net/Support/Arrays.cs
+++ b/src/Lucene.Net/Support/Arrays.cs
@@ -107,7 +107,7 @@ namespace Lucene.Net.Support
         /// the same order. Also, two array references are considered equal if
         /// both are null.
         /// <para/>
-        /// Note that if the type of <paramref name="T"/> is a <see cref="IDictionary{TKey, TValue}"/>,
+        /// Note that if the type of <typeparam name="T"/> is a <see cref="IDictionary{TKey, TValue}"/>,
         /// <see cref="IList{T}"/>, or <see cref="ISet{T}"/>, its values and any nested collection values
         /// will be compared for equality as well.
         /// </returns>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7303348a/src/Lucene.Net/Support/BitArrayExtensions.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/BitArrayExtensions.cs b/src/Lucene.Net/Support/BitArrayExtensions.cs
index 70bde6b..21e322d 100644
--- a/src/Lucene.Net/Support/BitArrayExtensions.cs
+++ b/src/Lucene.Net/Support/BitArrayExtensions.cs
@@ -169,7 +169,7 @@ namespace Lucene.Net.Support
         }
 
         /// <summary>
-        /// Sets the bit at the given <paramref name="index"/> to true.
+        /// Sets the bit at the given index range to true.
         /// </summary>
         /// <param name="bits">The BitArray object.</param>
         /// <param name="fromIndex">The start of the range to set(inclusive)</param>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7303348a/src/Lucene.Net/Support/Character.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/Character.cs b/src/Lucene.Net/Support/Character.cs
index 03192f4..81fdffb 100644
--- a/src/Lucene.Net/Support/Character.cs
+++ b/src/Lucene.Net/Support/Character.cs
@@ -378,7 +378,7 @@ namespace Lucene.Net.Support
         }
 
         /// <summary>
-        /// LUCENENET safe way to get unicode category. The .NET <seealso cref="char.ConvertFromUtf32(int)"/>
+        /// LUCENENET safe way to get unicode category. The .NET <see cref="char.ConvertFromUtf32(int)"/>
         /// method should be used first to be safe for surrogate pairs. However, if the value falls between
         /// 0x00d800 and 0x00dfff, that method throws an exception. So this is a wrapper that converts the
         /// codepoint to a char in those cases.
@@ -386,8 +386,8 @@ namespace Lucene.Net.Support
         /// This mimics the behavior of the Java Character.GetType class, but returns the .NET UnicodeCategory
         /// enumeration for easy consumption.
         /// </summary>
-        /// <param name="c"></param>
-        /// <returns></returns>
+        /// <param name="codePoint"></param>
+        /// <returns> A <see cref="UnicodeCategory"/> representing the <paramref name="codePoint"/>. </returns>
         public static UnicodeCategory GetType(int codePoint)
         {
             if ((codePoint >= 0x00d800) && (codePoint <= 0x00dfff))

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7303348a/src/Lucene.Net/Support/EquatableList.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/EquatableList.cs b/src/Lucene.Net/Support/EquatableList.cs
index 422b61a..7dcc15d 100644
--- a/src/Lucene.Net/Support/EquatableList.cs
+++ b/src/Lucene.Net/Support/EquatableList.cs
@@ -58,7 +58,7 @@ namespace Lucene.Net.Support
         /// <see cref="EquatableList{T}.EquatableList(IEnumerable{T})"/> overload). 
         /// <para/>
         /// The internal <paramref name="collection"/> is used for
-        /// all operations except for <see cref="Equals()"/>, <see cref="GetHashCode()"/>,
+        /// all operations except for <see cref="Equals(object)"/>, <see cref="GetHashCode()"/>,
         /// and <see cref="ToString()"/>, which are all based on deep analysis
         /// of this collection and any nested collections.
         /// </summary>
@@ -85,7 +85,8 @@ namespace Lucene.Net.Support
         /// Initializes a new 
         /// instance of the <see cref="EquatableList{T}"/>
         /// class that contains elements copied from the specified collection and has
-        /// sufficient capacity to accommodate the number of elements copied. 
+        /// sufficient capacity to accommodate the number of elements copied.
+        /// </summary>
         /// <param name="collection">The collection whose elements are copied to the new list.</param>
         public EquatableList(IEnumerable<T> collection)
         {
@@ -338,17 +339,17 @@ namespace Lucene.Net.Support
         /// Therefore, <see cref="EquatableList{T}"/> can equal any <see cref="IList{T}"/>
         /// with the exact same values in the same order.
         /// </summary>
-        /// <param name="obj">The other object
+        /// <param name="other">The other object
         /// to compare against.</param>
         /// <returns><c>true</c> if the sequence in <paramref name="other"/>
         /// is the same as this one.</returns>
-        public override bool Equals(object obj)
+        public override bool Equals(object other)
         {
-            if (!(obj is IList<T>))
+            if (!(other is IList<T>))
             {
                 return false;
             }
-            return this.Equals(obj as IList<T>);
+            return this.Equals(other as IList<T>);
         }
 
         /// <summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7303348a/src/Lucene.Net/Support/EquatableSet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/EquatableSet.cs b/src/Lucene.Net/Support/EquatableSet.cs
index a209fdd..d5efcb8 100644
--- a/src/Lucene.Net/Support/EquatableSet.cs
+++ b/src/Lucene.Net/Support/EquatableSet.cs
@@ -58,7 +58,7 @@ namespace Lucene.Net.Support
         /// <see cref="EquatableSet{T}.EquatableSet(ICollection{T})"/> overload). 
         /// <para/>
         /// The internal <paramref name="collection"/> is used for
-        /// all operations except for <see cref="Equals()"/>, <see cref="GetHashCode()"/>,
+        /// all operations except for <see cref="Equals(object)"/>, <see cref="GetHashCode()"/>,
         /// and <see cref="ToString()"/>, which are all based on deep analysis
         /// of this collection and any nested collections.
         /// </summary>
@@ -85,7 +85,8 @@ namespace Lucene.Net.Support
         /// Initializes a new 
         /// instance of the <see cref="EquatableSet{T}"/>
         /// class that contains elements copied from the specified collection and has
-        /// sufficient capacity to accommodate the number of elements copied. 
+        /// sufficient capacity to accommodate the number of elements copied.
+        /// </summary>
         /// <param name="collection">The collection whose elements are copied to the new set.</param>
         public EquatableSet(ICollection<T> collection)
         {
@@ -380,7 +381,7 @@ namespace Lucene.Net.Support
         /// Therefore, <see cref="EquatableSet{T}"/> can equal any <see cref="ISet{T}"/>
         /// with the exact same values (in any order).
         /// </summary>
-        /// <param name="obj">The other object
+        /// <param name="other">The other object
         /// to compare against.</param>
         /// <returns><c>true</c> if the sequence in <paramref name="other"/>
         /// is the same as this one.</returns>
@@ -417,17 +418,17 @@ namespace Lucene.Net.Support
         /// Therefore, <see cref="EquatableSet{T}"/> can equal any <see cref="ISet{T}"/>
         /// with the exact same values (in any order).
         /// </summary>
-        /// <param name="obj">The other object
+        /// <param name="other">The other object
         /// to compare against.</param>
         /// <returns><c>true</c> if the sequence in <paramref name="other"/>
         /// is the same as this one.</returns>
-        public override bool Equals(object obj)
+        public override bool Equals(object other)
         {
-            if (!(obj is ISet<T>))
+            if (!(other is ISet<T>))
             {
                 return false;
             }
-            return Equals(obj as ISet<T>);
+            return Equals(other as ISet<T>);
         }
 
         /// <summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7303348a/src/Lucene.Net/Support/IO/ByteBuffer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/IO/ByteBuffer.cs b/src/Lucene.Net/Support/IO/ByteBuffer.cs
index 709d1aa..f2056f2 100644
--- a/src/Lucene.Net/Support/IO/ByteBuffer.cs
+++ b/src/Lucene.Net/Support/IO/ByteBuffer.cs
@@ -194,7 +194,7 @@ namespace Lucene.Net.Support.IO
         /// <para/>
         /// The remaining bytes will be moved to the head of the
         /// buffer, starting from position zero. Then the position is set to
-        /// <see cref="Remaining"/>; the limit is set to capacity; the mark is
+        /// <see cref="Buffer.Remaining"/>; the limit is set to capacity; the mark is
         /// cleared.
         /// </summary>
         /// <returns>this buffer.</returns>
@@ -303,7 +303,7 @@ namespace Lucene.Net.Support.IO
         /// </summary>
         /// <param name="dest">the destination byte array.</param>
         /// <returns>this buffer.</returns>
-        /// <exception cref="BufferUnderflowException">if <c>dest.Length</c> is greater than <see cref="Remaining"/>.</exception>
+        /// <exception cref="BufferUnderflowException">if <c>dest.Length</c> is greater than <see cref="Buffer.Remaining"/>.</exception>
         public virtual ByteBuffer Get(byte[] dest)
         {
             return Get(dest, 0, dest.Length);
@@ -324,7 +324,7 @@ namespace Lucene.Net.Support.IO
         /// </param>
         /// <returns>this buffer.</returns>
         /// <exception cref="IndexOutOfRangeException">if either <paramref name="off"/> or <paramref name="len"/> is invalid.</exception>
-        /// <exception cref="BufferUnderflowException">if <paramref name="len"/> is greater than <see cref="Remaining"/>.</exception>
+        /// <exception cref="BufferUnderflowException">if <paramref name="len"/> is greater than <see cref="Buffer.Remaining"/>.</exception>
         public virtual ByteBuffer Get(byte[] dest, int off, int len)
         {
             int length = dest.Length;
@@ -602,7 +602,7 @@ namespace Lucene.Net.Support.IO
         /// </summary>
         /// <param name="src">the source byte array.</param>
         /// <returns>this buffer.</returns>
-        /// <exception cref="BufferOverflowException">if <see cref="Remaining"/> is less than <c>src.Length</c>.</exception>
+        /// <exception cref="BufferOverflowException">if <see cref="Buffer.Remaining"/> is less than <c>src.Length</c>.</exception>
         /// <exception cref="ReadOnlyBufferException">if no changes may be made to the contents of this buffer.</exception>
         public ByteBuffer Put(byte[] src)
         {
@@ -624,7 +624,7 @@ namespace Lucene.Net.Support.IO
         /// greater than <c>src.Length - off</c>.
         /// </param>
         /// <returns>this buffer.</returns>
-        /// <exception cref="BufferOverflowException">if <see cref="Remaining"/> is less than <paramref name="len"/>.</exception>
+        /// <exception cref="BufferOverflowException">if <see cref="Buffer.Remaining"/> is less than <paramref name="len"/>.</exception>
         /// <exception cref="IndexOutOfRangeException">if either <paramref name="off"/> or <paramref name="len"/> is invalid.</exception>
         /// <exception cref="ReadOnlyBufferException">if no changes may be made to the contents of this buffer.</exception>
         public virtual ByteBuffer Put(byte[] src, int off, int len)
@@ -653,7 +653,7 @@ namespace Lucene.Net.Support.IO
         /// </summary>
         /// <param name="src">the source byte buffer.</param>
         /// <returns>this buffer.</returns>
-        /// <exception cref="BufferOverflowException">if <c>src.Remaining</c> is greater than this buffer's <see cref="Remaining"/>.</exception>
+        /// <exception cref="BufferOverflowException">if <c>src.Remaining</c> is greater than this buffer's <see cref="Buffer.Remaining"/>.</exception>
         /// <exception cref="ArgumentException">if <paramref name="src"/> is this buffer.</exception>
         /// <exception cref="ReadOnlyBufferException">if no changes may be made to the contents of this buffer.</exception>
         public virtual ByteBuffer Put(ByteBuffer src)
@@ -704,7 +704,7 @@ namespace Lucene.Net.Support.IO
         /// <param name="index">the index, must not be negative and equal or less than <c>limit - 2</c>.</param>
         /// <param name="value">the <see cref="char"/> to write.</param>
         /// <returns>this buffer.</returns>
-        /// <exception cref="IndexOutOfRangeException">if <paramref name=""index/> is invalid.</exception>
+        /// <exception cref="IndexOutOfRangeException">if <paramref name="index"/> is invalid.</exception>
         /// <exception cref="ReadOnlyBufferException">if no changes may be made to the contents of this buffer.</exception>
         public abstract ByteBuffer PutChar(int index, char value);
 
@@ -729,7 +729,7 @@ namespace Lucene.Net.Support.IO
         /// <param name="index">the index, must not be negative and equal or less than <c>limit - 8</c>.</param>
         /// <param name="value">the <see cref="double"/> to write.</param>
         /// <returns></returns>
-        /// <exception cref="IndexOutOfRangeException">if <paramref name=""index/> is invalid.</exception>
+        /// <exception cref="IndexOutOfRangeException">if <paramref name="index"/> is invalid.</exception>
         /// <exception cref="ReadOnlyBufferException">if no changes may be made to the contents of this buffer.</exception>
         public abstract ByteBuffer PutDouble(int index, double value);
 
@@ -758,7 +758,7 @@ namespace Lucene.Net.Support.IO
         /// <param name="index">the index, must not be negative and equal or less than <c>limit - 4</c>.</param>
         /// <param name="value">the <see cref="float"/> to write.</param>
         /// <returns></returns>
-        /// <exception cref="IndexOutOfRangeException">if <paramref name=""index/> is invalid.</exception>
+        /// <exception cref="IndexOutOfRangeException">if <paramref name="index"/> is invalid.</exception>
         /// <exception cref="ReadOnlyBufferException">if no changes may be made to the contents of this buffer.</exception>
         public abstract ByteBuffer PutSingle(int index, float value);
 
@@ -787,7 +787,7 @@ namespace Lucene.Net.Support.IO
         /// <param name="index">the index, must not be negative and equal or less than <c>limit - 4</c>.</param>
         /// <param name="value">the <see cref="int"/> to write.</param>
         /// <returns>this buffer.</returns>
-        /// <exception cref="IndexOutOfRangeException">if <paramref name=""index/> is invalid.</exception>
+        /// <exception cref="IndexOutOfRangeException">if <paramref name="index"/> is invalid.</exception>
         /// <exception cref="ReadOnlyBufferException">if no changes may be made to the contents of this buffer.</exception>
         public abstract ByteBuffer PutInt32(int index, int value);
 
@@ -816,7 +816,7 @@ namespace Lucene.Net.Support.IO
         /// <param name="index">the index, must not be negative and equal or less than <c>limit - 8</c>.</param>
         /// <param name="value">the <see cref="long"/> to write.</param>
         /// <returns>this buffer.</returns>
-        /// <exception cref="IndexOutOfRangeException">if <paramref name=""index/> is invalid.</exception>
+        /// <exception cref="IndexOutOfRangeException">if <paramref name="index"/> is invalid.</exception>
         /// <exception cref="ReadOnlyBufferException">if no changes may be made to the contents of this buffer.</exception>
         public abstract ByteBuffer PutInt64(int index, long value);
 
@@ -845,7 +845,7 @@ namespace Lucene.Net.Support.IO
         /// <param name="index">the index, must not be negative and equal or less than <c>limit - 2</c>.</param>
         /// <param name="value">the <see cref="short"/> to write.</param>
         /// <returns>this buffer.</returns>
-        /// <exception cref="IndexOutOfRangeException">if <paramref name=""index/> is invalid.</exception>
+        /// <exception cref="IndexOutOfRangeException">if <paramref name="index"/> is invalid.</exception>
         /// <exception cref="ReadOnlyBufferException">if no changes may be made to the contents of this buffer.</exception>
         public abstract ByteBuffer PutInt16(int index, short value);
 
@@ -853,7 +853,7 @@ namespace Lucene.Net.Support.IO
         /// Returns a sliced buffer that shares its content with this buffer.
         /// <para/>
         /// The sliced buffer's capacity will be this buffer's
-        /// <see cref="Remaining"/>, and it's zero position will correspond to
+        /// <see cref="Buffer.Remaining"/>, and it's zero position will correspond to
         /// this buffer's current position. The new buffer's position will be 0,
         /// limit will be its capacity, and its mark is cleared. The new buffer's
         /// read-only property and byte order are the same as this buffer's.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7303348a/src/Lucene.Net/Support/IO/DataInputStream.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/IO/DataInputStream.cs b/src/Lucene.Net/Support/IO/DataInputStream.cs
index 8054746..3a7dbdf 100644
--- a/src/Lucene.Net/Support/IO/DataInputStream.cs
+++ b/src/Lucene.Net/Support/IO/DataInputStream.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Support.IO
     /// This is a port of DataInputStream that is fully compatible with Java's DataOutputStream.
     /// <para>
     /// Usage Note: Always favor BinaryReader over DataInputStream unless you specifically need
-    /// the modified UTF-8 format and/or the <see cref="ReadUTF(IDataInput)"/> method.
+    /// the modified UTF-8 format and/or the <see cref="ReadUTF()"/> or <see cref="DecodeUTF(int)"/> method.
     /// </para>
     /// </summary>
     public class DataInputStream : IDataInput, IDisposable

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7303348a/src/Lucene.Net/Support/IO/DataOutputStream.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/IO/DataOutputStream.cs b/src/Lucene.Net/Support/IO/DataOutputStream.cs
index 13515da..3680347 100644
--- a/src/Lucene.Net/Support/IO/DataOutputStream.cs
+++ b/src/Lucene.Net/Support/IO/DataOutputStream.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Support.IO
     /// This is a port of DataOutputStream that is fully compatible with Java's DataInputStream.
     /// <para>
     /// Usage Note: Always favor BinaryWriter over DataOutputStream unless you specifically need
-    /// the modified UTF-8 format and/or the <see cref="WriteUTF(IDataOutput)"/> method.
+    /// the modified UTF-8 format and/or the <see cref="WriteUTF(string)"/> method.
     /// </para>
     /// </summary>
     public class DataOutputStream : IDataOutput, IDisposable

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7303348a/src/Lucene.Net/Support/IO/FileSupport.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/IO/FileSupport.cs b/src/Lucene.Net/Support/IO/FileSupport.cs
index 90f470a..4724b26 100644
--- a/src/Lucene.Net/Support/IO/FileSupport.cs
+++ b/src/Lucene.Net/Support/IO/FileSupport.cs
@@ -208,7 +208,7 @@ namespace Lucene.Net.Support.IO
 
         /// <summary>
         /// Generates a new random file name with the provided <paramref name="directory"/>, 
-        /// <paramref name="prefix"/> and optional <see cref="suffix"/>.
+        /// <paramref name="prefix"/> and optional <paramref name="suffix"/>.
         /// </summary>
         /// <param name="prefix">The prefix string to be used in generating the file's name</param>
         /// <param name="suffix">The suffix string to be used in generating the file's name; may be null, in which case a random suffix will be generated</param>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7303348a/src/Lucene.Net/Support/IndexWriterConfigExtensions.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/IndexWriterConfigExtensions.cs b/src/Lucene.Net/Support/IndexWriterConfigExtensions.cs
index 0311eef..522cd8c 100644
--- a/src/Lucene.Net/Support/IndexWriterConfigExtensions.cs
+++ b/src/Lucene.Net/Support/IndexWriterConfigExtensions.cs
@@ -91,7 +91,7 @@ namespace Lucene.Net.Support
         }
 
         /// <summary>
-        /// Builder method for <see cref="LiveLiveIndexWriterConfig.ReaderTermsIndexDivisor"/>.
+        /// Builder method for <see cref="LiveIndexWriterConfig.ReaderTermsIndexDivisor"/>.
         /// </summary>
         /// <param name="config">this <see cref="LiveIndexWriterConfig"/> instance</param>
         /// <param name="divisor"></param>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7303348a/src/Lucene.Net/Support/ListExtensions.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/ListExtensions.cs b/src/Lucene.Net/Support/ListExtensions.cs
index 647f386..86fdddf 100644
--- a/src/Lucene.Net/Support/ListExtensions.cs
+++ b/src/Lucene.Net/Support/ListExtensions.cs
@@ -59,7 +59,7 @@ namespace Lucene.Net.Support
 
         /// <summary>
         /// If the underlying type is <see cref="List{T}"/>,
-        /// calls <see cref="List{T}.Sort"/>. If not, 
+        /// calls <see cref="List{T}.Sort()"/>. If not, 
         /// uses <see cref="Util.CollectionUtil.TimSort{T}(IList{T})"/>
         /// </summary>
         /// <typeparam name="T"></typeparam>
@@ -135,6 +135,7 @@ namespace Lucene.Net.Support
             Util.CollectionUtil.TimSort(list, comparer);
         }
 
+        /// <summary>
         /// Sorts the given <see cref="IList{T}"/> using the <see cref="IComparer{T}"/>.
         /// This method uses the intro sort
         /// algorithm, but falls back to insertion sort for small lists. 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7303348a/src/Lucene.Net/Support/PriorityQueue.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/PriorityQueue.cs b/src/Lucene.Net/Support/PriorityQueue.cs
index 89d56e6..86b97bc 100644
--- a/src/Lucene.Net/Support/PriorityQueue.cs
+++ b/src/Lucene.Net/Support/PriorityQueue.cs
@@ -187,9 +187,8 @@ namespace Lucene.Net.Support
         /// cannot be compared to one another according to <paramref name="collection"/>'s
         /// ordering
         /// </exception>
-        /// <see cref="ArgumentNullException">if the specified collection or any
-        /// of its elements are null</see>
-        /// </summary>
+        /// <exception cref="ArgumentNullException">if the specified collection or any
+        /// of its elements are null</exception>
         public PriorityQueue(SortedSet<T> collection)
         {
             if (collection == null)
@@ -215,9 +214,8 @@ namespace Lucene.Net.Support
         /// cannot be compared to one another according to <paramref name="collection"/>'s
         /// ordering
         /// </exception>
-        /// <see cref="ArgumentNullException">if the specified collection or any
-        /// of its elements are null</see>
-        /// </summary>
+        /// <exception cref="ArgumentNullException">if the specified collection or any
+        /// of its elements are null</exception>
         public PriorityQueue(TreeSet<T> collection)
         {
             if (collection == null)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7303348a/src/Lucene.Net/Support/StringExtensions.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Support/StringExtensions.cs b/src/Lucene.Net/Support/StringExtensions.cs
index e8513f9..3828a72 100644
--- a/src/Lucene.Net/Support/StringExtensions.cs
+++ b/src/Lucene.Net/Support/StringExtensions.cs
@@ -41,7 +41,7 @@ namespace Lucene.Net.Support
         /// </list>
         /// This method is a convenience to replace the .NET CompareTo method 
         /// on all strings, provided the logic does not expect specific values
-        /// but is simply comparing them with <code>></code> or <code><</code>.
+        /// but is simply comparing them with <c>&gt;</c> or <c>&lt;</c>.
         /// </summary>
         /// <param name="str"></param>
         /// <param name="value">The string to compare with.</param>


[11/48] lucenenet git commit: Lucene.Net.Search: Fixed up documentation comments

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/DisjunctionMaxQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/DisjunctionMaxQuery.cs b/src/Lucene.Net/Search/DisjunctionMaxQuery.cs
index ccc2731..dab4dea 100644
--- a/src/Lucene.Net/Search/DisjunctionMaxQuery.cs
+++ b/src/Lucene.Net/Search/DisjunctionMaxQuery.cs
@@ -34,11 +34,13 @@ namespace Lucene.Net.Search
     /// score for that document as produced by any subquery, plus a tie breaking increment for any additional matching subqueries.
     /// this is useful when searching for a word in multiple fields with different boost factors (so that the fields cannot be
     /// combined equivalently into a single search field).  We want the primary score to be the one associated with the highest boost,
-    /// not the sum of the field scores (as BooleanQuery would give).
+    /// not the sum of the field scores (as <see cref="BooleanQuery"/> would give).
+    /// <para/>
     /// If the query is "albino elephant" this ensures that "albino" matching one field and "elephant" matching
     /// another gets a higher score than "albino" matching both fields.
-    /// To get this result, use both BooleanQuery and DisjunctionMaxQuery:  for each term a DisjunctionMaxQuery searches for it in
-    /// each field, while the set of these DisjunctionMaxQuery's is combined into a BooleanQuery.
+    /// <para/>
+    /// To get this result, use both <see cref="BooleanQuery"/> and <see cref="DisjunctionMaxQuery"/>:  for each term a <see cref="DisjunctionMaxQuery"/> searches for it in
+    /// each field, while the set of these <see cref="DisjunctionMaxQuery"/>'s is combined into a <see cref="BooleanQuery"/>.
     /// The tie breaker capability allows results that include the same term in multiple fields to be judged better than results that
     /// include this term in only the best of those multiple fields, without confusing this with the better case of two different terms
     /// in the multiple fields.
@@ -48,27 +50,31 @@ namespace Lucene.Net.Search
 #endif
     public class DisjunctionMaxQuery : Query, IEnumerable<Query>
     {
-        /* The subqueries */
+        /// <summary>
+        /// The subqueries
+        /// </summary>
         private EquatableList<Query> disjuncts = new EquatableList<Query>();
 
-        /* Multiple of the non-max disjunct scores added into our final score.  Non-zero values support tie-breaking. */
+        /// <summary>
+        /// Multiple of the non-max disjunct scores added into our final score.  Non-zero values support tie-breaking.
+        /// </summary>
         private float tieBreakerMultiplier = 0.0f;
 
         /// <summary>
-        /// Creates a new empty DisjunctionMaxQuery.  Use add() to add the subqueries. </summary>
-        /// <param name="tieBreakerMultiplier"> the score of each non-maximum disjunct for a document is multiplied by this weight
+        /// Creates a new empty <see cref="DisjunctionMaxQuery"/>.  Use <see cref="Add(Query)"/> to add the subqueries. </summary>
+        /// <param name="tieBreakerMultiplier"> The score of each non-maximum disjunct for a document is multiplied by this weight
         ///        and added into the final score.  If non-zero, the value should be small, on the order of 0.1, which says that
         ///        10 occurrences of word in a lower-scored field that is also in a higher scored field is just as good as a unique
-        ///        word in the lower scored field (i.e., one that is not in any higher scored field. </param>
+        ///        word in the lower scored field (i.e., one that is not in any higher scored field). </param>
         public DisjunctionMaxQuery(float tieBreakerMultiplier)
         {
             this.tieBreakerMultiplier = tieBreakerMultiplier;
         }
 
         /// <summary>
-        /// Creates a new DisjunctionMaxQuery </summary>
-        /// <param name="disjuncts"> a {@code Collection<Query>} of all the disjuncts to add </param>
-        /// <param name="tieBreakerMultiplier">   the weight to give to each matching non-maximum disjunct </param>
+        /// Creates a new <see cref="DisjunctionMaxQuery"/> </summary>
+        /// <param name="disjuncts"> A <see cref="T:ICollection{Query}"/> of all the disjuncts to add </param>
+        /// <param name="tieBreakerMultiplier"> The weight to give to each matching non-maximum disjunct </param>
         public DisjunctionMaxQuery(ICollection<Query> disjuncts, float tieBreakerMultiplier)
         {
             this.tieBreakerMultiplier = tieBreakerMultiplier;
@@ -77,7 +83,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Add a subquery to this disjunction </summary>
-        /// <param name="query"> the disjunct added </param>
+        /// <param name="query"> The disjunct added </param>
         public virtual void Add(Query query)
         {
             disjuncts.Add(query);
@@ -85,14 +91,14 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Add a collection of disjuncts to this disjunction
-        /// via {@code Iterable<Query>} </summary>
-        /// <param name="disjuncts"> a collection of queries to add as disjuncts. </param>
-        public virtual void Add(ICollection<Query> disjuncts)
+        /// via <see cref="T:ICollection{Query}"/> </summary>
+        /// <param name="disjuncts"> A collection of queries to add as disjuncts. </param>
+        public virtual void Add(ICollection<Query> disjuncts) // LUCENENET TODO: API: change back to IEnumerable<Query>. Rename AddRange?
         {
             this.disjuncts.AddRange(disjuncts);
         }
 
-        /// <returns> An {@code Iterator<Query>} over the disjuncts </returns>
+        /// <returns> An <see cref="T:IEnumerator{Query}"/> over the disjuncts </returns>
         public virtual IEnumerator<Query> GetEnumerator()
         {
             return disjuncts.GetEnumerator();
@@ -103,7 +109,7 @@ namespace Lucene.Net.Search
             return GetEnumerator();
         }
 
-        /// <returns> the disjuncts. </returns>
+        /// <returns> The disjuncts. </returns>
         public virtual IList<Query> Disjuncts
         {
             get
@@ -112,7 +118,7 @@ namespace Lucene.Net.Search
             }
         }
 
-        /// <returns> tie breaker value for multiple matches. </returns>
+        /// <returns> Tie breaker value for multiple matches. </returns>
         public virtual float TieBreakerMultiplier
         {
             get
@@ -125,8 +131,8 @@ namespace Lucene.Net.Search
         /// Expert: the Weight for DisjunctionMaxQuery, used to
         /// normalize, score and explain these queries.
         ///
-        /// <p>NOTE: this API and implementation is subject to
-        /// change suddenly in the next release.</p>
+        /// <para>NOTE: this API and implementation is subject to
+        /// change suddenly in the next release.</para>
         /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
@@ -136,11 +142,11 @@ namespace Lucene.Net.Search
             private readonly DisjunctionMaxQuery outerInstance;
 
             /// <summary>
-            /// The Weights for our subqueries, in 1-1 correspondence with disjuncts </summary>
+            /// The <see cref="Weight"/>s for our subqueries, in 1-1 correspondence with disjuncts </summary>
             protected List<Weight> m_weights = new List<Weight>(); // The Weight's for our subqueries, in 1-1 correspondence with disjuncts
 
             /// <summary>
-            /// Construct the Weight for this Query searched by searcher.  Recursively construct subquery weights. </summary>
+            /// Construct the <see cref="Weight"/> for this <see cref="Search.Query"/> searched by <paramref name="searcher"/>.  Recursively construct subquery weights. </summary>
             public DisjunctionMaxWeight(DisjunctionMaxQuery outerInstance, IndexSearcher searcher)
             {
                 this.outerInstance = outerInstance;
@@ -151,17 +157,17 @@ namespace Lucene.Net.Search
             }
 
             /// <summary>
-            /// Return our associated DisjunctionMaxQuery </summary>
+            /// Return our associated <see cref="DisjunctionMaxQuery"/> </summary>
             public override Query Query
             {
                 get
-                /// <summary>
-                /// Compute the sub of squared weights of us applied to our subqueries.  Used for normalization. </summary>
                 {
                     return outerInstance;
                 }
             }
 
+            /// <summary>
+            /// Compute the sub of squared weights of us applied to our subqueries.  Used for normalization. </summary>
             public override float GetValueForNormalization()
             {
                 float max = 0.0f, sum = 0.0f;
@@ -187,7 +193,7 @@ namespace Lucene.Net.Search
             }
 
             /// <summary>
-            /// Create the scorer used to score our associated DisjunctionMaxQuery </summary>
+            /// Create the scorer used to score our associated <see cref="DisjunctionMaxQuery"/> </summary>
             public override Scorer GetScorer(AtomicReaderContext context, IBits acceptDocs)
             {
                 IList<Scorer> scorers = new List<Scorer>();
@@ -237,7 +243,7 @@ namespace Lucene.Net.Search
         } // end of DisjunctionMaxWeight inner class
 
         /// <summary>
-        /// Create the Weight used to score us </summary>
+        /// Create the <see cref="Weight"/> used to score us </summary>
         public override Weight CreateWeight(IndexSearcher searcher)
         {
             return new DisjunctionMaxWeight(this, searcher);
@@ -245,8 +251,8 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Optimize our representation and our subqueries representations </summary>
-        /// <param name="reader"> the IndexReader we query </param>
-        /// <returns> an optimized copy of us (which may not be a copy if there is nothing to optimize)  </returns>
+        /// <param name="reader"> The <see cref="IndexReader"/> we query </param>
+        /// <returns> An optimized copy of us (which may not be a copy if there is nothing to optimize)  </returns>
         public override Query Rewrite(IndexReader reader)
         {
             int numDisjunctions = disjuncts.Count;
@@ -290,7 +296,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Create a shallow copy of us -- used in rewriting if necessary </summary>
-        /// <returns> a copy of us (but reuse, don't copy, our subqueries)  </returns>
+        /// <returns> A copy of us (but reuse, don't copy, our subqueries)  </returns>
         public override object Clone()
         {
             DisjunctionMaxQuery clone = (DisjunctionMaxQuery)base.Clone();
@@ -298,7 +304,11 @@ namespace Lucene.Net.Search
             return clone;
         }
 
-        // inherit javadoc
+        /// <summary>
+        /// Expert: adds all terms occurring in this query to the terms set. Only
+        /// works if this query is in its rewritten (<see cref="Rewrite(IndexReader)"/>) form.
+        /// </summary>
+        /// <exception cref="InvalidOperationException"> If this query is not yet rewritten </exception>
         public override void ExtractTerms(ISet<Term> terms)
         {
             foreach (Query query in disjuncts)
@@ -309,8 +319,8 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Prettyprint us. </summary>
-        /// <param name="field"> the field to which we are applied </param>
-        /// <returns> a string that shows what we do, of the form "(disjunct1 | disjunct2 | ... | disjunctn)^boost" </returns>
+        /// <param name="field"> The field to which we are applied </param>
+        /// <returns> A string that shows what we do, of the form "(disjunct1 | disjunct2 | ... | disjunctn)^boost" </returns>
         public override string ToString(string field)
         {
             StringBuilder buffer = new StringBuilder();
@@ -349,9 +359,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Return true iff we represent the same query as o </summary>
-        /// <param name="o"> another object </param>
-        /// <returns> true iff o is a DisjunctionMaxQuery with the same boost and the same subqueries, in the same order, as us </returns>
+        /// Return <c>true</c> if we represent the same query as <paramref name="o"/> </summary>
+        /// <param name="o"> Another object </param>
+        /// <returns> <c>true</c> if <paramref name="o"/> is a <see cref="DisjunctionMaxQuery"/> with the same boost and the same subqueries, in the same order, as us </returns>
         public override bool Equals(object o)
         {
             if (!(o is DisjunctionMaxQuery))

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/DisjunctionMaxScorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/DisjunctionMaxScorer.cs b/src/Lucene.Net/Search/DisjunctionMaxScorer.cs
index d915790..45bddea 100644
--- a/src/Lucene.Net/Search/DisjunctionMaxScorer.cs
+++ b/src/Lucene.Net/Search/DisjunctionMaxScorer.cs
@@ -20,9 +20,9 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// The Scorer for DisjunctionMaxQuery.  The union of all documents generated by the the subquery scorers
+    /// The <see cref="Scorer"/> for <see cref="DisjunctionMaxQuery"/>.  The union of all documents generated by the the subquery scorers
     /// is generated in document number order.  The score for each document is the maximum of the scores computed
-    /// by the subquery scorers that generate that document, plus tieBreakerMultiplier times the sum of the scores
+    /// by the subquery scorers that generate that document, plus <see cref="tieBreakerMultiplier"/> times the sum of the scores
     /// for the other subqueries that generate the document.
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -30,24 +30,24 @@ namespace Lucene.Net.Search
 #endif
     internal class DisjunctionMaxScorer : DisjunctionScorer
     {
-        /* Multiplier applied to non-maximum-scoring subqueries for a document as they are summed into the result. */
+        /// <summary>Multiplier applied to non-maximum-scoring subqueries for a document as they are summed into the result.</summary>
         private readonly float tieBreakerMultiplier;
         private int freq = -1;
 
-        /* Used when scoring currently matching doc. */
+        /// <summary>Used when scoring currently matching doc.</summary>
         private float scoreSum;
         private float scoreMax;
 
         /// <summary>
-        /// Creates a new instance of DisjunctionMaxScorer
+        /// Creates a new instance of <see cref="DisjunctionMaxScorer"/>
         /// </summary>
         /// <param name="weight">
-        ///          The Weight to be used. </param>
+        ///          The <see cref="Weight"/> to be used. </param>
         /// <param name="tieBreakerMultiplier">
         ///          Multiplier applied to non-maximum-scoring subqueries for a
         ///          document as they are summed into the result. </param>
         /// <param name="subScorers">
-        ///          The sub scorers this Scorer should iterate on </param>
+        ///          The sub scorers this <see cref="Scorer"/> should iterate on </param>
         public DisjunctionMaxScorer(Weight weight, float tieBreakerMultiplier, Scorer[] subScorers)
             : base(weight, subScorers)
         {
@@ -55,8 +55,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Determine the current document score.  Initially invalid, until <seealso cref="#nextDoc()"/> is called the first time. </summary>
-        /// <returns> the score of the current generated document </returns>
+        /// Determine the current document score.  Initially invalid, until <see cref="DocIdSetIterator.NextDoc()"/> is called the first time. </summary>
+        /// <returns> The score of the current generated document </returns>
         public override float GetScore()
         {
             return scoreMax + (scoreSum - scoreMax) * tieBreakerMultiplier;
@@ -74,7 +74,9 @@ namespace Lucene.Net.Search
             }
         }
 
-        // Recursively iterate all subScorers that generated last doc computing sum and max
+        /// <summary>
+        /// Recursively iterate all subScorers that generated last doc computing sum and max
+        /// </summary>
         private void ScoreAll(int root)
         {
             if (root < m_numScorers && m_subScorers[root].DocID == m_doc)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/DisjunctionScorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/DisjunctionScorer.cs b/src/Lucene.Net/Search/DisjunctionScorer.cs
index 528c977..d35e073 100644
--- a/src/Lucene.Net/Search/DisjunctionScorer.cs
+++ b/src/Lucene.Net/Search/DisjunctionScorer.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// Base class for Scorers that score disjunctions.
+    /// Base class for <see cref="Scorer"/>s that score disjunctions.
     /// Currently this just provides helper methods to manage the heap.
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -107,7 +107,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Remove the root Scorer from subScorers and re-establish it as a heap
+        /// Remove the root <see cref="Scorer"/> from subScorers and re-establish it as a heap
         /// </summary>
         protected void HeapRemoveRoot()
         {
@@ -201,13 +201,13 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Called after next() or advance() land on a new document.
-        /// <p>
-        /// {@code subScorers[0]} will be positioned to the new docid,
-        /// which could be {@code NO_MORE_DOCS} (subclass must handle this).
-        /// <p>
-        /// implementations should assign {@code doc} appropriately, and do any
-        /// other work necessary to implement {@code score()} and {@code freq()}
+        /// Called after <see cref="NextDoc()"/> or <see cref="Advance(int)"/> land on a new document.
+        /// <para/>
+        /// <c>subScorers[0]</c> will be positioned to the new docid,
+        /// which could be <c>NO_MORE_DOCS</c> (subclass must handle this).
+        /// <para/>
+        /// Implementations should assign <c>doc</c> appropriately, and do any
+        /// other work necessary to implement <see cref="Scorer.GetScore()"/> and <see cref="Index.DocsEnum.Freq"/>
         /// </summary>
         // TODO: make this less horrible
         protected abstract void AfterNext();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/DisjunctionSumScorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/DisjunctionSumScorer.cs b/src/Lucene.Net/Search/DisjunctionSumScorer.cs
index 372ae9c..e7ece8e 100644
--- a/src/Lucene.Net/Search/DisjunctionSumScorer.cs
+++ b/src/Lucene.Net/Search/DisjunctionSumScorer.cs
@@ -20,8 +20,8 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// A Scorer for OR like queries, counterpart of <code>ConjunctionScorer</code>.
-    /// this Scorer implements <seealso cref="Scorer#advance(int)"/> and uses advance() on the given Scorers.
+    /// A <see cref="Scorer"/> for OR like queries, counterpart of <see cref="ConjunctionScorer"/>.
+    /// This <see cref="Scorer"/> implements <see cref="DocIdSetIterator.Advance(int)"/> and uses Advance() on the given <see cref="Scorer"/>s.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -36,7 +36,7 @@ namespace Lucene.Net.Search
         private readonly float[] coord;
 
         /// <summary>
-        /// Construct a <code>DisjunctionScorer</code>. </summary>
+        /// Construct a <see cref="DisjunctionScorer"/>. </summary>
         /// <param name="weight"> The weight to be used. </param>
         /// <param name="subScorers"> Array of at least two subscorers. </param>
         /// <param name="coord"> Table of coordination factors </param>
@@ -80,7 +80,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Returns the score of the current document matching the query.
-        /// Initially invalid, until <seealso cref="#nextDoc()"/> is called the first time.
+        /// Initially invalid, until <see cref="DisjunctionScorer.NextDoc()"/> is called the first time.
         /// </summary>
         public override float GetScore()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/DocIdSet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/DocIdSet.cs b/src/Lucene.Net/Search/DocIdSet.cs
index 154ab0a..7528f20 100644
--- a/src/Lucene.Net/Search/DocIdSet.cs
+++ b/src/Lucene.Net/Search/DocIdSet.cs
@@ -22,8 +22,8 @@ namespace Lucene.Net.Search
     using IBits = Lucene.Net.Util.IBits;
 
     /// <summary>
-    /// A DocIdSet contains a set of doc ids. Implementing classes must
-    /// only implement <seealso cref="#iterator"/> to provide access to the set.
+    /// A <see cref="DocIdSet"/> contains a set of doc ids. Implementing classes must
+    /// only implement <see cref="GetIterator()"/> to provide access to the set.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -31,8 +31,8 @@ namespace Lucene.Net.Search
     public abstract class DocIdSet
     {
         /// <summary>
-        /// Provides a <seealso cref="DocIdSetIterator"/> to access the set.
-        /// this implementation can return <code>null</code> if there
+        /// Provides a <see cref="DocIdSetIterator"/> to access the set.
+        /// This implementation can return <c>null</c> if there
         /// are no docs that match.
         /// </summary>
         public abstract DocIdSetIterator GetIterator();
@@ -45,29 +45,29 @@ namespace Lucene.Net.Search
         // (down-low filtering using e.g. FixedBitSet)
 
         /// <summary>
-        /// Optionally provides a <seealso cref="GetBits"/> interface for random access
+        /// Optionally provides a <see cref="IBits"/> interface for random access
         /// to matching documents. </summary>
-        /// <returns> {@code null}, if this {@code DocIdSet} does not support random access.
-        /// In contrast to <see cref="GetIterator()"/>, a return value of {@code null}
+        /// <returns> <c>null</c>, if this <see cref="DocIdSet"/> does not support random access.
+        /// In contrast to <see cref="GetIterator()"/>, a return value of <c>null</c>
         /// <b>does not</b> imply that no documents match the filter!
         /// The default implementation does not provide random access, so you
-        /// only need to implement this method if your DocIdSet can
+        /// only need to implement this method if your <see cref="DocIdSet"/> can
         /// guarantee random access to every docid in O(1) time without
-        /// external disk access (as <seealso cref="GetBits"/> interface cannot throw
-        /// <seealso cref="IOException"/>). this is generally true for bit sets
-        /// like <seealso cref="Lucene.Net.Util.FixedBitSet"/>, which return
-        /// itself if they are used as {@code DocIdSet}. </returns>
+        /// external disk access (as <see cref="IBits"/> interface cannot throw
+        /// <see cref="System.IO.IOException"/>). This is generally true for bit sets
+        /// like <see cref="Lucene.Net.Util.FixedBitSet"/>, which return
+        /// itself if they are used as <see cref="DocIdSet"/>. </returns>
         public virtual IBits Bits // LUCENENET NOTE: This isn't a great candidate for a property, but it makes more sense to call this Bits than Bits(). GetBits() was already taken in the same context.
         {
             get { return null; }
         }
 
         /// <summary>
-        /// this method is a hint for <seealso cref="CachingWrapperFilter"/>, if this <code>DocIdSet</code>
+        /// This method is a hint for <see cref="CachingWrapperFilter"/>, if this <see cref="DocIdSet"/>
         /// should be cached without copying it. The default is to return
-        /// <code>false</code>. If you have an own <code>DocIdSet</code> implementation
+        /// <c>false</c>. If you have an own <see cref="DocIdSet"/> implementation
         /// that does its iteration very effective and fast without doing disk I/O,
-        /// override this method and return <code>true</code>.
+        /// override this property and return <c>true</c>.
         /// </summary>
         public virtual bool IsCacheable
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/DocIdSetIterator.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/DocIdSetIterator.cs b/src/Lucene.Net/Search/DocIdSetIterator.cs
index cca215c..ca91594 100644
--- a/src/Lucene.Net/Search/DocIdSetIterator.cs
+++ b/src/Lucene.Net/Search/DocIdSetIterator.cs
@@ -21,9 +21,9 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// this abstract class defines methods to iterate over a set of non-decreasing
+    /// This abstract class defines methods to iterate over a set of non-decreasing
     /// doc ids. Note that this class assumes it iterates on doc Ids, and therefore
-    /// <seealso cref="#NO_MORE_DOCS"/> is set to {@value #NO_MORE_DOCS} in order to be used as
+    /// <see cref="NO_MORE_DOCS"/> is set to <see cref="int.MaxValue"/> in order to be used as
     /// a sentinel object. Implementations of this class are expected to consider
     /// <see cref="int.MaxValue"/> as an invalid value.
     /// </summary>
@@ -33,7 +33,7 @@ namespace Lucene.Net.Search
     public abstract class DocIdSetIterator
     {
         /// <summary>
-        /// An empty {@code DocIdSetIterator} instance </summary>
+        /// An empty <see cref="DocIdSetIterator"/> instance </summary>
         public static DocIdSetIterator GetEmpty()
         {
             return new DocIdSetIteratorAnonymousInnerClassHelper();
@@ -74,20 +74,20 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// When returned by <seealso cref="#nextDoc()"/>, <seealso cref="#advance(int)"/> and
-        /// <seealso cref="#docID()"/> it means there are no more docs in the iterator.
+        /// When returned by <see cref="NextDoc()"/>, <see cref="Advance(int)"/> and
+        /// <see cref="DocID()"/> it means there are no more docs in the iterator.
         /// </summary>
         public const int NO_MORE_DOCS = int.MaxValue;
 
         /// <summary>
         /// Returns the following:
-        /// <ul>
-        /// <li>-1 or <seealso cref="#NO_MORE_DOCS"/> if <seealso cref="#nextDoc()"/> or
-        /// <seealso cref="#advance(int)"/> were not called yet.
-        /// <li><seealso cref="#NO_MORE_DOCS"/> if the iterator has exhausted.
-        /// <li>Otherwise it should return the doc ID it is currently on.
-        /// </ul>
-        /// <p>
+        /// <list type="bullet">
+        /// <item><description>-1 or <see cref="NO_MORE_DOCS"/> if <see cref="NextDoc()"/> or
+        /// <see cref="Advance(int)"/> were not called yet.</description></item>
+        /// <item><description><see cref="NO_MORE_DOCS"/> if the iterator has exhausted.</description></item>
+        /// <item><description>Otherwise it should return the doc ID it is currently on.</description></item>
+        /// </list>
+        /// <para/>
         ///
         /// @since 2.9
         /// </summary>
@@ -95,12 +95,12 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Advances to the next document in the set and returns the doc it is
-        /// currently on, or <seealso cref="#NO_MORE_DOCS"/> if there are no more docs in the
-        /// set.<br>
+        /// currently on, or <see cref="NO_MORE_DOCS"/> if there are no more docs in the
+        /// set.
         ///
-        /// <b>NOTE:</b> after the iterator has exhausted you should not call this
+        /// <para/><b>NOTE:</b> after the iterator has exhausted you should not call this
         /// method, as it may result in unpredicted behavior.
-        ///
+        /// <para/>
         /// @since 2.9
         /// </summary>
         public abstract int NextDoc();
@@ -108,39 +108,41 @@ namespace Lucene.Net.Search
         /// <summary>
         /// Advances to the first beyond the current whose document number is greater
         /// than or equal to <i>target</i>, and returns the document number itself.
-        /// Exhausts the iterator and returns <seealso cref="#NO_MORE_DOCS"/> if <i>target</i>
+        /// Exhausts the iterator and returns <see cref="NO_MORE_DOCS"/> if <i>target</i>
         /// is greater than the highest document number in the set.
-        /// <p>
+        /// <para/>
         /// The behavior of this method is <b>undefined</b> when called with
-        /// <code> target &lt;= current</code>, or after the iterator has exhausted.
+        /// <c> target &lt;= current</c>, or after the iterator has exhausted.
         /// Both cases may result in unpredicted behavior.
-        /// <p>
-        /// When <code> target &gt; current</code> it behaves as if written:
+        /// <para/>
+        /// When <c> target &gt; current</c> it behaves as if written:
         ///
-        /// <pre class="prettyprint">
-        /// int advance(int target) {
-        ///   int doc;
-        ///   while ((doc = nextDoc()) &lt; target) {
-        ///   }
-        ///   return doc;
+        /// <code>
+        /// int Advance(int target) 
+        /// {
+        ///     int doc;
+        ///     while ((doc = NextDoc()) &lt; target) 
+        ///     {
+        ///     }
+        ///     return doc;
         /// }
-        /// </pre>
+        /// </code>
         ///
         /// Some implementations are considerably more efficient than that.
-        /// <p>
-        /// <b>NOTE:</b> this method may be called with <seealso cref="#NO_MORE_DOCS"/> for
-        /// efficiency by some Scorers. If your implementation cannot efficiently
+        /// <para/>
+        /// <b>NOTE:</b> this method may be called with <see cref="NO_MORE_DOCS"/> for
+        /// efficiency by some <see cref="Scorer"/>s. If your implementation cannot efficiently
         /// determine that it should exhaust, it is recommended that you check for that
         /// value in each call to this method.
-        /// <p>
+        /// <para/>
         ///
         /// @since 2.9
         /// </summary>
         public abstract int Advance(int target);
 
         /// <summary>
-        /// Slow (linear) implementation of <seealso cref="#advance"/> relying on
-        ///  <seealso cref="#nextDoc()"/> to advance beyond the target position.
+        /// Slow (linear) implementation of <see cref="Advance(int)"/> relying on
+        /// <see cref="NextDoc()"/> to advance beyond the target position.
         /// </summary>
         protected internal int SlowAdvance(int target)
         {
@@ -154,9 +156,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns the estimated cost of this <seealso cref="DocIdSetIterator"/>.
-        /// <p>
-        /// this is generally an upper bound of the number of documents this iterator
+        /// Returns the estimated cost of this <see cref="DocIdSetIterator"/>.
+        /// <para/>
+        /// This is generally an upper bound of the number of documents this iterator
         /// might match, but may be a rough heuristic, hardcoded value, or otherwise
         /// completely inaccurate.
         /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/DocTermOrdsRangeFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/DocTermOrdsRangeFilter.cs b/src/Lucene.Net/Search/DocTermOrdsRangeFilter.cs
index 5d59a29..b2830d3 100644
--- a/src/Lucene.Net/Search/DocTermOrdsRangeFilter.cs
+++ b/src/Lucene.Net/Search/DocTermOrdsRangeFilter.cs
@@ -27,11 +27,11 @@ namespace Lucene.Net.Search
     using SortedSetDocValues = Lucene.Net.Index.SortedSetDocValues;
 
     /// <summary>
-    /// A range filter built on top of a cached multi-valued term field (in <seealso cref="IFieldCache"/>).
+    /// A range filter built on top of a cached multi-valued term field (in <see cref="IFieldCache"/>).
     ///
-    /// <p>Like <seealso cref="FieldCacheRangeFilter"/>, this is just a specialized range query versus
-    ///    using a TermRangeQuery with <seealso cref="DocTermOrdsRewriteMethod"/>: it will only do
-    ///    two ordinal to term lookups.</p>
+    /// <para>Like <see cref="FieldCacheRangeFilter"/>, this is just a specialized range query versus
+    ///    using a <see cref="TermRangeQuery"/> with <see cref="DocTermOrdsRewriteMethod"/>: it will only do
+    ///    two ordinal to term lookups.</para>
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -54,13 +54,13 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// this method is implemented for each data type </summary>
+        /// This method is implemented for each data type </summary>
         public override abstract DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs);
 
         /// <summary>
-        /// Creates a BytesRef range filter using <seealso cref="IFieldCache#getTermsIndex"/>. this works with all
+        /// Creates a <see cref="BytesRef"/> range filter using <see cref="IFieldCache.GetTermsIndex(Index.AtomicReader, string, float)"/>. This works with all
         /// fields containing zero or one term in the field. The range can be half-open by setting one
-        /// of the values to <code>null</code>.
+        /// of the values to <c>null</c>.
         /// </summary>
         public static DocTermOrdsRangeFilter NewBytesRefRange(string field, BytesRef lowerVal, BytesRef upperVal, bool includeLower, bool includeUpper)
         {
@@ -232,14 +232,14 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns <code>true</code> if the lower endpoint is inclusive </summary>
+        /// Returns <c>true</c> if the lower endpoint is inclusive </summary>
         public virtual bool IncludesLower
         {
             get { return includeLower; }
         }
 
         /// <summary>
-        /// Returns <code>true</code> if the upper endpoint is inclusive </summary>
+        /// Returns <c>true</c> if the upper endpoint is inclusive </summary>
         public virtual bool IncludesUpper
         {
             get { return includeUpper; }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/DocTermOrdsRewriteMethod.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/DocTermOrdsRewriteMethod.cs b/src/Lucene.Net/Search/DocTermOrdsRewriteMethod.cs
index 28f2326..77ddffa 100644
--- a/src/Lucene.Net/Search/DocTermOrdsRewriteMethod.cs
+++ b/src/Lucene.Net/Search/DocTermOrdsRewriteMethod.cs
@@ -31,9 +31,10 @@ namespace Lucene.Net.Search
     using TermsEnum = Lucene.Net.Index.TermsEnum;
 
     /// <summary>
-    /// Rewrites MultiTermQueries into a filter, using DocTermOrds for term enumeration.
-    /// <p>
-    /// this can be used to perform these queries against an unindexed docvalues field.
+    /// Rewrites <see cref="MultiTermQuery"/>s into a filter, using DocTermOrds for term enumeration.
+    /// <para>
+    /// This can be used to perform these queries against an unindexed docvalues field.
+    /// </para>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -56,7 +57,7 @@ namespace Lucene.Net.Search
             protected readonly MultiTermQuery m_query;
 
             /// <summary>
-            /// Wrap a <seealso cref="MultiTermQuery"/> as a Filter.
+            /// Wrap a <see cref="MultiTermQuery"/> as a <see cref="Filter"/>.
             /// </summary>
             protected internal MultiTermQueryDocTermOrdsWrapperFilter(MultiTermQuery query)
             {
@@ -102,7 +103,7 @@ namespace Lucene.Net.Search
             }
 
             /// <summary>
-            /// Returns a DocIdSet with documents that should be permitted in search
+            /// Returns a <see cref="DocIdSet"/> with documents that should be permitted in search
             /// results.
             /// </summary>
             public override DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/Explanation.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Explanation.cs b/src/Lucene.Net/Search/Explanation.cs
index 09e146a..f2c2de1 100644
--- a/src/Lucene.Net/Search/Explanation.cs
+++ b/src/Lucene.Net/Search/Explanation.cs
@@ -43,12 +43,12 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Indicates whether or not this Explanation models a good match.
+        /// Indicates whether or not this <see cref="Explanation"/> models a good match.
         ///
-        /// <p>
+        /// <para>
         /// By default, an Explanation represents a "match" if the value is positive.
-        /// </p> </summary>
-        /// <seealso cref= #getValue </seealso>
+        /// </para> </summary>
+        /// <seealso cref="Value"/>
         public virtual bool IsMatch
         {
             get
@@ -58,7 +58,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// The value assigned to this explanation node. </summary>
+        /// Gets or Sets the value assigned to this explanation node. </summary>
         public virtual float Value
         {
             get
@@ -72,7 +72,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// A description of this explanation node. </summary>
+        /// Gets or Sets the description of this explanation node. </summary>
         public virtual string Description
         {
             get
@@ -87,7 +87,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// A short one line summary which should contain all high level
-        /// information about this Explanation, without the "Details"
+        /// information about this <see cref="Explanation"/>, without the "Details"
         /// </summary>
         protected virtual string GetSummary()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/FakeScorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/FakeScorer.cs b/src/Lucene.Net/Search/FakeScorer.cs
index 33f454b..c2e87aa 100644
--- a/src/Lucene.Net/Search/FakeScorer.cs
+++ b/src/Lucene.Net/Search/FakeScorer.cs
@@ -21,8 +21,8 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// Used by <seealso cref="BulkScorer"/>s that need to pass a {@link
-    ///  Scorer} to <seealso cref="ICollector#setScorer"/>.
+    /// Used by <see cref="BulkScorer"/>s that need to pass a
+    /// <see cref="Scorer"/> to <see cref="ICollector.SetScorer(Scorer)"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]


[14/48] lucenenet git commit: Lucene.Net.Analysis.Common: Fixed XML documentation warnings

Posted by ni...@apache.org.
Lucene.Net.Analysis.Common: Fixed XML documentation warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/93eef424
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/93eef424
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/93eef424

Branch: refs/heads/master
Commit: 93eef424080dd74533632459d41fe846ca841a47
Parents: d7cb70c
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Sun Jun 4 03:18:09 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Sun Jun 4 03:22:58 2017 +0700

----------------------------------------------------------------------
 src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs   | 4 ++--
 .../Analysis/Bg/BulgarianAnalyzer.cs                           | 4 ++--
 .../Analysis/Br/BrazilianAnalyzer.cs                           | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs   | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs    | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Da/DanishAnalyzer.cs   | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs   | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs    | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs   | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs   | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs    | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs    | 4 ++--
 .../Analysis/Hu/HungarianAnalyzer.cs                           | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs | 4 ++--
 .../Analysis/Id/IndonesianAnalyzer.cs                          | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs  | 4 ++--
 .../Analysis/NGram/NGramTokenizer.cs                           | 6 +++---
 .../Analysis/No/NorwegianAnalyzer.cs                           | 4 ++--
 .../Analysis/No/NorwegianLightStemFilter.cs                    | 2 +-
 .../Analysis/No/NorwegianMinimalStemFilter.cs                  | 6 +++---
 .../Analysis/Pt/PortugueseAnalyzer.cs                          | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs  | 4 ++--
 .../Analysis/Util/BufferedCharFilter.cs                        | 2 +-
 36 files changed, 72 insertions(+), 72 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs
index 095d92f..6076d11 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs
@@ -127,10 +127,10 @@ namespace Lucene.Net.Analysis.Ar
         }
 
         /// <summary>
-        /// Creates <see cref="Analyzer.TokenStreamComponents"/>
+        /// Creates <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="ArabicNormalizationFilter"/>, <see cref="SetKeywordMarkerFilter"/>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Bg/BulgarianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Bg/BulgarianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Bg/BulgarianAnalyzer.cs
index cac88af..adeb9cc 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Bg/BulgarianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Bg/BulgarianAnalyzer.cs
@@ -111,11 +111,11 @@ namespace Lucene.Net.Analysis.Bg
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>, 
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs
index 1b5edf7..4252701 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs
@@ -120,10 +120,10 @@ namespace Lucene.Net.Analysis.Br
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="LowerCaseFilter"/>, <see cref="StandardFilter"/>, <see cref="StopFilter"/>,
         ///         and <see cref="BrazilianStemFilter"/>. </returns>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
index ba84523..5266f30 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
@@ -119,11 +119,11 @@ namespace Lucene.Net.Analysis.Ca
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="ElisionFilter"/>, <see cref="LowerCaseFilter"/>, 
         ///         <see cref="StopFilter"/>, <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs
index 64648e9..647645d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs
@@ -116,7 +116,7 @@ namespace Lucene.Net.Analysis.Cjk
         private bool exhausted;
 
         /// <summary>
-        /// Calls <see cref="CJKBigramFilter.CJKBigramFilter(TokenStream, int)">
+        /// Calls <see cref="CJKBigramFilter.CJKBigramFilter(TokenStream, CJKScript)">
         ///       CJKBigramFilter(@in, CJKScript.HAN | CJKScript.HIRAGANA | CJKScript.KATAKANA | CJKScript.HANGUL)</see>
         /// </summary>
         /// <param name="in">
@@ -127,7 +127,7 @@ namespace Lucene.Net.Analysis.Cjk
         }
 
         /// <summary>
-        /// Calls <see cref="CJKBigramFilter.CJKBigramFilter(TokenStream, int, bool)">
+        /// Calls <see cref="CJKBigramFilter.CJKBigramFilter(TokenStream, CJKScript, bool)">
         ///       CJKBigramFilter(in, flags, false)</see>
         /// </summary>
         /// <param name="in">

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniAnalyzer.cs
index e99d70f..2e177a7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniAnalyzer.cs
@@ -109,11 +109,11 @@ namespace Lucene.Net.Analysis.Ckb
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="SoraniNormalizationFilter"/>, 
         ///         <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseAnalyzer.cs
index de0b5e7..6105ec8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseAnalyzer.cs
@@ -31,10 +31,10 @@ namespace Lucene.Net.Analysis.Cn
     {
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="ChineseTokenizer"/> filtered with
         ///         <see cref="ChineseFilter"/> </returns>
         protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs
index 0a4d34c..991a12f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs
@@ -95,10 +95,10 @@ namespace Lucene.Net.Analysis.Core
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="LowerCaseTokenizer"/> filtered with
         ///         <see cref="StopFilter"/> </returns>
         protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs
index cffbe49..7138e72 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs
@@ -124,10 +124,10 @@ namespace Lucene.Net.Analysis.Cz
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         and <see cref="CzechStemFilter"/> (only if version is >= LUCENE_31). If

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Da/DanishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Da/DanishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Da/DanishAnalyzer.cs
index 453e9c6..af436da 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Da/DanishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Da/DanishAnalyzer.cs
@@ -111,11 +111,11 @@ namespace Lucene.Net.Analysis.Da
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
index 1a6a350..7af943b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
@@ -165,10 +165,10 @@ namespace Lucene.Net.Analysis.De
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
index 061ed9e..d496322 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
@@ -105,10 +105,10 @@ namespace Lucene.Net.Analysis.El
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="GreekLowerCaseFilter"/>, <see cref="StandardFilter"/>,
         ///         <see cref="StopFilter"/>, and <see cref="GreekStemFilter"/> </returns>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
index 3c6e0ff..4c4d16c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
@@ -87,11 +87,11 @@ namespace Lucene.Net.Analysis.En
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="EnglishPossessiveFilter"/>, 
         ///         <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
index b537856..655c653 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
@@ -119,11 +119,11 @@ namespace Lucene.Net.Analysis.Es
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
index 9e4a7e9..099ffdd 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
@@ -106,11 +106,11 @@ namespace Lucene.Net.Analysis.Eu
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
index 7c4fe02..df88f5a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
@@ -108,10 +108,10 @@ namespace Lucene.Net.Analysis.Fa
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="LowerCaseFilter"/>, <see cref="ArabicNormalizationFilter"/>,
         ///         <see cref="PersianNormalizationFilter"/> and Persian Stop words </returns>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs
index 18f7df4..89d67ec 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs
@@ -111,11 +111,11 @@ namespace Lucene.Net.Analysis.Fi
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
index 1d117a8..495d081 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
@@ -176,10 +176,10 @@ namespace Lucene.Net.Analysis.Fr
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="ElisionFilter"/>,
         ///         <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs
index 1d5d0ce..b88988a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs
@@ -124,11 +124,11 @@ namespace Lucene.Net.Analysis.Ga
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="IrishLowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs
index 7130348..fb06c84 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs
@@ -109,11 +109,11 @@ namespace Lucene.Net.Analysis.Gl
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
index 28198f2..2e465d6 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
@@ -115,10 +115,10 @@ namespace Lucene.Net.Analysis.Hi
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="LowerCaseFilter"/>, <see cref="IndicNormalizationFilter"/>,
         ///         <see cref="HindiNormalizationFilter"/>, <see cref="SetKeywordMarkerFilter"/>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
index 6e7fe57..1ae25bf 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
@@ -112,11 +112,11 @@ namespace Lucene.Net.Analysis.Hu
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs
index 6611208..7242b60 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs
@@ -107,11 +107,11 @@ namespace Lucene.Net.Analysis.Hy
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs
index eb74551..22e19ea 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs
@@ -109,10 +109,10 @@ namespace Lucene.Net.Analysis.Id
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>,
         ///         <see cref="StopFilter"/>, <see cref="SetKeywordMarkerFilter"/>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
index d428e63..058e560 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
@@ -128,11 +128,11 @@ namespace Lucene.Net.Analysis.It
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="ElisionFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
index fb643f8..2e60a58 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
@@ -110,11 +110,11 @@ namespace Lucene.Net.Analysis.Lv
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs
index bd62835..83ad83a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs
@@ -86,11 +86,11 @@ namespace Lucene.Net.Analysis.NGram
     /// <para>This tokenizer changed a lot in Lucene 4.4 in order to:
     /// <list type="bullet">
     ///     <item><description>tokenize in a streaming fashion to support streams which are larger
-    ///         than 1024 chars (limit of the previous version),</item>
+    ///         than 1024 chars (limit of the previous version),</description></item>
     ///     <item><description>count grams based on unicode code points instead of java chars (and
-    ///         never split in the middle of surrogate pairs),</item>
+    ///         never split in the middle of surrogate pairs),</description></item>
     ///     <item><description>give the ability to pre-tokenize the stream (<see cref="IsTokenChar(int)"/>)
-    ///         before computing n-grams.</item>
+    ///         before computing n-grams.</description></item>
     /// </list>
     /// </para>
     /// <para>Additionally, this class doesn't trim trailing whitespaces and emits

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs
index 206e45d..d22eec3 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs
@@ -111,11 +111,11 @@ namespace Lucene.Net.Analysis.No
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
index ca36da0..5df5074 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
@@ -35,7 +35,7 @@ namespace Lucene.Net.Analysis.No
         private readonly IKeywordAttribute keywordAttr;
 
         /// <summary>
-        /// Calls <see cref="NorwegianLightStemFilter.NorwegianLightStemFilter(TokenStream, int)"/>
+        /// Calls <see cref="NorwegianLightStemFilter.NorwegianLightStemFilter(TokenStream, NorwegianStandard)"/>
         /// - NorwegianLightStemFilter(input, BOKMAAL)
         /// </summary>
         /// <param name="input"> the source <see cref="TokenStream"/> to filter </param>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
index 996a8a9..7e17ed9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
@@ -35,7 +35,7 @@ namespace Lucene.Net.Analysis.No
         private readonly IKeywordAttribute keywordAttr;
 
         /// <summary>
-        /// Calls <see cref="NorwegianLightStemFilter.NorwegianLightStemFilter(TokenStream, int)"/> -
+        /// Calls <see cref="NorwegianLightStemFilter.NorwegianLightStemFilter(TokenStream, NorwegianStandard)"/> -
         /// NorwegianMinimalStemFilter(input, BOKMAAL)
         /// </summary>
         public NorwegianMinimalStemFilter(TokenStream input)
@@ -46,8 +46,8 @@ namespace Lucene.Net.Analysis.No
         /// <summary>
         /// Creates a new <see cref="NorwegianLightStemFilter"/> </summary>
         /// <param name="input"> the source <see cref="TokenStream"/> to filter </param>
-        /// <param name="flags"> set to <see cref="NorwegianLightStemmer.BOKMAAL"/>, 
-        ///                     <see cref="NorwegianLightStemmer.NYNORSK"/>, or both. </param>
+        /// <param name="flags"> set to <see cref="NorwegianStandard.BOKMAAL"/>, 
+        ///                     <see cref="NorwegianStandard.NYNORSK"/>, or both. </param>
         public NorwegianMinimalStemFilter(TokenStream input, NorwegianStandard flags)
             : base(input)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
index 5f09576..410c58f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
@@ -116,11 +116,11 @@ namespace Lucene.Net.Analysis.Pt
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
         ///         , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
index 83f9b53..5212e7f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
@@ -112,11 +112,11 @@ namespace Lucene.Net.Analysis.Ro
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
index bb086a7..139a710 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
@@ -142,10 +142,10 @@ namespace Lucene.Net.Analysis.Ru
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
         ///         , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs
index a54ec89..2ca5af7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs
@@ -112,11 +112,11 @@ namespace Lucene.Net.Analysis.Sv
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs
index c7212a0..9d550a8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs
@@ -113,11 +113,11 @@ namespace Lucene.Net.Analysis.Tr
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="TurkishLowerCaseFilter"/>,
         ///         <see cref="StopFilter"/>, <see cref="SetKeywordMarkerFilter"/> if a stem

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Util/BufferedCharFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/BufferedCharFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/BufferedCharFilter.cs
index b26993a..c9c4426 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/BufferedCharFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/BufferedCharFilter.cs
@@ -489,7 +489,7 @@ namespace Lucene.Net.Analysis.Util
         /// Indicates whether this reader is ready to be read without blocking.
         /// </summary>
         /// <returns>
-        /// <c>true</c> if this reader will not block when <see cref="Read"/> is
+        /// <c>true</c> if this reader will not block when <see cref="Read()"/> is
         /// called, <c>false</c> if unknown or blocking will occur.
         /// </returns>
         public override bool IsReady


[22/48] lucenenet git commit: Lucene.Net.Facet: Fixed XML documentation warnings

Posted by ni...@apache.org.
Lucene.Net.Facet: Fixed XML documentation warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/33f31f53
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/33f31f53
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/33f31f53

Branch: refs/heads/master
Commit: 33f31f534c6033b1cb522ffea4bc39718c97a112
Parents: 3052070
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Sun Jun 4 04:17:17 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Sun Jun 4 04:17:17 2017 +0700

----------------------------------------------------------------------
 src/Lucene.Net.Facet/Range/Range.cs             | 4 ++--
 src/Lucene.Net.Facet/Taxonomy/TaxonomyWriter.cs | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/33f31f53/src/Lucene.Net.Facet/Range/Range.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Facet/Range/Range.cs b/src/Lucene.Net.Facet/Range/Range.cs
index ca2e2f4..7e5df1d 100644
--- a/src/Lucene.Net.Facet/Range/Range.cs
+++ b/src/Lucene.Net.Facet/Range/Range.cs
@@ -47,7 +47,7 @@
         /// Returns a new <see cref="Filter"/> accepting only documents
         /// in this range.  This filter is not general-purpose;
         /// you should either use it with <see cref="DrillSideways"/> by
-        /// adding it to <see cref="DrillDownQuery.Add"/>, or pass it to
+        /// adding it to <see cref="DrillDownQuery.Add(string, Filter)"/>, or pass it to
         /// <see cref="Search.FilteredQuery"/> using its 
         /// <see cref="Search.FilteredQuery.QUERY_FIRST_FILTER_STRATEGY"/>.
         /// If the <see cref="ValueSource"/> is static, e.g. an indexed numeric
@@ -63,7 +63,7 @@
         /// Returns a new <see cref="Filter"/> accepting only documents
         ///  in this range.  This filter is not general-purpose;
         ///  you should either use it with <see cref="DrillSideways"/> by
-        ///  adding it to <see cref="DrillDownQuery.Add"/>, or pass it to
+        ///  adding it to <see cref="DrillDownQuery.Add(string, Filter)"/>, or pass it to
         ///  <see cref="Search.FilteredQuery"/> using its 
         ///  <see cref="Search.FilteredQuery.QUERY_FIRST_FILTER_STRATEGY"/>.  If the
         ///  <see cref="ValueSource"/> is static, e.g. an indexed numeric

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/33f31f53/src/Lucene.Net.Facet/Taxonomy/TaxonomyWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Facet/Taxonomy/TaxonomyWriter.cs b/src/Lucene.Net.Facet/Taxonomy/TaxonomyWriter.cs
index 7e9f47d..e4edd4c 100644
--- a/src/Lucene.Net.Facet/Taxonomy/TaxonomyWriter.cs
+++ b/src/Lucene.Net.Facet/Taxonomy/TaxonomyWriter.cs
@@ -76,7 +76,7 @@ namespace Lucene.Net.Facet.Taxonomy
         /// parent is as trivial as dropping the last component of the path.
         /// <see cref="GetParent"/> is functionally equivalent to calling <see cref="TaxonomyReader.GetPath"/> on the
         /// given ordinal, dropping the last component of the path, and then calling
-        /// <see cref="TaxonomyReader.GetOrdinal"/> to get an ordinal back.
+        /// <see cref="TaxonomyReader.GetOrdinal(FacetLabel)"/> to get an ordinal back.
         /// </para>
         /// <para>
         /// If the given ordinal is the <see cref="TaxonomyReader.ROOT_ORDINAL"/>, an 


[02/48] lucenenet git commit: Lucene.Net.Search.Similarities: Fixed up documentation comments

Posted by ni...@apache.org.
Lucene.Net.Search.Similarities: Fixed up documentation comments


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/2a1541c1
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/2a1541c1
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/2a1541c1

Branch: refs/heads/master
Commit: 2a1541c184dd66a4d710c1f15590f27c9f5e7c53
Parents: 1197b1a
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Fri Jun 2 23:43:49 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Fri Jun 2 23:43:49 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |   2 +-
 src/Lucene.Net/Lucene.Net.csproj                |   1 -
 .../Search/Similarities/AfterEffect.cs          |   9 +-
 .../Search/Similarities/AfterEffectB.cs         |   1 +
 .../Search/Similarities/AfterEffectL.cs         |   1 +
 .../Search/Similarities/BM25Similarity.cs       |  67 +-
 .../Search/Similarities/BasicModel.cs           |  11 +-
 .../Search/Similarities/BasicModelBE.cs         |  14 +-
 .../Search/Similarities/BasicModelD.cs          |   9 +-
 .../Search/Similarities/BasicModelG.cs          |   5 +-
 .../Search/Similarities/BasicModelIF.cs         |   1 +
 .../Search/Similarities/BasicModelIn.cs         |   1 +
 .../Search/Similarities/BasicModelIne.cs        |   1 +
 .../Search/Similarities/BasicModelP.cs          |   5 +-
 .../Search/Similarities/BasicStats.cs           |  17 +-
 .../Search/Similarities/DFRSimilarity.cs        | 105 +--
 .../Search/Similarities/DefaultSimilarity.cs    |  69 +-
 .../Search/Similarities/Distribution.cs         |  10 +-
 .../Search/Similarities/DistributionLL.cs       |   4 +-
 .../Search/Similarities/DistributionSPL.cs      |   4 +-
 .../Search/Similarities/IBSimilarity.cs         | 107 +--
 .../Similarities/LMDirichletSimilarity.cs       |  18 +-
 .../Similarities/LMJelinekMercerSimilarity.cs   |  12 +-
 .../Search/Similarities/LMSimilarity.cs         |  38 +-
 src/Lucene.Net/Search/Similarities/Lambda.cs    |  10 +-
 src/Lucene.Net/Search/Similarities/LambdaDF.cs  |   3 +-
 src/Lucene.Net/Search/Similarities/LambdaTTF.cs |   3 +-
 .../Search/Similarities/MultiSimilarity.cs      |   5 +-
 .../Search/Similarities/Normalization.cs        |  13 +-
 .../Search/Similarities/NormalizationH1.cs      |  16 +-
 .../Search/Similarities/NormalizationH2.cs      |  14 +-
 .../Search/Similarities/NormalizationH3.cs      |  11 +-
 .../Search/Similarities/NormalizationZ.cs       |  11 +-
 .../Similarities/PerFieldSimilarityWrapper.cs   |  12 +-
 .../Search/Similarities/Similarity.cs           | 139 ++--
 .../Search/Similarities/SimilarityBase.cs       |  63 +-
 .../Search/Similarities/TFIDFSimilarity.cs      | 680 +++++++++----------
 37 files changed, 762 insertions(+), 730 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c54ac21..68b0f1c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -52,7 +52,7 @@ helpers to help with that, for examples see our [Java style methods to avoid
 
 1. Lucene.Net.Core (project)
    1. Codecs (namespace)
-   2. Search (namespace) (Except for Search.Payloads)
+   2. Search (namespace) (Except for Search.Payloads and Search.Similarities)
    3. Support (namespace)
    4. Util (namespace) (Except for Util.Fst)
 2. Lucene.Net.Codecs (project)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Lucene.Net.csproj
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Lucene.Net.csproj b/src/Lucene.Net/Lucene.Net.csproj
index 0dd30df..eb166ac 100644
--- a/src/Lucene.Net/Lucene.Net.csproj
+++ b/src/Lucene.Net/Lucene.Net.csproj
@@ -42,7 +42,6 @@
     <WarningLevel>4</WarningLevel>
     <PlatformTarget>AnyCPU</PlatformTarget>
     <Prefer32Bit>false</Prefer32Bit>
-    <DocumentationFile>bin\Debug\Lucene.Net.XML</DocumentationFile>
   </PropertyGroup>
   <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
     <DebugType>pdbonly</DebugType>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/AfterEffect.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/AfterEffect.cs b/src/Lucene.Net/Search/Similarities/AfterEffect.cs
index 0e9515d..560da03 100644
--- a/src/Lucene.Net/Search/Similarities/AfterEffect.cs
+++ b/src/Lucene.Net/Search/Similarities/AfterEffect.cs
@@ -20,14 +20,15 @@ namespace Lucene.Net.Search.Similarities
      */
 
     /// <summary>
-    /// this class acts as the base class for the implementations of the <em>first
-    /// normalization of the informative content</em> in the DFR framework. this
+    /// This class acts as the base class for the implementations of the <em>first
+    /// normalization of the informative content</em> in the DFR framework. This
     /// component is also called the <em>after effect</em> and is defined by the
     /// formula <em>Inf<sub>2</sub> = 1 - Prob<sub>2</sub></em>, where
     /// <em>Prob<sub>2</sub></em> measures the <em>information gain</em>.
+    /// <para/>
+    /// @lucene.experimental 
     /// </summary>
-    /// <seealso cref= DFRSimilarity
-    /// @lucene.experimental </seealso>
+    /// <seealso cref="DFRSimilarity"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/AfterEffectB.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/AfterEffectB.cs b/src/Lucene.Net/Search/Similarities/AfterEffectB.cs
index ef355c3..e50aa01 100644
--- a/src/Lucene.Net/Search/Similarities/AfterEffectB.cs
+++ b/src/Lucene.Net/Search/Similarities/AfterEffectB.cs
@@ -21,6 +21,7 @@ namespace Lucene.Net.Search.Similarities
 
     /// <summary>
     /// Model of the information gain based on the ratio of two Bernoulli processes.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/AfterEffectL.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/AfterEffectL.cs b/src/Lucene.Net/Search/Similarities/AfterEffectL.cs
index 023c366..80244b8 100644
--- a/src/Lucene.Net/Search/Similarities/AfterEffectL.cs
+++ b/src/Lucene.Net/Search/Similarities/AfterEffectL.cs
@@ -21,6 +21,7 @@ namespace Lucene.Net.Search.Similarities
 
     /// <summary>
     /// Model of the information gain based on Laplace's law of succession.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/BM25Similarity.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/BM25Similarity.cs b/src/Lucene.Net/Search/Similarities/BM25Similarity.cs
index a7a992d..8f920e0 100644
--- a/src/Lucene.Net/Search/Similarities/BM25Similarity.cs
+++ b/src/Lucene.Net/Search/Similarities/BM25Similarity.cs
@@ -30,6 +30,7 @@ namespace Lucene.Net.Search.Similarities
     /// Susan Jones, Micheline Hancock-Beaulieu, and Mike Gatford. Okapi at TREC-3.
     /// In Proceedings of the Third <b>T</b>ext <b>RE</b>trieval <b>C</b>onference (TREC 1994).
     /// Gaithersburg, USA, November 1994.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -53,10 +54,10 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// BM25 with these default values:
-        /// <ul>
-        ///   <li>{@code k1 = 1.2},
-        ///   <li>{@code b = 0.75}.</li>
-        /// </ul>
+        /// <list type="bullet">
+        ///   <item><description><c>k1 = 1.2</c>,</description></item>
+        ///   <item><description><c>b = 0.75</c>.</description></item>
+        /// </list>
         /// </summary>
         public BM25Similarity()
         {
@@ -65,29 +66,29 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Implemented as <code>log(1 + (numDocs - docFreq + 0.5)/(docFreq + 0.5))</code>. </summary>
+        /// Implemented as <c>log(1 + (numDocs - docFreq + 0.5)/(docFreq + 0.5))</c>. </summary>
         protected internal virtual float Idf(long docFreq, long numDocs)
         {
             return (float)Math.Log(1 + (numDocs - docFreq + 0.5D) / (docFreq + 0.5D));
         }
 
         /// <summary>
-        /// Implemented as <code>1 / (distance + 1)</code>. </summary>
+        /// Implemented as <c>1 / (distance + 1)</c>. </summary>
         protected internal virtual float SloppyFreq(int distance)
         {
             return 1.0f / (distance + 1);
         }
 
         /// <summary>
-        /// The default implementation returns <code>1</code> </summary>
+        /// The default implementation returns <c>1</c> </summary>
         protected internal virtual float ScorePayload(int doc, int start, int end, BytesRef payload)
         {
             return 1;
         }
 
         /// <summary>
-        /// The default implementation computes the average as <code>sumTotalTermFreq / maxDoc</code>,
-        /// or returns <code>1</code> if the index does not store sumTotalTermFreq (Lucene 3.x indexes
+        /// The default implementation computes the average as <c>sumTotalTermFreq / maxDoc</c>,
+        /// or returns <c>1</c> if the index does not store sumTotalTermFreq (Lucene 3.x indexes
         /// or any field that omits frequency information).
         /// </summary>
         protected internal virtual float AvgFieldLength(CollectionStatistics collectionStats)
@@ -104,10 +105,10 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// The default implementation encodes <code>boost / sqrt(length)</code>
-        /// with <seealso cref="SmallSingle#floatToByte315(float)"/>.  this is compatible with
+        /// The default implementation encodes <c>boost / sqrt(length)</c>
+        /// with <see cref="SmallSingle.SingleToByte315(float)"/>.  This is compatible with
         /// Lucene's default implementation.  If you change this, then you should
-        /// change <seealso cref="#decodeNormValue(byte)"/> to match.
+        /// change <see cref="DecodeNormValue(byte)"/> to match.
         /// </summary>
         protected internal virtual byte EncodeNormValue(float boost, int fieldLength) 
         {
@@ -115,8 +116,8 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// The default implementation returns <code>1 / f<sup>2</sup></code>
-        /// where <code>f</code> is <seealso cref="SmallSingle#byte315ToFloat(byte)"/>.
+        /// The default implementation returns <c>1 / f<sup>2</sup></c>
+        /// where <c>f</c> is <see cref="SmallSingle.Byte315ToSingle(byte)"/>.
         /// </summary>
         protected internal virtual float DecodeNormValue(byte b)
         {
@@ -130,9 +131,9 @@ namespace Lucene.Net.Search.Similarities
         private bool discountOverlaps = true; // LUCENENET specific: made private, since value can be set/get through property
 
         /// <summary>
-        /// Sets whether overlap tokens (Tokens with 0 position increment) are
-        ///  ignored when computing norm.  By default this is true, meaning overlap
-        ///  tokens do not count when computing norms.
+        /// Gets or Sets whether overlap tokens (Tokens with 0 position increment) are
+        /// ignored when computing norm.  By default this is true, meaning overlap
+        /// tokens do not count when computing norms.
         /// </summary>
         public virtual bool DiscountOverlaps
         {
@@ -169,22 +170,22 @@ namespace Lucene.Net.Search.Similarities
         /// Computes a score factor for a simple term and returns an explanation
         /// for that score factor.
         ///
-        /// <p>
+        /// <para/>
         /// The default implementation uses:
         ///
-        /// <pre class="prettyprint">
-        /// idf(docFreq, searcher.maxDoc());
-        /// </pre>
+        /// <code>
+        ///     Idf(docFreq, searcher.MaxDoc);
+        /// </code>
         ///
-        /// Note that <seealso cref="CollectionStatistics#maxDoc()"/> is used instead of
-        /// <seealso cref="Lucene.Net.Index.IndexReader#numDocs() IndexReader#numDocs()"/> because also
-        /// <seealso cref="TermStatistics#docFreq()"/> is used, and when the latter
-        /// is inaccurate, so is <seealso cref="CollectionStatistics#maxDoc()"/>, and in the same direction.
-        /// In addition, <seealso cref="CollectionStatistics#maxDoc()"/> is more efficient to compute
+        /// Note that <see cref="CollectionStatistics.MaxDoc"/> is used instead of
+        /// <see cref="Lucene.Net.Index.IndexReader.NumDocs"/> because also
+        /// <see cref="TermStatistics.DocFreq"/> is used, and when the latter
+        /// is inaccurate, so is <see cref="CollectionStatistics.MaxDoc"/>, and in the same direction.
+        /// In addition, <see cref="CollectionStatistics.MaxDoc"/> is more efficient to compute
         /// </summary>
         /// <param name="collectionStats"> collection-level statistics </param>
         /// <param name="termStats"> term-level statistics for the term </param>
-        /// <returns> an Explain object that includes both an idf score factor
+        /// <returns> an <see cref="Explanation"/> object that includes both an idf score factor
         ///           and an explanation for the term. </returns>
         public virtual Explanation IdfExplain(CollectionStatistics collectionStats, TermStatistics termStats)
         {
@@ -197,13 +198,13 @@ namespace Lucene.Net.Search.Similarities
         /// <summary>
         /// Computes a score factor for a phrase.
         ///
-        /// <p>
+        /// <para/>
         /// The default implementation sums the idf factor for
         /// each term in the phrase.
         /// </summary>
         /// <param name="collectionStats"> collection-level statistics </param>
         /// <param name="termStats"> term-level statistics for the terms in the phrase </param>
-        /// <returns> an Explain object that includes both an idf
+        /// <returns> an <see cref="Explanation"/> object that includes both an idf
         ///         score factor for the phrase and an explanation
         ///         for each term. </returns>
         public virtual Explanation IdfExplain(CollectionStatistics collectionStats, TermStatistics[] termStats)
@@ -388,8 +389,8 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Returns the <code>k1</code> parameter </summary>
-        /// <seealso cref= #BM25Similarity(float, float)  </seealso>
+        /// Returns the <c>k1</c> parameter </summary>
+        /// <seealso cref="BM25Similarity(float, float)"/>
         public virtual float K1
         {
             get
@@ -399,8 +400,8 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Returns the <code>b</code> parameter </summary>
-        /// <seealso cref= #BM25Similarity(float, float)  </seealso>
+        /// Returns the <c>b</c> parameter </summary>
+        /// <seealso cref="BM25Similarity(float, float)"/>
         public virtual float B
         {
             get

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/BasicModel.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/BasicModel.cs b/src/Lucene.Net/Search/Similarities/BasicModel.cs
index 5213253..28365de 100644
--- a/src/Lucene.Net/Search/Similarities/BasicModel.cs
+++ b/src/Lucene.Net/Search/Similarities/BasicModel.cs
@@ -20,13 +20,14 @@ namespace Lucene.Net.Search.Similarities
      */
 
     /// <summary>
-    /// this class acts as the base class for the specific <em>basic model</em>
+    /// This class acts as the base class for the specific <em>basic model</em>
     /// implementations in the DFR framework. Basic models compute the
     /// <em>informative content Inf<sub>1</sub> = -log<sub>2</sub>Prob<sub>1</sub>
     /// </em>.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// <seealso cref= DFRSimilarity
-    /// @lucene.experimental </seealso>
+    /// <seealso cref="DFRSimilarity"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -46,10 +47,10 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// Returns an explanation for the score.
-        /// <p>Most basic models use the number of documents and the total term
+        /// <para>Most basic models use the number of documents and the total term
     /// frequency to compute Inf<sub>1</sub>. This method provides a generic
         /// explanation for such models. Subclasses that use other statistics must
-        /// override this method.</p>
+        /// override this method.</para>
         /// </summary>
         public virtual Explanation Explain(BasicStats stats, float tfn)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/BasicModelBE.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/BasicModelBE.cs b/src/Lucene.Net/Search/Similarities/BasicModelBE.cs
index bcba387..b0da29a 100644
--- a/src/Lucene.Net/Search/Similarities/BasicModelBE.cs
+++ b/src/Lucene.Net/Search/Similarities/BasicModelBE.cs
@@ -21,12 +21,14 @@ namespace Lucene.Net.Search.Similarities
 
     /// <summary>
     /// Limiting form of the Bose-Einstein model. The formula used in Lucene differs
-    /// slightly from the one in the original paper: {@code F} is increased by {@code tfn+1}
-    /// and {@code N} is increased by {@code F}
+    /// slightly from the one in the original paper: <c>F</c> is increased by <c>tfn+1</c>
+    /// and <c>N</c> is increased by <c>F</c>
+    /// <para/>
     /// @lucene.experimental
+    /// <para/>
     /// NOTE: in some corner cases this model may give poor performance with Normalizations that
-    /// return large values for {@code tfn} such as NormalizationH3. Consider using the
-    /// geometric approximation (<seealso cref="BasicModelG"/>) instead, which provides the same relevance
+    /// return large values for <c>tfn</c> such as <see cref="NormalizationH3"/>. Consider using the
+    /// geometric approximation (<see cref="BasicModelG"/>) instead, which provides the same relevance
     /// but with less practical problems.
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -45,12 +47,12 @@ namespace Lucene.Net.Search.Similarities
             double F = stats.TotalTermFreq + 1 + tfn;
             // approximation only holds true when F << N, so we use N += F
             double N = F + stats.NumberOfDocuments;
-            return (float)(-SimilarityBase.Log2((N - 1) * Math.E) + f(N + F - 1, N + F - tfn - 2) - f(F, F - tfn));
+            return (float)(-SimilarityBase.Log2((N - 1) * Math.E) + this.F(N + F - 1, N + F - tfn - 2) - this.F(F, F - tfn));
         }
 
         /// <summary>
         /// The <em>f</em> helper function defined for <em>B<sub>E</sub></em>. </summary>
-        private double f(double n, double m)
+        private double F(double n, double m)
         {
             return (m + 0.5) * SimilarityBase.Log2(n / m) + (n - m) * SimilarityBase.Log2(n);
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/BasicModelD.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/BasicModelD.cs b/src/Lucene.Net/Search/Similarities/BasicModelD.cs
index b2dda6b..fc5f4d7 100644
--- a/src/Lucene.Net/Search/Similarities/BasicModelD.cs
+++ b/src/Lucene.Net/Search/Similarities/BasicModelD.cs
@@ -22,13 +22,14 @@ namespace Lucene.Net.Search.Similarities
     /// <summary>
     /// Implements the approximation of the binomial model with the divergence
     /// for DFR. The formula used in Lucene differs slightly from the one in the
-    /// original paper: to avoid underflow for small values of {@code N} and
-    /// {@code F}, {@code N} is increased by {@code 1} and
-    /// {@code F} is always increased by {@code tfn+1}.
-    /// <p>
+    /// original paper: to avoid underflow for small values of <c>N</c> and
+    /// <c>F</c>, <c>N</c> is increased by <c>1</c> and
+    /// <c>F</c> is always increased by <c>tfn+1</c>.
+    /// <para/>
     /// WARNING: for terms that do not meet the expected random distribution
     /// (e.g. stopwords), this model may give poor performance, such as
     /// abnormally high scores for low tf values.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/BasicModelG.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/BasicModelG.cs b/src/Lucene.Net/Search/Similarities/BasicModelG.cs
index a2d0a74..d95b2ec 100644
--- a/src/Lucene.Net/Search/Similarities/BasicModelG.cs
+++ b/src/Lucene.Net/Search/Similarities/BasicModelG.cs
@@ -21,8 +21,9 @@ namespace Lucene.Net.Search.Similarities
 
     /// <summary>
     /// Geometric as limiting form of the Bose-Einstein model.  The formula used in Lucene differs
-    /// slightly from the one in the original paper: {@code F} is increased by {@code 1}
-    /// and {@code N} is increased by {@code F}.
+    /// slightly from the one in the original paper: <c>F</c> is increased by <c>1</c>
+    /// and <c>N</c> is increased by <c>F</c>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/BasicModelIF.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/BasicModelIF.cs b/src/Lucene.Net/Search/Similarities/BasicModelIF.cs
index 89aa9b8..3bf3a8e 100644
--- a/src/Lucene.Net/Search/Similarities/BasicModelIF.cs
+++ b/src/Lucene.Net/Search/Similarities/BasicModelIF.cs
@@ -21,6 +21,7 @@ namespace Lucene.Net.Search.Similarities
 
     /// <summary>
     /// An approximation of the <em>I(n<sub>e</sub>)</em> model.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/BasicModelIn.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/BasicModelIn.cs b/src/Lucene.Net/Search/Similarities/BasicModelIn.cs
index 196e929..fac653a 100644
--- a/src/Lucene.Net/Search/Similarities/BasicModelIn.cs
+++ b/src/Lucene.Net/Search/Similarities/BasicModelIn.cs
@@ -21,6 +21,7 @@ namespace Lucene.Net.Search.Similarities
 
     /// <summary>
     /// The basic tf-idf model of randomness.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/BasicModelIne.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/BasicModelIne.cs b/src/Lucene.Net/Search/Similarities/BasicModelIne.cs
index 27d15cf..25047b7 100644
--- a/src/Lucene.Net/Search/Similarities/BasicModelIne.cs
+++ b/src/Lucene.Net/Search/Similarities/BasicModelIne.cs
@@ -22,6 +22,7 @@ namespace Lucene.Net.Search.Similarities
     /// <summary>
     /// Tf-idf model of randomness, based on a mixture of Poisson and inverse
     /// document frequency.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/BasicModelP.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/BasicModelP.cs b/src/Lucene.Net/Search/Similarities/BasicModelP.cs
index 9167bd6..c42cfd3 100644
--- a/src/Lucene.Net/Search/Similarities/BasicModelP.cs
+++ b/src/Lucene.Net/Search/Similarities/BasicModelP.cs
@@ -21,8 +21,9 @@ namespace Lucene.Net.Search.Similarities
 
     /// <summary>
     /// Implements the Poisson approximation for the binomial model for DFR.
+    /// <para/>
     /// @lucene.experimental
-    /// <p>
+    /// <para/>
     /// WARNING: for terms that do not meet the expected random distribution
     /// (e.g. stopwords), this model may give poor performance, such as
     /// abnormally high scores for low tf values.
@@ -33,7 +34,7 @@ namespace Lucene.Net.Search.Similarities
     public class BasicModelP : BasicModel
     {
         /// <summary>
-        /// {@code log2(Math.E)}, precomputed. </summary>
+        /// <c>log2(Math.E)</c>, precomputed. </summary>
         protected internal static double LOG2_E = SimilarityBase.Log2(Math.E);
 
         /// <summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/BasicStats.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/BasicStats.cs b/src/Lucene.Net/Search/Similarities/BasicStats.cs
index aefef8c..0c534bb 100644
--- a/src/Lucene.Net/Search/Similarities/BasicStats.cs
+++ b/src/Lucene.Net/Search/Similarities/BasicStats.cs
@@ -21,6 +21,7 @@ namespace Lucene.Net.Search.Similarities
 
     /// <summary>
     /// Stores all statistics commonly used by ranking methods.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -28,7 +29,7 @@ namespace Lucene.Net.Search.Similarities
 #endif
     public class BasicStats : Similarity.SimWeight
     {
-        protected internal readonly string m_field;
+        protected internal readonly string m_field; // LUCENENET TODO: API This was internal in Lucene
 
         /// <summary>
         /// The number of documents. </summary>
@@ -79,7 +80,7 @@ namespace Lucene.Net.Search.Similarities
         // ------------------------- Getter/setter methods -------------------------
 
         /// <summary>
-        /// Returns the number of documents. </summary>
+        /// Gets or Sets the number of documents. </summary>
         public virtual long NumberOfDocuments
         {
             get
@@ -94,7 +95,7 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// Returns the total number of tokens in the field. </summary>
-        /// <seealso cref= Terms#getSumTotalTermFreq() </seealso>
+        /// <seealso cref="Index.Terms.SumTotalTermFreq"/>
         public virtual long NumberOfFieldTokens
         {
             get
@@ -149,7 +150,7 @@ namespace Lucene.Net.Search.Similarities
             }
         }
 
-        public virtual string Field
+        public virtual string Field // LUCENENET TODO: API - eliminate and use internal field instead
         {
             get { return m_field; }
         }
@@ -158,7 +159,7 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// The square of the raw normalization value. </summary>
-        /// <seealso cref= #rawNormalizationValue()  </seealso>
+        /// <seealso cref="RawNormalizationValue()"/>
         public override float GetValueForNormalization()
         {
             float rawValue = RawNormalizationValue();
@@ -166,10 +167,10 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Computes the raw normalization value. this basic implementation returns
+        /// Computes the raw normalization value. This basic implementation returns
         /// the query boost. Subclasses may override this method to include other
         /// factors (such as idf), or to save the value for inclusion in
-        /// <seealso cref="#normalize(float, float)"/>, etc.
+        /// <seealso cref="Normalize(float, float)"/>, etc.
         /// </summary>
         protected internal virtual float RawNormalizationValue()
         {
@@ -177,7 +178,7 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// No normalization is done. {@code topLevelBoost} is saved in the object,
+        /// No normalization is done. <paramref name="topLevelBoost"/> is saved in the object,
         /// however.
         /// </summary>
         public override void Normalize(float queryNorm, float topLevelBoost)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/DFRSimilarity.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/DFRSimilarity.cs b/src/Lucene.Net/Search/Similarities/DFRSimilarity.cs
index 0e319af..be52875 100644
--- a/src/Lucene.Net/Search/Similarities/DFRSimilarity.cs
+++ b/src/Lucene.Net/Search/Similarities/DFRSimilarity.cs
@@ -25,53 +25,66 @@ namespace Lucene.Net.Search.Similarities
     /// Probabilistic models of information retrieval based on measuring the
     /// divergence from randomness. ACM Trans. Inf. Syst. 20, 4 (October 2002),
     /// 357-389.
-    /// <p>The DFR scoring formula is composed of three separate components: the
+    /// <para>The DFR scoring formula is composed of three separate components: the
     /// <em>basic model</em>, the <em>aftereffect</em> and an additional
     /// <em>normalization</em> component, represented by the classes
-    /// {@code BasicModel}, {@code AfterEffect} and {@code Normalization},
+    /// <see cref="Similarities.BasicModel"/>, <see cref="Similarities.AfterEffect"/> and <see cref="Similarities.Normalization"/>,
     /// respectively. The names of these classes were chosen to match the names of
-    /// their counterparts in the Terrier IR engine.</p>
-    /// <p>To construct a DFRSimilarity, you must specify the implementations for
+    /// their counterparts in the Terrier IR engine.</para>
+    /// <para>To construct a <see cref="DFRSimilarity"/>, you must specify the implementations for
     /// all three components of DFR:
-    /// <ol>
-    ///    <li><seealso cref="BasicModel"/>: Basic model of information content:
-    ///        <ul>
-    ///           <li><seealso cref="BasicModelBE"/>: Limiting form of Bose-Einstein
-    ///           <li><seealso cref="BasicModelG"/>: Geometric approximation of Bose-Einstein
-    ///           <li><seealso cref="BasicModelP"/>: Poisson approximation of the Binomial
-    ///           <li><seealso cref="BasicModelD"/>: Divergence approximation of the Binomial
-    ///           <li><seealso cref="BasicModelIn"/>: Inverse document frequency
-    ///           <li><seealso cref="BasicModelIne"/>: Inverse expected document
-    ///               frequency [mixture of Poisson and IDF]
-    ///           <li><seealso cref="BasicModelIF"/>: Inverse term frequency
-    ///               [approximation of I(ne)]
-    ///        </ul>
-    ///    <li><seealso cref="AfterEffect"/>: First normalization of information
-    ///        gain:
-    ///        <ul>
-    ///           <li><seealso cref="AfterEffectL"/>: Laplace's law of succession
-    ///           <li><seealso cref="AfterEffectB"/>: Ratio of two Bernoulli processes
-    ///           <li><seealso cref="NoAfterEffect"/>: no first normalization
-    ///        </ul>
-    ///    <li><seealso cref="Normalization"/>: Second (length) normalization:
-    ///        <ul>
-    ///           <li><seealso cref="NormalizationH1"/>: Uniform distribution of term
-    ///               frequency
-    ///           <li><seealso cref="NormalizationH2"/>: term frequency density inversely
-    ///               related to length
-    ///           <li><seealso cref="NormalizationH3"/>: term frequency normalization
-    ///               provided by Dirichlet prior
-    ///           <li><seealso cref="NormalizationZ"/>: term frequency normalization provided
-    ///                by a Zipfian relation
-    ///           <li><seealso cref="NoNormalization"/>: no second normalization
-    ///        </ul>
-    /// </ol>
-    /// <p>Note that <em>qtf</em>, the multiplicity of term-occurrence in the query,
-    /// is not handled by this implementation.</p> </summary>
-    /// <seealso cref= BasicModel </seealso>
-    /// <seealso cref= AfterEffect </seealso>
-    /// <seealso cref= Normalization
-    /// @lucene.experimental </seealso>
+    /// <list type="table">
+    ///     <listheader>
+    ///         <term>Component</term>
+    ///         <term>Implementations</term>
+    ///     </listheader>
+    ///     <item>
+    ///         <term><see cref="Similarities.BasicModel"/>: Basic model of information content:</term>
+    ///         <term>
+    ///             <list type="bullet">
+    ///                 <item><description><see cref="BasicModelBE"/>: Limiting form of Bose-Einstein</description></item>
+    ///                 <item><description><see cref="BasicModelG"/>: Geometric approximation of Bose-Einstein</description></item>
+    ///                 <item><description><see cref="BasicModelP"/>: Poisson approximation of the Binomial</description></item>
+    ///                 <item><description><see cref="BasicModelD"/>: Divergence approximation of the Binomial</description></item>
+    ///                 <item><description><see cref="BasicModelIn"/>: Inverse document frequency</description></item>
+    ///                 <item><description><see cref="BasicModelIne"/>: Inverse expected document frequency [mixture of Poisson and IDF]</description></item>
+    ///                 <item><description><see cref="BasicModelIF"/>: Inverse term frequency [approximation of I(ne)]</description></item>
+    ///             </list>
+    ///         </term>
+    ///     </item>
+    ///     <item>
+    ///         <term><see cref="Similarities.AfterEffect"/>: First normalization of information gain:</term>
+    ///         <term>
+    ///             <list type="bullet">
+    ///                 <item><description><see cref="AfterEffectL"/>: Laplace's law of succession</description></item>
+    ///                 <item><description><see cref="AfterEffectB"/>: Ratio of two Bernoulli processes</description></item>
+    ///                 <item><description><see cref="AfterEffect.NoAfterEffect"/>: no first normalization</description></item>
+    ///             </list>
+    ///         </term>
+    ///     </item>
+    ///     <item>
+    ///         <term><see cref="Similarities.Normalization"/>: Second (length) normalization:</term>
+    ///         <term>
+    ///             <list type="bullet">
+    ///                 <item><description><see cref="NormalizationH1"/>: Uniform distribution of term frequency</description></item>
+    ///                 <item><description><see cref="NormalizationH2"/>: term frequency density inversely related to length</description></item>
+    ///                 <item><description><see cref="NormalizationH3"/>: term frequency normalization provided by Dirichlet prior</description></item>
+    ///                 <item><description><see cref="NormalizationZ"/>: term frequency normalization provided by a Zipfian relation</description></item>
+    ///                 <item><description><see cref="Normalization.NoNormalization"/>: no second normalization</description></item>
+    ///             </list>
+    ///         </term>
+    ///     </item>
+    /// </list>
+    /// 
+    /// </para>
+    /// <para>Note that <em>qtf</em>, the multiplicity of term-occurrence in the query,
+    /// is not handled by this implementation.
+    /// </para> 
+    /// @lucene.experimental
+    /// </summary>
+    /// <seealso cref="Similarities.BasicModel"/>
+    /// <seealso cref="Similarities.AfterEffect"/>
+    /// <seealso cref="Similarities.Normalization"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -91,10 +104,10 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// Creates DFRSimilarity from the three components.
-        /// <p>
-        /// Note that <code>null</code> values are not allowed:
+        /// <para/>
+        /// Note that <c>null</c> values are not allowed:
         /// if you want no normalization or after-effect, instead pass
-        /// <seealso cref="NoNormalization"/> or <seealso cref="NoAfterEffect"/> respectively. </summary>
+        /// <see cref="Normalization.NoNormalization"/> or <see cref="AfterEffect.NoAfterEffect"/> respectively. </summary>
         /// <param name="basicModel"> Basic model of information content </param>
         /// <param name="afterEffect"> First normalization of information gain </param>
         /// <param name="normalization"> Second (length) normalization </param>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/DefaultSimilarity.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/DefaultSimilarity.cs b/src/Lucene.Net/Search/Similarities/DefaultSimilarity.cs
index 3b290ec..a0b297f 100644
--- a/src/Lucene.Net/Search/Similarities/DefaultSimilarity.cs
+++ b/src/Lucene.Net/Search/Similarities/DefaultSimilarity.cs
@@ -24,25 +24,25 @@ namespace Lucene.Net.Search.Similarities
     using SmallSingle = Lucene.Net.Util.SmallSingle;
 
     /// <summary>
-    /// Expert: Default scoring implementation which {@link #encodeNormValue(float)
-    /// encodes} norm values as a single byte before being stored. At search time,
+    /// Expert: Default scoring implementation which encodes (<see cref="EncodeNormValue(float)"/>)
+    /// norm values as a single byte before being stored. At search time,
     /// the norm byte value is read from the index
-    /// <seealso cref="Lucene.Net.Store.Directory directory"/> and
-    /// <seealso cref="#decodeNormValue(long) decoded"/> back to a float <i>norm</i> value.
+    /// <see cref="Lucene.Net.Store.Directory"/> and
+    /// decoded (<see cref="DecodeNormValue(long)"/>) back to a float <i>norm</i> value.
     /// this encoding/decoding, while reducing index size, comes with the price of
-    /// precision loss - it is not guaranteed that <i>decode(encode(x)) = x</i>. For
-    /// instance, <i>decode(encode(0.89)) = 0.75</i>.
-    /// <p>
+    /// precision loss - it is not guaranteed that <i>Decode(Encode(x)) = x</i>. For
+    /// instance, <i>Decode(Encode(0.89)) = 0.75</i>.
+    /// <para/>
     /// Compression of norm values to a single byte saves memory at search time,
     /// because once a field is referenced at search time, its norms - for all
     /// documents - are maintained in memory.
-    /// <p>
+    /// <para/>
     /// The rationale supporting such lossy compression of norm values is that given
     /// the difficulty (and inaccuracy) of users to express their true information
-    /// need by a query, only big differences matter. <br>
-    /// &nbsp;<br>
+    /// need by a query, only big differences matter. 
+    /// <para/>
     /// Last, note that search time is too late to modify this <i>norm</i> part of
-    /// scoring, e.g. by using a different <seealso cref="Similarity"/> for search.
+    /// scoring, e.g. by using a different <see cref="Similarity"/> for search.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -68,14 +68,14 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Implemented as <code>overlap / maxOverlap</code>. </summary>
+        /// Implemented as <c>overlap / maxOverlap</c>. </summary>
         public override float Coord(int overlap, int maxOverlap)
         {
             return overlap / (float)maxOverlap;
         }
 
         /// <summary>
-        /// Implemented as <code>1/sqrt(sumOfSquaredWeights)</code>. </summary>
+        /// Implemented as <c>1/sqrt(sumOfSquaredWeights)</c>. </summary>
         public override float QueryNorm(float sumOfSquaredWeights)
         {
             return (float)(1.0 / Math.Sqrt(sumOfSquaredWeights));
@@ -83,7 +83,7 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// Encodes a normalization factor for storage in an index.
-        /// <p>
+        /// <para/>
         /// The encoding uses a three-bit mantissa, a five-bit exponent, and the
         /// zero-exponent point at 15, thus representing values from around 7x10^9 to
         /// 2x10^-9 with about one significant decimal digit of accuracy. Zero is also
@@ -92,8 +92,8 @@ namespace Lucene.Net.Search.Similarities
         /// values too small to represent are rounded up to the smallest positive
         /// representable value.
         /// </summary>
-        /// <seealso cref= Lucene.Net.Document.Field#setBoost(float) </seealso>
-        /// <seealso cref= Lucene.Net.Util.SmallSingle </seealso>
+        /// <seealso cref="Lucene.Net.Documents.Field.Boost"/>
+        /// <seealso cref="Lucene.Net.Util.SmallSingle"/>
         public override sealed long EncodeNormValue(float f)
         {
             return SmallSingle.SingleToSByte315(f);
@@ -102,7 +102,7 @@ namespace Lucene.Net.Search.Similarities
         /// <summary>
         /// Decodes the norm value, assuming it is a single byte.
         /// </summary>
-        /// <seealso cref= #encodeNormValue(float) </seealso>
+        /// <seealso cref="EncodeNormValue(float)"/>
         public override sealed float DecodeNormValue(long norm)
         {
             return NORM_TABLE[(int)(norm & 0xFF)]; // & 0xFF maps negative bytes to positive above 127
@@ -110,13 +110,14 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// Implemented as
-        ///  <code>state.getBoost()*lengthNorm(numTerms)</code>, where
-        ///  <code>numTerms</code> is <seealso cref="FieldInvertState#getLength()"/> if {@link
-        ///  #setDiscountOverlaps} is false, else it's {@link
-        ///  FieldInvertState#getLength()} - {@link
-        ///  FieldInvertState#getNumOverlap()}.
+        /// <c>state.Boost * LengthNorm(numTerms)</c>, where
+        /// <c>numTerms</c> is <see cref="FieldInvertState.Length"/> if 
+        /// <see cref="DiscountOverlaps"/> is <c>false</c>, else it's 
+        /// <see cref="FieldInvertState.Length"/> - 
+        /// <see cref="FieldInvertState.NumOverlap"/>.
         ///
-        ///  @lucene.experimental
+        /// <para/>
+        /// @lucene.experimental
         /// </summary>
         public override float LengthNorm(FieldInvertState state)
         {
@@ -133,48 +134,48 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Implemented as <code>sqrt(freq)</code>. </summary>
+        /// Implemented as <c>Math.Sqrt(freq)</c>. </summary>
         public override float Tf(float freq)
         {
             return (float)Math.Sqrt(freq);
         }
 
         /// <summary>
-        /// Implemented as <code>1 / (distance + 1)</code>. </summary>
+        /// Implemented as <c>1 / (distance + 1)</c>. </summary>
         public override float SloppyFreq(int distance)
         {
             return 1.0f / (distance + 1);
         }
 
         /// <summary>
-        /// The default implementation returns <code>1</code> </summary>
+        /// The default implementation returns <c>1</c> </summary>
         public override float ScorePayload(int doc, int start, int end, BytesRef payload)
         {
             return 1;
         }
 
         /// <summary>
-        /// Implemented as <code>log(numDocs/(docFreq+1)) + 1</code>. </summary>
+        /// Implemented as <c>log(numDocs/(docFreq+1)) + 1</c>. </summary>
         public override float Idf(long docFreq, long numDocs)
         {
             return (float)(Math.Log(numDocs / (double)(docFreq + 1)) + 1.0);
         }
 
         /// <summary>
-        /// True if overlap tokens (tokens with a position of increment of zero) are
+        /// <c>True</c> if overlap tokens (tokens with a position of increment of zero) are
         /// discounted from the document's length.
         /// </summary>
         protected bool m_discountOverlaps = true;
 
         /// <summary>
         /// Determines whether overlap tokens (Tokens with
-        ///  0 position increment) are ignored when computing
-        ///  norm.  By default this is true, meaning overlap
-        ///  tokens do not count when computing norms.
-        ///
-        ///  @lucene.experimental
+        /// 0 position increment) are ignored when computing
+        /// norm.  By default this is true, meaning overlap
+        /// tokens do not count when computing norms.
+        /// <para/>
+        /// @lucene.experimental
         /// </summary>
-        ///  <seealso cref= #computeNorm </seealso>
+        /// <seealso cref="TFIDFSimilarity.ComputeNorm(FieldInvertState)"/>
         public virtual bool DiscountOverlaps
         {
             set

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/Distribution.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/Distribution.cs b/src/Lucene.Net/Search/Similarities/Distribution.cs
index 1b2c3e4..ee92b13 100644
--- a/src/Lucene.Net/Search/Similarities/Distribution.cs
+++ b/src/Lucene.Net/Search/Similarities/Distribution.cs
@@ -21,9 +21,11 @@ namespace Lucene.Net.Search.Similarities
 
     /// <summary>
     /// The probabilistic distribution used to model term occurrence
-    /// in information-based models. </summary>
-    /// <seealso cref= IBSimilarity
-    /// @lucene.experimental </seealso>
+    /// in information-based models. 
+    /// <para/>
+    /// @lucene.experimental
+    /// </summary>
+    /// <seealso cref="IBSimilarity"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -43,7 +45,7 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// Explains the score. Returns the name of the model only, since
-        /// both {@code tfn} and {@code lambda} are explained elsewhere.
+        /// both <c>tfn</c> and <c>lambda</c> are explained elsewhere.
         /// </summary>
         public virtual Explanation Explain(BasicStats stats, float tfn, float lambda)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/DistributionLL.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/DistributionLL.cs b/src/Lucene.Net/Search/Similarities/DistributionLL.cs
index 2841d2a..ed8ddea 100644
--- a/src/Lucene.Net/Search/Similarities/DistributionLL.cs
+++ b/src/Lucene.Net/Search/Similarities/DistributionLL.cs
@@ -21,9 +21,9 @@ namespace Lucene.Net.Search.Similarities
 
     /// <summary>
     /// Log-logistic distribution.
-    /// <p>Unlike for DFR, the natural logarithm is used, as
+    /// <para>Unlike for DFR, the natural logarithm is used, as
     /// it is faster to compute and the original paper does not express any
-    /// preference to a specific base.</p>
+    /// preference to a specific base.</para>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/DistributionSPL.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/DistributionSPL.cs b/src/Lucene.Net/Search/Similarities/DistributionSPL.cs
index 55c7639..ade1b05 100644
--- a/src/Lucene.Net/Search/Similarities/DistributionSPL.cs
+++ b/src/Lucene.Net/Search/Similarities/DistributionSPL.cs
@@ -22,9 +22,9 @@ namespace Lucene.Net.Search.Similarities
     /// <summary>
     /// The smoothed power-law (SPL) distribution for the information-based framework
     /// that is described in the original paper.
-    /// <p>Unlike for DFR, the natural logarithm is used, as
+    /// <para>Unlike for DFR, the natural logarithm is used, as
     /// it is faster to compute and the original paper does not express any
-    /// preference to a specific base.</p>
+    /// preference to a specific base.</para>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/IBSimilarity.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/IBSimilarity.cs b/src/Lucene.Net/Search/Similarities/IBSimilarity.cs
index fecf57a..19223ee 100644
--- a/src/Lucene.Net/Search/Similarities/IBSimilarity.cs
+++ b/src/Lucene.Net/Search/Similarities/IBSimilarity.cs
@@ -22,51 +22,64 @@ namespace Lucene.Net.Search.Similarities
 
     /// <summary>
     /// Provides a framework for the family of information-based models, as described
-    /// in St&eacute;phane Clinchant and Eric Gaussier. 2010. Information-based
+    /// in St&#233;phane Clinchant and Eric Gaussier. 2010. Information-based
     /// models for ad hoc IR. In Proceeding of the 33rd international ACM SIGIR
     /// conference on Research and development in information retrieval (SIGIR '10).
     /// ACM, New York, NY, USA, 234-241.
-    /// <p>The retrieval function is of the form <em>RSV(q, d) = &sum;
+    /// <para>The retrieval function is of the form <em>RSV(q, d) = &#8721;
     /// -x<sup>q</sup><sub>w</sub> log Prob(X<sub>w</sub> &gt;=
-    /// t<sup>d</sup><sub>w</sub> | &lambda;<sub>w</sub>)</em>, where
-    /// <ul>
-    ///   <li><em>x<sup>q</sup><sub>w</sub></em> is the query boost;</li>
-    ///   <li><em>X<sub>w</sub></em> is a random variable that counts the occurrences
-    ///   of word <em>w</em>;</li>
-    ///   <li><em>t<sup>d</sup><sub>w</sub></em> is the normalized term frequency;</li>
-    ///   <li><em>&lambda;<sub>w</sub></em> is a parameter.</li>
-    /// </ul>
-    /// </p>
-    /// <p>The framework described in the paper has many similarities to the DFR
-    /// framework (see <seealso cref="DFRSimilarity"/>). It is possible that the two
-    /// Similarities will be merged at one point.</p>
-    /// <p>To construct an IBSimilarity, you must specify the implementations for
+    /// t<sup>d</sup><sub>w</sub> | &#955;<sub>w</sub>)</em>, where
+    /// <list type="bullet">
+    ///     <item><description><em>x<sup>q</sup><sub>w</sub></em> is the query boost;</description></item>
+    ///     <item><description><em>X<sub>w</sub></em> is a random variable that counts the occurrences
+    ///         of word <em>w</em>;</description></item>
+    ///     <item><description><em>t<sup>d</sup><sub>w</sub></em> is the normalized term frequency;</description></item>
+    ///     <item><description><em>&#955;<sub>w</sub></em> is a parameter.</description></item>
+    /// </list>
+    /// </para>
+    /// <para>The framework described in the paper has many similarities to the DFR
+    /// framework (see <see cref="DFRSimilarity"/>). It is possible that the two
+    /// Similarities will be merged at one point.</para>
+    /// <para>To construct an <see cref="IBSimilarity"/>, you must specify the implementations for
     /// all three components of the Information-Based model.
-    /// <ol>
-    ///     <li><seealso cref="Distribution"/>: Probabilistic distribution used to
-    ///         model term occurrence
-    ///         <ul>
-    ///             <li><seealso cref="DistributionLL"/>: Log-logistic</li>
-    ///             <li><seealso cref="DistributionLL"/>: Smoothed power-law</li>
-    ///         </ul>
-    ///     </li>
-    ///     <li><seealso cref="Lambda"/>: &lambda;<sub>w</sub> parameter of the
-    ///         probability distribution
-    ///         <ul>
-    ///             <li><seealso cref="LambdaDF"/>: <code>N<sub>w</sub>/N</code> or average
-    ///                 number of documents where w occurs</li>
-    ///             <li><seealso cref="LambdaTTF"/>: <code>F<sub>w</sub>/N</code> or
-    ///                 average number of occurrences of w in the collection</li>
-    ///         </ul>
-    ///     </li>
-    ///     <li><seealso cref="Normalization"/>: Term frequency normalization
-    ///         <blockquote>Any supported DFR normalization (listed in
-    ///                      <seealso cref="DFRSimilarity"/>)</blockquote>
-    ///     </li>
-    /// </ol>
-    /// <p> </summary>
-    /// <seealso cref= DFRSimilarity
-    /// @lucene.experimental  </seealso>
+    /// <list type="table">
+    ///     <listheader>
+    ///         <term>Component</term>
+    ///         <term>Implementations</term>
+    ///     </listheader>
+    ///     <item>
+    ///         <term><see cref="Distribution"/>: Probabilistic distribution used to
+    ///             model term occurrence</term>
+    ///         <term>
+    ///             <list type="bullet">
+    ///                 <item><description><see cref="DistributionLL"/>: Log-logistic</description></item>
+    ///                 <item><description><see cref="DistributionSPL"/>: Smoothed power-law</description></item>
+    ///             </list>
+    ///         </term>
+    ///     </item>
+    ///     <item>
+    ///         <term><see cref="Lambda"/>: &#955;<sub>w</sub> parameter of the
+    ///             probability distribution</term>
+    ///         <term>
+    ///             <list type="bullet">
+    ///                 <item><description><see cref="LambdaDF"/>: <c>N<sub>w</sub>/N</c> or average
+    ///                     number of documents where w occurs</description></item>
+    ///                 <item><description><see cref="LambdaTTF"/>: <c>F<sub>w</sub>/N</c> or
+    ///                     average number of occurrences of w in the collection</description></item>
+    ///             </list>
+    ///         </term>
+    ///     </item>
+    ///     <item>
+    ///         <term><see cref="Normalization"/>: Term frequency normalization</term>
+    ///         <term>Any supported DFR normalization (listed in
+    ///                      <see cref="DFRSimilarity"/>)
+    ///         </term>
+    ///     </item>
+    /// </list>
+    /// </para>
+    /// @lucene.experimental
+    /// </summary>
+    /// <seealso cref="DFRSimilarity"/>
     [ExceptionToClassNameConvention]
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -78,7 +91,7 @@ namespace Lucene.Net.Search.Similarities
         protected internal readonly Distribution m_distribution;
 
         /// <summary>
-        /// The <em>lambda (&lambda;<sub>w</sub>)</em> parameter. </summary>
+        /// The <em>lambda (&#955;<sub>w</sub>)</em> parameter. </summary>
         protected internal readonly Lambda m_lambda;
 
         /// <summary>
@@ -87,12 +100,12 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// Creates IBSimilarity from the three components.
-        /// <p>
-        /// Note that <code>null</code> values are not allowed:
+        /// <para/>
+        /// Note that <c>null</c> values are not allowed:
         /// if you want no normalization, instead pass
-        /// <seealso cref="NoNormalization"/>. </summary>
+        /// <see cref="Normalization.NoNormalization"/>. </summary>
         /// <param name="distribution"> probabilistic distribution modeling term occurrence </param>
-        /// <param name="lambda"> distribution's &lambda;<sub>w</sub> parameter </param>
+        /// <param name="lambda"> distribution's &#955;<sub>w</sub> parameter </param>
         /// <param name="normalization"> term frequency normalization </param>
         public IBSimilarity(Distribution distribution, Lambda lambda, Normalization normalization)
         {
@@ -121,9 +134,9 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// The name of IB methods follow the pattern
-        /// {@code IB <distribution> <lambda><normalization>}. The name of the
+        /// <c>IB &lt;distribution&gt; &lt;lambda&gt;&lt;normalization&gt;</c>. The name of the
         /// distribution is the same as in the original paper; for the names of lambda
-        /// parameters, refer to the javadoc of the <seealso cref="Lambda"/> classes.
+        /// parameters, refer to the doc of the <see cref="Similarities.Lambda"/> classes.
         /// </summary>
         public override string ToString()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/LMDirichletSimilarity.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/LMDirichletSimilarity.cs b/src/Lucene.Net/Search/Similarities/LMDirichletSimilarity.cs
index 8a2da89..322f77b 100644
--- a/src/Lucene.Net/Search/Similarities/LMDirichletSimilarity.cs
+++ b/src/Lucene.Net/Search/Similarities/LMDirichletSimilarity.cs
@@ -27,12 +27,12 @@ namespace Lucene.Net.Search.Similarities
     /// Ad Hoc information retrieval. In Proceedings of the 24th annual international
     /// ACM SIGIR conference on Research and development in information retrieval
     /// (SIGIR '01). ACM, New York, NY, USA, 334-342.
-    /// <p>
+    /// <para>
     /// The formula as defined the paper assigns a negative score to documents that
     /// contain the term, but with fewer occurrences than predicted by the collection
-    /// language model. The Lucene implementation returns {@code 0} for such
+    /// language model. The Lucene implementation returns <c>0</c> for such
     /// documents.
-    /// </p>
+    /// </para>
     ///
     /// @lucene.experimental
     /// </summary>
@@ -42,11 +42,11 @@ namespace Lucene.Net.Search.Similarities
     public class LMDirichletSimilarity : LMSimilarity
     {
         /// <summary>
-        /// The &mu; parameter. </summary>
+        /// The &#956; parameter. </summary>
         private readonly float mu;
 
         /// <summary>
-        /// Instantiates the similarity with the provided &mu; parameter. </summary>
+        /// Instantiates the similarity with the provided &#956; parameter. </summary>
         public LMDirichletSimilarity(ICollectionModel collectionModel, float mu)
             : base(collectionModel)
         {
@@ -54,21 +54,21 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Instantiates the similarity with the provided &mu; parameter. </summary>
+        /// Instantiates the similarity with the provided &#956; parameter. </summary>
         public LMDirichletSimilarity(float mu)
         {
             this.mu = mu;
         }
 
         /// <summary>
-        /// Instantiates the similarity with the default &mu; value of 2000. </summary>
+        /// Instantiates the similarity with the default &#956; value of 2000. </summary>
         public LMDirichletSimilarity(ICollectionModel collectionModel)
             : this(collectionModel, 2000)
         {
         }
 
         /// <summary>
-        /// Instantiates the similarity with the default &mu; value of 2000. </summary>
+        /// Instantiates the similarity with the default &#956; value of 2000. </summary>
         public LMDirichletSimilarity()
             : this(2000)
         {
@@ -97,7 +97,7 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Returns the &mu; parameter. </summary>
+        /// Returns the &#956; parameter. </summary>
         public virtual float Mu
         {
             get

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/LMJelinekMercerSimilarity.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/LMJelinekMercerSimilarity.cs b/src/Lucene.Net/Search/Similarities/LMJelinekMercerSimilarity.cs
index e932190..108f0d0 100644
--- a/src/Lucene.Net/Search/Similarities/LMJelinekMercerSimilarity.cs
+++ b/src/Lucene.Net/Search/Similarities/LMJelinekMercerSimilarity.cs
@@ -27,9 +27,9 @@ namespace Lucene.Net.Search.Similarities
     /// models applied to Ad Hoc information retrieval. In Proceedings of the 24th
     /// annual international ACM SIGIR conference on Research and development in
     /// information retrieval (SIGIR '01). ACM, New York, NY, USA, 334-342.
-    /// <p>The model has a single parameter, &lambda;. According to said paper, the
+    /// <para>The model has a single parameter, &#955;. According to said paper, the
     /// optimal value depends on both the collection and the query. The optimal value
-    /// is around {@code 0.1} for title queries and {@code 0.7} for long queries.</p>
+    /// is around <c>0.1</c> for title queries and <c>0.7</c> for long queries.</para>
     ///
     /// @lucene.experimental
     /// </summary>
@@ -39,11 +39,11 @@ namespace Lucene.Net.Search.Similarities
     public class LMJelinekMercerSimilarity : LMSimilarity
     {
         /// <summary>
-        /// The &lambda; parameter. </summary>
+        /// The &#955; parameter. </summary>
         private readonly float lambda;
 
         /// <summary>
-        /// Instantiates with the specified collectionModel and &lambda; parameter. </summary>
+        /// Instantiates with the specified <paramref name="collectionModel"/> and &#955; parameter. </summary>
         public LMJelinekMercerSimilarity(ICollectionModel collectionModel, float lambda)
             : base(collectionModel)
         {
@@ -51,7 +51,7 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Instantiates with the specified &lambda; parameter. </summary>
+        /// Instantiates with the specified &#955; parameter. </summary>
         public LMJelinekMercerSimilarity(float lambda)
         {
             this.lambda = lambda;
@@ -73,7 +73,7 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Returns the &lambda; parameter. </summary>
+        /// Returns the &#955; parameter. </summary>
         public virtual float Lambda
         {
             get

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/LMSimilarity.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/LMSimilarity.cs b/src/Lucene.Net/Search/Similarities/LMSimilarity.cs
index 6c0e3d9..721d576 100644
--- a/src/Lucene.Net/Search/Similarities/LMSimilarity.cs
+++ b/src/Lucene.Net/Search/Similarities/LMSimilarity.cs
@@ -23,16 +23,16 @@ namespace Lucene.Net.Search.Similarities
     /// <summary>
     /// Abstract superclass for language modeling Similarities. The following inner
     /// types are introduced:
-    /// <ul>
-    ///   <li><seealso cref="LMStats"/>, which defines a new statistic, the probability that
-    ///   the collection language model generates the current term;</li>
-    ///   <li><seealso cref="ICollectionModel"/>, which is a strategy interface for object that
-    ///   compute the collection language model {@code p(w|C)};</li>
-    ///   <li><seealso cref="DefaultCollectionModel"/>, an implementation of the former, that
-    ///   computes the term probability as the number of occurrences of the term in the
-    ///   collection, divided by the total number of tokens.</li>
-    /// </ul>
-    ///
+    /// <list type="bullet">
+    ///     <item><description><see cref="LMStats"/>, which defines a new statistic, the probability that
+    ///         the collection language model generates the current term;</description></item>
+    ///     <item><description><see cref="ICollectionModel"/>, which is a strategy interface for object that
+    ///         compute the collection language model <c>p(w|C)</c>;</description></item>
+    ///     <item><description><see cref="DefaultCollectionModel"/>, an implementation of the former, that
+    ///         computes the term probability as the number of occurrences of the term in the
+    ///         collection, divided by the total number of tokens.</description></item>
+    /// </list>
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -82,16 +82,16 @@ namespace Lucene.Net.Search.Similarities
         /// <summary>
         /// Returns the name of the LM method. The values of the parameters should be
         /// included as well.
-        /// <p>Used in <seealso cref="#toString()"/></p>.
+        /// <para>Used in <see cref="ToString()"/></para>.
         /// </summary>
         public abstract string GetName();
 
         /// <summary>
         /// Returns the name of the LM method. If a custom collection model strategy is
         /// used, its name is included as well. </summary>
-        /// <seealso cref= #getName() </seealso>
-        /// <seealso cref= CollectionModel#getName() </seealso>
-        /// <seealso cref= DefaultCollectionModel  </seealso>
+        /// <seealso cref="GetName()"/>
+        /// <seealso cref="ICollectionModel.Name"/>
+        /// <seealso cref="DefaultCollectionModel"/>
         public override string ToString()
         {
             string coll = m_collectionModel.Name;
@@ -117,7 +117,7 @@ namespace Lucene.Net.Search.Similarities
             private float collectionProbability;
 
             /// <summary>
-            /// Creates LMStats for the provided field and query-time boost
+            /// Creates <see cref="LMStats"/> for the provided field and query-time boost
             /// </summary>
             public LMStats(string field, float queryBoost)
                 : base(field, queryBoost)
@@ -146,19 +146,19 @@ namespace Lucene.Net.Search.Similarities
         public interface ICollectionModel
         {
             /// <summary>
-            /// Computes the probability {@code p(w|C)} according to the language model
+            /// Computes the probability <c>p(w|C)</c> according to the language model
             /// strategy for the current term.
             /// </summary>
             float ComputeProbability(BasicStats stats);
 
             /// <summary>
             /// The name of the collection model strategy. </summary>
-            string Name { get; }
+            string Name { get; } // LUCENENET TODO: API Change to GetName() ? (consistency)
         }
 
         /// <summary>
-        /// Models {@code p(w|C)} as the number of occurrences of the term in the
-        /// collection, divided by the total number of tokens {@code + 1}.
+        /// Models <c>p(w|C)</c> as the number of occurrences of the term in the
+        /// collection, divided by the total number of tokens <c>+ 1</c>.
         /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/Lambda.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/Lambda.cs b/src/Lucene.Net/Search/Similarities/Lambda.cs
index 1621113..12d1976 100644
--- a/src/Lucene.Net/Search/Similarities/Lambda.cs
+++ b/src/Lucene.Net/Search/Similarities/Lambda.cs
@@ -20,10 +20,12 @@ namespace Lucene.Net.Search.Similarities
      */
 
     /// <summary>
-    /// The <em>lambda (&lambda;<sub>w</sub>)</em> parameter in information-based
-    /// models. </summary>
-    /// <seealso cref= IBSimilarity
-    /// @lucene.experimental </seealso>
+    /// The <em>lambda (&#955;<sub>w</sub>)</em> parameter in information-based
+    /// models. 
+    /// <para/>
+    /// @lucene.experimental
+    /// </summary>
+    /// <seealso cref="IBSimilarity"/> 
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/LambdaDF.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/LambdaDF.cs b/src/Lucene.Net/Search/Similarities/LambdaDF.cs
index f6c43f5..49404f6 100644
--- a/src/Lucene.Net/Search/Similarities/LambdaDF.cs
+++ b/src/Lucene.Net/Search/Similarities/LambdaDF.cs
@@ -20,7 +20,8 @@ namespace Lucene.Net.Search.Similarities
      */
 
     /// <summary>
-    /// Computes lambda as {@code docFreq+1 / numberOfDocuments+1}.
+    /// Computes lambda as <c>docFreq+1 / numberOfDocuments+1</c>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/LambdaTTF.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/LambdaTTF.cs b/src/Lucene.Net/Search/Similarities/LambdaTTF.cs
index 6f398bd..f65705b 100644
--- a/src/Lucene.Net/Search/Similarities/LambdaTTF.cs
+++ b/src/Lucene.Net/Search/Similarities/LambdaTTF.cs
@@ -20,7 +20,8 @@ namespace Lucene.Net.Search.Similarities
      */
 
     /// <summary>
-    /// Computes lambda as {@code totalTermFreq+1 / numberOfDocuments+1}.
+    /// Computes lambda as <c>totalTermFreq+1 / numberOfDocuments+1</c>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/MultiSimilarity.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/MultiSimilarity.cs b/src/Lucene.Net/Search/Similarities/MultiSimilarity.cs
index 6064822..49fb5d9 100644
--- a/src/Lucene.Net/Search/Similarities/MultiSimilarity.cs
+++ b/src/Lucene.Net/Search/Similarities/MultiSimilarity.cs
@@ -27,6 +27,7 @@ namespace Lucene.Net.Search.Similarities
     /// Implements the CombSUM method for combining evidence from multiple
     /// similarity values described in: Joseph A. Shaw, Edward A. Fox.
     /// In Text REtrieval Conference (1993), pp. 243-252
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -39,8 +40,8 @@ namespace Lucene.Net.Search.Similarities
         protected internal readonly Similarity[] m_sims;
 
         /// <summary>
-        /// Creates a MultiSimilarity which will sum the scores
-        /// of the provided <code>sims</code>.
+        /// Creates a <see cref="MultiSimilarity"/> which will sum the scores
+        /// of the provided <paramref name="sims"/>.
         /// </summary>
         public MultiSimilarity(Similarity[] sims)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/Normalization.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/Normalization.cs b/src/Lucene.Net/Search/Similarities/Normalization.cs
index d1a9cce..c80158e 100644
--- a/src/Lucene.Net/Search/Similarities/Normalization.cs
+++ b/src/Lucene.Net/Search/Similarities/Normalization.cs
@@ -20,11 +20,12 @@ namespace Lucene.Net.Search.Similarities
      */
 
     /// <summary>
-    /// this class acts as the base class for the implementations of the term
+    /// This class acts as the base class for the implementations of the term
     /// frequency normalization methods in the DFR framework.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// <seealso cref= DFRSimilarity
-    /// @lucene.experimental </seealso>
+    /// <seealso cref="DFRSimilarity"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -45,10 +46,10 @@ namespace Lucene.Net.Search.Similarities
 
         /// <summary>
         /// Returns an explanation for the normalized term frequency.
-        /// <p>The default normalization methods use the field length of the document
+        /// <para>The default normalization methods use the field length of the document
         /// and the average field length to compute the normalized term frequency.
-        /// this method provides a generic explanation for such methods.
-        /// Subclasses that use other statistics must override this method.</p>
+        /// This method provides a generic explanation for such methods.
+        /// Subclasses that use other statistics must override this method.</para>
         /// </summary>
         public virtual Explanation Explain(BasicStats stats, float tf, float len)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/NormalizationH1.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/NormalizationH1.cs b/src/Lucene.Net/Search/Similarities/NormalizationH1.cs
index 65a24ae..6a0a160 100644
--- a/src/Lucene.Net/Search/Similarities/NormalizationH1.cs
+++ b/src/Lucene.Net/Search/Similarities/NormalizationH1.cs
@@ -21,12 +21,12 @@ namespace Lucene.Net.Search.Similarities
 
     /// <summary>
     /// Normalization model that assumes a uniform distribution of the term frequency.
-    /// <p>While this model is parameterless in the
+    /// <para>While this model is parameterless in the
     /// <a href="http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.101.742">
     /// original article</a>, <a href="http://dl.acm.org/citation.cfm?id=1835490">
-    /// information-based models</a> (see <seealso cref="IBSimilarity"/>) introduced a
+    /// information-based models</a> (see <see cref="IBSimilarity"/>) introduced a
     /// multiplying factor.
-    /// The default value for the {@code c} parameter is {@code 1}.</p>
+    /// The default value for the <c>c</c> parameter is <c>1</c>.</para>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -37,8 +37,8 @@ namespace Lucene.Net.Search.Similarities
         private readonly float c;
 
         /// <summary>
-        /// Creates NormalizationH1 with the supplied parameter <code>c</code>. </summary>
-        /// <param name="c"> hyper-parameter that controls the term frequency
+        /// Creates <see cref="NormalizationH1"/> with the supplied parameter <paramref name="c"/>. </summary>
+        /// <param name="c"> Hyper-parameter that controls the term frequency
         /// normalization with respect to the document length. </param>
         public NormalizationH1(float c)
         {
@@ -46,7 +46,7 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Calls <seealso cref="#NormalizationH1(float) NormalizationH1(1)"/>
+        /// Calls <see cref="T:NormalizationH1(1)"/>
         /// </summary>
         public NormalizationH1()
             : this(1)
@@ -64,8 +64,8 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Returns the <code>c</code> parameter. </summary>
-        /// <seealso cref= #NormalizationH1(float) </seealso>
+        /// Returns the <c>c</c> parameter. </summary>
+        /// <seealso cref="NormalizationH1(float)"/>
         public virtual float C
         {
             get

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/NormalizationH2.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/NormalizationH2.cs b/src/Lucene.Net/Search/Similarities/NormalizationH2.cs
index a12ddd6..2a43ec5 100644
--- a/src/Lucene.Net/Search/Similarities/NormalizationH2.cs
+++ b/src/Lucene.Net/Search/Similarities/NormalizationH2.cs
@@ -22,11 +22,11 @@ namespace Lucene.Net.Search.Similarities
     /// <summary>
     /// Normalization model in which the term frequency is inversely related to the
     /// length.
-    /// <p>While this model is parameterless in the
+    /// <para>While this model is parameterless in the
     /// <a href="http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.101.742">
     /// original article</a>, the <a href="http://theses.gla.ac.uk/1570/">thesis</a>
     /// introduces the parameterized variant.
-    /// The default value for the {@code c} parameter is {@code 1}.</p>
+    /// The default value for the <c>c</c> parameter is <c>1</c>.</para>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -37,8 +37,8 @@ namespace Lucene.Net.Search.Similarities
         private readonly float c;
 
         /// <summary>
-        /// Creates NormalizationH2 with the supplied parameter <code>c</code>. </summary>
-        /// <param name="c"> hyper-parameter that controls the term frequency
+        /// Creates <see cref="NormalizationH2"/> with the supplied parameter <paramref name="c"/>. </summary>
+        /// <param name="c"> Hyper-parameter that controls the term frequency
         /// normalization with respect to the document length. </param>
         public NormalizationH2(float c)
         {
@@ -46,7 +46,7 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Calls <seealso cref="#NormalizationH2(float) NormalizationH2(1)"/>
+        /// Calls <see cref="T:NormalizationH2(1)"/>
         /// </summary>
         public NormalizationH2()
             : this(1)
@@ -64,8 +64,8 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Returns the <code>c</code> parameter. </summary>
-        /// <seealso cref= #NormalizationH2(float) </seealso>
+        /// Returns the <c>c</c> parameter. </summary>
+        /// <seealso cref="NormalizationH2(float)"/>
         public virtual float C
         {
             get

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/NormalizationH3.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/NormalizationH3.cs b/src/Lucene.Net/Search/Similarities/NormalizationH3.cs
index 3efb568..82d8f40 100644
--- a/src/Lucene.Net/Search/Similarities/NormalizationH3.cs
+++ b/src/Lucene.Net/Search/Similarities/NormalizationH3.cs
@@ -21,6 +21,7 @@ namespace Lucene.Net.Search.Similarities
 
     /// <summary>
     /// Dirichlet Priors normalization
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -31,7 +32,7 @@ namespace Lucene.Net.Search.Similarities
         private readonly float mu;
 
         /// <summary>
-        /// Calls <seealso cref="#NormalizationH3(float) NormalizationH3(800)"/>
+        /// Calls <see cref="T:NormalizationH3(800)"/>
         /// </summary>
         public NormalizationH3()
             : this(800F)
@@ -39,8 +40,8 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Creates NormalizationH3 with the supplied parameter <code>&mu;</code>. </summary>
-        /// <param name="mu"> smoothing parameter <code>&mu;</code> </param>
+        /// Creates <see cref="NormalizationH3"/> with the supplied parameter <c>&#956;</c>. </summary>
+        /// <param name="mu"> smoothing parameter <c>&#956;</c> </param>
         public NormalizationH3(float mu)
         {
             this.mu = mu;
@@ -57,8 +58,8 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Returns the parameter <code>&mu;</code> </summary>
-        /// <seealso cref= #NormalizationH3(float) </seealso>
+        /// Returns the parameter <c>&#956;</c> </summary>
+        /// <seealso cref="NormalizationH3(float)"/>
         public virtual float Mu
         {
             get

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/NormalizationZ.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/NormalizationZ.cs b/src/Lucene.Net/Search/Similarities/NormalizationZ.cs
index d8d5df8..4eef520 100644
--- a/src/Lucene.Net/Search/Similarities/NormalizationZ.cs
+++ b/src/Lucene.Net/Search/Similarities/NormalizationZ.cs
@@ -21,6 +21,7 @@ namespace Lucene.Net.Search.Similarities
 
     /// <summary>
     /// Pareto-Zipf Normalization
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -31,7 +32,7 @@ namespace Lucene.Net.Search.Similarities
         internal readonly float z;
 
         /// <summary>
-        /// Calls <seealso cref="#NormalizationZ(float) NormalizationZ(0.3)"/>
+        /// Calls <see cref="T:NormalizationZ(0.3)"/>
         /// </summary>
         public NormalizationZ()
             : this(0.30F)
@@ -39,8 +40,8 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Creates NormalizationZ with the supplied parameter <code>z</code>. </summary>
-        /// <param name="z"> represents <code>A/(A+1)</code> where <code>A</code>
+        /// Creates <see cref="NormalizationZ"/> with the supplied parameter <paramref name="z"/>. </summary>
+        /// <param name="z"> represents <c>A/(A+1)</c> where <c>A</c>
         ///          measures the specificity of the language. </param>
         public NormalizationZ(float z)
         {
@@ -58,8 +59,8 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Returns the parameter <code>z</code> </summary>
-        /// <seealso cref= #NormalizationZ(float) </seealso>
+        /// Returns the parameter <c>z</c> </summary>
+        /// <seealso cref="NormalizationZ(float)"/>
         public virtual float Z
         {
             get

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a1541c1/src/Lucene.Net/Search/Similarities/PerFieldSimilarityWrapper.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Similarities/PerFieldSimilarityWrapper.cs b/src/Lucene.Net/Search/Similarities/PerFieldSimilarityWrapper.cs
index 7197ba9..27d327b 100644
--- a/src/Lucene.Net/Search/Similarities/PerFieldSimilarityWrapper.cs
+++ b/src/Lucene.Net/Search/Similarities/PerFieldSimilarityWrapper.cs
@@ -23,11 +23,11 @@ namespace Lucene.Net.Search.Similarities
     using FieldInvertState = Lucene.Net.Index.FieldInvertState;
 
     /// <summary>
-    /// Provides the ability to use a different <seealso cref="Similarity"/> for different fields.
-    /// <p>
-    /// Subclasses should implement <seealso cref="#get(String)"/> to return an appropriate
-    /// Similarity (for example, using field-specific parameter values) for the field.
-    ///
+    /// Provides the ability to use a different <see cref="Similarity"/> for different fields.
+    /// <para/>
+    /// Subclasses should implement <see cref="Get(string)"/> to return an appropriate
+    /// <see cref="Similarity"/> (for example, using field-specific parameter values) for the field.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -63,7 +63,7 @@ namespace Lucene.Net.Search.Similarities
         }
 
         /// <summary>
-        /// Returns a <seealso cref="Similarity"/> for scoring a field.
+        /// Returns a <see cref="Similarity"/> for scoring a field.
         /// </summary>
         public abstract Similarity Get(string name);
 


[24/48] lucenenet git commit: Lucene.Net.Misc: Fixed XML documentation warnings

Posted by ni...@apache.org.
Lucene.Net.Misc: Fixed XML documentation warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/dd55920d
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/dd55920d
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/dd55920d

Branch: refs/heads/master
Commit: dd55920d8f12a512902c15d41c554b95477e7c1c
Parents: 6267463
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Sun Jun 4 04:29:09 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Sun Jun 4 04:29:09 2017 +0700

----------------------------------------------------------------------
 src/Lucene.Net.Misc/Index/MultiPassIndexSplitter.cs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/dd55920d/src/Lucene.Net.Misc/Index/MultiPassIndexSplitter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Misc/Index/MultiPassIndexSplitter.cs b/src/Lucene.Net.Misc/Index/MultiPassIndexSplitter.cs
index 8ee1541..bca1284 100644
--- a/src/Lucene.Net.Misc/Index/MultiPassIndexSplitter.cs
+++ b/src/Lucene.Net.Misc/Index/MultiPassIndexSplitter.cs
@@ -39,8 +39,8 @@ namespace Lucene.Net.Index
     /// 
     /// </para>
     /// <para><b>NOTE</b>: this tool is unaware of documents added
-    /// atomically via <see cref="IndexWriter.AddDocuments"/> or 
-    /// <see cref="IndexWriter.UpdateDocuments"/>, which means it can easily
+    /// atomically via <see cref="IndexWriter.AddDocuments(IEnumerable{IEnumerable{IIndexableField}}, Analysis.Analyzer)"/> or 
+    /// <see cref="IndexWriter.UpdateDocuments(Term, IEnumerable{IEnumerable{IIndexableField}}, Analysis.Analyzer)"/>, which means it can easily
     /// break up such document groups.
     /// </para>
     /// </summary>


[06/48] lucenenet git commit: Lucene.Net.Search: Fixed up documentation comments

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/MultiTermQueryWrapperFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/MultiTermQueryWrapperFilter.cs b/src/Lucene.Net/Search/MultiTermQueryWrapperFilter.cs
index 0b3401e..d172657 100644
--- a/src/Lucene.Net/Search/MultiTermQueryWrapperFilter.cs
+++ b/src/Lucene.Net/Search/MultiTermQueryWrapperFilter.cs
@@ -31,17 +31,17 @@ namespace Lucene.Net.Search
     using TermsEnum = Lucene.Net.Index.TermsEnum;
 
     /// <summary>
-    /// A wrapper for <seealso cref="MultiTermQuery"/>, that exposes its
-    /// functionality as a <seealso cref="Filter"/>.
-    /// <P>
-    /// <code>MultiTermQueryWrapperFilter</code> is not designed to
-    /// be used by itself. Normally you subclass it to provide a Filter
-    /// counterpart for a <seealso cref="MultiTermQuery"/> subclass.
-    /// <P>
-    /// For example, <seealso cref="TermRangeFilter"/> and <seealso cref="PrefixFilter"/> extend
-    /// <code>MultiTermQueryWrapperFilter</code>.
-    /// this class also provides the functionality behind
-    /// <seealso cref="MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE"/>;
+    /// A wrapper for <see cref="MultiTermQuery"/>, that exposes its
+    /// functionality as a <see cref="Filter"/>.
+    /// <para/>
+    /// <see cref="MultiTermQueryWrapperFilter{Q}"/> is not designed to
+    /// be used by itself. Normally you subclass it to provide a <see cref="Filter"/>
+    /// counterpart for a <see cref="MultiTermQuery"/> subclass.
+    /// <para/>
+    /// For example, <see cref="TermRangeFilter"/> and <see cref="PrefixFilter"/> extend
+    /// <see cref="MultiTermQueryWrapperFilter{Q}"/>.
+    /// This class also provides the functionality behind
+    /// <see cref="MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE"/>;
     /// this is why it is not abstract.
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -52,7 +52,7 @@ namespace Lucene.Net.Search
         protected readonly Q m_query;
 
         /// <summary>
-        /// Wrap a <seealso cref="MultiTermQuery"/> as a Filter.
+        /// Wrap a <see cref="MultiTermQuery"/> as a <see cref="Filter"/>.
         /// </summary>
         protected internal MultiTermQueryWrapperFilter(Q query)
         {
@@ -98,7 +98,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns a DocIdSet with documents that should be permitted in search
+        /// Returns a <see cref="DocIdSet"/> with documents that should be permitted in search
         /// results.
         /// </summary>
         public override DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/NGramPhraseQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/NGramPhraseQuery.cs b/src/Lucene.Net/Search/NGramPhraseQuery.cs
index 2ce79ee..2f208ae 100644
--- a/src/Lucene.Net/Search/NGramPhraseQuery.cs
+++ b/src/Lucene.Net/Search/NGramPhraseQuery.cs
@@ -24,12 +24,11 @@ namespace Lucene.Net.Search
     using Term = Lucene.Net.Index.Term;
 
     /// <summary>
-    /// this is a <seealso cref="PhraseQuery"/> which is optimized for n-gram phrase query.
+    /// This is a <see cref="PhraseQuery"/> which is optimized for n-gram phrase query.
     /// For example, when you query "ABCD" on a 2-gram field, you may want to use
-    /// NGramPhraseQuery rather than <seealso cref="PhraseQuery"/>, because NGramPhraseQuery
-    /// will <seealso cref="#rewrite(IndexReader)"/> the query to "AB/0 CD/2", while <seealso cref="PhraseQuery"/>
+    /// <see cref="NGramPhraseQuery"/> rather than <see cref="PhraseQuery"/>, because <see cref="NGramPhraseQuery"/>
+    /// will <see cref="Rewrite(IndexReader)"/> the query to "AB/0 CD/2", while <see cref="PhraseQuery"/>
     /// will query "AB/0 BC/1 CD/2" (where term/position).
-    ///
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -93,7 +92,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns true iff <code>o</code> is equal to this. </summary>
+        /// Returns <c>true</c> if <paramref name="o"/> is equal to this. </summary>
         public override bool Equals(object o)
         {
             if (!(o is NGramPhraseQuery))

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/NumericRangeFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/NumericRangeFilter.cs b/src/Lucene.Net/Search/NumericRangeFilter.cs
index 5900506..f42c87d 100644
--- a/src/Lucene.Net/Search/NumericRangeFilter.cs
+++ b/src/Lucene.Net/Search/NumericRangeFilter.cs
@@ -20,26 +20,25 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// A <seealso cref="Filter"/> that only accepts numeric values within
+    /// A <see cref="Filter"/> that only accepts numeric values within
     /// a specified range. To use this, you must first index the
-    /// numeric values using <seealso cref="IntField"/>, {@link
-    /// FloatField}, <seealso cref="LongField"/> or <seealso cref="DoubleField"/> (expert: {@link
-    /// NumericTokenStream}).
+    /// numeric values using <see cref="Documents.Int32Field"/>, 
+    /// <see cref="Documents.SingleField"/>, <see cref="Documents.Int64Field"/> or <see cref="Documents.DoubleField"/> (expert:
+    /// <see cref="Analysis.NumericTokenStream"/>).
     ///
-    /// <p>You create a new NumericRangeFilter with the static
+    /// <para/>You create a new <see cref="NumericRangeFilter"/> with the static
     /// factory methods, eg:
     ///
-    /// <pre class="prettyprint">
-    /// Filter f = NumericRangeFilter.newFloatRange("weight", 0.03f, 0.10f, true, true);
-    /// </pre>
+    /// <code>
+    /// Filter f = NumericRangeFilter.NewFloatRange("weight", 0.03f, 0.10f, true, true);
+    /// </code>
     ///
-    /// accepts all documents whose float valued "weight" field
+    /// Accepts all documents whose float valued "weight" field
     /// ranges from 0.03 to 0.10, inclusive.
-    /// See <seealso cref="NumericRangeQuery"/> for details on how Lucene
+    /// See <see cref="NumericRangeQuery"/> for details on how Lucene
     /// indexes and searches numeric valued fields.
-    ///
+    /// <para/>
     /// @since 2.9
-    ///
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -56,14 +55,14 @@ namespace Lucene.Net.Search
         // LUCENENET NOTE: Static methods were moved into NumericRangeFilter class
 
         /// <summary>
-        /// Returns <code>true</code> if the lower endpoint is inclusive </summary>
+        /// Returns <c>true</c> if the lower endpoint is inclusive </summary>
         public bool IncludesMin
         {
             get { return m_query.IncludesMin; }
         }
 
         /// <summary>
-        /// Returns <code>true</code> if the upper endpoint is inclusive </summary>
+        /// Returns <c>true</c> if the upper endpoint is inclusive </summary>
         public bool IncludesMax
         {
             get { return m_query.IncludesMax; }
@@ -100,13 +99,17 @@ namespace Lucene.Net.Search
         }
     }
 
+    /// <summary>
+    /// LUCENENET specific static class to provide access to static methods without referring to the
+    /// <see cref="NumericRangeFilter{T}"/>'s generic closing type.
+    /// </summary>
     public static class NumericRangeFilter
     {
         /// <summary>
-        /// Factory that creates a <code>NumericRangeFilter</code>, that filters a <code>long</code>
-        /// range using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>.
+        /// Factory that creates a <see cref="NumericRangeFilter"/>, that filters a <see cref="long"/>
+        /// range using the given <see cref="NumericRangeQuery{T}.PrecisionStep"/>.
         /// You can have half-open ranges (which are in fact &lt;/&lt;= or &gt;/&gt;= queries)
-        /// by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
+        /// by setting the min or max value to <c>null</c>. By setting inclusive to <c>false</c>, it will
         /// match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
         /// <para/>
         /// NOTE: This was newLongRange() in Lucene
@@ -117,10 +120,10 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Factory that creates a <code>NumericRangeFilter</code>, that queries a <code>long</code>
-        /// range using the default <code>precisionStep</code> <seealso cref="NumericUtils#PRECISION_STEP_DEFAULT"/> (4).
+        /// Factory that creates a <see cref="NumericRangeFilter"/>, that queries a <see cref="long"/>
+        /// range using the default <see cref="NumericRangeQuery{T}.PrecisionStep"/> <see cref="Util.NumericUtils.PRECISION_STEP_DEFAULT"/> (4).
         /// You can have half-open ranges (which are in fact &lt;/&lt;= or &gt;/&gt;= queries)
-        /// by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
+        /// by setting the min or max value to <c>null</c>. By setting inclusive to <c>false</c>, it will
         /// match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
         /// <para/>
         /// NOTE: This was newLongRange() in Lucene
@@ -131,10 +134,10 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Factory that creates a <code>NumericRangeFilter</code>, that filters a <code>int</code>
-        /// range using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>.
+        /// Factory that creates a <see cref="NumericRangeFilter"/>, that filters a <see cref="int"/>
+        /// range using the given <see cref="NumericRangeQuery{T}.PrecisionStep"/>.
         /// You can have half-open ranges (which are in fact &lt;/&lt;= or &gt;/&gt;= queries)
-        /// by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
+        /// by setting the min or max value to <c>null</c>. By setting inclusive to <c>false</c>, it will
         /// match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
         /// <para/>
         /// NOTE: This was newIntRange() in Lucene
@@ -145,10 +148,10 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Factory that creates a <code>NumericRangeFilter</code>, that queries a <code>int</code>
-        /// range using the default <code>precisionStep</code> <seealso cref="NumericUtils#PRECISION_STEP_DEFAULT"/> (4).
+        /// Factory that creates a <see cref="NumericRangeFilter"/>, that queries a <see cref="int"/>
+        /// range using the default <see cref="NumericRangeQuery{T}.PrecisionStep"/> <see cref="Util.NumericUtils.PRECISION_STEP_DEFAULT"/> (4).
         /// You can have half-open ranges (which are in fact &lt;/&lt;= or &gt;/&gt;= queries)
-        /// by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
+        /// by setting the min or max value to <c>null</c>. By setting inclusive to <c>false</c>, it will
         /// match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
         /// <para/>
         /// NOTE: This was newIntRange() in Lucene
@@ -159,12 +162,12 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Factory that creates a <code>NumericRangeFilter</code>, that filters a <code>double</code>
-        /// range using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>.
+        /// Factory that creates a <see cref="NumericRangeFilter"/>, that filters a <see cref="double"/>
+        /// range using the given <see cref="NumericRangeQuery{T}.PrecisionStep"/>.
         /// You can have half-open ranges (which are in fact &lt;/&lt;= or &gt;/&gt;= queries)
-        /// by setting the min or max value to <code>null</code>.
-        /// <seealso cref="Double#NaN"/> will never match a half-open range, to hit {@code NaN} use a query
-        /// with {@code min == max == Double.NaN}. By setting inclusive to false, it will
+        /// by setting the min or max value to <c>null</c>.
+        /// <see cref="double.NaN"/> will never match a half-open range, to hit <c>NaN</c> use a query
+        /// with <c>min == max == System.Double.NaN</c>. By setting inclusive to <c>false</c>, it will
         /// match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
         /// </summary>
         public static NumericRangeFilter<double> NewDoubleRange(string field, int precisionStep, double? min, double? max, bool minInclusive, bool maxInclusive)
@@ -173,12 +176,12 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Factory that creates a <code>NumericRangeFilter</code>, that queries a <code>double</code>
-        /// range using the default <code>precisionStep</code> <seealso cref="NumericUtils#PRECISION_STEP_DEFAULT"/> (4).
+        /// Factory that creates a <see cref="NumericRangeFilter"/>, that queries a <see cref="double"/>
+        /// range using the default <see cref="NumericRangeQuery{T}.PrecisionStep"/> <see cref="Util.NumericUtils.PRECISION_STEP_DEFAULT"/> (4).
         /// You can have half-open ranges (which are in fact &lt;/&lt;= or &gt;/&gt;= queries)
-        /// by setting the min or max value to <code>null</code>.
-        /// <seealso cref="Double#NaN"/> will never match a half-open range, to hit {@code NaN} use a query
-        /// with {@code min == max == Double.NaN}. By setting inclusive to false, it will
+        /// by setting the min or max value to <c>null</c>.
+        /// <see cref="double.NaN"/> will never match a half-open range, to hit <c>NaN</c> use a query
+        /// with <c>min == max == System.Double.NaN</c>. By setting inclusive to <c>false</c>, it will
         /// match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
         /// </summary>
         public static NumericRangeFilter<double> NewDoubleRange(string field, double? min, double? max, bool minInclusive, bool maxInclusive)
@@ -187,12 +190,12 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Factory that creates a <code>NumericRangeFilter</code>, that filters a <code>float</code>
-        /// range using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>.
+        /// Factory that creates a <see cref="NumericRangeFilter"/>, that filters a <see cref="float"/>
+        /// range using the given <see cref="NumericRangeQuery{T}.PrecisionStep"/>.
         /// You can have half-open ranges (which are in fact &lt;/&lt;= or &gt;/&gt;= queries)
-        /// by setting the min or max value to <code>null</code>.
-        /// <seealso cref="Float#NaN"/> will never match a half-open range, to hit {@code NaN} use a query
-        /// with {@code min == max == Float.NaN}. By setting inclusive to false, it will
+        /// by setting the min or max value to <c>null</c>.
+        /// <see cref="float.NaN"/> will never match a half-open range, to hit <c>NaN</c> use a query
+        /// with <c>min == max == System.Single.NaN</c>. By setting inclusive to <c>false</c>, it will
         /// match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
         /// <para/>
         /// NOTE: This was newFloatRange() in Lucene
@@ -203,12 +206,12 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Factory that creates a <code>NumericRangeFilter</code>, that queries a <code>float</code>
-        /// range using the default <code>precisionStep</code> <seealso cref="NumericUtils#PRECISION_STEP_DEFAULT"/> (4).
+        /// Factory that creates a <see cref="NumericRangeFilter"/>, that queries a <see cref="float"/>
+        /// range using the default <see cref="NumericRangeQuery{T}.PrecisionStep"/> <see cref="Util.NumericUtils.PRECISION_STEP_DEFAULT"/> (4).
         /// You can have half-open ranges (which are in fact &lt;/&lt;= or &gt;/&gt;= queries)
-        /// by setting the min or max value to <code>null</code>.
-        /// <seealso cref="Float#NaN"/> will never match a half-open range, to hit {@code NaN} use a query
-        /// with {@code min == max == Float.NaN}. By setting inclusive to false, it will
+        /// by setting the min or max value to <c>null</c>.
+        /// <see cref="float.NaN"/> will never match a half-open range, to hit <c>NaN</c> use a query
+        /// with <c>min == max == System.Single.NaN</c>. By setting inclusive to <c>false</c>, it will
         /// match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
         /// <para/>
         /// NOTE: This was newFloatRange() in Lucene

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/NumericRangeQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/NumericRangeQuery.cs b/src/Lucene.Net/Search/NumericRangeQuery.cs
index 2c97b1a..344bc4d 100644
--- a/src/Lucene.Net/Search/NumericRangeQuery.cs
+++ b/src/Lucene.Net/Search/NumericRangeQuery.cs
@@ -32,135 +32,135 @@ namespace Lucene.Net.Search
     using ToStringUtils = Lucene.Net.Util.ToStringUtils;
 
     /// <summary>
-    /// <p>A <seealso cref="Query"/> that matches numeric values within a
+    /// <para>A <see cref="Query"/> that matches numeric values within a
     /// specified range.  To use this, you must first index the
-    /// numeric values using <seealso cref="Int32Field"/>, {@link
-    /// FloatField}, <seealso cref="Int64Field"/> or <seealso cref="DoubleField"/> (expert: {@link
-    /// NumericTokenStream}).  If your terms are instead textual,
-    /// you should use <seealso cref="TermRangeQuery"/>.  {@link
-    /// NumericRangeFilter} is the filter equivalent of this
-    /// query.</p>
+    /// numeric values using <see cref="Int32Field"/>, 
+    /// <see cref="SingleField"/>, <see cref="Int64Field"/> or <see cref="DoubleField"/> (expert: 
+    /// <see cref="Analysis.NumericTokenStream"/>).  If your terms are instead textual,
+    /// you should use <see cref="TermRangeQuery"/>.  
+    /// <see cref="NumericRangeFilter"/> is the filter equivalent of this
+    /// query.</para>
     ///
-    /// <p>You create a new NumericRangeQuery with the static
+    /// <para>You create a new <see cref="NumericRangeQuery{T}"/> with the static
     /// factory methods, eg:
     ///
-    /// <pre class="prettyprint">
-    /// Query q = NumericRangeQuery.newFloatRange("weight", 0.03f, 0.10f, true, true);
-    /// </pre>
+    /// <code>
+    /// Query q = NumericRangeQuery.NewFloatRange("weight", 0.03f, 0.10f, true, true);
+    /// </code>
     ///
-    /// matches all documents whose float valued "weight" field
-    /// ranges from 0.03 to 0.10, inclusive.
+    /// matches all documents whose <see cref="float"/> valued "weight" field
+    /// ranges from 0.03 to 0.10, inclusive.</para>
     ///
-    /// <p>The performance of NumericRangeQuery is much better
-    /// than the corresponding <seealso cref="TermRangeQuery"/> because the
+    /// <para>The performance of <see cref="NumericRangeQuery{T}"/> is much better
+    /// than the corresponding <see cref="TermRangeQuery"/> because the
     /// number of terms that must be searched is usually far
-    /// fewer, thanks to trie indexing, described below.</p>
+    /// fewer, thanks to trie indexing, described below.</para>
     ///
-    /// <p>You can optionally specify a <a
-    /// href="#precisionStepDesc"><code>precisionStep</code></a>
-    /// when creating this query.  this is necessary if you've
+    /// <para>You can optionally specify a <a
+    /// href="#precisionStepDesc"><see cref="precisionStep"/></a>
+    /// when creating this query.  This is necessary if you've
     /// changed this configuration from its default (4) during
     /// indexing.  Lower values consume more disk space but speed
     /// up searching.  Suitable values are between <b>1</b> and
     /// <b>8</b>. A good starting point to test is <b>4</b>,
-    /// which is the default value for all <code>Numeric*</code>
+    /// which is the default value for all <c>Numeric*</c>
     /// classes.  See <a href="#precisionStepDesc">below</a> for
-    /// details.
+    /// details.</para>
     ///
-    /// <p>this query defaults to {@linkplain
-    /// MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT}.
+    /// <para>This query defaults to 
+    /// <see cref="MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT"/>.
     /// With precision steps of &lt;=4, this query can be run with
-    /// one of the BooleanQuery rewrite methods without changing
-    /// BooleanQuery's default max clause count.
+    /// one of the <see cref="BooleanQuery"/> rewrite methods without changing
+    /// <see cref="BooleanQuery"/>'s default max clause count.</para>
     ///
-    /// <br><h3>How it works</h3>
-    ///
-    /// <p>See the publication about <a target="_blank" href="http://www.panfmp.org">panFMP</a>,
-    /// where this algorithm was described (referred to as <code>TrieRangeQuery</code>):
+    /// <para/><h3>How it works</h3>
     ///
+    /// <para>See the publication about <a target="_blank" href="http://www.panfmp.org">panFMP</a>,
+    /// where this algorithm was described (referred to as <c>TrieRangeQuery</c>):
+    /// </para>
     /// <blockquote><strong>Schindler, U, Diepenbroek, M</strong>, 2008.
     /// <em>Generic XML-based Framework for Metadata Portals.</em>
     /// Computers &amp; Geosciences 34 (12), 1947-1955.
     /// <a href="http://dx.doi.org/10.1016/j.cageo.2008.02.023"
     /// target="_blank">doi:10.1016/j.cageo.2008.02.023</a></blockquote>
     ///
-    /// <p><em>A quote from this paper:</em> Because Apache Lucene is a full-text
+    /// <para><em>A quote from this paper:</em> Because Apache Lucene is a full-text
     /// search engine and not a conventional database, it cannot handle numerical ranges
     /// (e.g., field value is inside user defined bounds, even dates are numerical values).
     /// We have developed an extension to Apache Lucene that stores
     /// the numerical values in a special string-encoded format with variable precision
-    /// (all numerical values like doubles, longs, floats, and ints are converted to
+    /// (all numerical values like <see cref="double"/>s, <see cref="long"/>s, <see cref="float"/>s, and <see cref="int"/>s are converted to
     /// lexicographic sortable string representations and stored with different precisions
     /// (for a more detailed description of how the values are stored,
-    /// see <seealso cref="NumericUtils"/>). A range is then divided recursively into multiple intervals for searching:
+    /// see <see cref="NumericUtils"/>). A range is then divided recursively into multiple intervals for searching:
     /// The center of the range is searched only with the lowest possible precision in the <em>trie</em>,
-    /// while the boundaries are matched more exactly. this reduces the number of terms dramatically.</p>
+    /// while the boundaries are matched more exactly. This reduces the number of terms dramatically.</para>
     ///
-    /// <p>For the variant that stores long values in 8 different precisions (each reduced by 8 bits) that
+    /// <para>For the variant that stores long values in 8 different precisions (each reduced by 8 bits) that
     /// uses a lowest precision of 1 byte, the index contains only a maximum of 256 distinct values in the
     /// lowest precision. Overall, a range could consist of a theoretical maximum of
     /// <code>7*255*2 + 255 = 3825</code> distinct terms (when there is a term for every distinct value of an
     /// 8-byte-number in the index and the range covers almost all of them; a maximum of 255 distinct values is used
     /// because it would always be possible to reduce the full 256 values to one term with degraded precision).
     /// In practice, we have seen up to 300 terms in most cases (index with 500,000 metadata records
-    /// and a uniform value distribution).</p>
+    /// and a uniform value distribution).</para>
     ///
-    /// <a name="precisionStepDesc"><h3>Precision Step</h3>
-    /// <p>You can choose any <code>precisionStep</code> when encoding values.
+    /// <a name="precisionStepDesc"><h3>Precision Step</h3></a>
+    /// <para/>You can choose any <see cref="precisionStep"/> when encoding values.
     /// Lower step values mean more precisions and so more terms in index (and index gets larger). The number
-    /// of indexed terms per value is (those are generated by <seealso cref="NumericTokenStream"/>):
-    /// <p style="font-family:serif">
-    /// &nbsp;&nbsp;indexedTermsPerValue = <b>ceil</b><big>(</big>bitsPerValue / precisionStep<big>)</big>
-    /// </p>
+    /// of indexed terms per value is (those are generated by <see cref="Analysis.NumericTokenStream"/>):
+    /// <para>
+    /// &#160;&#160;indexedTermsPerValue = <b>ceil</b><big>(</big>bitsPerValue / precisionStep<big>)</big>
+    /// </para>
     /// As the lower precision terms are shared by many values, the additional terms only
-    /// slightly grow the term dictionary (approx. 7% for <code>precisionStep=4</code>), but have a larger
+    /// slightly grow the term dictionary (approx. 7% for <c>precisionStep=4</c>), but have a larger
     /// impact on the postings (the postings file will have  more entries, as every document is linked to
-    /// <code>indexedTermsPerValue</code> terms instead of one). The formula to estimate the growth
+    /// <c>indexedTermsPerValue</c> terms instead of one). The formula to estimate the growth
     /// of the term dictionary in comparison to one term per value:
-    /// <p>
+    /// <para>
     /// <!-- the formula in the alt attribute was transformed from latex to PNG with http://1.618034.com/latex.php (with 110 dpi): -->
-    /// &nbsp;&nbsp;<img src="doc-files/nrq-formula-1.png" alt="\mathrm{termDictOverhead} = \sum\limits_{i=0}^{\mathrm{indexedTermsPerValue}-1} \frac{1}{2^{\mathrm{precisionStep}\cdot i}}" />
-    /// </p>
-    /// <p>On the other hand, if the <code>precisionStep</code> is smaller, the maximum number of terms to match reduces,
+    /// &#160;&#160;<img src="doc-files/nrq-formula-1.png" alt="\mathrm{termDictOverhead} = \sum\limits_{i=0}^{\mathrm{indexedTermsPerValue}-1} \frac{1}{2^{\mathrm{precisionStep}\cdot i}}" />
+    /// </para>
+    /// <para>On the other hand, if the <see cref="precisionStep"/> is smaller, the maximum number of terms to match reduces,
     /// which optimizes query speed. The formula to calculate the maximum number of terms that will be visited while
     /// executing the query is:
-    /// <p>
+    /// </para>
+    /// <para>
     /// <!-- the formula in the alt attribute was transformed from latex to PNG with http://1.618034.com/latex.php (with 110 dpi): -->
-    /// &nbsp;&nbsp;<img src="doc-files/nrq-formula-2.png" alt="\mathrm{maxQueryTerms} = \left[ \left( \mathrm{indexedTermsPerValue} - 1 \right) \cdot \left(2^\mathrm{precisionStep} - 1 \right) \cdot 2 \right] + \left( 2^\mathrm{precisionStep} - 1 \right)" />
-    /// </p>
-    /// <p>For longs stored using a precision step of 4, <code>maxQueryTerms = 15*15*2 + 15 = 465</code>, and for a precision
-    /// step of 2, <code>maxQueryTerms = 31*3*2 + 3 = 189</code>. But the faster search speed is reduced by more seeking
-    /// in the term enum of the index. Because of this, the ideal <code>precisionStep</code> value can only
+    /// &#160;&#160;<img src="doc-files/nrq-formula-2.png" alt="\mathrm{maxQueryTerms} = \left[ \left( \mathrm{indexedTermsPerValue} - 1 \right) \cdot \left(2^\mathrm{precisionStep} - 1 \right) \cdot 2 \right] + \left( 2^\mathrm{precisionStep} - 1 \right)" />
+    /// </para>
+    /// <para>For longs stored using a precision step of 4, <c>maxQueryTerms = 15*15*2 + 15 = 465</c>, and for a precision
+    /// step of 2, <c>maxQueryTerms = 31*3*2 + 3 = 189</c>. But the faster search speed is reduced by more seeking
+    /// in the term enum of the index. Because of this, the ideal <see cref="precisionStep"/> value can only
     /// be found out by testing. <b>Important:</b> You can index with a lower precision step value and test search speed
-    /// using a multiple of the original step value.</p>
+    /// using a multiple of the original step value.</para>
     ///
-    /// <p>Good values for <code>precisionStep</code> are depending on usage and data type:
-    /// <ul>
-    ///  <li>The default for all data types is <b>4</b>, which is used, when no <code>precisionStep</code> is given.
-    ///  <li>Ideal value in most cases for <em>64 bit</em> data types <em>(long, double)</em> is <b>6</b> or <b>8</b>.
-    ///  <li>Ideal value in most cases for <em>32 bit</em> data types <em>(int, float)</em> is <b>4</b>.
-    ///  <li>For low cardinality fields larger precision steps are good. If the cardinality is &lt; 100, it is
-    ///  fair to use <seealso cref="int.MaxValue"/> (see below).
-    ///  <li>Steps <b>&gt;=64</b> for <em>long/double</em> and <b>&gt;=32</b> for <em>int/float</em> produces one token
-    ///  per value in the index and querying is as slow as a conventional <seealso cref="TermRangeQuery"/>. But it can be used
-    ///  to produce fields, that are solely used for sorting (in this case simply use <seealso cref="int.MaxValue"/> as
-    ///  <code>precisionStep</code>). Using <seealso cref="Int32Field"/>,
-    ///  <seealso cref="Int64Field"/>, <seealso cref="SingleField"/> or <seealso cref="DoubleField"/> for sorting
+    /// <para>Good values for <see cref="precisionStep"/> are depending on usage and data type:</para>
+    /// <list type="bullet">
+    ///  <item><description>The default for all data types is <b>4</b>, which is used when no <c>precisionStep</c> is given.</description></item>
+    ///  <item><description>Ideal value in most cases for <em>64 bit</em> data types <em>(long, double)</em> is <b>6</b> or <b>8</b>.</description></item>
+    ///  <item><description>Ideal value in most cases for <em>32 bit</em> data types <em>(int, float)</em> is <b>4</b>.</description></item>
+    ///  <item><description>For low cardinality fields larger precision steps are good. If the cardinality is &lt; 100, it is
+    ///  fair to use <see cref="int.MaxValue"/> (see below).</description></item>
+    ///  <item><description>Steps <b>&gt;=64</b> for <em>long/double</em> and <b>&gt;=32</b> for <em>int/float</em> produces one token
+    ///  per value in the index and querying is as slow as a conventional <see cref="TermRangeQuery"/>. But it can be used
+    ///  to produce fields, that are solely used for sorting (in this case simply use <see cref="int.MaxValue"/> as
+    ///  <see cref="precisionStep"/>). Using <see cref="Int32Field"/>,
+    ///  <see cref="Int64Field"/>, <see cref="SingleField"/> or <see cref="DoubleField"/> for sorting
     ///  is ideal, because building the field cache is much faster than with text-only numbers.
     ///  These fields have one term per value and therefore also work with term enumeration for building distinct lists
     ///  (e.g. facets / preselected values to search for).
-    ///  Sorting is also possible with range query optimized fields using one of the above <code>precisionSteps</code>.
-    /// </ul>
+    ///  Sorting is also possible with range query optimized fields using one of the above <see cref="precisionStep"/>s.</description></item>
+    /// </list>
     ///
-    /// <p>Comparisons of the different types of RangeQueries on an index with about 500,000 docs showed
-    /// that <seealso cref="TermRangeQuery"/> in boolean rewrite mode (with raised <seealso cref="BooleanQuery"/> clause count)
-    /// took about 30-40 secs to complete, <seealso cref="TermRangeQuery"/> in constant score filter rewrite mode took 5 secs
+    /// <para>Comparisons of the different types of RangeQueries on an index with about 500,000 docs showed
+    /// that <see cref="TermRangeQuery"/> in boolean rewrite mode (with raised <see cref="BooleanQuery"/> clause count)
+    /// took about 30-40 secs to complete, <see cref="TermRangeQuery"/> in constant score filter rewrite mode took 5 secs
     /// and executing this class took &lt;100ms to complete (on an Opteron64 machine, Java 1.5, 8 bit
-    /// precision step). this query type was developed for a geographic portal, where the performance for
-    /// e.g. bounding boxes or exact date/time stamps is important.</p>
+    /// precision step). This query type was developed for a geographic portal, where the performance for
+    /// e.g. bounding boxes or exact date/time stamps is important.</para>
     ///
     /// @since 2.9
-    ///
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -196,14 +196,14 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns <code>true</code> if the lower endpoint is inclusive </summary>
+        /// Returns <c>true</c> if the lower endpoint is inclusive </summary>
         public bool IncludesMin
         {
             get { return minInclusive; }
         }
 
         /// <summary>
-        /// Returns <code>true</code> if the upper endpoint is inclusive </summary>
+        /// Returns <c>true</c> if the upper endpoint is inclusive </summary>
         public bool IncludesMax
         {
             get { return maxInclusive; }
@@ -311,14 +311,14 @@ namespace Lucene.Net.Search
         internal static readonly int INT32_POSITIVE_INFINITY = NumericUtils.SingleToSortableInt32(float.PositiveInfinity);
 
         /// <summary>
-        /// Subclass of FilteredTermsEnum for enumerating all terms that match the
+        /// Subclass of <see cref="FilteredTermsEnum"/> for enumerating all terms that match the
         /// sub-ranges for trie range queries, using flex API.
-        /// <p>
+        /// <para/>
         /// WARNING: this term enumeration is not guaranteed to be always ordered by
-        /// <seealso cref="Term#compareTo"/>.
-        /// The ordering depends on how <seealso cref="NumericUtils#splitLongRange"/> and
-        /// <seealso cref="NumericUtils#splitIntRange"/> generates the sub-ranges. For
-        /// <seealso cref="MultiTermQuery"/> ordering is not relevant.
+        /// <see cref="Index.Term.CompareTo(Index.Term)"/>.
+        /// The ordering depends on how <see cref="NumericUtils.SplitInt64Range(NumericUtils.Int64RangeBuilder, int, long, long)"/> and
+        /// <see cref="NumericUtils.SplitInt32Range(NumericUtils.Int32RangeBuilder, int, int, int)"/> generates the sub-ranges. For
+        /// <see cref="MultiTermQuery"/> ordering is not relevant.
         /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
@@ -532,13 +532,17 @@ namespace Lucene.Net.Search
         }
     }
 
+    /// <summary>
+    /// LUCENENET specific class to provide access to static factory methods of <see cref="NumericRangeQuery{T}"/>
+    /// without referring to its generic closing type.
+    /// </summary>
     public static class NumericRangeQuery
     {
         /// <summary>
-        /// Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>long</code>
-        /// range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
+        /// Factory that creates a <see cref="NumericRangeQuery{T}"/>, that queries a <see cref="long"/>
+        /// range using the given <a href="#precisionStepDesc"><see cref="NumericRangeQuery{T}.precisionStep"/></a>.
         /// You can have half-open ranges (which are in fact &lt;/&lt;= or &gt;/&gt;= queries)
-        /// by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
+        /// by setting the min or max value to <c>null</c>. By setting inclusive to <c>false</c>, it will
         /// match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
         /// <para/>
         /// NOTE: This was newLongRange() in Lucene
@@ -549,10 +553,10 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>long</code>
-        /// range using the default <code>precisionStep</code> <seealso cref="NumericUtils#PRECISION_STEP_DEFAULT"/> (4).
+        /// Factory that creates a <see cref="NumericRangeQuery{T}"/>, that queries a <see cref="long"/>
+        /// range using the default <see cref="NumericRangeQuery{T}.precisionStep"/> <see cref="NumericUtils.PRECISION_STEP_DEFAULT"/> (4).
         /// You can have half-open ranges (which are in fact &lt;/&lt;= or &gt;/&gt;= queries)
-        /// by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
+        /// by setting the min or max value to <c>null</c>. By setting inclusive to <c>false</c>, it will
         /// match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
         /// <para/>
         /// NOTE: This was newLongRange() in Lucene
@@ -563,10 +567,10 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>int</code>
-        /// range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
+        /// Factory that creates a <see cref="NumericRangeQuery{T}"/>, that queries a <see cref="int"/>
+        /// range using the given <a href="#precisionStepDesc"><see cref="NumericRangeQuery{T}.precisionStep"/></a>.
         /// You can have half-open ranges (which are in fact &lt;/&lt;= or &gt;/&gt;= queries)
-        /// by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
+        /// by setting the min or max value to <c>null</c>. By setting inclusive to <c>false</c>, it will
         /// match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
         /// <para/>
         /// NOTE: This was newIntRange() in Lucene
@@ -577,10 +581,10 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>int</code>
-        /// range using the default <code>precisionStep</code> <seealso cref="NumericUtils#PRECISION_STEP_DEFAULT"/> (4).
+        /// Factory that creates a <see cref="NumericRangeQuery{T}"/>, that queries a <see cref="int"/>
+        /// range using the default <see cref="NumericRangeQuery{T}.precisionStep"/> <see cref="NumericUtils.PRECISION_STEP_DEFAULT"/> (4).
         /// You can have half-open ranges (which are in fact &lt;/&lt;= or &gt;/&gt;= queries)
-        /// by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
+        /// by setting the min or max value to <c>null</c>. By setting inclusive to <c>false</c>, it will
         /// match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
         /// <para/>
         /// NOTE: This was newIntRange() in Lucene
@@ -591,12 +595,12 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>double</code>
-        /// range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
+        /// Factory that creates a <see cref="NumericRangeQuery{T}"/>, that queries a <see cref="double"/>
+        /// range using the given <a href="#precisionStepDesc"><see cref="NumericRangeQuery{T}.precisionStep"/></a>.
         /// You can have half-open ranges (which are in fact &lt;/&lt;= or &gt;/&gt;= queries)
-        /// by setting the min or max value to <code>null</code>.
-        /// <seealso cref="Double#NaN"/> will never match a half-open range, to hit {@code NaN} use a query
-        /// with {@code min == max == Double.NaN}.  By setting inclusive to false, it will
+        /// by setting the min or max value to <c>null</c>.
+        /// <see cref="double.NaN"/> will never match a half-open range, to hit <c>NaN</c> use a query
+        /// with <c>min == max == System.Double.NaN</c>.  By setting inclusive to <c>false</c>, it will
         /// match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
         /// </summary>
         public static NumericRangeQuery<double> NewDoubleRange(string field, int precisionStep, double? min, double? max, bool minInclusive, bool maxInclusive)
@@ -605,12 +609,12 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>double</code>
-        /// range using the default <code>precisionStep</code> <seealso cref="NumericUtils#PRECISION_STEP_DEFAULT"/> (4).
+        /// Factory that creates a <see cref="NumericRangeQuery{T}"/>, that queries a <see cref="double"/>
+        /// range using the default <see cref="NumericRangeQuery{T}.precisionStep"/> <see cref="NumericUtils.PRECISION_STEP_DEFAULT"/> (4).
         /// You can have half-open ranges (which are in fact &lt;/&lt;= or &gt;/&gt;= queries)
-        /// by setting the min or max value to <code>null</code>.
-        /// <seealso cref="Double#NaN"/> will never match a half-open range, to hit {@code NaN} use a query
-        /// with {@code min == max == Double.NaN}.  By setting inclusive to false, it will
+        /// by setting the min or max value to <c>null</c>.
+        /// <see cref="double.NaN"/> will never match a half-open range, to hit <c>NaN</c> use a query
+        /// with <c>min == max == System.Double.NaN</c>.  By setting inclusive to <c>false</c>, it will
         /// match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
         /// </summary>
         public static NumericRangeQuery<double> NewDoubleRange(string field, double? min, double? max, bool minInclusive, bool maxInclusive)
@@ -619,12 +623,12 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>float</code>
-        /// range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
+        /// Factory that creates a <see cref="NumericRangeQuery{T}"/>, that queries a <see cref="float"/>
+        /// range using the given <a href="#precisionStepDesc"><see cref="NumericRangeQuery{T}.precisionStep"/></a>.
         /// You can have half-open ranges (which are in fact &lt;/&lt;= or &gt;/&gt;= queries)
-        /// by setting the min or max value to <code>null</code>.
-        /// <seealso cref="Float#NaN"/> will never match a half-open range, to hit {@code NaN} use a query
-        /// with {@code min == max == Float.NaN}.  By setting inclusive to false, it will
+        /// by setting the min or max value to <c>null</c>.
+        /// <see cref="float.NaN"/> will never match a half-open range, to hit <c>NaN</c> use a query
+        /// with <c>min == max == System.Single.NaN</c>.  By setting inclusive to <c>false</c>, it will
         /// match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
         /// <para/>
         /// NOTE: This was newFloatRange() in Lucene
@@ -635,12 +639,12 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>float</code>
-        /// range using the default <code>precisionStep</code> <seealso cref="NumericUtils#PRECISION_STEP_DEFAULT"/> (4).
+        /// Factory that creates a <see cref="NumericRangeQuery{T}"/>, that queries a <see cref="float"/>
+        /// range using the default <see cref="NumericRangeQuery{T}.precisionStep"/> <see cref="NumericUtils.PRECISION_STEP_DEFAULT"/> (4).
         /// You can have half-open ranges (which are in fact &lt;/&lt;= or &gt;/&gt;= queries)
-        /// by setting the min or max value to <code>null</code>.
-        /// <seealso cref="Float#NaN"/> will never match a half-open range, to hit {@code NaN} use a query
-        /// with {@code min == max == Float.NaN}.  By setting inclusive to false, it will
+        /// by setting the min or max value to <c>null</c>.
+        /// <see cref="float.NaN"/> will never match a half-open range, to hit <c>NaN</c> use a query
+        /// with <c>min == max == System.Single.NaN</c>.  By setting inclusive to <c>false</c>, it will
         /// match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
         /// <para/>
         /// NOTE: This was newFloatRange() in Lucene

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/PhrasePositions.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/PhrasePositions.cs b/src/Lucene.Net/Search/PhrasePositions.cs
index 3e875c0..dc9619a 100644
--- a/src/Lucene.Net/Search/PhrasePositions.cs
+++ b/src/Lucene.Net/Search/PhrasePositions.cs
@@ -75,9 +75,9 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Go to next location of this term current document, and set
-        /// <code>position</code> as <code>location - offset</code>, so that a
-        /// matching exact phrase is easily identified when all PhrasePositions
-        /// have exactly the same <code>position</code>.
+        /// <c>position</c> as <c>location - offset</c>, so that a
+        /// matching exact phrase is easily identified when all <see cref="PhrasePositions"/>
+        /// have exactly the same <c>position</c>.
         /// </summary>
         internal bool NextPosition()
         {
@@ -93,7 +93,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// for debug purposes </summary>
+        /// For debug purposes </summary>
         public override string ToString()
         {
             string s = "d:" + doc + " o:" + offset + " p:" + position + " c:" + count;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/PhraseQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/PhraseQuery.cs b/src/Lucene.Net/Search/PhraseQuery.cs
index 99186db..224eef3 100644
--- a/src/Lucene.Net/Search/PhraseQuery.cs
+++ b/src/Lucene.Net/Search/PhraseQuery.cs
@@ -43,10 +43,10 @@ namespace Lucene.Net.Search
     using ToStringUtils = Lucene.Net.Util.ToStringUtils;
 
     /// <summary>
-    /// A Query that matches documents containing a particular sequence of terms.
-    /// A PhraseQuery is built by QueryParser for input like <code>"new york"</code>.
+    /// A <see cref="Query"/> that matches documents containing a particular sequence of terms.
+    /// A <see cref="PhraseQuery"/> is built by QueryParser for input like <c>"new york"</c>.
     ///
-    /// <p>this query may be combined with other terms or queries with a <seealso cref="BooleanQuery"/>.
+    /// <para/>This query may be combined with other terms or queries with a <see cref="BooleanQuery"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -67,19 +67,19 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Sets the number of other words permitted between words in query phrase.
-        ///  If zero, then this is an exact phrase search.  For larger values this works
-        ///  like a <code>WITHIN</code> or <code>NEAR</code> operator.
+        /// If zero, then this is an exact phrase search.  For larger values this works
+        /// like a <c>WITHIN</c> or <c>NEAR</c> operator.
         ///
-        ///  <p>The slop is in fact an edit-distance, where the units correspond to
-        ///  moves of terms in the query phrase out of position.  For example, to switch
-        ///  the order of two words requires two moves (the first move places the words
-        ///  atop one another), so to permit re-orderings of phrases, the slop must be
-        ///  at least two.
+        /// <para/>The slop is in fact an edit-distance, where the units correspond to
+        /// moves of terms in the query phrase out of position.  For example, to switch
+        /// the order of two words requires two moves (the first move places the words
+        /// atop one another), so to permit re-orderings of phrases, the slop must be
+        /// at least two.
         ///
-        ///  <p>More exact matches are scored higher than sloppier matches, thus search
-        ///  results are sorted by exactness.
+        /// <para/>More exact matches are scored higher than sloppier matches, thus search
+        /// results are sorted by exactness.
         ///
-        ///  <p>The slop is zero by default, requiring exact matches.
+        /// <para/>The slop is zero by default, requiring exact matches.
         /// </summary>
         public virtual int Slop
         {
@@ -117,7 +117,6 @@ namespace Lucene.Net.Search
         /// The relative position of the term within the phrase is specified explicitly.
         /// this allows e.g. phrases with more than one term at the same position
         /// or phrases with gaps (e.g. in connection with stopwords).
-        ///
         /// </summary>
         public virtual void Add(Term term, int position)
         {
@@ -438,7 +437,7 @@ namespace Lucene.Net.Search
             return new PhraseWeight(this, searcher);
         }
 
-        /// <seealso cref= Lucene.Net.Search.Query#extractTerms(Set) </seealso>
+        /// <seealso cref="Lucene.Net.Search.Query.ExtractTerms(ISet{Term})"/>
         public override void ExtractTerms(ISet<Term> queryTerms)
         {
             queryTerms.UnionWith(terms);
@@ -501,7 +500,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns true iff <code>o</code> is equal to this. </summary>
+        /// Returns <c>true</c> if and only if <paramref name="o"/> is equal to this. </summary>
         public override bool Equals(object o)
         {
             if (!(o is PhraseQuery))

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/PositiveScoresOnlyCollector.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/PositiveScoresOnlyCollector.cs b/src/Lucene.Net/Search/PositiveScoresOnlyCollector.cs
index e50e75d..8f5c289 100644
--- a/src/Lucene.Net/Search/PositiveScoresOnlyCollector.cs
+++ b/src/Lucene.Net/Search/PositiveScoresOnlyCollector.cs
@@ -22,8 +22,8 @@ namespace Lucene.Net.Search
     using AtomicReaderContext = Lucene.Net.Index.AtomicReaderContext;
 
     /// <summary>
-    /// A <seealso cref="ICollector"/> implementation which wraps another
-    /// <seealso cref="ICollector"/> and makes sure only documents with
+    /// A <see cref="ICollector"/> implementation which wraps another
+    /// <see cref="ICollector"/> and makes sure only documents with
     /// scores &gt; 0 are collected.
     /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/PrefixFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/PrefixFilter.cs b/src/Lucene.Net/Search/PrefixFilter.cs
index 8c7ee56..7f39e69 100644
--- a/src/Lucene.Net/Search/PrefixFilter.cs
+++ b/src/Lucene.Net/Search/PrefixFilter.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Search
     using Term = Lucene.Net.Index.Term;
 
     /// <summary>
-    /// A Filter that restricts search results to values that have a matching prefix in a given
+    /// A <see cref="Filter"/> that restricts search results to values that have a matching prefix in a given
     /// field.
     /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/PrefixQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/PrefixQuery.cs b/src/Lucene.Net/Search/PrefixQuery.cs
index 5381aaf..6db8c34 100644
--- a/src/Lucene.Net/Search/PrefixQuery.cs
+++ b/src/Lucene.Net/Search/PrefixQuery.cs
@@ -27,11 +27,11 @@ namespace Lucene.Net.Search
     using ToStringUtils = Lucene.Net.Util.ToStringUtils;
 
     /// <summary>
-    /// A Query that matches documents containing terms with a specified prefix. A PrefixQuery
-    /// is built by QueryParser for input like <code>app*</code>.
+    /// A <see cref="Query"/> that matches documents containing terms with a specified prefix. A <see cref="PrefixQuery"/>
+    /// is built by QueryParser for input like <c>app*</c>.
     ///
-    /// <p>this query uses the {@link
-    /// MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT}
+    /// <para/>This query uses the
+    /// <see cref="MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT"/>
     /// rewrite method.
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -42,7 +42,7 @@ namespace Lucene.Net.Search
         private readonly Term _prefix;
 
         /// <summary>
-        /// Constructs a query for terms starting with <code>prefix</code>. </summary>
+        /// Constructs a query for terms starting with <paramref name="prefix"/>. </summary>
         public PrefixQuery(Term prefix)
             : base(prefix.Field)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/PrefixTermsEnum.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/PrefixTermsEnum.cs b/src/Lucene.Net/Search/PrefixTermsEnum.cs
index 025d43f..12888b0 100644
--- a/src/Lucene.Net/Search/PrefixTermsEnum.cs
+++ b/src/Lucene.Net/Search/PrefixTermsEnum.cs
@@ -25,11 +25,11 @@ namespace Lucene.Net.Search
     using TermsEnum = Lucene.Net.Index.TermsEnum;
 
     /// <summary>
-    /// Subclass of FilteredTermEnum for enumerating all terms that match the
+    /// Subclass of <see cref="FilteredTermsEnum"/> for enumerating all terms that match the
     /// specified prefix filter term.
-    /// <p>Term enumerations are always ordered by
-    /// <seealso cref="#getComparer"/>.  Each term in the enumeration is
-    /// greater than all that precede it.</p>
+    /// <para>Term enumerations are always ordered by
+    /// <see cref="TermsEnum.Comparer"/>.  Each term in the enumeration is
+    /// greater than all that precede it.</para>
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/Query.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Query.cs b/src/Lucene.Net/Search/Query.cs
index 93bfb21..14a8db3 100644
--- a/src/Lucene.Net/Search/Query.cs
+++ b/src/Lucene.Net/Search/Query.cs
@@ -26,23 +26,23 @@ namespace Lucene.Net.Search
 
     /// <summary>
     /// The abstract base class for queries.
-    ///    <p>Instantiable subclasses are:
-    ///    <ul>
-    ///    <li> <seealso cref="TermQuery"/>
-    ///    <li> <seealso cref="BooleanQuery"/>
-    ///    <li> <seealso cref="WildcardQuery"/>
-    ///    <li> <seealso cref="PhraseQuery"/>
-    ///    <li> <seealso cref="PrefixQuery"/>
-    ///    <li> <seealso cref="MultiPhraseQuery"/>
-    ///    <li> <seealso cref="FuzzyQuery"/>
-    ///    <li> <seealso cref="RegexpQuery"/>
-    ///    <li> <seealso cref="TermRangeQuery"/>
-    ///    <li> <seealso cref="NumericRangeQuery"/>
-    ///    <li> <seealso cref="ConstantScoreQuery"/>
-    ///    <li> <seealso cref="DisjunctionMaxQuery"/>
-    ///    <li> <seealso cref="MatchAllDocsQuery"/>
-    ///    </ul>
-    ///    <p>See also the family of <seealso cref="Lucene.Net.Search.Spans Span Queries"/>
+    ///    <para/>Instantiable subclasses are:
+    ///    <list type="bullet">
+    ///    <item><description> <see cref="TermQuery"/> </description></item>
+    ///    <item><description> <see cref="BooleanQuery"/> </description></item>
+    ///    <item><description> <see cref="WildcardQuery"/> </description></item>
+    ///    <item><description> <see cref="PhraseQuery"/> </description></item>
+    ///    <item><description> <see cref="PrefixQuery"/> </description></item>
+    ///    <item><description> <see cref="MultiPhraseQuery"/> </description></item>
+    ///    <item><description> <see cref="FuzzyQuery"/> </description></item>
+    ///    <item><description> <see cref="RegexpQuery"/> </description></item>
+    ///    <item><description> <see cref="TermRangeQuery"/> </description></item>
+    ///    <item><description> <see cref="NumericRangeQuery{T}"/> </description></item>
+    ///    <item><description> <see cref="ConstantScoreQuery"/> </description></item>
+    ///    <item><description> <see cref="DisjunctionMaxQuery"/> </description></item>
+    ///    <item><description> <see cref="MatchAllDocsQuery"/> </description></item>
+    ///    </list>
+    ///    <para/>See also the family of Span Queries (<see cref="Lucene.Net.Search.Spans"/>)
     ///       and additional queries available in the <a href="{@docRoot}/../queries/overview-summary.html">Queries module</a>
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -56,9 +56,9 @@ namespace Lucene.Net.Search
 
 
         /// <summary>
-        /// Sets the boost for this query clause to <code>b</code>.  Documents
+        /// Gets or Sets the boost for this query clause.  Documents
         /// matching this clause will (in addition to the normal weightings) have
-        /// their score multiplied by <code>b</code>.
+        /// their score multiplied by <see cref="Boost"/>. The boost is 1.0 by default.
         /// </summary>
         public virtual float Boost
         {
@@ -67,7 +67,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Prints a query to a string, with <code>field</code> assumed to be the
+        /// Prints a query to a string, with <paramref name="field"/> assumed to be the
         /// default field and omitted.
         /// </summary>
         public abstract string ToString(string field);
@@ -80,9 +80,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Expert: Constructs an appropriate Weight implementation for this query.
+        /// Expert: Constructs an appropriate <see cref="Weight"/> implementation for this query.
         ///
-        /// <p>
+        /// <para/>
         /// Only implemented by primitive queries, which re-write to themselves.
         /// </summary>
         public virtual Weight CreateWeight(IndexSearcher searcher)
@@ -92,8 +92,8 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Expert: called to re-write queries into primitive queries. For example,
-        /// a PrefixQuery will be rewritten into a BooleanQuery that consists
-        /// of TermQuerys.
+        /// a <see cref="PrefixQuery"/> will be rewritten into a <see cref="BooleanQuery"/> that consists
+        /// of <see cref="TermQuery"/>s.
         /// </summary>
         public virtual Query Rewrite(IndexReader reader)
         {
@@ -102,9 +102,9 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Expert: adds all terms occurring in this query to the terms set. Only
-        /// works if this query is in its <seealso cref="#rewrite rewritten"/> form.
+        /// works if this query is in its rewritten (<see cref="Rewrite(IndexReader)"/>) form.
         /// </summary>
-        /// <exception cref="InvalidOperationException"> if this query is not yet rewritten </exception>
+        /// <exception cref="InvalidOperationException"> If this query is not yet rewritten </exception>
         public virtual void ExtractTerms(ISet<Term> terms)
         {
             // needs to be implemented by query subclasses

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/QueryRescorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/QueryRescorer.cs b/src/Lucene.Net/Search/QueryRescorer.cs
index 0455789..d434881 100644
--- a/src/Lucene.Net/Search/QueryRescorer.cs
+++ b/src/Lucene.Net/Search/QueryRescorer.cs
@@ -24,9 +24,9 @@ namespace Lucene.Net.Search
     using AtomicReaderContext = Lucene.Net.Index.AtomicReaderContext;
 
     /// <summary>
-    /// A <seealso cref="Rescorer"/> that uses a provided Query to assign
-    ///  scores to the first-pass hits.
-    ///
+    /// A <see cref="Rescorer"/> that uses a provided <see cref="Query"/> to assign
+    /// scores to the first-pass hits.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -38,7 +38,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Sole constructor, passing the 2nd pass query to
-        ///  assign scores to the 1st pass hits.
+        /// assign scores to the 1st pass hits.
         /// </summary>
         public QueryRescorer(Query query)
         {
@@ -47,10 +47,10 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Implement this in a subclass to combine the first pass and
-        /// second pass scores.  If secondPassMatches is false then
+        /// second pass scores.  If <paramref name="secondPassMatches"/> is <c>false</c> then
         /// the second pass query failed to match a hit from the
         /// first pass query, and you should ignore the
-        /// secondPassScore.
+        /// <paramref name="secondPassScore"/>.
         /// </summary>
         protected abstract float Combine(float firstPassScore, bool secondPassMatches, float secondPassScore);
 
@@ -214,8 +214,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Sugar API, calling {#rescore} using a simple linear
-        ///  combination of firstPassScore + weight * secondPassScore
+        /// Sugar API, calling <see cref="QueryRescorer.Rescore(IndexSearcher, TopDocs, int)"/> using a simple linear
+        /// combination of firstPassScore + <paramref name="weight"/> * secondPassScore
         /// </summary>
         public static TopDocs Rescore(IndexSearcher searcher, TopDocs topDocs, Query query, double weight, int topN)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/QueryWrapperFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/QueryWrapperFilter.cs b/src/Lucene.Net/Search/QueryWrapperFilter.cs
index fe3f70e..727291f 100644
--- a/src/Lucene.Net/Search/QueryWrapperFilter.cs
+++ b/src/Lucene.Net/Search/QueryWrapperFilter.cs
@@ -26,10 +26,10 @@ namespace Lucene.Net.Search
     /// Constrains search results to only match those which also match a provided
     /// query.
     ///
-    /// <p> this could be used, for example, with a <seealso cref="NumericRangeQuery"/> on a suitably
+    /// <para/> This could be used, for example, with a <see cref="NumericRangeQuery"/> on a suitably
     /// formatted date field to implement date filtering.  One could re-use a single
-    /// CachingWrapperFilter(QueryWrapperFilter) that matches, e.g., only documents modified
-    /// within the last week.  this would only need to be reconstructed once per day.
+    /// <c>CachingWrapperFilter(QueryWrapperFilter)</c> that matches, e.g., only documents modified
+    /// within the last week.  This would only need to be reconstructed once per day.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -40,7 +40,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Constructs a filter which only matches documents matching
-        /// <code>query</code>.
+        /// <paramref name="query"/>.
         /// </summary>
         public QueryWrapperFilter(Query query)
         {
@@ -52,7 +52,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// returns the inner Query </summary>
+        /// Returns the inner Query </summary>
         public Query Query
         {
             get

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/ReferenceManager.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/ReferenceManager.cs b/src/Lucene.Net/Search/ReferenceManager.cs
index 6f4e2f8..8cc4174 100644
--- a/src/Lucene.Net/Search/ReferenceManager.cs
+++ b/src/Lucene.Net/Search/ReferenceManager.cs
@@ -26,16 +26,15 @@ namespace Lucene.Net.Search
 
     /// <summary>
     /// Utility class to safely share instances of a certain type across multiple
-    /// threads, while periodically refreshing them. this class ensures each
+    /// threads, while periodically refreshing them. This class ensures each
     /// reference is closed only once all threads have finished using it. It is
-    /// recommended to consult the documentation of <seealso cref="ReferenceManager"/>
-    /// implementations for their <seealso cref="#maybeRefresh()"/> semantics.
+    /// recommended to consult the documentation of <see cref="ReferenceManager{G}"/>
+    /// implementations for their <see cref="MaybeRefresh()"/> semantics.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// @param <G>
-    ///          the concrete type that will be <seealso cref="#acquire() acquired"/> and
-    ///          <seealso cref="#release(Object) released"/>.
-    ///
-    /// @lucene.experimental </param>
+    /// <typeparam name="G">The concrete type that will be <see cref="Acquire()"/>d and
+    ///          <see cref="Release(G)"/>d.</typeparam>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -49,6 +48,9 @@ namespace Lucene.Net.Search
         // fields are not CLS compliant
         private volatile G current;
 
+        /// <summary>
+        /// The current reference
+        /// </summary>
         protected G Current
         {
             get { return current; }
@@ -80,29 +82,28 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Decrement reference counting on the given reference. </summary>
-        /// <exception cref="IOException"> if reference decrement on the given resource failed.
-        ///  </exception>
+        /// <exception cref="System.IO.IOException"> If reference decrement on the given resource failed.</exception>
         protected abstract void DecRef(G reference);
 
         /// <summary>
-        /// Refresh the given reference if needed. Returns {@code null} if no refresh
+        /// Refresh the given reference if needed. Returns <c>null</c> if no refresh
         /// was needed, otherwise a new refreshed reference. </summary>
-        /// <exception cref="ObjectDisposedException"> if the reference manager has been <seealso cref="#close() closed"/>. </exception>
-        /// <exception cref="IOException"> if the refresh operation failed </exception>
+        /// <exception cref="ObjectDisposedException"> If the reference manager has been <see cref="Dispose()"/>d. </exception>
+        /// <exception cref="System.IO.IOException"> If the refresh operation failed </exception>
         protected abstract G RefreshIfNeeded(G referenceToRefresh);
 
         /// <summary>
-        /// Try to increment reference counting on the given reference. Return true if
+        /// Try to increment reference counting on the given reference. Returns <c>true</c> if
         /// the operation was successful. </summary>
-        /// <exception cref="ObjectDisposedException"> if the reference manager has been <seealso cref="#close() closed"/>.  </exception>
+        /// <exception cref="ObjectDisposedException"> If the reference manager has been <see cref="Dispose()"/>d.  </exception>
         protected abstract bool TryIncRef(G reference);
 
         /// <summary>
         /// Obtain the current reference. You must match every call to acquire with one
-        /// call to <seealso cref="#release"/>; it's best to do so in a finally clause, and set
-        /// the reference to {@code null} to prevent accidental usage after it has been
+        /// call to <see cref="Release(G)"/>; it's best to do so in a finally clause, and set
+        /// the reference to <c>null</c> to prevent accidental usage after it has been
         /// released. </summary>
-        /// <exception cref="ObjectDisposedException"> if the reference manager has been <seealso cref="#close() closed"/>.  </exception>
+        /// <exception cref="ObjectDisposedException"> If the reference manager has been <see cref="Dispose()"/>d.  </exception>
         public G Acquire()
         {
             G @ref;
@@ -135,25 +136,25 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// <p>
-        /// Closes this ReferenceManager to prevent future <seealso cref="#acquire() acquiring"/>. A
-        /// reference manager should be closed if the reference to the managed resource
-        /// should be disposed or the application using the <seealso cref="ReferenceManager"/>
+        /// <para>
+        /// Closes this ReferenceManager to prevent future <see cref="Acquire()"/>ing. A
+        /// reference manager should be disposed if the reference to the managed resource
+        /// should be disposed or the application using the <see cref="ReferenceManager{G}"/>
         /// is shutting down. The managed resource might not be released immediately,
-        /// if the <seealso cref="ReferenceManager"/> user is holding on to a previously
-        /// <seealso cref="#acquire() acquired"/> reference. The resource will be released once
-        /// when the last reference is <seealso cref="#release(Object) released"/>. Those
+        /// if the <see cref="ReferenceManager{G}"/> user is holding on to a previously
+        /// <see cref="Acquire()"/>d reference. The resource will be released once
+        /// when the last reference is <see cref="Release(G)"/>d. Those
         /// references can still be used as if the manager was still active.
-        /// </p>
-        /// <p>
-        /// Applications should not <seealso cref="#acquire() acquire"/> new references from this
-        /// manager once this method has been called. <seealso cref="#acquire() Acquiring"/> a
-        /// resource on a closed <seealso cref="ReferenceManager"/> will throw an
+        /// </para>
+        /// <para>
+        /// Applications should not <see cref="Acquire()"/> new references from this
+        /// manager once this method has been called. <see cref="Acquire()"/>ing a
+        /// resource on a disposed <see cref="ReferenceManager{G}"/> will throw an
         /// <seealso cref="ObjectDisposedException"/>.
-        /// </p>
+        /// </para>
         /// </summary>
-        /// <exception cref="IOException">
-        ///           if the underlying reader of the current reference could not be closed </exception>
+        /// <exception cref="System.IO.IOException">
+        ///           If the underlying reader of the current reference could not be disposed </exception>
         public void Dispose()
         {
             lock (this)
@@ -175,10 +176,10 @@ namespace Lucene.Net.Search
         protected abstract int GetRefCount(G reference);
 
         /// <summary>
-        ///  Called after close(), so subclass can free any resources. </summary>
-        ///  <exception cref="IOException"> if the after close operation in a sub-class throws an <seealso cref="IOException"/>
-        ///  </exception>
-        protected virtual void AfterClose()
+        /// Called after <see cref="Dispose()"/>, so subclass can free any resources. </summary>
+        /// <exception cref="System.IO.IOException"> if the after dispose operation in a sub-class throws an <see cref="System.IO.IOException"/>
+        /// </exception>
+        protected virtual void AfterClose() // LUCENENET TODO: API: Rename AfterDispose() ? Or perhaps just use dispose pattern.
         {
         }
 
@@ -230,24 +231,24 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// You must call this (or <seealso cref="#maybeRefreshBlocking()"/>), periodically, if
-        /// you want that <seealso cref="#acquire()"/> will return refreshed instances.
+        /// You must call this (or <see cref="MaybeRefreshBlocking()"/>), periodically, if
+        /// you want that <see cref="Acquire()"/> will return refreshed instances.
         ///
-        /// <p>
+        /// <para>
         /// <b>Threads</b>: it's fine for more than one thread to call this at once.
         /// Only the first thread will attempt the refresh; subsequent threads will see
         /// that another thread is already handling refresh and will return
         /// immediately. Note that this means if another thread is already refreshing
         /// then subsequent threads will return right away without waiting for the
         /// refresh to complete.
-        ///
-        /// <p>
-        /// If this method returns true it means the calling thread either refreshed or
-        /// that there were no changes to refresh. If it returns false it means another
+        /// </para>
+        /// <para>
+        /// If this method returns <c>true</c> it means the calling thread either refreshed or
+        /// that there were no changes to refresh. If it returns <c>false</c> it means another
         /// thread is currently refreshing.
-        /// </p> </summary>
-        /// <exception cref="IOException"> if refreshing the resource causes an <seealso cref="IOException"/> </exception>
-        /// <exception cref="ObjectDisposedException"> if the reference manager has been <seealso cref="#close() closed"/>.  </exception>
+        /// </para> </summary>
+        /// <exception cref="System.IO.IOException"> If refreshing the resource causes an <see cref="System.IO.IOException"/> </exception>
+        /// <exception cref="ObjectDisposedException"> If the reference manager has been <see cref="Dispose()"/>d.  </exception>
         public bool MaybeRefresh()
         {
             EnsureOpen();
@@ -270,17 +271,17 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// You must call this (or <seealso cref="#maybeRefresh()"/>), periodically, if you want
-        /// that <seealso cref="#acquire()"/> will return refreshed instances.
+        /// You must call this (or <see cref="MaybeRefresh()"/>), periodically, if you want
+        /// that <see cref="Acquire()"/> will return refreshed instances.
         ///
-        /// <p>
-        /// <b>Threads</b>: unlike <seealso cref="#maybeRefresh()"/>, if another thread is
+        /// <para/>
+        /// <b>Threads</b>: unlike <see cref="MaybeRefresh()"/>, if another thread is
         /// currently refreshing, this method blocks until that thread completes. It is
-        /// useful if you want to guarantee that the next call to <seealso cref="#acquire()"/>
+        /// useful if you want to guarantee that the next call to <see cref="Acquire()"/>
         /// will return a refreshed instance. Otherwise, consider using the
-        /// non-blocking <seealso cref="#maybeRefresh()"/>. </summary>
-        /// <exception cref="IOException"> if refreshing the resource causes an <seealso cref="IOException"/> </exception>
-        /// <exception cref="ObjectDisposedException"> if the reference manager has been <seealso cref="#close() closed"/>.  </exception>
+        /// non-blocking <see cref="MaybeRefresh()"/>. </summary>
+        /// <exception cref="System.IO.IOException"> If refreshing the resource causes an <see cref="System.IO.IOException"/> </exception>
+        /// <exception cref="ObjectDisposedException"> If the reference manager has been <see cref="Dispose()"/>d.  </exception>
         public void MaybeRefreshBlocking()
         {
             EnsureOpen();
@@ -299,18 +300,17 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Called after a refresh was attempted, regardless of
-        ///  whether a new reference was in fact created. </summary>
-        ///  <exception cref="IOException"> if a low level I/O exception occurs
-        ///  </exception>
+        /// whether a new reference was in fact created. </summary>
+        /// <exception cref="System.IO.IOException"> if a low level I/O exception occurs</exception>
         protected virtual void AfterMaybeRefresh()
         {
         }
 
         /// <summary>
-        /// Release the reference previously obtained via <seealso cref="#acquire()"/>.
-        /// <p>
-        /// <b>NOTE:</b> it's safe to call this after <seealso cref="#close()"/>. </summary>
-        /// <exception cref="IOException"> if the release operation on the given resource throws an <seealso cref="IOException"/> </exception>
+        /// Release the reference previously obtained via <see cref="Acquire()"/>.
+        /// <para/>
+        /// <b>NOTE:</b> it's safe to call this after <see cref="Dispose()"/>. </summary>
+        /// <exception cref="System.IO.IOException"> If the release operation on the given resource throws an <see cref="System.IO.IOException"/> </exception>
         public void Release(G reference)
         {
             Debug.Assert(reference != null);
@@ -346,7 +346,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Remove a listener added with <seealso cref="#addListener(RefreshListener)"/>.
+        /// Remove a listener added with <see cref="AddListener(ReferenceManager.IRefreshListener)"/>.
         /// </summary>
         public virtual void RemoveListener(ReferenceManager.IRefreshListener listener)
         {
@@ -358,12 +358,15 @@ namespace Lucene.Net.Search
         }
     }
 
-    // .NET Port: non-generic type to hold RefreshListener
+    /// <summary>
+    /// LUCENENET specific class used to provide static access to <see cref="ReferenceManager.IRefreshListener"/>
+    /// without having to specify the generic closing type of <see cref="ReferenceManager{G}"/>.
+    /// </summary>
     public static class ReferenceManager
     {
         /// <summary>
         /// Use to receive notification when a refresh has
-        ///  finished.  See <seealso cref="#addListener"/>.
+        /// finished.  See <see cref="ReferenceManager{G}.AddListener(IRefreshListener)"/>.
         /// </summary>
         public interface IRefreshListener
         {
@@ -373,8 +376,8 @@ namespace Lucene.Net.Search
 
             /// <summary>
             /// Called after the attempted refresh; if the refresh
-            /// did open a new reference then didRefresh will be true
-            /// and <seealso cref="#acquire()"/> is guaranteed to return the new
+            /// did open a new reference then didRefresh will be <c>true</c>
+            /// and <see cref="ReferenceManager{G}.Acquire()"/> is guaranteed to return the new
             /// reference.
             /// </summary>
             void AfterRefresh(bool didRefresh);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/RegexpQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/RegexpQuery.cs b/src/Lucene.Net/Search/RegexpQuery.cs
index fc4fc29..7981730 100644
--- a/src/Lucene.Net/Search/RegexpQuery.cs
+++ b/src/Lucene.Net/Search/RegexpQuery.cs
@@ -29,25 +29,26 @@ namespace Lucene.Net.Search
 
     /// <summary>
     /// A fast regular expression query based on the
-    /// <seealso cref="Lucene.Net.Util.Automaton"/> package.
-    /// <ul>
-    /// <li>Comparisons are <a
-    /// href="http://tusker.org/regex/regex_benchmark.html">fast</a>
-    /// <li>The term dictionary is enumerated in an intelligent way, to avoid
-    /// comparisons. See <seealso cref="AutomatonQuery"/> for more details.
-    /// </ul>
-    /// <p>
-    /// The supported syntax is documented in the <seealso cref="RegExp"/> class.
+    /// <see cref="Lucene.Net.Util.Automaton"/> package.
+    /// <list type="bullet">
+    ///     <item><description>Comparisons are <a
+    ///         href="http://tusker.org/regex/regex_benchmark.html">fast</a></description></item>
+    ///     <item><description>The term dictionary is enumerated in an intelligent way, to avoid
+    ///         comparisons. See <see cref="AutomatonQuery"/> for more details.</description></item>
+    /// </list>
+    /// <para>
+    /// The supported syntax is documented in the <see cref="RegExp"/> class.
     /// Note this might be different than other regular expression implementations.
     /// For some alternatives with different syntax, look under the sandbox.
-    /// </p>
-    /// <p>
+    /// </para>
+    /// <para>
     /// Note this query can be slow, as it needs to iterate over many terms. In order
-    /// to prevent extremely slow RegexpQueries, a Regexp term should not start with
-    /// the expression <code>.*</code>
+    /// to prevent extremely slow <see cref="RegexpQuery"/>s, a <see cref="RegExp"/> term should not start with
+    /// the expression <c>.*</c>
+    /// </para>
+    /// @lucene.experimental
     /// </summary>
-    /// <seealso cref= RegExp
-    /// @lucene.experimental </seealso>
+    /// <seealso cref="RegExp"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -70,33 +71,33 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Constructs a query for terms matching <code>term</code>.
-        /// <p>
+        /// Constructs a query for terms matching <paramref name="term"/>.
+        /// <para>
         /// By default, all regular expression features are enabled.
-        /// </p>
+        /// </para>
         /// </summary>
-        /// <param name="term"> regular expression. </param>
+        /// <param name="term"> Regular expression. </param>
         public RegexpQuery(Term term)
             : this(term, RegExpSyntax.ALL)
         {
         }
 
         /// <summary>
-        /// Constructs a query for terms matching <code>term</code>.
+        /// Constructs a query for terms matching <paramref name="term"/>.
         /// </summary>
-        /// <param name="term"> regular expression. </param>
-        /// <param name="flags"> optional RegExp features from <see cref="RegExpSyntax"/> </param>
+        /// <param name="term"> Regular expression. </param>
+        /// <param name="flags"> Optional <see cref="RegExp"/> features from <see cref="RegExpSyntax"/> </param>
         public RegexpQuery(Term term, RegExpSyntax flags)
             : this(term, flags, defaultProvider)
         {
         }
 
         /// <summary>
-        /// Constructs a query for terms matching <code>term</code>.
+        /// Constructs a query for terms matching <paramref name="term"/>.
         /// </summary>
-        /// <param name="term"> regular expression. </param>
-        /// <param name="flags"> optional RegExp features from <see cref="RegExpSyntax"/> </param>
-        /// <param name="provider"> custom AutomatonProvider for named automata </param>
+        /// <param name="term"> Regular expression. </param>
+        /// <param name="flags"> Optional <see cref="RegExp"/> features from <see cref="RegExpSyntax"/> </param>
+        /// <param name="provider"> Custom <see cref="IAutomatonProvider"/> for named automata </param>
         public RegexpQuery(Term term, RegExpSyntax flags, IAutomatonProvider provider)
             : base(term, (new RegExp(term.Text(), flags)).ToAutomaton(provider))
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/ReqExclScorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/ReqExclScorer.cs b/src/Lucene.Net/Search/ReqExclScorer.cs
index a5d3822..dac790c 100644
--- a/src/Lucene.Net/Search/ReqExclScorer.cs
+++ b/src/Lucene.Net/Search/ReqExclScorer.cs
@@ -21,11 +21,11 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// A Scorer for queries with a required subscorer
-    /// and an excluding (prohibited) sub DocIdSetIterator.
-    /// <br>
-    /// this <code>Scorer</code> implements <seealso cref="Scorer#advance(int)"/>,
-    /// and it uses the skipTo() on the given scorers.
+    /// A <see cref="Scorer"/> for queries with a required subscorer
+    /// and an excluding (prohibited) sub <see cref="DocIdSetIterator"/>.
+    /// <para/>
+    /// This <see cref="Scorer"/> implements <see cref="DocIdSetIterator.Advance(int)"/>,
+    /// and it uses the SkipTo() on the given scorers.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -37,9 +37,9 @@ namespace Lucene.Net.Search
         private int doc = -1;
 
         /// <summary>
-        /// Construct a <code>ReqExclScorer</code>. </summary>
+        /// Construct a <see cref="ReqExclScorer"/>. </summary>
         /// <param name="reqScorer"> The scorer that must match, except where </param>
-        /// <param name="exclDisi"> indicates exclusion. </param>
+        /// <param name="exclDisi"> Indicates exclusion. </param>
         public ReqExclScorer(Scorer reqScorer, DocIdSetIterator exclDisi)
             : base(reqScorer.m_weight)
         {
@@ -68,15 +68,15 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Advance to non excluded doc.
-        /// <br>On entry:
-        /// <ul>
-        /// <li>reqScorer != null,
-        /// <li>exclScorer != null,
-        /// <li>reqScorer was advanced once via next() or skipTo()
-        ///      and reqScorer.doc() may still be excluded.
-        /// </ul>
+        /// <para/>On entry:
+        /// <list type="bullet">
+        /// <item><description>reqScorer != null,</description></item>
+        /// <item><description>exclScorer != null,</description></item>
+        /// <item><description>reqScorer was advanced once via Next() or SkipTo()
+        ///      and reqScorer.Doc may still be excluded.</description></item>
+        /// </list>
         /// Advances reqScorer to a non excluded required doc, if any. </summary>
-        /// <returns> true iff there is a non excluded required doc. </returns>
+        /// <returns> <c>true</c> if there is a non excluded required doc. </returns>
         private int ToNonExcluded()
         {
             int exclDoc = exclDisi.DocID;
@@ -112,7 +112,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Returns the score of the current document matching the query.
-        /// Initially invalid, until <seealso cref="#nextDoc()"/> is called the first time. </summary>
+        /// Initially invalid, until <see cref="NextDoc()"/> is called the first time. </summary>
         /// <returns> The score of the required scorer. </returns>
         public override float GetScore()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/ReqOptSumScorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/ReqOptSumScorer.cs b/src/Lucene.Net/Search/ReqOptSumScorer.cs
index 679e5a7..af31188 100644
--- a/src/Lucene.Net/Search/ReqOptSumScorer.cs
+++ b/src/Lucene.Net/Search/ReqOptSumScorer.cs
@@ -22,10 +22,10 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// A Scorer for queries with a required part and an optional part.
-    /// Delays skipTo() on the optional part until a score() is needed.
-    /// <br>
-    /// this <code>Scorer</code> implements <seealso cref="Scorer#advance(int)"/>.
+    /// A <see cref="Scorer"/> for queries with a required part and an optional part.
+    /// Delays SkipTo() on the optional part until a GetScore() is needed.
+    /// <para/>
+    /// This <see cref="Scorer"/> implements <see cref="DocIdSetIterator.Advance(int)"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -34,16 +34,16 @@ namespace Lucene.Net.Search
     {
         /// <summary>
         /// The scorers passed from the constructor.
-        /// These are set to null as soon as their next() or skipTo() returns false.
+        /// These are set to <c>null</c> as soon as their Next() or SkipTo() returns <c>false</c>.
         /// </summary>
         private Scorer reqScorer;
 
         private Scorer optScorer;
 
         /// <summary>
-        /// Construct a <code>ReqOptScorer</code>. </summary>
-        /// <param name="reqScorer"> The required scorer. this must match. </param>
-        /// <param name="optScorer"> The optional scorer. this is used for scoring only. </param>
+        /// Construct a <see cref="ReqOptSumScorer"/>. </summary>
+        /// <param name="reqScorer"> The required scorer. This must match. </param>
+        /// <param name="optScorer"> The optional scorer. This is used for scoring only. </param>
         public ReqOptSumScorer(Scorer reqScorer, Scorer optScorer)
             : base(reqScorer.m_weight)
         {
@@ -70,7 +70,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Returns the score of the current document matching the query.
-        /// Initially invalid, until <seealso cref="#nextDoc()"/> is called the first time. </summary>
+        /// Initially invalid, until <see cref="NextDoc()"/> is called the first time. </summary>
         /// <returns> The score of the required scorer, eventually increased by the score
         /// of the optional scorer when it also matches the current document. </returns>
         public override float GetScore()


[39/48] lucenenet git commit: Lucene.Net.Codecs.Lucene41: Fixed XML documentation comment warnings

Posted by ni...@apache.org.
Lucene.Net.Codecs.Lucene41: Fixed XML documentation comment warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/3221b638
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/3221b638
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/3221b638

Branch: refs/heads/master
Commit: 3221b6383abcc0c10537a3c145406bbb918f6285
Parents: ee52fd3
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Mon Jun 5 14:09:15 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Tue Jun 6 06:58:41 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |   3 +-
 src/Lucene.Net/Codecs/Lucene41/ForUtil.cs       |  54 +--
 src/Lucene.Net/Codecs/Lucene41/Lucene41Codec.cs |  17 +-
 .../Lucene41/Lucene41PostingsBaseFormat.cs      |   6 +-
 .../Codecs/Lucene41/Lucene41PostingsFormat.cs   | 404 +++++++++----------
 .../Codecs/Lucene41/Lucene41PostingsReader.cs   |   7 +-
 .../Codecs/Lucene41/Lucene41PostingsWriter.cs   |  11 +-
 .../Codecs/Lucene41/Lucene41SkipReader.cs       |  22 +-
 .../Codecs/Lucene41/Lucene41SkipWriter.cs       |   7 +-
 .../Lucene41/Lucene41StoredFieldsFormat.cs      | 146 +++----
 10 files changed, 336 insertions(+), 341 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/3221b638/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 6886da2..5f422f8 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -54,8 +54,7 @@ helpers to help with that, see for examples see our [Java style methods to avoid
    1. Codecs.Compressing (namespace)
    2. Codecs.Lucene3x (namespace)
    3. Codecs.Lucene40 (namespace)
-   4. Codecs.Lucene41 (namespace)
-   5. Util.Packed (namespace)
+   4. Util.Packed (namespace)
 2. Lucene.Net.Codecs (project)
    1. Appending (namespace)
    2. BlockTerms (namespace)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/3221b638/src/Lucene.Net/Codecs/Lucene41/ForUtil.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene41/ForUtil.cs b/src/Lucene.Net/Codecs/Lucene41/ForUtil.cs
index 3488cb3..d65a350 100644
--- a/src/Lucene.Net/Codecs/Lucene41/ForUtil.cs
+++ b/src/Lucene.Net/Codecs/Lucene41/ForUtil.cs
@@ -23,16 +23,6 @@ namespace Lucene.Net.Codecs.Lucene41
      * limitations under the License.
      */
 
-    /*
-	using DataInput = Lucene.Net.Store.DataInput;
-	using DataOutput = Lucene.Net.Store.DataOutput;
-	using IndexInput = Lucene.Net.Store.IndexInput;
-	using IndexOutput = Lucene.Net.Store.IndexOutput;
-	using Decoder = Lucene.Net.Util.Packed.PackedInts.Decoder;
-	using FormatAndBits = Lucene.Net.Util.Packed.PackedInts.FormatAndBits;
-	using PackedInts = Lucene.Net.Util.Packed.PackedInts;
-    */
-
     /// <summary>
     /// Encode all values in normal area with fixed bit width,
     /// which is determined by the max value in this block.
@@ -46,15 +36,15 @@ namespace Lucene.Net.Codecs.Lucene41
 
         /// <summary>
         /// Upper limit of the number of bytes that might be required to stored
-        /// <code>BLOCK_SIZE</code> encoded values.
+        /// <see cref="Lucene41PostingsFormat.BLOCK_SIZE"/> encoded values.
         /// </summary>
         public static readonly int MAX_ENCODED_SIZE = Lucene41PostingsFormat.BLOCK_SIZE * 4;
 
         /// <summary>
         /// Upper limit of the number of values that might be decoded in a single call to
-        /// <seealso cref="#readBlock(IndexInput, byte[], int[])"/>. Although values after
-        /// <code>BLOCK_SIZE</code> are garbage, it is necessary to allocate value buffers
-        /// whose size is >= MAX_DATA_SIZE to avoid <seealso cref="ArrayIndexOutOfBoundsException"/>s.
+        /// <see cref="ReadBlock(IndexInput, byte[], int[])"/>. Although values after
+        /// <see cref="Lucene41PostingsFormat.BLOCK_SIZE"/> are garbage, it is necessary to allocate value buffers
+        /// whose size is &gt;= MAX_DATA_SIZE to avoid <see cref="IndexOutOfRangeException"/>s.
         /// </summary>
         public static readonly int MAX_DATA_SIZE;
 
@@ -81,8 +71,8 @@ namespace Lucene.Net.Codecs.Lucene41
         }
 
         /// <summary>
-        /// Compute the number of iterations required to decode <code>BLOCK_SIZE</code>
-        /// values with the provided <seealso cref="Decoder"/>.
+        /// Compute the number of iterations required to decode <see cref="Lucene41PostingsFormat.BLOCK_SIZE"/>
+        /// values with the provided <see cref="PackedInt32s.IDecoder"/>.
         /// </summary>
         private static int ComputeIterations(PackedInt32s.IDecoder decoder)
         {
@@ -91,7 +81,7 @@ namespace Lucene.Net.Codecs.Lucene41
 
         /// <summary>
         /// Compute the number of bytes required to encode a block of values that require
-        /// <code>bitsPerValue</code> bits per value with format <code>format</code>.
+        /// <paramref name="bitsPerValue"/> bits per value with format <paramref name="format"/>.
         /// </summary>
         private static int EncodedSize(PackedInt32s.Format format, int packedIntsVersion, int bitsPerValue)
         {
@@ -106,7 +96,7 @@ namespace Lucene.Net.Codecs.Lucene41
         private readonly int[] iterations;
 
         /// <summary>
-        /// Create a new <seealso cref="ForUtil"/> instance and save state into <code>out</code>.
+        /// Create a new <see cref="ForUtil"/> instance and save state into <paramref name="out"/>.
         /// </summary>
         internal ForUtil(float acceptableOverheadRatio, DataOutput @out)
         {
@@ -131,7 +121,7 @@ namespace Lucene.Net.Codecs.Lucene41
         }
 
         /// <summary>
-        /// Restore a <seealso cref="ForUtil"/> from a <seealso cref="DataInput"/>.
+        /// Restore a <see cref="ForUtil"/> from a <see cref="DataInput"/>.
         /// </summary>
         internal ForUtil(DataInput @in)
         {
@@ -158,12 +148,12 @@ namespace Lucene.Net.Codecs.Lucene41
         }
 
         /// <summary>
-        /// Write a block of data (<code>For</code> format).
+        /// Write a block of data (<c>For</c> format).
         /// </summary>
-        /// <param name="data">     the data to write </param>
-        /// <param name="encoded">  a buffer to use to encode data </param>
-        /// <param name="out">      the destination output </param>
-        /// <exception cref="IOException"> If there is a low-level I/O error </exception>
+        /// <param name="data">     The data to write. </param>
+        /// <param name="encoded">  A buffer to use to encode data. </param>
+        /// <param name="out">      The destination output. </param>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error. </exception>
         internal void WriteBlock(int[] data, byte[] encoded, IndexOutput @out)
         {
             if (IsAllEqual(data))
@@ -188,12 +178,12 @@ namespace Lucene.Net.Codecs.Lucene41
         }
 
         /// <summary>
-        /// Read the next block of data (<code>For</code> format).
+        /// Read the next block of data (<c>For</c> format).
         /// </summary>
-        /// <param name="in">        the input to use to read data </param>
-        /// <param name="encoded">   a buffer that can be used to store encoded data </param>
-        /// <param name="decoded">   where to write decoded data </param>
-        /// <exception cref="IOException"> If there is a low-level I/O error </exception>
+        /// <param name="in">        The input to use to read data. </param>
+        /// <param name="encoded">   A buffer that can be used to store encoded data. </param>
+        /// <param name="decoded">   Where to write decoded data. </param>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error. </exception>
         internal void ReadBlock(IndexInput @in, byte[] encoded, int[] decoded)
         {
             int numBits = @in.ReadByte();
@@ -219,8 +209,8 @@ namespace Lucene.Net.Codecs.Lucene41
         /// <summary>
         /// Skip the next block of data.
         /// </summary>
-        /// <param name="in">      the input where to read data </param>
-        /// <exception cref="IOException"> If there is a low-level I/O error </exception>
+        /// <param name="in">      The input where to read data. </param>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error. </exception>
         internal void SkipBlock(IndexInput @in)
         {
             int numBits = @in.ReadByte();
@@ -249,7 +239,7 @@ namespace Lucene.Net.Codecs.Lucene41
 
         /// <summary>
         /// Compute the number of bits required to serialize any of the longs in
-        /// <code>data</code>.
+        /// <paramref name="data"/>.
         /// </summary>
         private static int BitsRequired(int[] data)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/3221b638/src/Lucene.Net/Codecs/Lucene41/Lucene41Codec.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene41/Lucene41Codec.cs b/src/Lucene.Net/Codecs/Lucene41/Lucene41Codec.cs
index c59c251..f691c5c 100644
--- a/src/Lucene.Net/Codecs/Lucene41/Lucene41Codec.cs
+++ b/src/Lucene.Net/Codecs/Lucene41/Lucene41Codec.cs
@@ -34,13 +34,14 @@ namespace Lucene.Net.Codecs.Lucene41
 
     /// <summary>
     /// Implements the Lucene 4.1 index format, with configurable per-field postings formats.
-    /// <p>
+    /// <para/>
     /// If you want to reuse functionality of this codec in another codec, extend
-    /// <seealso cref="FilterCodec"/>.
+    /// <see cref="FilterCodec"/>.
+    /// <para/>
+    /// See <see cref="Lucene.Net.Codecs.Lucene41"/> package documentation for file format details.
+    /// <para/>
+    /// @lucene.experimental 
     /// </summary>
-    /// <seealso cref= Lucene.Net.Codecs.Lucene41 package documentation for file format details. </seealso>
-    /// @deprecated Only for reading old 4.0 segments
-    /// @lucene.experimental
     [Obsolete("Only for reading old 4.0 segments")]
     [CodecName("Lucene41")] // LUCENENET specific - using CodecName attribute to ensure the default name passed from subclasses is the same as this class name
     public class Lucene41Codec : Codec
@@ -124,9 +125,9 @@ namespace Lucene.Net.Codecs.Lucene41
 
         /// <summary>
         /// Returns the postings format that should be used for writing
-        ///  new segments of <code>field</code>.
-        ///
-        ///  The default implementation always returns "Lucene41"
+        /// new segments of <paramref name="field"/>.
+        /// <para/>
+        /// The default implementation always returns "Lucene41"
         /// </summary>
         public virtual PostingsFormat GetPostingsFormatForField(string field)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/3221b638/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsBaseFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsBaseFormat.cs b/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsBaseFormat.cs
index 6c27407..94c4d54 100644
--- a/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsBaseFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsBaseFormat.cs
@@ -21,9 +21,9 @@ namespace Lucene.Net.Codecs.Lucene41
     using SegmentWriteState = Lucene.Net.Index.SegmentWriteState;
 
     /// <summary>
-    /// Provides a <seealso cref="PostingsReaderBase"/> and {@link
-    /// PostingsWriterBase}.
-    ///
+    /// Provides a <see cref="Codecs.PostingsReaderBase"/> and 
+    /// <see cref="Codecs.PostingsWriterBase"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/3221b638/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsFormat.cs b/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsFormat.cs
index e591999..043b6e7 100644
--- a/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsFormat.cs
@@ -27,132 +27,132 @@ namespace Lucene.Net.Codecs.Lucene41
     /// Lucene 4.1 postings format, which encodes postings in packed integer blocks
     /// for fast decode.
     ///
-    /// <p><b>NOTE</b>: this format is still experimental and
+    /// <para><b>NOTE</b>: this format is still experimental and
     /// subject to change without backwards compatibility.
     ///
-    /// <p>
+    /// <para>
     /// Basic idea:
-    /// <ul>
-    ///   <li>
+    /// <list type="bullet">
+    ///   <item><description>
     ///   <b>Packed Blocks and VInt Blocks</b>:
-    ///   <p>In packed blocks, integers are encoded with the same bit width packed format (<see cref="Util.Packed.PackedInt32s"/>):
+    ///   <para>In packed blocks, integers are encoded with the same bit width packed format (<see cref="Util.Packed.PackedInt32s"/>):
     ///      the block size (i.e. number of integers inside block) is fixed (currently 128). Additionally blocks
-    ///      that are all the same value are encoded in an optimized way.</p>
-    ///   <p>In VInt blocks, integers are encoded as VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>):
-    ///      the block size is variable.</p>
-    ///   </li>
+    ///      that are all the same value are encoded in an optimized way.</para>
+    ///   <para>In VInt blocks, integers are encoded as VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>):
+    ///      the block size is variable.</para>
+    ///   </description></item>
     ///
-    ///   <li>
+    ///   <item><description>
     ///   <b>Block structure</b>:
-    ///   <p>When the postings are long enough, Lucene41PostingsFormat will try to encode most integer data
-    ///      as a packed block.</p>
-    ///   <p>Take a term with 259 documents as an example, the first 256 document ids are encoded as two packed
-    ///      blocks, while the remaining 3 are encoded as one VInt block. </p>
-    ///   <p>Different kinds of data are always encoded separately into different packed blocks, but may
-    ///      possibly be interleaved into the same VInt block. </p>
-    ///   <p>this strategy is applied to pairs:
+    ///   <para>When the postings are long enough, Lucene41PostingsFormat will try to encode most integer data
+    ///      as a packed block.</para>
+    ///   <para>Take a term with 259 documents as an example, the first 256 document ids are encoded as two packed
+    ///      blocks, while the remaining 3 are encoded as one VInt block. </para>
+    ///   <para>Different kinds of data are always encoded separately into different packed blocks, but may
+    ///      possibly be interleaved into the same VInt block. </para>
+    ///   <para>This strategy is applied to pairs:
     ///      &lt;document number, frequency&gt;,
     ///      &lt;position, payload length&gt;,
     ///      &lt;position, offset start, offset length&gt;, and
-    ///      &lt;position, payload length, offsetstart, offset length&gt;.</p>
-    ///   </li>
+    ///      &lt;position, payload length, offsetstart, offset length&gt;.</para>
+    ///   </description></item>
     ///
-    ///   <li>
+    ///   <item><description>
     ///   <b>Skipdata settings</b>:
-    ///   <p>The structure of skip table is quite similar to previous version of Lucene. Skip interval is the
+    ///   <para>The structure of skip table is quite similar to previous version of Lucene. Skip interval is the
     ///      same as block size, and each skip entry points to the beginning of each block. However, for
-    ///      the first block, skip data is omitted.</p>
-    ///   </li>
+    ///      the first block, skip data is omitted.</para>
+    ///   </description></item>
     ///
-    ///   <li>
+    ///   <item><description>
     ///   <b>Positions, Payloads, and Offsets</b>:
-    ///   <p>A position is an integer indicating where the term occurs within one document.
+    ///   <para>A position is an integer indicating where the term occurs within one document.
     ///      A payload is a blob of metadata associated with current position.
     ///      An offset is a pair of integers indicating the tokenized start/end offsets for given term
-    ///      in current position: it is essentially a specialized payload. </p>
-    ///   <p>When payloads and offsets are not omitted, numPositions==numPayloads==numOffsets (assuming a
+    ///      in current position: it is essentially a specialized payload. </para>
+    ///   <para>When payloads and offsets are not omitted, numPositions==numPayloads==numOffsets (assuming a
     ///      null payload contributes one count). As mentioned in block structure, it is possible to encode
-    ///      these three either combined or separately.
-    ///   <p>In all cases, payloads and offsets are stored together. When encoded as a packed block,
+    ///      these three either combined or separately.</para>
+    ///   <para>In all cases, payloads and offsets are stored together. When encoded as a packed block,
     ///      position data is separated out as .pos, while payloads and offsets are encoded in .pay (payload
     ///      metadata will also be stored directly in .pay). When encoded as VInt blocks, all these three are
-    ///      stored interleaved into the .pos (so is payload metadata).</p>
-    ///   <p>With this strategy, the majority of payload and offset data will be outside .pos file.
+    ///      stored interleaved into the .pos (so is payload metadata).</para>
+    ///   <para>With this strategy, the majority of payload and offset data will be outside .pos file.
     ///      So for queries that require only position data, running on a full index with payloads and offsets,
-    ///      this reduces disk pre-fetches.</p>
-    ///   </li>
-    /// </ul>
-    /// </p>
+    ///      this reduces disk pre-fetches.</para>
+    ///   </description></item>
+    /// </list>
+    /// </para>
     ///
-    /// <p>
+    /// <para>
     /// Files and detailed format:
-    /// <ul>
-    ///   <li><tt>.tim</tt>: <a href="#Termdictionary">Term Dictionary</a></li>
-    ///   <li><tt>.tip</tt>: <a href="#Termindex">Term Index</a></li>
-    ///   <li><tt>.doc</tt>: <a href="#Frequencies">Frequencies and Skip Data</a></li>
-    ///   <li><tt>.pos</tt>: <a href="#Positions">Positions</a></li>
-    ///   <li><tt>.pay</tt>: <a href="#Payloads">Payloads and Offsets</a></li>
-    /// </ul>
-    /// </p>
+    /// <list type="bullet">
+    ///   <item><description><c>.tim</c>: <a href="#Termdictionary">Term Dictionary</a></description></item>
+    ///   <item><description><c>.tip</c>: <a href="#Termindex">Term Index</a></description></item>
+    ///   <item><description><c>.doc</c>: <a href="#Frequencies">Frequencies and Skip Data</a></description></item>
+    ///   <item><description><c>.pos</c>: <a href="#Positions">Positions</a></description></item>
+    ///   <item><description><c>.pay</c>: <a href="#Payloads">Payloads and Offsets</a></description></item>
+    /// </list>
+    /// </para>
     ///
     /// <a name="Termdictionary" id="Termdictionary"></a>
     /// <dl>
     /// <dd>
     /// <b>Term Dictionary</b>
     ///
-    /// <p>The .tim file contains the list of terms in each
+    /// <para>The .tim file contains the list of terms in each
     /// field along with per-term statistics (such as docfreq)
     /// and pointers to the frequencies, positions, payload and
     /// skip data in the .doc, .pos, and .pay files.
-    /// See <seealso cref="BlockTreeTermsWriter"/> for more details on the format.
-    /// </p>
+    /// See <see cref="BlockTreeTermsWriter"/> for more details on the format.
+    /// </para>
     ///
-    /// <p>NOTE: The term dictionary can plug into different postings implementations:
+    /// <para>NOTE: The term dictionary can plug into different postings implementations:
     /// the postings writer/reader are actually responsible for encoding
-    /// and decoding the PostingsHeader and TermMetadata sections described here:</p>
+    /// and decoding the PostingsHeader and TermMetadata sections described here:</para>
     ///
-    /// <ul>
-    ///   <li>PostingsHeader --&gt; Header, PackedBlockSize</li>
-    ///   <li>TermMetadata --&gt; (DocFPDelta|SingletonDocID), PosFPDelta?, PosVIntBlockFPDelta?, PayFPDelta?,
-    ///                            SkipFPDelta?</li>
-    ///   <li>Header, --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///   <li>PackedBlockSize, SingletonDocID --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>DocFPDelta, PosFPDelta, PayFPDelta, PosVIntBlockFPDelta, SkipFPDelta --&gt; <seealso cref="DataOutput#writeVLong VLong"/></li>
-    ///   <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    /// </ul>
-    /// <p>Notes:</p>
-    /// <ul>
-    ///    <li>Header is a <seealso cref="CodecUtil#writeHeader CodecHeader"/> storing the version information
-    ///        for the postings.</li>
-    ///    <li>PackedBlockSize is the fixed block size for packed blocks. In packed block, bit width is
+    /// <list type="bullet">
+    ///   <item><description>PostingsHeader --&gt; Header, PackedBlockSize</description></item>
+    ///   <item><description>TermMetadata --&gt; (DocFPDelta|SingletonDocID), PosFPDelta?, PosVIntBlockFPDelta?, PayFPDelta?,
+    ///                            SkipFPDelta?</description></item>
+    ///   <item><description>Header, --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///   <item><description>PackedBlockSize, SingletonDocID --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///   <item><description>DocFPDelta, PosFPDelta, PayFPDelta, PosVIntBlockFPDelta, SkipFPDelta --&gt; VLong (<see cref="Store.DataOutput.WriteVInt64(long)"/>) </description></item>
+    ///   <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(Store.IndexOutput)"/>) </description></item>
+    /// </list>
+    /// <para>Notes:</para>
+    /// <list type="bullet">
+    ///    <item><description>Header is a CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) storing the version information
+    ///        for the postings.</description></item>
+    ///    <item><description>PackedBlockSize is the fixed block size for packed blocks. In packed block, bit width is
     ///        determined by the largest integer. Smaller block size result in smaller variance among width
     ///        of integers hence smaller indexes. Larger block size result in more efficient bulk i/o hence
-    ///        better acceleration. this value should always be a multiple of 64, currently fixed as 128 as
-    ///        a tradeoff. It is also the skip interval used to accelerate <seealso cref="DocsEnum#advance(int)"/>.
-    ///    <li>DocFPDelta determines the position of this term's TermFreqs within the .doc file.
+    ///        better acceleration. This value should always be a multiple of 64, currently fixed as 128 as
+    ///        a tradeoff. It is also the skip interval used to accelerate <see cref="Search.DocIdSetIterator.Advance(int)"/>.</description></item>
+    ///    <item><description>DocFPDelta determines the position of this term's TermFreqs within the .doc file.
     ///        In particular, it is the difference of file offset between this term's
     ///        data and previous term's data (or zero, for the first term in the block).On disk it is
-    ///        stored as the difference from previous value in sequence. </li>
-    ///    <li>PosFPDelta determines the position of this term's TermPositions within the .pos file.
+    ///        stored as the difference from previous value in sequence. </description></item>
+    ///    <item><description>PosFPDelta determines the position of this term's TermPositions within the .pos file.
     ///        While PayFPDelta determines the position of this term's &lt;TermPayloads, TermOffsets?&gt; within
     ///        the .pay file. Similar to DocFPDelta, it is the difference between two file positions (or
-    ///        neglected, for fields that omit payloads and offsets).</li>
-    ///    <li>PosVIntBlockFPDelta determines the position of this term's last TermPosition in last pos packed
+    ///        neglected, for fields that omit payloads and offsets).</description></item>
+    ///    <item><description>PosVIntBlockFPDelta determines the position of this term's last TermPosition in last pos packed
     ///        block within the .pos file. It is synonym for PayVIntBlockFPDelta or OffsetVIntBlockFPDelta.
-    ///        this is actually used to indicate whether it is necessary to load following
+    ///        This is actually used to indicate whether it is necessary to load following
     ///        payloads and offsets from .pos instead of .pay. Every time a new block of positions are to be
     ///        loaded, the PostingsReader will use this value to check whether current block is packed format
     ///        or VInt. When packed format, payloads and offsets are fetched from .pay, otherwise from .pos.
     ///        (this value is neglected when total number of positions i.e. totalTermFreq is less or equal
-    ///        to PackedBlockSize).
-    ///    <li>SkipFPDelta determines the position of this term's SkipData within the .doc
+    ///        to PackedBlockSize).</description></item>
+    ///    <item><description>SkipFPDelta determines the position of this term's SkipData within the .doc
     ///        file. In particular, it is the length of the TermFreq data.
     ///        SkipDelta is only stored if DocFreq is not smaller than SkipMinimum
-    ///        (i.e. 128 in Lucene41PostingsFormat).</li>
-    ///    <li>SingletonDocID is an optimization when a term only appears in one document. In this case, instead
+    ///        (i.e. 128 in Lucene41PostingsFormat).</description></item>
+    ///    <item><description>SingletonDocID is an optimization when a term only appears in one document. In this case, instead
     ///        of writing a file pointer to the .doc file (DocFPDelta), and then a VIntBlock at that location, the
-    ///        single document ID is written to the term dictionary.</li>
-    /// </ul>
+    ///        single document ID is written to the term dictionary.</description></item>
+    /// </list>
     /// </dd>
     /// </dl>
     ///
@@ -160,8 +160,8 @@ namespace Lucene.Net.Codecs.Lucene41
     /// <dl>
     /// <dd>
     /// <b>Term Index</b>
-    /// <p>The .tip file contains an index into the term dictionary, so that it can be
-    /// accessed randomly.  See <seealso cref="BlockTreeTermsWriter"/> for more details on the format.</p>
+    /// <para>The .tip file contains an index into the term dictionary, so that it can be
+    /// accessed randomly.  See <see cref="BlockTreeTermsWriter"/> for more details on the format.</para>
     /// </dd>
     /// </dl>
     ///
@@ -171,86 +171,86 @@ namespace Lucene.Net.Codecs.Lucene41
     /// <dd>
     /// <b>Frequencies and Skip Data</b>
     ///
-    /// <p>The .doc file contains the lists of documents which contain each term, along
+    /// <para>The .doc file contains the lists of documents which contain each term, along
     /// with the frequency of the term in that document (except when frequencies are
-    /// omitted: <seealso cref="IndexOptions#DOCS_ONLY"/>). It also saves skip data to the beginning of
-    /// each packed or VInt block, when the length of document list is larger than packed block size.</p>
+    /// omitted: <see cref="Index.IndexOptions.DOCS_ONLY"/>). It also saves skip data to the beginning of
+    /// each packed or VInt block, when the length of document list is larger than packed block size.</para>
     ///
-    /// <ul>
-    ///   <li>docFile(.doc) --&gt; Header, &lt;TermFreqs, SkipData?&gt;<sup>TermCount</sup>, Footer</li>
-    ///   <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///   <li>TermFreqs --&gt; &lt;PackedBlock&gt; <sup>PackedDocBlockNum</sup>,
-    ///                        VIntBlock? </li>
-    ///   <li>PackedBlock --&gt; PackedDocDeltaBlock, PackedFreqBlock?
-    ///   <li>VIntBlock --&gt; &lt;DocDelta[, Freq?]&gt;<sup>DocFreq-PackedBlockSize*PackedDocBlockNum</sup>
-    ///   <li>SkipData --&gt; &lt;&lt;SkipLevelLength, SkipLevel&gt;
-    ///       <sup>NumSkipLevels-1</sup>, SkipLevel&gt;, SkipDatum?</li>
-    ///   <li>SkipLevel --&gt; &lt;SkipDatum&gt; <sup>TrimmedDocFreq/(PackedBlockSize^(Level + 1))</sup></li>
-    ///   <li>SkipDatum --&gt; DocSkip, DocFPSkip, &lt;PosFPSkip, PosBlockOffset, PayLength?,
-    ///                        PayFPSkip?&gt;?, SkipChildLevelPointer?</li>
-    ///   <li>PackedDocDeltaBlock, PackedFreqBlock --&gt; <seealso cref="PackedInts PackedInts"/></li>
-    ///   <li>DocDelta, Freq, DocSkip, DocFPSkip, PosFPSkip, PosBlockOffset, PayByteUpto, PayFPSkip
+    /// <list type="bullet">
+    ///   <item><description>docFile(.doc) --&gt; Header, &lt;TermFreqs, SkipData?&gt;<sup>TermCount</sup>, Footer</description></item>
+    ///   <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>)</description></item>
+    ///   <item><description>TermFreqs --&gt; &lt;PackedBlock&gt; <sup>PackedDocBlockNum</sup>,
+    ///                        VIntBlock? </description></item>
+    ///   <item><description>PackedBlock --&gt; PackedDocDeltaBlock, PackedFreqBlock?</description></item>
+    ///   <item><description>VIntBlock --&gt; &lt;DocDelta[, Freq?]&gt;<sup>DocFreq-PackedBlockSize*PackedDocBlockNum</sup></description></item>
+    ///   <item><description>SkipData --&gt; &lt;&lt;SkipLevelLength, SkipLevel&gt;
+    ///       <sup>NumSkipLevels-1</sup>, SkipLevel&gt;, SkipDatum?</description></item>
+    ///   <item><description>SkipLevel --&gt; &lt;SkipDatum&gt; <sup>TrimmedDocFreq/(PackedBlockSize^(Level + 1))</sup></description></item>
+    ///   <item><description>SkipDatum --&gt; DocSkip, DocFPSkip, &lt;PosFPSkip, PosBlockOffset, PayLength?,
+    ///                        PayFPSkip?&gt;?, SkipChildLevelPointer?</description></item>
+    ///   <item><description>PackedDocDeltaBlock, PackedFreqBlock --&gt; PackedInts (<see cref="Util.Packed.PackedInt32s"/>) </description></item>
+    ///   <item><description>DocDelta, Freq, DocSkip, DocFPSkip, PosFPSkip, PosBlockOffset, PayByteUpto, PayFPSkip
     ///       --&gt;
-    ///   <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>SkipChildLevelPointer --&gt; <seealso cref="DataOutput#writeVLong VLong"/></li>
-    ///   <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    /// </ul>
-    /// <p>Notes:</p>
-    /// <ul>
-    ///   <li>PackedDocDeltaBlock is theoretically generated from two steps:
-    ///     <ol>
-    ///       <li>Calculate the difference between each document number and previous one,
-    ///           and get a d-gaps list (for the first document, use absolute value); </li>
-    ///       <li>For those d-gaps from first one to PackedDocBlockNum*PackedBlockSize<sup>th</sup>,
-    ///           separately encode as packed blocks.</li>
-    ///     </ol>
+    ///   VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///   <item><description>SkipChildLevelPointer --&gt; VLong (<see cref="Store.DataOutput.WriteVInt64(long)"/>) </description></item>
+    ///   <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(Store.IndexOutput)"/>) </description></item>
+    /// </list>
+    /// <para>Notes:</para>
+    /// <list type="bullet">
+    ///   <item><description>PackedDocDeltaBlock is theoretically generated from two steps:
+    ///     <list type="number">
+    ///       <item><description>Calculate the difference between each document number and previous one,
+    ///           and get a d-gaps list (for the first document, use absolute value); </description></item>
+    ///       <item><description>For those d-gaps from first one to PackedDocBlockNum*PackedBlockSize<sup>th</sup>,
+    ///           separately encode as packed blocks.</description></item>
+    ///     </list>
     ///     If frequencies are not omitted, PackedFreqBlock will be generated without d-gap step.
-    ///   </li>
-    ///   <li>VIntBlock stores remaining d-gaps (along with frequencies when possible) with a format
+    ///   </description></item>
+    ///   <item><description>VIntBlock stores remaining d-gaps (along with frequencies when possible) with a format
     ///       that encodes DocDelta and Freq:
-    ///       <p>DocDelta: if frequencies are indexed, this determines both the document
+    ///       <para>DocDelta: if frequencies are indexed, this determines both the document
     ///       number and the frequency. In particular, DocDelta/2 is the difference between
     ///       this document number and the previous document number (or zero when this is the
     ///       first document in a TermFreqs). When DocDelta is odd, the frequency is one.
     ///       When DocDelta is even, the frequency is read as another VInt. If frequencies
     ///       are omitted, DocDelta contains the gap (not multiplied by 2) between document
-    ///       numbers and no frequency information is stored.</p>
-    ///       <p>For example, the TermFreqs for a term which occurs once in document seven
+    ///       numbers and no frequency information is stored.</para>
+    ///       <para>For example, the TermFreqs for a term which occurs once in document seven
     ///          and three times in document eleven, with frequencies indexed, would be the
-    ///          following sequence of VInts:</p>
-    ///       <p>15, 8, 3</p>
-    ///       <p>If frequencies were omitted (<seealso cref="IndexOptions#DOCS_ONLY"/>) it would be this
-    ///          sequence of VInts instead:</p>
-    ///       <p>7,4</p>
-    ///   </li>
-    ///   <li>PackedDocBlockNum is the number of packed blocks for current term's docids or frequencies.
-    ///       In particular, PackedDocBlockNum = floor(DocFreq/PackedBlockSize) </li>
-    ///   <li>TrimmedDocFreq = DocFreq % PackedBlockSize == 0 ? DocFreq - 1 : DocFreq.
+    ///          following sequence of VInts:</para>
+    ///       <para>15, 8, 3</para>
+    ///       <para>If frequencies were omitted (<see cref="Index.IndexOptions.DOCS_ONLY"/>) it would be this
+    ///          sequence of VInts instead:</para>
+    ///       <para>7,4</para>
+    ///   </description></item>
+    ///   <item><description>PackedDocBlockNum is the number of packed blocks for current term's docids or frequencies.
+    ///       In particular, PackedDocBlockNum = floor(DocFreq/PackedBlockSize) </description></item>
+    ///   <item><description>TrimmedDocFreq = DocFreq % PackedBlockSize == 0 ? DocFreq - 1 : DocFreq.
     ///       We use this trick since the definition of skip entry is a little different from base interface.
-    ///       In <seealso cref="MultiLevelSkipListWriter"/>, skip data is assumed to be saved for
+    ///       In <see cref="MultiLevelSkipListWriter"/>, skip data is assumed to be saved for
     ///       skipInterval<sup>th</sup>, 2*skipInterval<sup>th</sup> ... posting in the list. However,
     ///       in Lucene41PostingsFormat, the skip data is saved for skipInterval+1<sup>th</sup>,
     ///       2*skipInterval+1<sup>th</sup> ... posting (skipInterval==PackedBlockSize in this case).
     ///       When DocFreq is multiple of PackedBlockSize, MultiLevelSkipListWriter will expect one
-    ///       more skip data than Lucene41SkipWriter. </li>
-    ///   <li>SkipDatum is the metadata of one skip entry.
-    ///      For the first block (no matter packed or VInt), it is omitted.</li>
-    ///   <li>DocSkip records the document number of every PackedBlockSize<sup>th</sup> document number in
+    ///       more skip data than Lucene41SkipWriter. </description></item>
+    ///   <item><description>SkipDatum is the metadata of one skip entry.
+    ///      For the first block (no matter packed or VInt), it is omitted.</description></item>
+    ///   <item><description>DocSkip records the document number of every PackedBlockSize<sup>th</sup> document number in
     ///       the postings (i.e. last document number in each packed block). On disk it is stored as the
-    ///       difference from previous value in the sequence. </li>
-    ///   <li>DocFPSkip records the file offsets of each block (excluding )posting at
+    ///       difference from previous value in the sequence. </description></item>
+    ///   <item><description>DocFPSkip records the file offsets of each block (excluding )posting at
     ///       PackedBlockSize+1<sup>th</sup>, 2*PackedBlockSize+1<sup>th</sup> ... , in DocFile.
     ///       The file offsets are relative to the start of current term's TermFreqs.
-    ///       On disk it is also stored as the difference from previous SkipDatum in the sequence.</li>
-    ///   <li>Since positions and payloads are also block encoded, the skip should skip to related block first,
+    ///       On disk it is also stored as the difference from previous SkipDatum in the sequence.</description></item>
+    ///   <item><description>Since positions and payloads are also block encoded, the skip should skip to related block first,
     ///       then fetch the values according to in-block offset. PosFPSkip and PayFPSkip record the file
     ///       offsets of related block in .pos and .pay, respectively. While PosBlockOffset indicates
     ///       which value to fetch inside the related block (PayBlockOffset is unnecessary since it is always
     ///       equal to PosBlockOffset). Same as DocFPSkip, the file offsets are relative to the start of
-    ///       current term's TermFreqs, and stored as a difference sequence.</li>
-    ///   <li>PayByteUpto indicates the start offset of the current payload. It is equivalent to
-    ///       the sum of the payload lengths in the current block up to PosBlockOffset</li>
-    /// </ul>
+    ///       current term's TermFreqs, and stored as a difference sequence.</description></item>
+    ///   <item><description>PayByteUpto indicates the start offset of the current payload. It is equivalent to
+    ///       the sum of the payload lengths in the current block up to PosBlockOffset</description></item>
+    /// </list>
     /// </dd>
     /// </dl>
     ///
@@ -258,52 +258,52 @@ namespace Lucene.Net.Codecs.Lucene41
     /// <dl>
     /// <dd>
     /// <b>Positions</b>
-    /// <p>The .pos file contains the lists of positions that each term occurs at within documents. It also
-    ///    sometimes stores part of payloads and offsets for speedup.</p>
-    /// <ul>
-    ///   <li>PosFile(.pos) --&gt; Header, &lt;TermPositions&gt; <sup>TermCount</sup>, Footer</li>
-    ///   <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///   <li>TermPositions --&gt; &lt;PackedPosDeltaBlock&gt; <sup>PackedPosBlockNum</sup>,
-    ///                            VIntBlock? </li>
-    ///   <li>VIntBlock --&gt; &lt;PositionDelta[, PayloadLength?], PayloadData?,
-    ///                        OffsetDelta?, OffsetLength?&gt;<sup>PosVIntCount</sup>
-    ///   <li>PackedPosDeltaBlock --&gt; <seealso cref="PackedInts PackedInts"/></li>
-    ///   <li>PositionDelta, OffsetDelta, OffsetLength --&gt;
-    ///       <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>PayloadData --&gt; <seealso cref="DataOutput#writeByte byte"/><sup>PayLength</sup></li>
-    ///   <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    /// </ul>
-    /// <p>Notes:</p>
-    /// <ul>
-    ///   <li>TermPositions are order by term (terms are implicit, from the term dictionary), and position
-    ///       values for each term document pair are incremental, and ordered by document number.</li>
-    ///   <li>PackedPosBlockNum is the number of packed blocks for current term's positions, payloads or offsets.
-    ///       In particular, PackedPosBlockNum = floor(totalTermFreq/PackedBlockSize) </li>
-    ///   <li>PosVIntCount is the number of positions encoded as VInt format. In particular,
-    ///       PosVIntCount = totalTermFreq - PackedPosBlockNum*PackedBlockSize</li>
-    ///   <li>The procedure how PackedPosDeltaBlock is generated is the same as PackedDocDeltaBlock
-    ///       in chapter <a href="#Frequencies">Frequencies and Skip Data</a>.</li>
-    ///   <li>PositionDelta is, if payloads are disabled for the term's field, the
+    /// <para>The .pos file contains the lists of positions that each term occurs at within documents. It also
+    ///    sometimes stores part of payloads and offsets for speedup.</para>
+    /// <list type="bullet">
+    ///   <item><description>PosFile(.pos) --&gt; Header, &lt;TermPositions&gt; <sup>TermCount</sup>, Footer</description></item>
+    ///   <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///   <item><description>TermPositions --&gt; &lt;PackedPosDeltaBlock&gt; <sup>PackedPosBlockNum</sup>,
+    ///                            VIntBlock? </description></item>
+    ///   <item><description>VIntBlock --&gt; &lt;PositionDelta[, PayloadLength?], PayloadData?,
+    ///                        OffsetDelta?, OffsetLength?&gt;<sup>PosVIntCount</sup></description></item>
+    ///   <item><description>PackedPosDeltaBlock --&gt; PackedInts (<see cref="Util.Packed.PackedInt32s"/>)</description></item>
+    ///   <item><description>PositionDelta, OffsetDelta, OffsetLength --&gt;
+    ///       VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///   <item><description>PayloadData --&gt; byte (<see cref="Store.DataOutput.WriteByte(byte)"/>)<sup>PayLength</sup></description></item>
+    ///   <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(Store.IndexOutput)"/>) </description></item>
+    /// </list>
+    /// <para>Notes:</para>
+    /// <list type="bullet">
+    ///   <item><description>TermPositions are order by term (terms are implicit, from the term dictionary), and position
+    ///       values for each term document pair are incremental, and ordered by document number.</description></item>
+    ///   <item><description>PackedPosBlockNum is the number of packed blocks for current term's positions, payloads or offsets.
+    ///       In particular, PackedPosBlockNum = floor(totalTermFreq/PackedBlockSize) </description></item>
+    ///   <item><description>PosVIntCount is the number of positions encoded as VInt format. In particular,
+    ///       PosVIntCount = totalTermFreq - PackedPosBlockNum*PackedBlockSize</description></item>
+    ///   <item><description>The procedure how PackedPosDeltaBlock is generated is the same as PackedDocDeltaBlock
+    ///       in chapter <a href="#Frequencies">Frequencies and Skip Data</a>.</description></item>
+    ///   <item><description>PositionDelta is, if payloads are disabled for the term's field, the
     ///       difference between the position of the current occurrence in the document and
     ///       the previous occurrence (or zero, if this is the first occurrence in this
     ///       document). If payloads are enabled for the term's field, then PositionDelta/2
     ///       is the difference between the current and the previous position. If payloads
     ///       are enabled and PositionDelta is odd, then PayloadLength is stored, indicating
-    ///       the length of the payload at the current term position.</li>
-    ///   <li>For example, the TermPositions for a term which occurs as the fourth term in
+    ///       the length of the payload at the current term position.</description></item>
+    ///   <item><description>For example, the TermPositions for a term which occurs as the fourth term in
     ///       one document, and as the fifth and ninth term in a subsequent document, would
     ///       be the following sequence of VInts (payloads disabled):
-    ///       <p>4, 5, 4</p></li>
-    ///   <li>PayloadData is metadata associated with the current term position. If
+    ///       <para>4, 5, 4</para></description></item>
+    ///   <item><description>PayloadData is metadata associated with the current term position. If
     ///       PayloadLength is stored at the current position, then it indicates the length
     ///       of this payload. If PayloadLength is not stored, then this payload has the same
-    ///       length as the payload at the previous position.</li>
-    ///   <li>OffsetDelta/2 is the difference between this position's startOffset from the
+    ///       length as the payload at the previous position.</description></item>
+    ///   <item><description>OffsetDelta/2 is the difference between this position's startOffset from the
     ///       previous occurrence (or zero, if this is the first occurrence in this document).
     ///       If OffsetDelta is odd, then the length (endOffset-startOffset) differs from the
     ///       previous occurrence and an OffsetLength follows. Offset data is only written for
-    ///       <seealso cref="IndexOptions#DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS"/>.</li>
-    /// </ul>
+    ///       <see cref="Index.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS"/>.</description></item>
+    /// </list>
     /// </dd>
     /// </dl>
     ///
@@ -311,35 +311,35 @@ namespace Lucene.Net.Codecs.Lucene41
     /// <dl>
     /// <dd>
     /// <b>Payloads and Offsets</b>
-    /// <p>The .pay file will store payloads and offsets associated with certain term-document positions.
-    ///    Some payloads and offsets will be separated out into .pos file, for performance reasons.</p>
-    /// <ul>
-    ///   <li>PayFile(.pay): --&gt; Header, &lt;TermPayloads, TermOffsets?&gt; <sup>TermCount</sup>, Footer</li>
-    ///   <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///   <li>TermPayloads --&gt; &lt;PackedPayLengthBlock, SumPayLength, PayData&gt; <sup>PackedPayBlockNum</sup>
-    ///   <li>TermOffsets --&gt; &lt;PackedOffsetStartDeltaBlock, PackedOffsetLengthBlock&gt; <sup>PackedPayBlockNum</sup>
-    ///   <li>PackedPayLengthBlock, PackedOffsetStartDeltaBlock, PackedOffsetLengthBlock --&gt; <seealso cref="PackedInts PackedInts"/></li>
-    ///   <li>SumPayLength --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>PayData --&gt; <seealso cref="DataOutput#writeByte byte"/><sup>SumPayLength</sup></li>
-    ///   <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    /// </ul>
-    /// <p>Notes:</p>
-    /// <ul>
-    ///   <li>The order of TermPayloads/TermOffsets will be the same as TermPositions, note that part of
-    ///       payload/offsets are stored in .pos.</li>
-    ///   <li>The procedure how PackedPayLengthBlock and PackedOffsetLengthBlock are generated is the
+    /// <para>The .pay file will store payloads and offsets associated with certain term-document positions.
+    ///    Some payloads and offsets will be separated out into .pos file, for performance reasons.</para>
+    /// <list type="bullet">
+    ///   <item><description>PayFile(.pay): --&gt; Header, &lt;TermPayloads, TermOffsets?&gt; <sup>TermCount</sup>, Footer</description></item>
+    ///   <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///   <item><description>TermPayloads --&gt; &lt;PackedPayLengthBlock, SumPayLength, PayData&gt; <sup>PackedPayBlockNum</sup></description></item>
+    ///   <item><description>TermOffsets --&gt; &lt;PackedOffsetStartDeltaBlock, PackedOffsetLengthBlock&gt; <sup>PackedPayBlockNum</sup></description></item>
+    ///   <item><description>PackedPayLengthBlock, PackedOffsetStartDeltaBlock, PackedOffsetLengthBlock --&gt; PackedInts (<see cref="Util.Packed.PackedInt32s"/>) </description></item>
+    ///   <item><description>SumPayLength --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///   <item><description>PayData --&gt; byte (<see cref="Store.DataOutput.WriteByte(byte)"/>) <sup>SumPayLength</sup></description></item>
+    ///   <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(Store.IndexOutput)"/>) </description></item>
+    /// </list>
+    /// <para>Notes:</para>
+    /// <list type="bullet">
+    ///   <item><description>The order of TermPayloads/TermOffsets will be the same as TermPositions, note that part of
+    ///       payload/offsets are stored in .pos.</description></item>
+    ///   <item><description>The procedure how PackedPayLengthBlock and PackedOffsetLengthBlock are generated is the
     ///       same as PackedFreqBlock in chapter <a href="#Frequencies">Frequencies and Skip Data</a>.
-    ///       While PackedStartDeltaBlock follows a same procedure as PackedDocDeltaBlock.</li>
-    ///   <li>PackedPayBlockNum is always equal to PackedPosBlockNum, for the same term. It is also synonym
-    ///       for PackedOffsetBlockNum.</li>
-    ///   <li>SumPayLength is the total length of payloads written within one block, should be the sum
-    ///       of PayLengths in one packed block.</li>
-    ///   <li>PayLength in PackedPayLengthBlock is the length of each payload associated with the current
-    ///       position.</li>
-    /// </ul>
+    ///       While PackedStartDeltaBlock follows a same procedure as PackedDocDeltaBlock.</description></item>
+    ///   <item><description>PackedPayBlockNum is always equal to PackedPosBlockNum, for the same term. It is also synonym
+    ///       for PackedOffsetBlockNum.</description></item>
+    ///   <item><description>SumPayLength is the total length of payloads written within one block, should be the sum
+    ///       of PayLengths in one packed block.</description></item>
+    ///   <item><description>PayLength in PackedPayLengthBlock is the length of each payload associated with the current
+    ///       position.</description></item>
+    /// </list>
     /// </dd>
     /// </dl>
-    /// </p>
+    /// </para>
     ///
     /// @lucene.experimental
     /// </summary>
@@ -375,8 +375,8 @@ namespace Lucene.Net.Codecs.Lucene41
         public static int BLOCK_SIZE = 128;
 
         /// <summary>
-        /// Creates {@code Lucene41PostingsFormat} with default
-        ///  settings.
+        /// Creates <see cref="Lucene41PostingsFormat"/> with default
+        /// settings.
         /// </summary>
         public Lucene41PostingsFormat()
             : this(BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE, BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE)
@@ -384,10 +384,10 @@ namespace Lucene.Net.Codecs.Lucene41
         }
 
         /// <summary>
-        /// Creates {@code Lucene41PostingsFormat} with custom
-        ///  values for {@code minBlockSize} and {@code
-        ///  maxBlockSize} passed to block terms dictionary. </summary>
-        ///  <seealso cref= BlockTreeTermsWriter#BlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int)  </seealso>
+        /// Creates <see cref="Lucene41PostingsFormat"/> with custom
+        /// values for <paramref name="minTermBlockSize"/> and 
+        /// <paramref name="maxTermBlockSize"/> passed to block terms dictionary. </summary>
+        /// <seealso cref="BlockTreeTermsWriter.BlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int)"/>
         public Lucene41PostingsFormat(int minTermBlockSize, int maxTermBlockSize)
             : base()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/3221b638/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsReader.cs b/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsReader.cs
index 406cf96..b31126d 100644
--- a/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsReader.cs
@@ -27,9 +27,10 @@ namespace Lucene.Net.Codecs.Lucene41
     /// <summary>
     /// Concrete class that reads docId(maybe frq,pos,offset,payloads) list
     /// with postings format.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// <seealso cref= Lucene41SkipReader for details
-    /// @lucene.experimental </seealso>
+    /// <seealso cref="Lucene41SkipReader"/>
     public sealed class Lucene41PostingsReader : PostingsReaderBase
     {
         private readonly IndexInput docIn;
@@ -95,7 +96,7 @@ namespace Lucene.Net.Codecs.Lucene41
         /// <summary>
         /// Read values that have been written using variable-length encoding instead of bit-packing.
         /// <para/>
-        /// NOTE: This was readVIntBlock() in Lucene
+        /// NOTE: This was readVIntBlock() in Lucene.
         /// </summary>
         internal static void ReadVInt32Block(IndexInput docIn, int[] docBuffer, int[] freqBuffer, int num, bool indexHasFreq)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/3221b638/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsWriter.cs b/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsWriter.cs
index dcd68f1..5494d64 100644
--- a/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsWriter.cs
+++ b/src/Lucene.Net/Codecs/Lucene41/Lucene41PostingsWriter.cs
@@ -35,11 +35,12 @@ namespace Lucene.Net.Codecs.Lucene41
     /// <summary>
     /// Concrete class that writes docId(maybe frq,pos,offset,payloads) list
     /// with postings format.
-    ///
+    /// <para/>
     /// Postings list for each term will be stored separately.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// <seealso cref= Lucene41SkipWriter for details about skipping setting and postings layout.
-    /// @lucene.experimental </seealso>
+    /// <seealso cref="Lucene41SkipWriter"/> for details about skipping setting and postings layout.
     public sealed class Lucene41PostingsWriter : PostingsWriterBase
     {
         /// <summary>
@@ -347,7 +348,7 @@ namespace Lucene.Net.Codecs.Lucene41
         }
 
         /// <summary>
-        /// Add a new position & payload </summary>
+        /// Add a new position &amp; payload </summary>
         public override void AddPosition(int position, BytesRef payload, int startOffset, int endOffset)
         {
             // if (DEBUG) {
@@ -433,7 +434,7 @@ namespace Lucene.Net.Codecs.Lucene41
         }
 
         /// <summary>
-        /// Called when we are done adding docs to this term </summary>
+        /// Called when we are done adding docs to this term. </summary>
         public override void FinishTerm(BlockTermState state)
         {
             Int32BlockTermState state2 = (Int32BlockTermState)state;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/3221b638/src/Lucene.Net/Codecs/Lucene41/Lucene41SkipReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene41/Lucene41SkipReader.cs b/src/Lucene.Net/Codecs/Lucene41/Lucene41SkipReader.cs
index 675777e..5cc0a91 100644
--- a/src/Lucene.Net/Codecs/Lucene41/Lucene41SkipReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene41/Lucene41SkipReader.cs
@@ -25,29 +25,28 @@ namespace Lucene.Net.Codecs.Lucene41
     /// <summary>
     /// Implements the skip list reader for block postings format
     /// that stores positions and payloads.
-    ///
+    /// <para/>
     /// Although this skipper uses MultiLevelSkipListReader as an interface,
     /// its definition of skip position will be a little different.
-    ///
+    /// <para/>
     /// For example, when skipInterval = blockSize = 3, df = 2*skipInterval = 6,
-    ///
+    /// <para/>
     /// 0 1 2 3 4 5
     /// d d d d d d    (posting list)
     ///     ^     ^    (skip point in MultiLeveSkipWriter)
     ///       ^        (skip point in Lucene41SkipWriter)
-    ///
+    /// <para/>
     /// In this case, MultiLevelSkipListReader will use the last document as a skip point,
     /// while Lucene41SkipReader should assume no skip point will comes.
-    ///
+    /// <para/>
     /// If we use the interface directly in Lucene41SkipReader, it may silly try to read
     /// another skip data after the only skip point is loaded.
-    ///
+    /// <para/>
     /// To illustrate this, we can call skipTo(d[5]), since skip point d[3] has smaller docId,
     /// and numSkipped+blockSize== df, the MultiLevelSkipListReader will assume the skip list
     /// isn't exhausted yet, and try to load a non-existed skip point
-    ///
-    /// Therefore, we'll trim df before passing it to the interface. see trim(int)
-    ///
+    /// <para/>
+    /// Therefore, we'll trim df before passing it to the interface. See <see cref="Trim(int)"/>.
     /// </summary>
     internal sealed class Lucene41SkipReader : MultiLevelSkipListReader
     {
@@ -100,12 +99,11 @@ namespace Lucene.Net.Codecs.Lucene41
 
         /// <summary>
         /// Trim original docFreq to tell skipReader read proper number of skip points.
-        ///
+        /// <para/>
         /// Since our definition in Lucene41Skip* is a little different from MultiLevelSkip*
         /// this trimmed docFreq will prevent skipReader from:
         /// 1. silly reading a non-existed skip point after the last block boundary
         /// 2. moving into the vInt block
-        ///
         /// </summary>
         internal int Trim(int df)
         {
@@ -136,7 +134,7 @@ namespace Lucene.Net.Codecs.Lucene41
 
         /// <summary>
         /// Returns the doc pointer of the doc to which the last call of
-        /// <seealso cref="MultiLevelSkipListReader#skipTo(int)"/> has skipped.
+        /// <see cref="MultiLevelSkipListReader.SkipTo(int)"/> has skipped.
         /// </summary>
         public long DocPointer
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/3221b638/src/Lucene.Net/Codecs/Lucene41/Lucene41SkipWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene41/Lucene41SkipWriter.cs b/src/Lucene.Net/Codecs/Lucene41/Lucene41SkipWriter.cs
index 3565a5d..4201c68 100644
--- a/src/Lucene.Net/Codecs/Lucene41/Lucene41SkipWriter.cs
+++ b/src/Lucene.Net/Codecs/Lucene41/Lucene41SkipWriter.cs
@@ -23,23 +23,22 @@ namespace Lucene.Net.Codecs.Lucene41
 
     /// <summary>
     /// Write skip lists with multiple levels, and support skip within block ints.
-    ///
+    /// <para/>
     /// Assume that docFreq = 28, skipInterval = blockSize = 12
     ///
     ///  |       block#0       | |      block#1        | |vInts|
     ///  d d d d d d d d d d d d d d d d d d d d d d d d d d d d (posting list)
     ///                          ^                       ^       (level 0 skip point)
-    ///
+    /// <para/>
     /// Note that skipWriter will ignore first document in block#0, since
     /// it is useless as a skip point.  Also, we'll never skip into the vInts
     /// block, only record skip data at the start its start point(if it exist).
-    ///
+    /// <para/>
     /// For each skip point, we will record:
     /// 1. docID in former position, i.e. for position 12, record docID[11], etc.
     /// 2. its related file points(position, payload),
     /// 3. related numbers or uptos(position, payload).
     /// 4. start offset.
-    ///
     /// </summary>
     internal sealed class Lucene41SkipWriter : MultiLevelSkipListWriter
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/3221b638/src/Lucene.Net/Codecs/Lucene41/Lucene41StoredFieldsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene41/Lucene41StoredFieldsFormat.cs b/src/Lucene.Net/Codecs/Lucene41/Lucene41StoredFieldsFormat.cs
index 45e8d0c..8f9c5b5 100644
--- a/src/Lucene.Net/Codecs/Lucene41/Lucene41StoredFieldsFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene41/Lucene41StoredFieldsFormat.cs
@@ -26,91 +26,97 @@ namespace Lucene.Net.Codecs.Lucene41
     /// <summary>
     /// Lucene 4.1 stored fields format.
     ///
-    /// <p><b>Principle</b></p>
-    /// <p>this <seealso cref="StoredFieldsFormat"/> compresses blocks of 16KB of documents in
+    /// <para><b>Principle</b></para>
+    /// <para>This <see cref="StoredFieldsFormat"/> compresses blocks of 16KB of documents in
     /// order to improve the compression ratio compared to document-level
     /// compression. It uses the <a href="http://code.google.com/p/lz4/">LZ4</a>
     /// compression algorithm, which is fast to compress and very fast to decompress
     /// data. Although the compression method that is used focuses more on speed
     /// than on compression ratio, it should provide interesting compression ratios
-    /// for redundant inputs (such as log files, HTML or plain text).</p>
-    /// <p><b>File formats</b></p>
-    /// <p>Stored fields are represented by two files:</p>
-    /// <ol>
-    /// <li><a name="field_data" id="field_data"></a>
-    /// <p>A fields data file (extension <tt>.fdt</tt>). this file stores a compact
+    /// for redundant inputs (such as log files, HTML or plain text).</para>
+    /// <para><b>File formats</b></para>
+    /// <para>Stored fields are represented by two files:</para>
+    /// <list type="number">
+    /// <item><description><a name="field_data" id="field_data"></a>
+    /// <para>A fields data file (extension <c>.fdt</c>). This file stores a compact
     /// representation of documents in compressed blocks of 16KB or more. When
-    /// writing a segment, documents are appended to an in-memory <tt>byte[]</tt>
+    /// writing a segment, documents are appended to an in-memory <c>byte[]</c>
     /// buffer. When its size reaches 16KB or more, some metadata about the documents
     /// is flushed to disk, immediately followed by a compressed representation of
     /// the buffer using the
     /// <a href="http://code.google.com/p/lz4/">LZ4</a>
-    /// <a href="http://fastcompression.blogspot.fr/2011/05/lz4-explained.html">compression format</a>.</p>
-    /// <p>Here is a more detailed description of the field data file format:</p>
-    /// <ul>
-    /// <li>FieldData (.fdt) --&gt; &lt;Header&gt;, PackedIntsVersion, &lt;Chunk&gt;<sup>ChunkCount</sup></li>
-    /// <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    /// <li>PackedIntsVersion --&gt; <seealso cref="PackedInts#VERSION_CURRENT"/> as a <seealso cref="DataOutput#writeVInt VInt"/></li>
-    /// <li>ChunkCount is not known in advance and is the number of chunks necessary to store all document of the segment</li>
-    /// <li>Chunk --&gt; DocBase, ChunkDocs, DocFieldCounts, DocLengths, &lt;CompressedDocs&gt;</li>
-    /// <li>DocBase --&gt; the ID of the first document of the chunk as a <seealso cref="DataOutput#writeVInt VInt"/></li>
-    /// <li>ChunkDocs --&gt; the number of documents in the chunk as a <seealso cref="DataOutput#writeVInt VInt"/></li>
-    /// <li>DocFieldCounts --&gt; the number of stored fields of every document in the chunk, encoded as followed:<ul>
-    ///   <li>if chunkDocs=1, the unique value is encoded as a <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>else read a <seealso cref="DataOutput#writeVInt VInt"/> (let's call it <tt>bitsRequired</tt>)<ul>
-    ///     <li>if <tt>bitsRequired</tt> is <tt>0</tt> then all values are equal, and the common value is the following <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///     <li>else <tt>bitsRequired</tt> is the number of bits required to store any value, and values are stored in a <seealso cref="PackedInts packed"/> array where every value is stored on exactly <tt>bitsRequired</tt> bits</li>
-    ///   </ul></li>
-    /// </ul></li>
-    /// <li>DocLengths --&gt; the lengths of all documents in the chunk, encoded with the same method as DocFieldCounts</li>
-    /// <li>CompressedDocs --&gt; a compressed representation of &lt;Docs&gt; using the LZ4 compression format</li>
-    /// <li>Docs --&gt; &lt;Doc&gt;<sup>ChunkDocs</sup></li>
-    /// <li>Doc --&gt; &lt;FieldNumAndType, Value&gt;<sup>DocFieldCount</sup></li>
-    /// <li>FieldNumAndType --&gt; a <seealso cref="DataOutput#writeVLong VLong"/>, whose 3 last bits are Type and other bits are FieldNum</li>
-    /// <li>Type --&gt;<ul>
-    ///   <li>0: Value is String</li>
-    ///   <li>1: Value is BinaryValue</li>
-    ///   <li>2: Value is Int</li>
-    ///   <li>3: Value is Float</li>
-    ///   <li>4: Value is Long</li>
-    ///   <li>5: Value is Double</li>
-    ///   <li>6, 7: unused</li>
-    /// </ul></li>
-    /// <li>FieldNum --&gt; an ID of the field</li>
-    /// <li>Value --&gt; <seealso cref="DataOutput#writeString(String) String"/> | BinaryValue | Int | Float | Long | Double depending on Type</li>
-    /// <li>BinaryValue --&gt; ValueLength &lt;Byte&gt;<sup>ValueLength</sup></li>
-    /// </ul>
-    /// <p>Notes</p>
-    /// <ul>
-    /// <li>If documents are larger than 16KB then chunks will likely contain only
+    /// <a href="http://fastcompression.blogspot.fr/2011/05/lz4-explained.html">compression format</a>.</para>
+    /// <para>Here is a more detailed description of the field data file format:</para>
+    /// <list type="bullet">
+    /// <item><description>FieldData (.fdt) --&gt; &lt;Header&gt;, PackedIntsVersion, &lt;Chunk&gt;<sup>ChunkCount</sup></description></item>
+    /// <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    /// <item><description>PackedIntsVersion --&gt; <see cref="Util.Packed.PackedInt32s.VERSION_CURRENT"/> as a VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    /// <item><description>ChunkCount is not known in advance and is the number of chunks necessary to store all document of the segment</description></item>
+    /// <item><description>Chunk --&gt; DocBase, ChunkDocs, DocFieldCounts, DocLengths, &lt;CompressedDocs&gt;</description></item>
+    /// <item><description>DocBase --&gt; the ID of the first document of the chunk as a VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    /// <item><description>ChunkDocs --&gt; the number of documents in the chunk as a VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    /// <item><description>DocFieldCounts --&gt; the number of stored fields of every document in the chunk, encoded as followed:
+    /// <list type="bullet">
+    ///   <item><description>if chunkDocs=1, the unique value is encoded as a VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///   <item><description>else read a VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) (let's call it <c>bitsRequired</c>)
+    ///   <list type="bullet">
+    ///     <item><description>if <c>bitsRequired</c> is <c>0</c> then all values are equal, and the common value is the following VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///     <item><description>else <c>bitsRequired</c> is the number of bits required to store any value, and values are stored in a packed (<see cref="Util.Packed.PackedInt32s"/>) array where every value is stored on exactly <c>bitsRequired</c> bits</description></item>
+    ///   </list>
+    ///   </description></item>
+    /// </list>
+    /// </description></item>
+    /// <item><description>DocLengths --&gt; the lengths of all documents in the chunk, encoded with the same method as DocFieldCounts</description></item>
+    /// <item><description>CompressedDocs --&gt; a compressed representation of &lt;Docs&gt; using the LZ4 compression format</description></item>
+    /// <item><description>Docs --&gt; &lt;Doc&gt;<sup>ChunkDocs</sup></description></item>
+    /// <item><description>Doc --&gt; &lt;FieldNumAndType, Value&gt;<sup>DocFieldCount</sup></description></item>
+    /// <item><description>FieldNumAndType --&gt; a VLong (<see cref="Store.DataOutput.WriteVInt64(long)"/>), whose 3 last bits are Type and other bits are FieldNum</description></item>
+    /// <item><description>Type --&gt;
+    /// <list type="bullet">
+    ///   <item><description>0: Value is String</description></item>
+    ///   <item><description>1: Value is BinaryValue</description></item>
+    ///   <item><description>2: Value is Int</description></item>
+    ///   <item><description>3: Value is Float</description></item>
+    ///   <item><description>4: Value is Long</description></item>
+    ///   <item><description>5: Value is Double</description></item>
+    ///   <item><description>6, 7: unused</description></item>
+    /// </list>
+    /// </description></item>
+    /// <item><description>FieldNum --&gt; an ID of the field</description></item>
+    /// <item><description>Value --&gt; String (<see cref="Store.DataOutput.WriteString(string)"/>) | BinaryValue | Int | Float | Long | Double depending on Type</description></item>
+    /// <item><description>BinaryValue --&gt; ValueLength &lt;Byte&gt;<sup>ValueLength</sup></description></item>
+    /// </list>
+    /// <para>Notes</para>
+    /// <list type="bullet">
+    /// <item><description>If documents are larger than 16KB then chunks will likely contain only
     /// one document. However, documents can never spread across several chunks (all
-    /// fields of a single document are in the same chunk).</li>
-    /// <li>When at least one document in a chunk is large enough so that the chunk
+    /// fields of a single document are in the same chunk).</description></item>
+    /// <item><description>When at least one document in a chunk is large enough so that the chunk
     /// is larger than 32KB, the chunk will actually be compressed in several LZ4
-    /// blocks of 16KB. this allows <seealso cref="StoredFieldVisitor"/>s which are only
+    /// blocks of 16KB. This allows <see cref="StoredFieldVisitor"/>s which are only
     /// interested in the first fields of a document to not have to decompress 10MB
-    /// of data if the document is 10MB, but only 16KB.</li>
-    /// <li>Given that the original lengths are written in the metadata of the chunk,
+    /// of data if the document is 10MB, but only 16KB.</description></item>
+    /// <item><description>Given that the original lengths are written in the metadata of the chunk,
     /// the decompressor can leverage this information to stop decoding as soon as
-    /// enough data has been decompressed.</li>
-    /// <li>In case documents are incompressible, CompressedDocs will be less than
-    /// 0.5% larger than Docs.</li>
-    /// </ul>
-    /// </li>
-    /// <li><a name="field_index" id="field_index"></a>
-    /// <p>A fields index file (extension <tt>.fdx</tt>).</p>
-    /// <ul>
-    /// <li>FieldsIndex (.fdx) --&gt; &lt;Header&gt;, &lt;ChunkIndex&gt;</li>
-    /// <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    /// <li>ChunkIndex: See <seealso cref="CompressingStoredFieldsIndexWriter"/></li>
-    /// </ul>
-    /// </li>
-    /// </ol>
-    /// <p><b>Known limitations</b></p>
-    /// <p>this <seealso cref="StoredFieldsFormat"/> does not support individual documents
-    /// larger than (<tt>2<sup>31</sup> - 2<sup>14</sup></tt>) bytes. In case this
+    /// enough data has been decompressed.</description></item>
+    /// <item><description>In case documents are incompressible, CompressedDocs will be less than
+    /// 0.5% larger than Docs.</description></item>
+    /// </list>
+    /// </description></item>
+    /// <item><description><a name="field_index" id="field_index"></a>
+    /// <para>A fields index file (extension <c>.fdx</c>).</para>
+    /// <list type="bullet">
+    /// <item><description>FieldsIndex (.fdx) --&gt; &lt;Header&gt;, &lt;ChunkIndex&gt;</description></item>
+    /// <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    /// <item><description>ChunkIndex: See <see cref="CompressingStoredFieldsIndexWriter"/></description></item>
+    /// </list>
+    /// </description></item>
+    /// </list>
+    /// <para><b>Known limitations</b></para>
+    /// <para>This <see cref="StoredFieldsFormat"/> does not support individual documents
+    /// larger than (<c>2<sup>31</sup> - 2<sup>14</sup></c>) bytes. In case this
     /// is a problem, you should use another format, such as
-    /// <seealso cref="Lucene40StoredFieldsFormat"/>.</p>
+    /// <see cref="Lucene40StoredFieldsFormat"/>.</para>
     /// @lucene.experimental
     /// </summary>
     public sealed class Lucene41StoredFieldsFormat : CompressingStoredFieldsFormat


[05/48] lucenenet git commit: Lucene.Net.Search: Fixed up documentation comments

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/Rescorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Rescorer.cs b/src/Lucene.Net/Search/Rescorer.cs
index ca94e73..72841fb 100644
--- a/src/Lucene.Net/Search/Rescorer.cs
+++ b/src/Lucene.Net/Search/Rescorer.cs
@@ -20,18 +20,18 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// Re-scores the topN results (<seealso cref="TopDocs"/>) from an original
-    /// query.  See <seealso cref="QueryRescorer"/> for an actual
+    /// Re-scores the topN results (<see cref="TopDocs"/>) from an original
+    /// query.  See <see cref="QueryRescorer"/> for an actual
     /// implementation.  Typically, you run a low-cost
     /// first-pass query across the entire index, collecting the
     /// top few hundred hits perhaps, and then use this class to
     /// mix in a more costly second pass scoring.
     ///
-    /// <p>See {@link
-    /// QueryRescorer#rescore(IndexSearcher,TopDocs,Query,double,int)}
+    /// <para/>See 
+    /// <see cref="QueryRescorer.Rescore(IndexSearcher, TopDocs, Query, double, int)"/>
     /// for a simple static method to call to rescore using a 2nd
-    /// pass <seealso cref="Query"/>.
-    ///
+    /// pass <see cref="Query"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -40,9 +40,9 @@ namespace Lucene.Net.Search
     public abstract class Rescorer
     {
         /// <summary>
-        /// Rescore an initial first-pass <seealso cref="TopDocs"/>.
+        /// Rescore an initial first-pass <see cref="TopDocs"/>.
         /// </summary>
-        /// <param name="searcher"> <seealso cref="IndexSearcher"/> used to produce the
+        /// <param name="searcher"> <see cref="IndexSearcher"/> used to produce the
         ///   first pass topDocs </param>
         /// <param name="firstPassTopDocs"> Hits from the first pass
         ///   search.  It's very important that these hits were

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/ScoreCachingWrappingScorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/ScoreCachingWrappingScorer.cs b/src/Lucene.Net/Search/ScoreCachingWrappingScorer.cs
index e86a809..d643e50 100644
--- a/src/Lucene.Net/Search/ScoreCachingWrappingScorer.cs
+++ b/src/Lucene.Net/Search/ScoreCachingWrappingScorer.cs
@@ -21,14 +21,15 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// A <seealso cref="scorer"/> which wraps another scorer and caches the score of the
-    /// current document. Successive calls to <seealso cref="#score()"/> will return the same
-    /// result and will not invoke the wrapped Scorer's score() method, unless the
-    /// current document has changed.<br>
-    /// this class might be useful due to the changes done to the <seealso cref="ICollector"/>
+    /// A <see cref="Scorer"/> which wraps another scorer and caches the score of the
+    /// current document. Successive calls to <see cref="GetScore()"/> will return the same
+    /// result and will not invoke the wrapped Scorer's GetScore() method, unless the
+    /// current document has changed.
+    /// <para/>
+    /// This class might be useful due to the changes done to the <see cref="ICollector"/>
     /// interface, in which the score is not computed for a document by default, only
     /// if the collector requests it. Some collectors may need to use the score in
-    /// several places, however all they have in hand is a <seealso cref="scorer"/> object, and
+    /// several places, however all they have in hand is a <see cref="Scorer"/> object, and
     /// might end up computing the score of a document more than once.
     /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/ScoreDoc.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/ScoreDoc.cs b/src/Lucene.Net/Search/ScoreDoc.cs
index 8f0bc7f..2cfc1d7 100644
--- a/src/Lucene.Net/Search/ScoreDoc.cs
+++ b/src/Lucene.Net/Search/ScoreDoc.cs
@@ -20,7 +20,7 @@ namespace Lucene.Net.Search
      */
 
     /// <summary>
-    /// Holds one hit in <seealso cref="TopDocs"/>. </summary>
+    /// Holds one hit in <see cref="TopDocs"/>. </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -32,22 +32,22 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// A hit document's number. </summary>
-        /// <seealso cref= IndexSearcher#doc(int)  </seealso>
+        /// <seealso cref="IndexSearcher.Doc(int)"/>
         public int Doc { get; set; } // LUCENENET NOTE: For some reason, this was not readonly - should it be?
 
         /// <summary>
-        /// Only set by <seealso cref="TopDocs#merge"/> </summary>
+        /// Only set by <see cref="TopDocs.Merge(Sort, int, int, TopDocs[])"/> </summary>
         public int ShardIndex { get; set; } // LUCENENET NOTE: For some reason, this was not readonly - should it be?
 
         /// <summary>
-        /// Constructs a ScoreDoc. </summary>
+        /// Constructs a <see cref="ScoreDoc"/>. </summary>
         public ScoreDoc(int doc, float score)
             : this(doc, score, -1)
         {
         }
 
         /// <summary>
-        /// Constructs a ScoreDoc. </summary>
+        /// Constructs a <see cref="ScoreDoc"/>. </summary>
         public ScoreDoc(int doc, float score, int shardIndex)
         {
             this.Doc = doc;
@@ -55,7 +55,9 @@ namespace Lucene.Net.Search
             this.ShardIndex = shardIndex;
         }
 
-        // A convenience method for debugging.
+        /// <summary>
+        /// A convenience method for debugging.
+        /// </summary>
         public override string ToString()
         {
             return "doc=" + Doc + " score=" + Score + " shardIndex=" + ShardIndex;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/Scorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Scorer.cs b/src/Lucene.Net/Search/Scorer.cs
index 4e13ecf..c8ad66b 100644
--- a/src/Lucene.Net/Search/Scorer.cs
+++ b/src/Lucene.Net/Search/Scorer.cs
@@ -25,20 +25,21 @@ namespace Lucene.Net.Search
     /// <summary>
     /// Expert: Common scoring functionality for different types of queries.
     ///
-    /// <p>
-    /// A <code>Scorer</code> iterates over documents matching a
+    /// <para>
+    /// A <see cref="Scorer"/> iterates over documents matching a
     /// query in increasing order of doc Id.
-    /// </p>
-    /// <p>
-    /// Document scores are computed using a given <code>Similarity</code>
+    /// </para>
+    /// <para>
+    /// Document scores are computed using a given <see cref="Similarities.Similarity"/>
     /// implementation.
-    /// </p>
+    /// </para>
     ///
-    /// <p><b>NOTE</b>: The values Float.Nan,
-    /// Float.NEGATIVE_INFINITY and Float.POSITIVE_INFINITY are
-    /// not valid scores.  Certain collectors (eg {@link
-    /// TopScoreDocCollector}) will not properly collect hits
+    /// <para><b>NOTE</b>: The values <see cref="float.NaN"/>,
+    /// <see cref="float.NegativeInfinity"/> and <see cref="float.PositiveInfinity"/> are
+    /// not valid scores.  Certain collectors (eg 
+    /// <see cref="TopScoreDocCollector"/>) will not properly collect hits
     /// with these scores.
+    /// </para>
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -46,13 +47,13 @@ namespace Lucene.Net.Search
     public abstract class Scorer : DocsEnum
     {
         /// <summary>
-        /// the Scorer's parent Weight. in some cases this may be null </summary>
+        /// The <see cref="Scorer"/>'s parent <see cref="Weight"/>. In some cases this may be <c>null</c>. </summary>
         // TODO can we clean this up?
         protected internal readonly Weight m_weight;
 
         /// <summary>
-        /// Constructs a Scorer </summary>
-        /// <param name="weight"> The scorers <code>Weight</code>. </param>
+        /// Constructs a <see cref="Scorer"/> </summary>
+        /// <param name="weight"> The scorers <see cref="Weight"/>. </param>
         protected Scorer(Weight weight)
         {
             this.m_weight = weight;
@@ -60,14 +61,15 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Returns the score of the current document matching the query.
-        /// Initially invalid, until <seealso cref="#nextDoc()"/> or <seealso cref="#advance(int)"/>
+        /// Initially invalid, until <see cref="DocIdSetIterator.NextDoc()"/> or <see cref="DocIdSetIterator.Advance(int)"/>
         /// is called the first time, or when called from within
-        /// <seealso cref="ICollector#collect"/>.
+        /// <see cref="ICollector.Collect(int)"/>.
         /// </summary>
         public abstract float GetScore();
 
         /// <summary>
-        /// returns parent Weight
+        /// returns parent <see cref="Weight"/>
+        /// <para/>
         /// @lucene.experimental
         /// </summary>
         public virtual Weight Weight
@@ -80,6 +82,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Returns child sub-scorers
+        /// <para/>
         /// @lucene.experimental
         /// </summary>
         public virtual ICollection<ChildScorer> GetChildren()
@@ -88,8 +91,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// A child Scorer and its relationship to its parent.
-        /// the meaning of the relationship depends upon the parent query.
+        /// A child <see cref="Scorer"/> and its relationship to its parent.
+        /// The meaning of the relationship depends upon the parent query.
+        /// <para/>
         /// @lucene.experimental
         /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -98,7 +102,7 @@ namespace Lucene.Net.Search
         public class ChildScorer
         {
             /// <summary>
-            /// Child Scorer. (note this is typically a direct child, and may
+            /// Child <see cref="Scorer"/>. (note this is typically a direct child, and may
             /// itself also have children).
             /// </summary>
             public Scorer Child { get; private set; }
@@ -109,10 +113,10 @@ namespace Lucene.Net.Search
             public string Relationship { get; private set; }
 
             /// <summary>
-            /// Creates a new ChildScorer node with the specified relationship.
-            /// <p>
+            /// Creates a new <see cref="ChildScorer"/> node with the specified relationship.
+            /// <para/>
             /// The relationship can be any be any string that makes sense to
-            /// the parent Scorer.
+            /// the parent <see cref="Scorer"/>.
             /// </summary>
             public ChildScorer(Scorer child, string relationship)
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/ScoringRewrite.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/ScoringRewrite.cs b/src/Lucene.Net/Search/ScoringRewrite.cs
index b5587d6..4f67ba8 100644
--- a/src/Lucene.Net/Search/ScoringRewrite.cs
+++ b/src/Lucene.Net/Search/ScoringRewrite.cs
@@ -35,8 +35,8 @@ namespace Lucene.Net.Search
     /// <summary>
     /// Base rewrite method that translates each term into a query, and keeps
     /// the scores as computed by the query.
-    /// <p>
-    /// @lucene.internal Only public to be accessible by spans package.
+    /// <para/>
+    /// @lucene.internal - Only public to be accessible by spans package.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -45,18 +45,18 @@ namespace Lucene.Net.Search
     {
         /// <summary>
         /// A rewrite method that first translates each term into
-        ///  <seealso cref="Occur#SHOULD"/> clause in a
-        ///  BooleanQuery, and keeps the scores as computed by the
-        ///  query.  Note that typically such scores are
-        ///  meaningless to the user, and require non-trivial CPU
-        ///  to compute, so it's almost always better to use {@link
-        ///  MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT} instead.
+        /// <see cref="Occur.SHOULD"/> clause in a
+        /// <see cref="BooleanQuery"/>, and keeps the scores as computed by the
+        /// query.  Note that typically such scores are
+        /// meaningless to the user, and require non-trivial CPU
+        /// to compute, so it's almost always better to use 
+        /// <see cref="MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT"/> instead.
         ///
-        ///  <p><b>NOTE</b>: this rewrite method will hit {@link
-        ///  BooleanQuery.TooManyClauses} if the number of terms
-        ///  exceeds <seealso cref="BooleanQuery#getMaxClauseCount"/>.
+        /// <para/><b>NOTE</b>: this rewrite method will hit 
+        /// <see cref="BooleanQuery.TooManyClausesException"/> if the number of terms
+        /// exceeds <see cref="BooleanQuery.MaxClauseCount"/>.
         /// </summary>
-        ///  <seealso cref= MultiTermQuery#setRewriteMethod  </seealso>
+        ///  <seealso cref="MultiTermQuery.MultiTermRewriteMethod"/>
         public static readonly ScoringRewrite<BooleanQuery> SCORING_BOOLEAN_QUERY_REWRITE = new ScoringRewriteAnonymousInnerClassHelper();
 
 #if FEATURE_SERIALIZABLE
@@ -90,16 +90,16 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Like <seealso cref="#SCORING_BOOLEAN_QUERY_REWRITE"/> except
-        ///  scores are not computed.  Instead, each matching
-        ///  document receives a constant score equal to the
-        ///  query's boost.
+        /// Like <see cref="SCORING_BOOLEAN_QUERY_REWRITE"/> except
+        /// scores are not computed.  Instead, each matching
+        /// document receives a constant score equal to the
+        /// query's boost.
         ///
-        ///  <p><b>NOTE</b>: this rewrite method will hit {@link
-        ///  BooleanQuery.TooManyClauses} if the number of terms
-        ///  exceeds <seealso cref="BooleanQuery#getMaxClauseCount"/>.
+        /// <para/><b>NOTE</b>: this rewrite method will hit 
+        /// <see cref="BooleanQuery.TooManyClausesException"/> if the number of terms
+        /// exceeds <see cref="BooleanQuery.MaxClauseCount"/>.
         /// </summary>
-        ///  <seealso cref= MultiTermQuery#setRewriteMethod  </seealso>
+        /// <seealso cref="MultiTermQuery.MultiTermRewriteMethod"/>
         public static readonly RewriteMethod CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE = new RewriteMethodAnonymousInnerClassHelper();
 
 #if FEATURE_SERIALIZABLE
@@ -122,8 +122,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// this method is called after every new term to check if the number of max clauses
-        /// (e.g. in BooleanQuery) is not exceeded. Throws the corresponding <seealso cref="RuntimeException"/>.
+        /// This method is called after every new term to check if the number of max clauses
+        /// (e.g. in <see cref="BooleanQuery"/>) is not exceeded. Throws the corresponding <see cref="Exception"/>.
         /// </summary>
         protected abstract void CheckMaxClauseCount(int count);
 
@@ -205,7 +205,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Special implementation of BytesStartArray that keeps parallel arrays for boost and docFreq </summary>
+        /// Special implementation of <see cref="BytesRefHash.BytesStartArray"/> that keeps parallel arrays for boost and docFreq </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
 #endif

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/SearcherFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/SearcherFactory.cs b/src/Lucene.Net/Search/SearcherFactory.cs
index 6a5fb62..6ddb386 100644
--- a/src/Lucene.Net/Search/SearcherFactory.cs
+++ b/src/Lucene.Net/Search/SearcherFactory.cs
@@ -22,25 +22,26 @@ namespace Lucene.Net.Search
     using IndexReader = Lucene.Net.Index.IndexReader;
 
     /// <summary>
-    /// Factory class used by <seealso cref="SearcherManager"/> to
-    /// create new IndexSearchers. The default implementation just creates
-    /// an IndexSearcher with no custom behavior:
+    /// Factory class used by <see cref="SearcherManager"/> to
+    /// create new <see cref="IndexSearcher"/>s. The default implementation just creates
+    /// an <see cref="IndexSearcher"/> with no custom behavior:
     ///
-    /// <pre class="prettyprint">
-    ///   public IndexSearcher newSearcher(IndexReader r) throws IOException {
-    ///     return new IndexSearcher(r);
-    ///   }
-    /// </pre>
+    /// <code>
+    ///     public IndexSearcher NewSearcher(IndexReader r)
+    ///     {
+    ///         return new IndexSearcher(r);
+    ///     }
+    /// </code>
     ///
     /// You can pass your own factory instead if you want custom behavior, such as:
-    /// <ul>
-    ///   <li>Setting a custom scoring model: <seealso cref="IndexSearcher#setSimilarity(Similarity)"/>
-    ///   <li>Parallel per-segment search: <seealso cref="IndexSearcher#IndexSearcher(IndexReader, ExecutorService)"/>
-    ///   <li>Return custom subclasses of IndexSearcher (for example that implement distributed scoring)
-    ///   <li>Run queries to warm your IndexSearcher before it is used. Note: when using near-realtime search
-    ///       you may want to also <seealso cref="IndexWriterConfig#setMergedSegmentWarmer(IndexWriter.IndexReaderWarmer)"/> to warm
-    ///       newly merged segments in the background, outside of the reopen path.
-    /// </ul>
+    /// <list type="bullet">
+    ///   <item><description>Setting a custom scoring model: <see cref="IndexSearcher.Similarity"/></description></item>
+    ///   <item><description>Parallel per-segment search: <see cref="IndexSearcher.IndexSearcher(IndexReader, System.Threading.Tasks.TaskScheduler)"/></description></item>
+    ///   <item><description>Return custom subclasses of <see cref="IndexSearcher"/> (for example that implement distributed scoring)</description></item>
+    ///   <item><description>Run queries to warm your <see cref="IndexSearcher"/> before it is used. Note: when using near-realtime search
+    ///       you may want to also set <see cref="Index.LiveIndexWriterConfig.MergedSegmentWarmer"/> to warm
+    ///       newly merged segments in the background, outside of the reopen path.</description></item>
+    /// </list>
     /// @lucene.experimental
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -49,7 +50,7 @@ namespace Lucene.Net.Search
     public class SearcherFactory
     {
         /// <summary>
-        /// Returns a new IndexSearcher over the given reader.
+        /// Returns a new <see cref="IndexSearcher"/> over the given reader.
         /// </summary>
         public virtual IndexSearcher NewSearcher(IndexReader reader)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/SearcherLifetimeManager.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/SearcherLifetimeManager.cs b/src/Lucene.Net/Search/SearcherLifetimeManager.cs
index b4585d1..0090339 100644
--- a/src/Lucene.Net/Search/SearcherLifetimeManager.cs
+++ b/src/Lucene.Net/Search/SearcherLifetimeManager.cs
@@ -28,62 +28,68 @@ namespace Lucene.Net.Search
     using IOUtils = Lucene.Net.Util.IOUtils;
 
     /// <summary>
-    /// Keeps track of current plus old IndexSearchers, closing
+    /// Keeps track of current plus old <see cref="IndexSearcher"/>s, disposing
     /// the old ones once they have timed out.
     ///
     /// Use it like this:
     ///
-    /// <pre class="prettyprint">
-    ///   SearcherLifetimeManager mgr = new SearcherLifetimeManager();
-    /// </pre>
+    /// <code>
+    ///     SearcherLifetimeManager mgr = new SearcherLifetimeManager();
+    /// </code>
     ///
     /// Per search-request, if it's a "new" search request, then
     /// obtain the latest searcher you have (for example, by
-    /// using <seealso cref="SearcherManager"/>), and then record this
+    /// using <see cref="SearcherManager"/>), and then record this
     /// searcher:
     ///
-    /// <pre class="prettyprint">
-    ///   // Record the current searcher, and save the returend
-    ///   // token into user's search results (eg as a  hidden
-    ///   // HTML form field):
-    ///   long token = mgr.record(searcher);
-    /// </pre>
+    /// <code>
+    ///     // Record the current searcher, and save the returned
+    ///     // token into user's search results (eg as a hidden
+    ///     // HTML form field):
+    ///     long token = mgr.Record(searcher);
+    /// </code>
     ///
     /// When a follow-up search arrives, for example the user
     /// clicks next page, drills down/up, etc., take the token
     /// that you saved from the previous search and:
     ///
-    /// <pre class="prettyprint">
-    ///   // If possible, obtain the same searcher as the last
-    ///   // search:
-    ///   IndexSearcher searcher = mgr.acquire(token);
-    ///   if (searcher != null) {
-    ///     // Searcher is still here
-    ///     try {
-    ///       // do searching...
-    ///     } finally {
-    ///       mgr.release(searcher);
-    ///       // Do not use searcher after this!
-    ///       searcher = null;
+    /// <code>
+    ///     // If possible, obtain the same searcher as the last
+    ///     // search:
+    ///     IndexSearcher searcher = mgr.Acquire(token);
+    ///     if (searcher != null) 
+    ///     {
+    ///         // Searcher is still here
+    ///         try 
+    ///         {
+    ///             // do searching...
+    ///         } 
+    ///         finally 
+    ///         {
+    ///             mgr.Release(searcher);
+    ///             // Do not use searcher after this!
+    ///             searcher = null;
+    ///         }
+    ///     } 
+    ///     else 
+    ///     {
+    ///         // Searcher was pruned -- notify user session timed
+    ///         // out, or, pull fresh searcher again
     ///     }
-    ///   } else {
-    ///     // Searcher was pruned -- notify user session timed
-    ///     // out, or, pull fresh searcher again
-    ///   }
-    /// </pre>
+    /// </code>
     ///
     /// Finally, in a separate thread, ideally the same thread
     /// that's periodically reopening your searchers, you should
     /// periodically prune old searchers:
     ///
-    /// <pre class="prettyprint">
-    ///   mgr.prune(new PruneByAge(600.0));
-    /// </pre>
+    /// <code>
+    ///     mgr.Prune(new PruneByAge(600.0));
+    /// </code>
     ///
-    /// <p><b>NOTE</b>: keeping many searchers around means
+    /// <para><b>NOTE</b>: keeping many searchers around means
     /// you'll use more resources (open files, RAM) than a single
-    /// searcher.  However, as long as you are using {@link
-    /// DirectoryReader#openIfChanged(DirectoryReader)}, the searchers
+    /// searcher.  However, as long as you are using 
+    /// <see cref="DirectoryReader.OpenIfChanged(DirectoryReader)"/>, the searchers
     /// will usually share almost all segments and the added resource usage
     /// is contained.  When a large merge has completed, and
     /// you reopen, because that is a large change, the new
@@ -91,7 +97,7 @@ namespace Lucene.Net.Search
     /// searchers; but large merges don't complete very often and
     /// it's unlikely you'll hit two of them in your expiration
     /// window.  Still you should budget plenty of heap in the
-    /// JVM to have a good safety margin.</p>
+    /// runtime to have a good safety margin.</para>
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -150,18 +156,18 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Records that you are now using this IndexSearcher.
-        ///  Always call this when you've obtained a possibly new
-        ///  <seealso cref="IndexSearcher"/>, for example from {@link
-        ///  SearcherManager}.  It's fine if you already passed the
-        ///  same searcher to this method before.
+        /// Records that you are now using this <see cref="IndexSearcher"/>.
+        /// Always call this when you've obtained a possibly new
+        /// <see cref="IndexSearcher"/>, for example from 
+        /// <see cref="SearcherManager"/>.  It's fine if you already passed the
+        /// same searcher to this method before.
         ///
-        ///  <p>this returns the long token that you can later pass
-        ///  to <seealso cref="#acquire"/> to retrieve the same IndexSearcher.
-        ///  You should record this long token in the search results
-        ///  sent to your user, such that if the user performs a
-        ///  follow-on action (clicks next page, drills down, etc.)
-        ///  the token is returned.</p>
+        /// <para>This returns the <see cref="long"/> token that you can later pass
+        /// to <see cref="Acquire(long)"/> to retrieve the same <see cref="IndexSearcher"/>.
+        /// You should record this <see cref="long"/> token in the search results
+        /// sent to your user, such that if the user performs a
+        /// follow-on action (clicks next page, drills down, etc.)
+        /// the token is returned.</para>
         /// </summary>
         public virtual long Record(IndexSearcher searcher)
         {
@@ -181,18 +187,18 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Retrieve a previously recorded <seealso cref="IndexSearcher"/>, if it
-        ///  has not yet been closed
+        /// Retrieve a previously recorded <see cref="IndexSearcher"/>, if it
+        /// has not yet been closed.
         ///
-        ///  <p><b>NOTE</b>: this may return null when the
-        ///  requested searcher has already timed out.  When this
-        ///  happens you should notify your user that their session
-        ///  timed out and that they'll have to restart their
-        ///  search.</p>
+        /// <para><b>NOTE</b>: this may return <c>null</c> when the
+        /// requested searcher has already timed out.  When this
+        /// happens you should notify your user that their session
+        /// timed out and that they'll have to restart their
+        /// search.</para>
         ///
-        ///  <p>If this returns a non-null result, you must match
-        ///  later call <seealso cref="#release"/> on this searcher, best
-        ///  from a finally clause.</p>
+        /// <para>If this returns a non-null result, you must match
+        /// later call <see cref="Release(IndexSearcher)"/> on this searcher, best
+        /// from a finally clause.</para>
         /// </summary>
         public virtual IndexSearcher Acquire(long version)
         {
@@ -207,10 +213,10 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Release a searcher previously obtained from {@link
-        ///  #acquire}.
+        /// Release a searcher previously obtained from 
+        /// <see cref="Acquire(long)"/>.
         ///
-        /// <p><b>NOTE</b>: it's fine to call this after close.
+        /// <para/><b>NOTE</b>: it's fine to call this after Dispose().
         /// </summary>
         public virtual void Release(IndexSearcher s)
         {
@@ -218,22 +224,21 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// See <seealso cref="#prune"/>. </summary>
+        /// See <see cref="Prune(IPruner)"/>. </summary>
         public interface IPruner
         {
             /// <summary>
-            /// Return true if this searcher should be removed. </summary>
-            ///  <param name="ageSec"> how much time has passed since this
+            /// Return <c>true</c> if this searcher should be removed. </summary>
+            ///  <param name="ageSec"> How much time has passed since this
             ///         searcher was the current (live) searcher </param>
-            ///  <param name="searcher"> Searcher
-            ///  </param>
+            ///  <param name="searcher"> Searcher </param>
             bool DoPrune(double ageSec, IndexSearcher searcher);
         }
 
         /// <summary>
         /// Simple pruner that drops any searcher older by
-        ///  more than the specified seconds, than the newest
-        ///  searcher.
+        /// more than the specified seconds, than the newest
+        /// searcher.
         /// </summary>
 #if FEATURE_SERIALIZABLE
         [Serializable]
@@ -258,13 +263,13 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Calls provided <seealso cref="IPruner"/> to prune entries.  The
-        ///  entries are passed to the Pruner in sorted (newest to
-        ///  oldest IndexSearcher) order.
+        /// Calls provided <see cref="IPruner"/> to prune entries.  The
+        /// entries are passed to the <see cref="IPruner"/> in sorted (newest to
+        /// oldest <see cref="IndexSearcher"/>) order.
         ///
-        ///  <p><b>NOTE</b>: you must peridiocally call this, ideally
-        ///  from the same background thread that opens new
-        ///  searchers.
+        /// <para/><b>NOTE</b>: you must periodically call this, ideally
+        /// from the same background thread that opens new
+        /// searchers.
         /// </summary>
         public virtual void Prune(IPruner pruner)
         {
@@ -307,14 +312,14 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Close this to future searching; any searches still in
-        ///  process in other threads won't be affected, and they
-        ///  should still call <seealso cref="#release"/> after they are
-        ///  done.
+        /// process in other threads won't be affected, and they
+        /// should still call <see cref="Release(IndexSearcher)"/> after they are
+        /// done.
         ///
-        ///  <p><b>NOTE</b>: you must ensure no other threads are
-        ///  calling <seealso cref="#record"/> while you call close();
-        ///  otherwise it's possible not all searcher references
-        ///  will be freed.
+        /// <para/><b>NOTE</b>: you must ensure no other threads are
+        /// calling <see cref="Record(IndexSearcher)"/> while you call Dispose();
+        /// otherwise it's possible not all searcher references
+        /// will be freed.
         /// </summary>
         public virtual void Dispose()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/SearcherManager.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/SearcherManager.cs b/src/Lucene.Net/Search/SearcherManager.cs
index 4914047..b86743d 100644
--- a/src/Lucene.Net/Search/SearcherManager.cs
+++ b/src/Lucene.Net/Search/SearcherManager.cs
@@ -26,35 +26,38 @@ namespace Lucene.Net.Search
     using IndexWriter = Lucene.Net.Index.IndexWriter;
 
     /// <summary>
-    /// Utility class to safely share <seealso cref="IndexSearcher"/> instances across multiple
-    /// threads, while periodically reopening. this class ensures each searcher is
-    /// closed only once all threads have finished using it.
+    /// Utility class to safely share <see cref="IndexSearcher"/> instances across multiple
+    /// threads, while periodically reopening. This class ensures each searcher is
+    /// disposed only once all threads have finished using it.
     ///
-    /// <p>
-    /// Use <seealso cref="#acquire"/> to obtain the current searcher, and <seealso cref="#release"/> to
+    /// <para/>
+    /// Use <see cref="ReferenceManager{G}.Acquire()"/> to obtain the current searcher, and <see cref="ReferenceManager{G}.Release(G)"/> to
     /// release it, like this:
     ///
-    /// <pre class="prettyprint">
-    /// IndexSearcher s = manager.acquire();
-    /// try {
-    ///   // Do searching, doc retrieval, etc. with s
-    /// } finally {
-    ///   manager.release(s);
+    /// <code>
+    /// IndexSearcher s = manager.Acquire();
+    /// try 
+    /// {
+    ///     // Do searching, doc retrieval, etc. with s
+    /// } 
+    /// finally 
+    /// {
+    ///     manager.Release(s);
+    ///     // Do not use s after this!
+    ///     s = null;
     /// }
-    /// // Do not use s after this!
-    /// s = null;
-    /// </pre>
+    /// </code>
     ///
-    /// <p>
-    /// In addition you should periodically call <seealso cref="#maybeRefresh"/>. While it's
+    /// <para/>
+    /// In addition you should periodically call <see cref="ReferenceManager{G}.MaybeRefresh()"/>. While it's
     /// possible to call this just before running each query, this is discouraged
     /// since it penalizes the unlucky queries that do the reopen. It's better to use
-    /// a separate background thread, that periodically calls maybeReopen. Finally,
-    /// be sure to call <seealso cref="#close"/> once you are done.
+    /// a separate background thread, that periodically calls <see cref="ReferenceManager{G}.MaybeRefresh()"/>. Finally,
+    /// be sure to call <see cref="ReferenceManager{G}.Dispose()"/> once you are done.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
     /// <seealso cref="SearcherFactory"/>
-    ///
-    /// @lucene.experimental </seealso>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -63,26 +66,26 @@ namespace Lucene.Net.Search
         private readonly SearcherFactory searcherFactory;
 
         /// <summary>
-        /// Creates and returns a new SearcherManager from the given
-        /// <seealso cref="IndexWriter"/>.
+        /// Creates and returns a new <see cref="SearcherManager"/> from the given
+        /// <see cref="IndexWriter"/>.
         /// </summary>
         /// <param name="writer">
-        ///          the IndexWriter to open the IndexReader from. </param>
+        ///          The <see cref="IndexWriter"/> to open the <see cref="IndexReader"/> from. </param>
         /// <param name="applyAllDeletes">
-        ///          If <code>true</code>, all buffered deletes will be applied (made
-        ///          visible) in the <seealso cref="IndexSearcher"/> / <seealso cref="DirectoryReader"/>.
-        ///          If <code>false</code>, the deletes may or may not be applied, but
-        ///          remain buffered (in IndexWriter) so that they will be applied in
+        ///          If <c>true</c>, all buffered deletes will be applied (made
+        ///          visible) in the <see cref="IndexSearcher"/> / <see cref="DirectoryReader"/>.
+        ///          If <c>false</c>, the deletes may or may not be applied, but
+        ///          remain buffered (in <see cref="IndexWriter"/>) so that they will be applied in
         ///          the future. Applying deletes can be costly, so if your app can
         ///          tolerate deleted documents being returned you might gain some
-        ///          performance by passing <code>false</code>. See
-        ///          <seealso cref="DirectoryReader#openIfChanged(DirectoryReader, IndexWriter, boolean)"/>. </param>
+        ///          performance by passing <c>false</c>. See
+        ///          <see cref="DirectoryReader.OpenIfChanged(DirectoryReader, IndexWriter, bool)"/>. </param>
         /// <param name="searcherFactory">
-        ///          An optional <see cref="SearcherFactory"/>. Pass <code>null</code> if you
+        ///          An optional <see cref="SearcherFactory"/>. Pass <c>null</c> if you
         ///          don't require the searcher to be warmed before going live or other
         ///          custom behavior.
         /// </param>
-        /// <exception cref="IOException"> if there is a low-level I/O error </exception>
+        /// <exception cref="System.IO.IOException"> if there is a low-level I/O error </exception>
         public SearcherManager(IndexWriter writer, bool applyAllDeletes, SearcherFactory searcherFactory)
         {
             if (searcherFactory == null)
@@ -94,13 +97,13 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Creates and returns a new SearcherManager from the given <seealso cref="Directory"/>. </summary>
-        /// <param name="dir"> the directory to open the DirectoryReader on. </param>
+        /// Creates and returns a new <see cref="SearcherManager"/> from the given <see cref="Directory"/>. </summary>
+        /// <param name="dir"> The directory to open the <see cref="DirectoryReader"/> on. </param>
         /// <param name="searcherFactory"> An optional <see cref="SearcherFactory"/>. Pass
-        ///        <code>null</code> if you don't require the searcher to be warmed
+        ///        <c>null</c> if you don't require the searcher to be warmed
         ///        before going live or other custom behavior.
         /// </param>
-        /// <exception cref="IOException"> if there is a low-level I/O error </exception>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error </exception>
         public SearcherManager(Directory dir, SearcherFactory searcherFactory)
         {
             if (searcherFactory == null)
@@ -142,9 +145,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns <code>true</code> if no changes have occured since this searcher
-        /// ie. reader was opened, otherwise <code>false</code>. </summary>
-        /// <seealso cref= DirectoryReader#isCurrent()  </seealso>
+        /// Returns <c>true</c> if no changes have occurred since this searcher
+        /// i.e. reader was opened, otherwise <c>false</c>. </summary>
+        /// <seealso cref="DirectoryReader.IsCurrent()"/>
         public bool IsSearcherCurrent()
         {
             IndexSearcher searcher = Acquire();
@@ -161,9 +164,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Expert: creates a searcher from the provided {@link
-        ///  IndexReader} using the provided {@link
-        ///  SearcherFactory}.  NOTE: this decRefs incoming reader
+        /// Expert: creates a searcher from the provided 
+        /// <see cref="IndexReader"/> using the provided 
+        /// <see cref="SearcherFactory"/>.  NOTE: this decRefs incoming reader
         /// on throwing an exception.
         /// </summary>
         public static IndexSearcher GetSearcher(SearcherFactory searcherFactory, IndexReader reader)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/SloppyPhraseScorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/SloppyPhraseScorer.cs b/src/Lucene.Net/Search/SloppyPhraseScorer.cs
index 6665a45..7dcde13 100644
--- a/src/Lucene.Net/Search/SloppyPhraseScorer.cs
+++ b/src/Lucene.Net/Search/SloppyPhraseScorer.cs
@@ -86,14 +86,14 @@ namespace Lucene.Net.Search
         /// <summary>
         /// Score a candidate doc for all slop-valid position-combinations (matches)
         /// encountered while traversing/hopping the PhrasePositions.
-        /// <br> The score contribution of a match depends on the distance:
-        /// <br> - highest score for distance=0 (exact match).
-        /// <br> - score gets lower as distance gets higher.
-        /// <br>Example: for query "a b"~2, a document "x a b a y" can be scored twice:
+        /// <para/> The score contribution of a match depends on the distance:
+        /// <para/> - highest score for distance=0 (exact match).
+        /// <para/> - score gets lower as distance gets higher.
+        /// <para/>Example: for query "a b"~2, a document "x a b a y" can be scored twice:
         /// once for "a b" (distance=0), and once for "b a" (distance=2).
-        /// <br>Possibly not all valid combinations are encountered, because for efficiency
-        /// we always propagate the least PhrasePosition. this allows to base on
-        /// PriorityQueue and move forward faster.
+        /// <para/>Possibly not all valid combinations are encountered, because for efficiency
+        /// we always propagate the least PhrasePosition. This allows to base on
+        /// <see cref="Util.PriorityQueue{T}"/> and move forward faster.
         /// As result, for example, document "a b c b a"
         /// would score differently for queries "a b c"~4 and "c b a"~4, although
         /// they really are equivalent.
@@ -148,7 +148,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// advance a PhrasePosition and update 'end', return false if exhausted </summary>
+        /// Advance a PhrasePosition and update 'end', return false if exhausted </summary>
         private bool AdvancePP(PhrasePositions pp)
         {
             if (!pp.NextPosition())
@@ -213,7 +213,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// compare two pps, but only by position and offset </summary>
+        /// Compare two pps, but only by position and offset </summary>
         private PhrasePositions Lesser(PhrasePositions pp, PhrasePositions pp2)
         {
             if (pp.position < pp2.position || (pp.position == pp2.position && pp.offset < pp2.offset))
@@ -224,7 +224,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// index of a pp2 colliding with pp, or -1 if none </summary>
+        /// Index of a pp2 colliding with pp, or -1 if none </summary>
         private int Collide(PhrasePositions pp)
         {
             int tpPos = TpPos(pp);
@@ -241,19 +241,20 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Initialize PhrasePositions in place.
+        /// Initialize <see cref="PhrasePositions"/> in place.
         /// A one time initialization for this scorer (on first doc matching all terms):
-        /// <ul>
-        ///  <li>Check if there are repetitions
-        ///  <li>If there are, find groups of repetitions.
-        /// </ul>
+        /// <list type="bullet">
+        ///     <item><description>Check if there are repetitions</description></item>
+        ///     <item><description>If there are, find groups of repetitions.</description></item>
+        /// </list>
         /// Examples:
-        /// <ol>
-        ///  <li>no repetitions: <b>"ho my"~2</b>
-        ///  <li>repetitions: <b>"ho my my"~2</b>
-        ///  <li>repetitions: <b>"my ho my"~2</b>
-        /// </ol> </summary>
-        /// <returns> false if PPs are exhausted (and so current doc will not be a match)  </returns>
+        /// <list type="number">
+        ///     <item><description>no repetitions: <b>"ho my"~2</b></description></item>
+        ///     <item><description>repetitions: <b>"ho my my"~2</b></description></item>
+        ///     <item><description>repetitions: <b>"my ho my"~2</b></description></item>
+        /// </list> 
+        /// </summary>
+        /// <returns> <c>false</c> if PPs are exhausted (and so current doc will not be a match)  </returns>
         private bool InitPhrasePositions()
         {
             end = int.MinValue;
@@ -270,7 +271,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// no repeats: simplest case, and most common. It is important to keep this piece of the code simple and efficient </summary>
+        /// No repeats: simplest case, and most common. It is important to keep this piece of the code simple and efficient </summary>
         private void InitSimple()
         {
             //System.err.println("initSimple: doc: "+min.doc);
@@ -288,7 +289,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// with repeats: not so simple. </summary>
+        /// With repeats: not so simple. </summary>
         private bool InitComplex()
         {
             //System.err.println("initComplex: doc: "+min.doc);
@@ -302,7 +303,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// move all PPs to their first position </summary>
+        /// Move all PPs to their first position </summary>
         private void PlaceFirstPositions()
         {
             for (PhrasePositions pp = min, prev = null; prev != max; pp = (prev = pp).next) // iterate cyclic list: done once handled max
@@ -312,7 +313,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Fill the queue (all pps are already placed </summary>
+        /// Fill the queue (all pps are already placed) </summary>
         private void FillQueue()
         {
             pq.Clear();
@@ -329,12 +330,13 @@ namespace Lucene.Net.Search
         /// <summary>
         /// At initialization (each doc), each repetition group is sorted by (query) offset.
         /// this provides the start condition: no collisions.
-        /// <p>Case 1: no multi-term repeats<br>
+        /// <para/>Case 1: no multi-term repeats
+        /// <para/>
         /// It is sufficient to advance each pp in the group by one less than its group index.
         /// So lesser pp is not advanced, 2nd one advance once, 3rd one advanced twice, etc.
-        /// <p>Case 2: multi-term repeats<br>
+        /// <para/>Case 2: multi-term repeats
         /// </summary>
-        /// <returns> false if PPs are exhausted.  </returns>
+        /// <returns> <c>false</c> if PPs are exhausted.  </returns>
         private bool AdvanceRepeatGroups()
         {
             foreach (PhrasePositions[] rg in rptGroups)
@@ -382,15 +384,23 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// initialize with checking for repeats. Heavy work, but done only for the first candidate doc.<p>
-        /// If there are repetitions, check if multi-term postings (MTP) are involved.<p>
-        /// Without MTP, once PPs are placed in the first candidate doc, repeats (and groups) are visible.<br>
-        /// With MTP, a more complex check is needed, up-front, as there may be "hidden collisions".<br>
+        /// Initialize with checking for repeats. Heavy work, but done only for the first candidate doc.
+        /// <para/>
+        /// If there are repetitions, check if multi-term postings (MTP) are involved.
+        /// <para/>
+        /// Without MTP, once PPs are placed in the first candidate doc, repeats (and groups) are visible.
+        /// <para/>
+        /// With MTP, a more complex check is needed, up-front, as there may be "hidden collisions".
+        /// <para/>
         /// For example P1 has {A,B}, P1 has {B,C}, and the first doc is: "A C B". At start, P1 would point
-        /// to "A", p2 to "C", and it will not be identified that P1 and P2 are repetitions of each other.<p>
-        /// The more complex initialization has two parts:<br>
-        /// (1) identification of repetition groups.<br>
-        /// (2) advancing repeat groups at the start of the doc.<br>
+        /// to "A", p2 to "C", and it will not be identified that P1 and P2 are repetitions of each other.
+        /// <para/>
+        /// The more complex initialization has two parts:
+        /// <para/>
+        /// (1) identification of repetition groups.
+        /// <para/>
+        /// (2) advancing repeat groups at the start of the doc.
+        /// <para/>
         /// For (1), a possible solution is to just create a single repetition group,
         /// made of all repeating pps. But this would slow down the check for collisions,
         /// as all pps would need to be checked. Instead, we compute "connected regions"
@@ -421,7 +431,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// sort each repetition group by (query) offset.
+        /// Sort each repetition group by (query) offset.
         /// Done only once (at first doc) and allows to initialize faster for each doc.
         /// </summary>
         private void SortRptGroups(IList<IList<PhrasePositions>> rgs)
@@ -459,7 +469,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Detect repetition groups. Done once - for first doc </summary>
+        /// Detect repetition groups. Done once - for first doc. </summary>
         private IList<IList<PhrasePositions>> GatherRptGroups(LinkedHashMap<Term, int?> rptTerms)
         {
             PhrasePositions[] rpp = RepeatingPPs(rptTerms);
@@ -538,7 +548,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// find repeating terms and assign them ordinal values </summary>
+        /// Find repeating terms and assign them ordinal values </summary>
         private LinkedHashMap<Term, int?> RepeatingTerms()
         {
             LinkedHashMap<Term, int?> tord = new LinkedHashMap<Term, int?>();
@@ -561,7 +571,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// find repeating pps, and for each, if has multi-terms, update this.hasMultiTermRpts </summary>
+        /// Find repeating pps, and for each, if has multi-terms, update this.hasMultiTermRpts </summary>
         private PhrasePositions[] RepeatingPPs(HashMap<Term, int?> rptTerms)
         {
             List<PhrasePositions> rp = new List<PhrasePositions>();
@@ -599,7 +609,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// union (term group) bit-sets until they are disjoint (O(n^^2)), and each group have different terms </summary>
+        /// Union (term group) bit-sets until they are disjoint (O(n^2)), and each group has different terms </summary>
         private void UnionTermGroups(IList<FixedBitSet> bb)
         {
             int incr;
@@ -624,7 +634,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// map each term to the single group that contains it </summary>
+        /// Map each term to the single group that contains it </summary>
         private IDictionary<Term, int> TermGroups(LinkedHashMap<Term, int?> tord, IList<FixedBitSet> bb)
         {
             Dictionary<Term, int> tg = new Dictionary<Term, int>();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/Sort.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/Sort.cs b/src/Lucene.Net/Search/Sort.cs
index 9efd341..369f4bd 100644
--- a/src/Lucene.Net/Search/Sort.cs
+++ b/src/Lucene.Net/Search/Sort.cs
@@ -24,77 +24,77 @@ namespace Lucene.Net.Search
     /// <summary>
     /// Encapsulates sort criteria for returned hits.
     ///
-    /// <p>The fields used to determine sort order must be carefully chosen.
-    /// Documents must contain a single term in such a field,
+    /// <para/>The fields used to determine sort order must be carefully chosen.
+    /// <see cref="Documents.Document"/>s must contain a single term in such a field,
     /// and the value of the term should indicate the document's relative position in
     /// a given sort order.  The field must be indexed, but should not be tokenized,
     /// and does not need to be stored (unless you happen to want it back with the
     /// rest of your document data).  In other words:
     ///
-    /// <p><code>document.add (new Field ("byNumber", Integer.toString(x), Field.Store.NO, Field.Index.NOT_ANALYZED));</code></p>
+    /// <para/><code>document.Add(new Field("byNumber", x.ToString(CultureInfo.InvariantCulture), Field.Store.NO, Field.Index.NOT_ANALYZED));</code>
     ///
     ///
-    /// <p><h3>Valid Types of Values</h3>
+    /// <para/><h3>Valid Types of Values</h3>
     ///
-    /// <p>There are four possible kinds of term values which may be put into
-    /// sorting fields: Integers, Longs, Floats, or Strings.  Unless
-    /// <seealso cref="SortField SortField"/> objects are specified, the type of value
+    /// <para/>There are four possible kinds of term values which may be put into
+    /// sorting fields: <see cref="int"/>s, <see cref="long"/>s, <see cref="float"/>s, or <see cref="string"/>s.  Unless
+    /// <see cref="SortField"/> objects are specified, the type of value
     /// in the field is determined by parsing the first term in the field.
     ///
-    /// <p>Integer term values should contain only digits and an optional
+    /// <para/><see cref="int"/> term values should contain only digits and an optional
     /// preceding negative sign.  Values must be base 10 and in the range
     /// <see cref="int.MinValue"/> and <see cref="int.MaxValue"/> inclusive.
     /// Documents which should appear first in the sort
     /// should have low value integers, later documents high values
-    /// (i.e. the documents should be numbered <code>1..n</code> where
-    /// <code>1</code> is the first and <code>n</code> the last).
+    /// (i.e. the documents should be numbered <c>1..n</c> where
+    /// <c>1</c> is the first and <c>n</c> the last).
     ///
-    /// <p>Long term values should contain only digits and an optional
+    /// <para/><see cref="long"/> term values should contain only digits and an optional
     /// preceding negative sign.  Values must be base 10 and in the range
-    /// <code>Long.MIN_VALUE</code> and <code>Long.MAX_VALUE</code> inclusive.
+    /// <see cref="long.MinValue"/> and <see cref="long.MaxValue"/> inclusive.
     /// Documents which should appear first in the sort
     /// should have low value integers, later documents high values.
     ///
-    /// <p>Float term values should conform to values accepted by
-    /// <seealso cref="Float Float.valueOf(String)"/> (except that <code>NaN</code>
-    /// and <code>Infinity</code> are not supported).
-    /// Documents which should appear first in the sort
+    /// <para/><see cref="float"/> term values should conform to values accepted by
+    /// <see cref="Single"/> (except that <c>NaN</c>
+    /// and <c>Infinity</c> are not supported).
+    /// <see cref="Documents.Document"/>s which should appear first in the sort
     /// should have low values, later documents high values.
     ///
-    /// <p>String term values can contain any valid String, but should
+    /// <para/><see cref="string"/> term values can contain any valid <see cref="string"/>, but should
     /// not be tokenized.  The values are sorted according to their
-    /// <seealso cref="Comparable natural order"/>.  Note that using this type
+    /// comparable natural order (<see cref="StringComparer.Ordinal"/>).  Note that using this type
     /// of term value has higher memory requirements than the other
     /// two types.
     ///
-    /// <p><h3>Object Reuse</h3>
+    /// <para/><h3>Object Reuse</h3>
     ///
-    /// <p>One of these objects can be
+    /// <para/>One of these objects can be
     /// used multiple times and the sort order changed between usages.
     ///
-    /// <p>this class is thread safe.
+    /// <para/>This class is thread safe.
     ///
-    /// <p><h3>Memory Usage</h3>
+    /// <para/><h3>Memory Usage</h3>
     ///
-    /// <p>Sorting uses of caches of term values maintained by the
-    /// internal HitQueue(s).  The cache is static and contains an integer
-    /// or float array of length <code>IndexReader.maxDoc()</code> for each field
+    /// <para/>Sorting uses of caches of term values maintained by the
+    /// internal HitQueue(s).  The cache is static and contains an <see cref="int"/>
+    /// or <see cref="float"/> array of length <c>IndexReader.MaxDoc</c> for each field
     /// name for which a sort is performed.  In other words, the size of the
     /// cache in bytes is:
     ///
-    /// <p><code>4 * IndexReader.maxDoc() * (# of different fields actually used to sort)</code>
+    /// <para/><code>4 * IndexReader.MaxDoc * (# of different fields actually used to sort)</code>
     ///
-    /// <p>For String fields, the cache is larger: in addition to the
+    /// <para/>For <see cref="string"/> fields, the cache is larger: in addition to the
     /// above array, the value of every term in the field is kept in memory.
     /// If there are many unique terms in the field, this could
     /// be quite large.
     ///
-    /// <p>Note that the size of the cache is not affected by how many
+    /// <para/>Note that the size of the cache is not affected by how many
     /// fields are in the index and <i>might</i> be used to sort - only by
     /// the ones actually used to sort a result set.
     ///
-    /// <p>Created: Feb 12, 2004 10:53:57 AM
-    ///
+    /// <para/>Created: Feb 12, 2004 10:53:57 AM
+    /// <para/>
     /// @since   lucene 1.4
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -105,7 +105,7 @@ namespace Lucene.Net.Search
         /// <summary>
         /// Represents sorting by computed relevance. Using this sort criteria returns
         /// the same results as calling
-        /// <seealso cref="IndexSearcher#search(Query,int) IndexSearcher#search()"/>without a sort criteria,
+        /// <see cref="IndexSearcher.Search(Query, int)"/>without a sort criteria,
         /// only with slightly more overhead.
         /// </summary>
         public static readonly Sort RELEVANCE = new Sort();
@@ -118,8 +118,8 @@ namespace Lucene.Net.Search
         internal SortField[] fields;
 
         /// <summary>
-        /// Sorts by computed relevance. this is the same sort criteria as calling
-        /// <seealso cref="IndexSearcher#search(Query,int) IndexSearcher#search()"/>without a sort criteria,
+        /// Sorts by computed relevance. This is the same sort criteria as calling
+        /// <see cref="IndexSearcher.Search(Query, int)"/> without a sort criteria,
         /// only with slightly more overhead.
         /// </summary>
         public Sort()
@@ -128,14 +128,14 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Sorts by the criteria in the given SortField. </summary>
+        /// Sorts by the criteria in the given <see cref="SortField"/>. </summary>
         public Sort(SortField field)
         {
             SetSort(field);
         }
 
         /// <summary>
-        /// Sorts in succession by the criteria in each SortField. </summary>
+        /// Sorts in succession by the criteria in each <see cref="SortField"/>. </summary>
         public Sort(params SortField[] fields)
         {
             SetSort(fields);
@@ -154,7 +154,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary> Representation of the sort criteria.</summary>
-        /// <returns> Array of SortField objects used in this sort criteria
+        /// <returns> Array of <see cref="SortField"/> objects used in this sort criteria
         /// </returns>
         [WritableArray]
         public virtual SortField[] GetSort()
@@ -163,14 +163,15 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Rewrites the SortFields in this Sort, returning a new Sort if any of the fields
+        /// Rewrites the <see cref="SortField"/>s in this <see cref="Sort"/>, returning a new <see cref="Sort"/> if any of the fields
         /// changes during their rewriting.
+        /// <para/>
+        /// @lucene.experimental
         /// </summary>
-        /// <param name="searcher"> IndexSearcher to use in the rewriting </param>
-        /// <returns> {@code this} if the Sort/Fields have not changed, or a new Sort if there
+        /// <param name="searcher"> <see cref="IndexSearcher"/> to use in the rewriting </param>
+        /// <returns> <c>this</c> if the Sort/Fields have not changed, or a new <see cref="Sort"/> if there
         ///        is a change </returns>
-        /// <exception cref="IOException"> Can be thrown by the rewriting
-        /// @lucene.experimental </exception>
+        /// <exception cref="System.IO.IOException"> Can be thrown by the rewriting</exception>
         public virtual Sort Rewrite(IndexSearcher searcher)
         {
             bool changed = false;
@@ -205,7 +206,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns true if <code>o</code> is equal to this. </summary>
+        /// Returns <c>true</c> if <paramref name="o"/> is equal to this. </summary>
         public override bool Equals(object o)
         {
             if (this == o)
@@ -228,7 +229,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns true if the relevance score is needed to sort documents. </summary>
+        /// Returns <c>true</c> if the relevance score is needed to sort documents. </summary>
         public virtual bool NeedsScores
         {
             get

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/SortField.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/SortField.cs b/src/Lucene.Net/Search/SortField.cs
index b9690d6..c82423a 100644
--- a/src/Lucene.Net/Search/SortField.cs
+++ b/src/Lucene.Net/Search/SortField.cs
@@ -29,10 +29,10 @@ namespace Lucene.Net.Search
     /// Stores information about how to sort documents by terms in an individual
     /// field.  Fields must be indexed in order to sort by them.
     ///
-    /// <p>Created: Feb 11, 2004 1:25:29 PM
-    ///
+    /// <para/>Created: Feb 11, 2004 1:25:29 PM
+    /// <para/>
     /// @since   lucene 1.4 </summary>
-    /// <seealso cref= Sort </seealso>
+    /// <seealso cref="Sort"/>
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
@@ -84,9 +84,9 @@ namespace Lucene.Net.Search
         /// <summary>
         /// Creates a sort by terms in the given field with the type of term
         /// values explicitly given. </summary>
-        /// <param name="field">  Name of field to sort by.  Can be <code>null</code> if
-        ///               <code>type</code> is SCORE or DOC. </param>
-        /// <param name="type">   Type of values in the terms. </param>
+        /// <param name="field"> Name of field to sort by. Can be <c>null</c> if
+        ///               <paramref name="type"/> is <see cref="SortFieldType.SCORE"/> or <see cref="SortFieldType.DOC"/>. </param>
+        /// <param name="type"> Type of values in the terms. </param>
         public SortField(string field, SortFieldType type)
         {
             InitFieldType(field, type);
@@ -95,10 +95,10 @@ namespace Lucene.Net.Search
         /// <summary>
         /// Creates a sort, possibly in reverse, by terms in the given field with the
         /// type of term values explicitly given. </summary>
-        /// <param name="field">  Name of field to sort by.  Can be <code>null</code> if
-        ///               <code>type</code> is SCORE or DOC. </param>
+        /// <param name="field">  Name of field to sort by.  Can be <c>null</c> if
+        ///               <paramref name="type"/> is <see cref="SortFieldType.SCORE"/> or <see cref="SortFieldType.DOC"/>. </param>
         /// <param name="type">   Type of values in the terms. </param>
-        /// <param name="reverse"> True if natural order should be reversed. </param>
+        /// <param name="reverse"> <c>True</c> if natural order should be reversed. </param>
         public SortField(string field, SortFieldType type, bool reverse)
         {
             InitFieldType(field, type);
@@ -107,14 +107,14 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Creates a sort by terms in the given field, parsed
-        /// to numeric values using a custom <seealso cref="IFieldCache.Parser"/>. </summary>
-        /// <param name="field">  Name of field to sort by.  Must not be null. </param>
-        /// <param name="parser"> Instance of a <seealso cref="IFieldCache.Parser"/>,
+        /// to numeric values using a custom <see cref="FieldCache.IParser"/>. </summary>
+        /// <param name="field">  Name of field to sort by.  Must not be <c>null</c>. </param>
+        /// <param name="parser"> Instance of a <see cref="FieldCache.IParser"/>,
         ///  which must subclass one of the existing numeric
-        ///  parsers from <seealso cref="IFieldCache"/>. Sort type is inferred
+        ///  parsers from <see cref="IFieldCache"/>. Sort type is inferred
         ///  by testing which numeric parser the parser subclasses. </param>
-        /// <exception cref="IllegalArgumentException"> if the parser fails to
-        ///  subclass an existing numeric parser, or field is null </exception>
+        /// <exception cref="ArgumentException"> if the parser fails to
+        ///  subclass an existing numeric parser, or field is <c>null</c> </exception>
         public SortField(string field, FieldCache.IParser parser)
             : this(field, parser, false)
         {
@@ -122,15 +122,15 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Creates a sort, possibly in reverse, by terms in the given field, parsed
-        /// to numeric values using a custom <seealso cref="IFieldCache.Parser"/>. </summary>
-        /// <param name="field">  Name of field to sort by.  Must not be null. </param>
-        /// <param name="parser"> Instance of a <seealso cref="IFieldCache.Parser"/>,
+        /// to numeric values using a custom <see cref="FieldCache.IParser"/>. </summary>
+        /// <param name="field">  Name of field to sort by.  Must not be <c>null</c>. </param>
+        /// <param name="parser"> Instance of a <see cref="FieldCache.IParser"/>,
         ///  which must subclass one of the existing numeric
-        ///  parsers from <seealso cref="IFieldCache"/>. Sort type is inferred
+        ///  parsers from <see cref="IFieldCache"/>. Sort type is inferred
         ///  by testing which numeric parser the parser subclasses. </param>
-        /// <param name="reverse"> True if natural order should be reversed. </param>
-        /// <exception cref="IllegalArgumentException"> if the parser fails to
-        ///  subclass an existing numeric parser, or field is null </exception>
+        /// <param name="reverse"> <c>True</c> if natural order should be reversed. </param>
+        /// <exception cref="ArgumentException"> if the parser fails to
+        ///  subclass an existing numeric parser, or field is <c>null</c> </exception>
         public SortField(string field, FieldCache.IParser parser, bool reverse)
         {
             if (parser is FieldCache.IInt32Parser)
@@ -141,9 +141,9 @@ namespace Lucene.Net.Search
             {
                 InitFieldType(field, SortFieldType.SINGLE);
             }
+#pragma warning disable 612, 618
             else if (parser is FieldCache.IInt16Parser)
             {
-#pragma warning disable 612, 618
                 InitFieldType(field, SortFieldType.INT16);
             }
             else if (parser is FieldCache.IByteParser)
@@ -169,8 +169,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Pass this to <seealso cref="#setMissingValue"/> to have missing
-        ///  string values sort first.
+        /// Pass this to <see cref="MissingValue"/> to have missing
+        /// string values sort first.
         /// </summary>
         public static readonly object STRING_FIRST = new ObjectAnonymousInnerClassHelper();
 
@@ -190,8 +190,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Pass this to <seealso cref="#setMissingValue"/> to have missing
-        ///  string values sort last.
+        /// Pass this to <see cref="MissingValue"/> to have missing
+        /// string values sort last.
         /// </summary>
         public static readonly object STRING_LAST = new ObjectAnonymousInnerClassHelper2();
 
@@ -229,7 +229,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Creates a sort with a custom comparison function. </summary>
-        /// <param name="field"> Name of field to sort by; cannot be <code>null</code>. </param>
+        /// <param name="field"> Name of field to sort by; cannot be <c>null</c>. </param>
         /// <param name="comparer"> Returns a comparer for sorting hits. </param>
         public SortField(string field, FieldComparerSource comparer)
         {
@@ -239,9 +239,9 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Creates a sort, possibly in reverse, with a custom comparison function. </summary>
-        /// <param name="field"> Name of field to sort by; cannot be <code>null</code>. </param>
+        /// <param name="field"> Name of field to sort by; cannot be <c>null</c>. </param>
         /// <param name="comparer"> Returns a comparer for sorting hits. </param>
-        /// <param name="reverse"> True if natural order should be reversed. </param>
+        /// <param name="reverse"> <c>true</c> if natural order should be reversed. </param>
         public SortField(string field, FieldComparerSource comparer, bool reverse)
         {
             InitFieldType(field, SortFieldType.CUSTOM);
@@ -268,9 +268,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns the name of the field.  Could return <code>null</code>
-        /// if the sort is by SCORE or DOC. </summary>
-        /// <returns> Name of field, possibly <code>null</code>. </returns>
+        /// Returns the name of the field.  Could return <c>null</c>
+        /// if the sort is by <see cref="SortFieldType.SCORE"/> or <see cref="SortFieldType.DOC"/>. </summary>
+        /// <returns> Name of field, possibly <c>null</c>. </returns>
         public virtual string Field
         {
             get
@@ -292,9 +292,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns the instance of a <seealso cref="IFieldCache"/> parser that fits to the given sort type.
-        /// May return <code>null</code> if no parser was specified. Sorting is using the default parser then. </summary>
-        /// <returns> An instance of a <seealso cref="IFieldCache"/> parser, or <code>null</code>. </returns>
+        /// Returns the instance of a <see cref="IFieldCache"/> parser that fits to the given sort type.
+        /// May return <c>null</c> if no parser was specified. Sorting is using the default parser then. </summary>
+        /// <returns> An instance of a <see cref="IFieldCache"/> parser, or <c>null</c>. </returns>
         public virtual FieldCache.IParser Parser
         {
             get
@@ -305,7 +305,7 @@ namespace Lucene.Net.Search
 
         /// <summary>
         /// Returns whether the sort should be reversed. </summary>
-        /// <returns>  True if natural order should be reversed. </returns>
+        /// <returns> <c>true</c> if natural order should be reversed. </returns>
         public virtual bool IsReverse
         {
             get
@@ -315,8 +315,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns the <seealso cref="FieldComparerSource"/> used for
-        /// custom sorting
+        /// Returns the <see cref="FieldComparerSource"/> used for
+        /// custom sorting.
         /// </summary>
         public virtual FieldComparerSource ComparerSource
         {
@@ -400,10 +400,10 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns true if <code>o</code> is equal to this.  If a
-        ///  <seealso cref="FieldComparerSource"/> or {@link
-        ///  FieldCache.Parser} was provided, it must properly
-        ///  implement equals (unless a singleton is always used).
+        /// Returns <c>true</c> if <paramref name="o"/> is equal to this.  If a
+        /// <see cref="FieldComparerSource"/> or 
+        /// <see cref="FieldCache.IParser"/> was provided, it must properly
+        /// implement equals (unless a singleton is always used).
         /// </summary>
         public override bool Equals(object o)
         {
@@ -423,11 +423,11 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns true if <code>o</code> is equal to this.  If a
-        ///  <seealso cref="FieldComparerSource"/> or {@link
-        ///  FieldCache.Parser} was provided, it must properly
-        ///  implement hashCode (unless a singleton is always
-        ///  used).
+        /// Returns a hash code value for this object.  If a
+        /// <see cref="FieldComparerSource"/> or
+        /// <see cref="FieldCache.IParser"/> was provided, it must properly
+        /// implement GetHashCode() (unless a singleton is always
+        /// used).
         /// </summary>
         public override int GetHashCode()
         {
@@ -458,17 +458,17 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns the <seealso cref="FieldComparer"/> to use for
+        /// Returns the <see cref="FieldComparer"/> to use for
         /// sorting.
-        ///
+        /// <para/>
         /// @lucene.experimental
         /// </summary>
-        /// <param name="numHits"> number of top hits the queue will store </param>
-        /// <param name="sortPos"> position of this SortField within {@link
-        ///   Sort}.  The comparer is primary if sortPos==0,
+        /// <param name="numHits"> Number of top hits the queue will store </param>
+        /// <param name="sortPos"> Position of this <see cref="SortField"/> within 
+        ///   <see cref="Sort"/>.  The comparer is primary if sortPos==0,
         ///   secondary if sortPos==1, etc.  Some comparers can
         ///   optimize themselves when they are the primary sort. </param>
-        /// <returns> <seealso cref="FieldComparer"/> to use when sorting </returns>
+        /// <returns> <see cref="FieldComparer"/> to use when sorting </returns>
         public virtual FieldComparer GetComparer(int numHits, int sortPos)
         {
             switch (type)
@@ -519,14 +519,15 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Rewrites this SortField, returning a new SortField if a change is made.
+        /// Rewrites this <see cref="SortField"/>, returning a new <see cref="SortField"/> if a change is made.
         /// Subclasses should override this define their rewriting behavior when this
-        /// SortField is of type <seealso cref="SortField.Type#REWRITEABLE"/>
+        /// SortField is of type <see cref="SortFieldType.REWRITEABLE"/>.
+        /// <para/>
+        /// @lucene.experimental
         /// </summary>
-        /// <param name="searcher"> IndexSearcher to use during rewriting </param>
-        /// <returns> New rewritten SortField, or {@code this} if nothing has changed. </returns>
-        /// <exception cref="IOException"> Can be thrown by the rewriting
-        /// @lucene.experimental </exception>
+        /// <param name="searcher"> <see cref="IndexSearcher"/> to use during rewriting </param>
+        /// <returns> New rewritten <see cref="SortField"/>, or <c>this</c> if nothing has changed. </returns>
+        /// <exception cref="System.IO.IOException"> Can be thrown by the rewriting </exception>
         public virtual SortField Rewrite(IndexSearcher searcher)
         {
             return this;
@@ -546,25 +547,25 @@ namespace Lucene.Net.Search
     public enum SortFieldType // LUCENENET NOTE: de-nested and renamed from Type to avoid naming collision with Type property and with System.Type
     {
         /// <summary>
-        /// Sort by document score (relevance).  Sort values are Float and higher
+        /// Sort by document score (relevance).  Sort values are <see cref="float"/> and higher
         /// values are at the front.
         /// </summary>
         SCORE,
 
         /// <summary>
-        /// Sort by document number (index order).  Sort values are Integer and lower
+        /// Sort by document number (index order).  Sort values are <see cref="int"/> and lower
         /// values are at the front.
         /// </summary>
         DOC,
 
         /// <summary>
-        /// Sort using term values as Strings.  Sort values are String and lower
+        /// Sort using term values as <see cref="string"/>s.  Sort values are <see cref="string"/>s and lower
         /// values are at the front.
         /// </summary>
         STRING,
 
         /// <summary>
-        /// Sort using term values as encoded Integers.  Sort values are Integer and
+        /// Sort using term values as encoded <see cref="int"/>s.  Sort values are <see cref="int"/> and
         /// lower values are at the front.
         /// <para/>
         /// NOTE: This was INT in Lucene
@@ -572,7 +573,7 @@ namespace Lucene.Net.Search
         INT32,
 
         /// <summary>
-        /// Sort using term values as encoded Floats.  Sort values are Float and
+        /// Sort using term values as encoded <see cref="float"/>s.  Sort values are <see cref="float"/> and
         /// lower values are at the front.
         /// <para/>
         /// NOTE: This was FLOAT in Lucene
@@ -580,7 +581,7 @@ namespace Lucene.Net.Search
         SINGLE,
 
         /// <summary>
-        /// Sort using term values as encoded Longs.  Sort values are Long and
+        /// Sort using term values as encoded <see cref="long"/>s.  Sort values are <see cref="long"/> and
         /// lower values are at the front.
         /// <para/>
         /// NOTE: This was LONG in Lucene
@@ -588,13 +589,13 @@ namespace Lucene.Net.Search
         INT64,
 
         /// <summary>
-        /// Sort using term values as encoded Doubles.  Sort values are Double and
+        /// Sort using term values as encoded <see cref="double"/>s.  Sort values are <see cref="double"/> and
         /// lower values are at the front.
         /// </summary>
         DOUBLE,
 
         /// <summary>
-        /// Sort using term values as encoded Shorts.  Sort values are Short and
+        /// Sort using term values as encoded <see cref="short"/>s.  Sort values are <see cref="short"/> and
         /// lower values are at the front.
         /// <para/>
         /// NOTE: This was SHORT in Lucene
@@ -603,32 +604,32 @@ namespace Lucene.Net.Search
         INT16,
 
         /// <summary>
-        /// Sort using a custom Comparer.  Sort values are any Comparable and
+        /// Sort using a custom <see cref="IComparer{T}"/>.  Sort values are any <see cref="IComparable{T}"/> and
         /// sorting is done according to natural order.
         /// </summary>
         CUSTOM,
 
         /// <summary>
-        /// Sort using term values as encoded Bytes.  Sort values are Byte and
+        /// Sort using term values as encoded <see cref="byte"/>s.  Sort values are <see cref="byte"/> and
         /// lower values are at the front.
         /// </summary>
         [System.Obsolete]
         BYTE,
 
         /// <summary>
-        /// Sort using term values as Strings, but comparing by
-        /// value (using String.compareTo) for all comparisons.
-        /// this is typically slower than <seealso cref="#STRING"/>, which
+        /// Sort using term values as <see cref="string"/>s, but comparing by
+        /// value (using <see cref="BytesRef.CompareTo(BytesRef)"/>) for all comparisons.
+        /// This is typically slower than <see cref="STRING"/>, which
         /// uses ordinals to do the sorting.
         /// </summary>
         STRING_VAL,
 
         /// <summary>
-        /// Sort use byte[] index values. </summary>
+        /// Sort use <see cref="T:byte[]"/> index values. </summary>
         BYTES,
 
         /// <summary>
-        /// Force rewriting of SortField using <seealso cref="SortField#rewrite(IndexSearcher)"/>
+        /// Force rewriting of <see cref="SortField"/> using <see cref="SortField.Rewrite(IndexSearcher)"/>
         /// before it can be used for sorting
         /// </summary>
         REWRITEABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/SortRescorer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/SortRescorer.cs b/src/Lucene.Net/Search/SortRescorer.cs
index 459573b..810f2ee 100644
--- a/src/Lucene.Net/Search/SortRescorer.cs
+++ b/src/Lucene.Net/Search/SortRescorer.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Search
     using AtomicReaderContext = Lucene.Net.Index.AtomicReaderContext;
 
     /// <summary>
-    /// A <seealso cref="Rescorer"/> that re-sorts according to a provided
+    /// A <see cref="Rescorer"/> that re-sorts according to a provided
     /// Sort.
     /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/TermCollectingRewrite.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/TermCollectingRewrite.cs b/src/Lucene.Net/Search/TermCollectingRewrite.cs
index 3250431..aac93b2 100644
--- a/src/Lucene.Net/Search/TermCollectingRewrite.cs
+++ b/src/Lucene.Net/Search/TermCollectingRewrite.cs
@@ -35,14 +35,14 @@ namespace Lucene.Net.Search
 #if FEATURE_SERIALIZABLE
     [Serializable]
 #endif
-    public abstract class TermCollectingRewrite<Q> : MultiTermQuery.RewriteMethod where Q : Query
+    public abstract class TermCollectingRewrite<Q> : MultiTermQuery.RewriteMethod where Q : Query // LUCENENET NOTE: Class was made public instead of internal because it has public derived types
     {
         /// <summary>
-        /// Return a suitable top-level Query for holding all expanded terms. </summary>
+        /// Return a suitable top-level <see cref="Query"/> for holding all expanded terms. </summary>
         protected abstract Q GetTopLevelQuery();
 
         /// <summary>
-        /// Add a MultiTermQuery term to the top-level query </summary>
+        /// Add a <see cref="MultiTermQuery"/> term to the top-level query </summary>
         protected void AddClause(Q topLevel, Term term, int docCount, float boost)
         {
             AddClause(topLevel, term, docCount, boost, null);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/TermQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/TermQuery.cs b/src/Lucene.Net/Search/TermQuery.cs
index 8289c7d..aee74a8 100644
--- a/src/Lucene.Net/Search/TermQuery.cs
+++ b/src/Lucene.Net/Search/TermQuery.cs
@@ -38,8 +38,8 @@ namespace Lucene.Net.Search
     using ToStringUtils = Lucene.Net.Util.ToStringUtils;
 
     /// <summary>
-    /// A Query that matches documents containing a term.
-    ///  this may be combined with other terms with a <seealso cref="BooleanQuery"/>.
+    /// A <see cref="Query"/> that matches documents containing a term.
+    /// This may be combined with other terms with a <see cref="BooleanQuery"/>.
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -107,8 +107,8 @@ namespace Lucene.Net.Search
             }
 
             /// <summary>
-            /// Returns a <seealso cref="TermsEnum"/> positioned at this weights Term or null if
-            /// the term does not exist in the given context
+            /// Returns a <see cref="TermsEnum"/> positioned at this weights <see cref="Index.Term"/> or <c>null</c> if
+            /// the term does not exist in the given context.
             /// </summary>
             private TermsEnum GetTermsEnum(AtomicReaderContext context)
             {
@@ -155,16 +155,16 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Constructs a query for the term <code>t</code>. </summary>
+        /// Constructs a query for the term <paramref name="t"/>. </summary>
         public TermQuery(Term t)
             : this(t, -1)
         {
         }
 
         /// <summary>
-        /// Expert: constructs a TermQuery that will use the
-        ///  provided docFreq instead of looking up the docFreq
-        ///  against the searcher.
+        /// Expert: constructs a <see cref="TermQuery"/> that will use the
+        /// provided <paramref name="docFreq"/> instead of looking up the docFreq
+        /// against the searcher.
         /// </summary>
         public TermQuery(Term t, int docFreq)
         {
@@ -174,9 +174,9 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Expert: constructs a TermQuery that will use the
-        ///  provided docFreq instead of looking up the docFreq
-        ///  against the searcher.
+        /// Expert: constructs a <see cref="TermQuery"/> that will use the
+        /// provided docFreq instead of looking up the docFreq
+        /// against the searcher.
         /// </summary>
         public TermQuery(Term t, TermContext states)
         {
@@ -241,7 +241,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns true iff <code>o</code> is equal to this. </summary>
+        /// Returns <c>true</c> if <paramref name="o"/> is equal to this. </summary>
         public override bool Equals(object o)
         {
             if (!(o is TermQuery))

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b2db5313/src/Lucene.Net/Search/TermRangeFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Search/TermRangeFilter.cs b/src/Lucene.Net/Search/TermRangeFilter.cs
index 39c27cb..128ac8a 100644
--- a/src/Lucene.Net/Search/TermRangeFilter.cs
+++ b/src/Lucene.Net/Search/TermRangeFilter.cs
@@ -22,16 +22,17 @@ namespace Lucene.Net.Search
     using BytesRef = Lucene.Net.Util.BytesRef;
 
     /// <summary>
-    /// A Filter that restricts search results to a range of term
+    /// A <see cref="Filter"/> that restricts search results to a range of term
     /// values in a given field.
     ///
-    /// <p>this filter matches the documents looking for terms that fall into the
-    /// supplied range according to {@link
-    /// Byte#compareTo(Byte)},  It is not intended
-    /// for numerical ranges; use <seealso cref="NumericRangeFilter"/> instead.
+    /// <para/>This filter matches the documents looking for terms that fall into the
+    /// supplied range according to 
+    /// <see cref="byte.CompareTo(byte)"/>.  It is not intended
+    /// for numerical ranges; use <see cref="NumericRangeFilter"/> instead.
     ///
-    /// <p>If you construct a large number of range filters with different ranges but on the
-    /// same field, <seealso cref="FieldCacheRangeFilter"/> may have significantly better performance.
+    /// <para/>If you construct a large number of range filters with different ranges but on the
+    /// same field, <see cref="FieldCacheRangeFilter"/> may have significantly better performance.
+    /// <para/>
     /// @since 2.9
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -44,8 +45,8 @@ namespace Lucene.Net.Search
         /// <param name="upperTerm"> The upper bound on this range </param>
         /// <param name="includeLower"> Does this range include the lower bound? </param>
         /// <param name="includeUpper"> Does this range include the upper bound? </param>
-        /// <exception cref="IllegalArgumentException"> if both terms are null or if
-        ///  lowerTerm is null and includeLower is true (similar for upperTerm
+        /// <exception cref="ArgumentException"> if both terms are <c>null</c> or if
+        ///  lowerTerm is <c>null</c> and includeLower is <c>true</c> (similar for upperTerm
         ///  and includeUpper) </exception>
         public TermRangeFilter(string fieldName, BytesRef lowerTerm, BytesRef upperTerm, bool includeLower, bool includeUpper)
             : base(new TermRangeQuery(fieldName, lowerTerm, upperTerm, includeLower, includeUpper))
@@ -53,7 +54,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Factory that creates a new TermRangeFilter using Strings for term text.
+        /// Factory that creates a new <see cref="TermRangeFilter"/> using <see cref="string"/>s for term text.
         /// </summary>
         public static TermRangeFilter NewStringRange(string field, string lowerTerm, string upperTerm, bool includeLower, bool includeUpper)
         {
@@ -63,8 +64,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Constructs a filter for field <code>fieldName</code> matching
-        /// less than or equal to <code>upperTerm</code>.
+        /// Constructs a filter for field <paramref name="fieldName"/> matching
+        /// less than or equal to <paramref name="upperTerm"/>.
         /// </summary>
         public static TermRangeFilter Less(string fieldName, BytesRef upperTerm)
         {
@@ -72,8 +73,8 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Constructs a filter for field <code>fieldName</code> matching
-        /// greater than or equal to <code>lowerTerm</code>.
+        /// Constructs a filter for field <paramref name="fieldName"/> matching
+        /// greater than or equal to <paramref name="lowerTerm"/>.
         /// </summary>
         public static TermRangeFilter More(string fieldName, BytesRef lowerTerm)
         {
@@ -101,14 +102,14 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>
-        /// Returns <code>true</code> if the lower endpoint is inclusive </summary>
+        /// Returns <c>true</c> if the lower endpoint is inclusive </summary>
         public virtual bool IncludesLower
         {
             get { return m_query.IncludesLower; }
         }
 
         /// <summary>
-        /// Returns <code>true</code> if the upper endpoint is inclusive </summary>
+        /// Returns <c>true</c> if the upper endpoint is inclusive </summary>
         public virtual bool IncludesUpper
         {
             get { return m_query.IncludesUpper; }


[48/48] lucenenet git commit: Lucene.Net.Util.Packed: Fixed XML documentation comment warnings

Posted by ni...@apache.org.
Lucene.Net.Util.Packed: Fixed XML documentation comment warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/6f22b5ab
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/6f22b5ab
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/6f22b5ab

Branch: refs/heads/master
Commit: 6f22b5ab8fc3dc7c75cefef25b7ef3aa3d0e47df
Parents: 5a0e4b6
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Tue Jun 6 03:10:04 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Tue Jun 6 06:58:43 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |   4 +-
 .../Util/Packed/AbstractAppendingLongBuffer.cs  |  11 +-
 .../Util/Packed/AbstractBlockPackedWriter.cs    |  12 +-
 .../Util/Packed/AbstractPagedMutable.cs         |  16 +-
 .../Packed/AppendingDeltaPackedLongBuffer.cs    |  20 +-
 .../Util/Packed/AppendingPackedLongBuffer.cs    |  22 +-
 src/Lucene.Net/Util/Packed/BlockPackedReader.cs |   5 +-
 .../Util/Packed/BlockPackedReaderIterator.cs    |  24 +-
 src/Lucene.Net/Util/Packed/BlockPackedWriter.cs |  44 +-
 src/Lucene.Net/Util/Packed/BulkOperation.cs     |  20 +-
 .../Util/Packed/BulkOperationPacked.cs          |   6 +-
 .../Packed/BulkOperationPackedSingleBlock.cs    |   8 +-
 src/Lucene.Net/Util/Packed/Direct16.cs          |   1 +
 src/Lucene.Net/Util/Packed/Direct32.cs          |   1 +
 src/Lucene.Net/Util/Packed/Direct64.cs          |   1 +
 src/Lucene.Net/Util/Packed/Direct8.cs           |   1 +
 src/Lucene.Net/Util/Packed/EliasFanoDecoder.cs  | 110 +--
 src/Lucene.Net/Util/Packed/EliasFanoDocIdSet.cs |  21 +-
 src/Lucene.Net/Util/Packed/EliasFanoEncoder.cs  | 173 +++--
 src/Lucene.Net/Util/Packed/GrowableWriter.cs    |   8 +-
 .../Util/Packed/MonotonicAppendingLongBuffer.cs |  18 +-
 .../Util/Packed/MonotonicBlockPackedReader.cs   |   6 +-
 .../Util/Packed/MonotonicBlockPackedWriter.cs   |  46 +-
 .../Util/Packed/Packed16ThreeBlocks.cs          |   1 +
 src/Lucene.Net/Util/Packed/Packed64.cs          |  29 +-
 .../Util/Packed/Packed64SingleBlock.cs          |   2 +-
 .../Util/Packed/Packed8ThreeBlocks.cs           |   1 +
 src/Lucene.Net/Util/Packed/PackedDataInput.cs   |  18 +-
 src/Lucene.Net/Util/Packed/PackedDataOutput.cs  |  18 +-
 src/Lucene.Net/Util/Packed/PackedInts.cs        | 774 +++++++++----------
 .../Util/Packed/PagedGrowableWriter.cs          |  16 +-
 src/Lucene.Net/Util/Packed/PagedMutable.cs      |  15 +-
 32 files changed, 709 insertions(+), 743 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 36b780b..5736674 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -50,9 +50,7 @@ helpers to help with that, see for examples see our [Java style methods to avoid
 
 ### Documentation Comments == up for grabs:
 
-1. Lucene.Net (project)
-   1. Util.Packed (namespace)
-2. Lucene.Net.Codecs (project)
+1. Lucene.Net.Codecs (project)
    1. Appending (namespace)
    2. BlockTerms (namespace)
    3. Bloom (namespace)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/AbstractAppendingLongBuffer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/AbstractAppendingLongBuffer.cs b/src/Lucene.Net/Util/Packed/AbstractAppendingLongBuffer.cs
index eb650c6..718daed 100644
--- a/src/Lucene.Net/Util/Packed/AbstractAppendingLongBuffer.cs
+++ b/src/Lucene.Net/Util/Packed/AbstractAppendingLongBuffer.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Util.Packed
      */
 
     /// <summary>
-    /// Common functionality shared by <seealso cref="AppendingDeltaPackedInt64Buffer"/> and <seealso cref="MonotonicAppendingInt64Buffer"/>. 
+    /// Common functionality shared by <see cref="AppendingDeltaPackedInt64Buffer"/> and <see cref="MonotonicAppendingInt64Buffer"/>. 
     /// <para/>
     /// NOTE: This was AbstractAppendingLongBuffer in Lucene
     /// </summary>
@@ -59,6 +59,7 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// Get the number of values that have been added to the buffer.
+        /// <para/>
         /// NOTE: This was size() in Lucene.
         /// </summary>
         public long Count
@@ -119,8 +120,8 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Bulk get: read at least one and at most <code>len</code> longs starting
-        /// from <code>index</code> into <code>arr[off:off+len]</code> and return
+        /// Bulk get: read at least one and at most <paramref name="len"/> <see cref="long"/>s starting
+        /// from <paramref name="index"/> into <c>arr[off:off+len]</c> and return
         /// the actual number of values that have been read.
         /// </summary>
         public int Get(long index, long[] arr, int off, int len)
@@ -189,7 +190,7 @@ namespace Lucene.Net.Util.Packed
 
             /// <summary>
             /// Whether or not there are remaining values. </summary>
-            public bool HasNext
+            public bool HasNext // LUCENENET TODO: API - Change to HasNext() method (makes calculation)
             {
                 get { return pOff < currentCount; }
             }
@@ -238,7 +239,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Pack all pending values in this buffer. Subsequent calls to <seealso cref="#add(long)"/> will fail. </summary>
+        /// Pack all pending values in this buffer. Subsequent calls to <see cref="Add(long)"/> will fail. </summary>
         public virtual void Freeze()
         {
             if (pendingOff > 0)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/AbstractBlockPackedWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/AbstractBlockPackedWriter.cs b/src/Lucene.Net/Util/Packed/AbstractBlockPackedWriter.cs
index 79c72b8..f3405a8 100644
--- a/src/Lucene.Net/Util/Packed/AbstractBlockPackedWriter.cs
+++ b/src/Lucene.Net/Util/Packed/AbstractBlockPackedWriter.cs
@@ -37,7 +37,7 @@ namespace Lucene.Net.Util.Packed
 
         // same as DataOutput.WriteVInt64 but accepts negative values
         /// <summary>
-        /// NOTE: This was writeVLong() in Lucene
+        /// NOTE: This was writeVLong() in Lucene.
         /// </summary>
         internal static void WriteVInt64(DataOutput @out, long i)
         {
@@ -59,7 +59,7 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// Sole constructor. </summary>
-        /// <param name="blockSize"> the number of values of a single block, must be a multiple of <tt>64</tt> </param>
+        /// <param name="blockSize"> the number of values of a single block, must be a multiple of <c>64</c>. </param>
         public AbstractBlockPackedWriter(DataOutput @out, int blockSize)
         {
             PackedInt32s.CheckBlockSize(blockSize, MIN_BLOCK_SIZE, MAX_BLOCK_SIZE);
@@ -68,7 +68,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Reset this writer to wrap <code>out</code>. The block size remains unchanged. </summary>
+        /// Reset this writer to wrap <paramref name="out"/>. The block size remains unchanged. </summary>
         public virtual void Reset(DataOutput @out)
         {
             Debug.Assert(@out != null);
@@ -117,9 +117,9 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Flush all buffered data to disk. this instance is not usable anymore
-        ///  after this method has been called until <seealso cref="#reset(DataOutput)"/> has
-        ///  been called.
+        /// Flush all buffered data to disk. This instance is not usable anymore
+        /// after this method has been called until <see cref="Reset(DataOutput)"/> has
+        /// been called.
         /// </summary>
         public virtual void Finish()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/AbstractPagedMutable.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/AbstractPagedMutable.cs b/src/Lucene.Net/Util/Packed/AbstractPagedMutable.cs
index 82bd3ee..93f22a7 100644
--- a/src/Lucene.Net/Util/Packed/AbstractPagedMutable.cs
+++ b/src/Lucene.Net/Util/Packed/AbstractPagedMutable.cs
@@ -21,7 +21,8 @@ namespace Lucene.Net.Util.Packed
      */
 
     /// <summary>
-    /// Base implementation for <seealso cref="PagedMutable"/> and <seealso cref="PagedGrowableWriter"/>.
+    /// Base implementation for <see cref="PagedMutable"/> and <see cref="PagedGrowableWriter"/>.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public abstract class AbstractPagedMutable<T> : Int64Values where T : AbstractPagedMutable<T> // LUCENENET NOTE: made public rather than internal because has public subclasses
@@ -71,6 +72,7 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// The number of values.
+        /// <para/>
         /// NOTE: This was size() in Lucene.
         /// </summary>
         public long Count
@@ -97,7 +99,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Set value at <code>index</code>. </summary>
+        /// Set value at <paramref name="index"/>. </summary>
         public void Set(long index, long value)
         {
             Debug.Assert(index >= 0 && index < size);
@@ -127,9 +129,9 @@ namespace Lucene.Net.Util.Packed
         protected abstract T NewUnfilledCopy(long newSize);
 
         /// <summary>
-        /// Create a new copy of size <code>newSize</code> based on the content of
-        ///  this buffer. this method is much more efficient than creating a new
-        ///  instance and copying values one by one.
+        /// Create a new copy of size <paramref name="newSize"/> based on the content of
+        /// this buffer. This method is much more efficient than creating a new
+        /// instance and copying values one by one.
         /// </summary>
         public T Resize(long newSize)
         {
@@ -151,7 +153,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Similar to <seealso cref="ArrayUtil#grow(long[], int)"/>. </summary>
+        /// Similar to <see cref="ArrayUtil.Grow(long[], int)"/>. </summary>
         public T Grow(long minSize)
         {
             Debug.Assert(minSize >= 0);
@@ -170,7 +172,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Similar to <seealso cref="ArrayUtil#grow(long[])"/>. </summary>
+        /// Similar to <see cref="ArrayUtil.Grow(long[])"/>. </summary>
         public T Grow()
         {
             return Grow(Count + 1);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/AppendingDeltaPackedLongBuffer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/AppendingDeltaPackedLongBuffer.cs b/src/Lucene.Net/Util/Packed/AppendingDeltaPackedLongBuffer.cs
index e6da8cd..aa38829 100644
--- a/src/Lucene.Net/Util/Packed/AppendingDeltaPackedLongBuffer.cs
+++ b/src/Lucene.Net/Util/Packed/AppendingDeltaPackedLongBuffer.cs
@@ -21,12 +21,12 @@ namespace Lucene.Net.Util.Packed
      */
 
     /// <summary>
-    /// Utility class to buffer a list of signed longs in memory. this class only
+    /// Utility class to buffer a list of signed longs in memory. This class only
     /// supports appending and is optimized for the case where values are close to
     /// each other.
     /// <para/>
     /// NOTE: This was AppendingDeltaPackedLongBuffer in Lucene
-    ///
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public sealed class AppendingDeltaPackedInt64Buffer : AbstractAppendingInt64Buffer
@@ -34,10 +34,10 @@ namespace Lucene.Net.Util.Packed
         internal long[] minValues;
 
         /// <summary>
-        /// Create <seealso cref="AppendingDeltaPackedInt64Buffer"/> </summary>
-        /// <param name="initialPageCount">        the initial number of pages </param>
-        /// <param name="pageSize">                the size of a single page </param>
-        /// <param name="acceptableOverheadRatio"> an acceptable overhead ratio per value </param>
+        /// Create <see cref="AppendingDeltaPackedInt64Buffer"/>. </summary>
+        /// <param name="initialPageCount">        The initial number of pages. </param>
+        /// <param name="pageSize">                The size of a single page. </param>
+        /// <param name="acceptableOverheadRatio"> An acceptable overhead ratio per value. </param>
         public AppendingDeltaPackedInt64Buffer(int initialPageCount, int pageSize, float acceptableOverheadRatio)
             : base(initialPageCount, pageSize, acceptableOverheadRatio)
         {
@@ -45,8 +45,8 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Create an <seealso cref="AppendingDeltaPackedInt64Buffer"/> with initialPageCount=16,
-        /// pageSize=1024 and acceptableOverheadRatio=<seealso cref="PackedInt32s#DEFAULT"/>
+        /// Create an <see cref="AppendingDeltaPackedInt64Buffer"/> with initialPageCount=16,
+        /// pageSize=1024 and acceptableOverheadRatio=<see cref="PackedInt32s.DEFAULT"/>.
         /// </summary>
         public AppendingDeltaPackedInt64Buffer()
             : this(16, 1024, PackedInt32s.DEFAULT)
@@ -54,8 +54,8 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Create an <seealso cref="AppendingDeltaPackedInt64Buffer"/> with initialPageCount=16,
-        /// pageSize=1024
+        /// Create an <see cref="AppendingDeltaPackedInt64Buffer"/> with initialPageCount=16,
+        /// pageSize=1024.
         /// </summary>
         public AppendingDeltaPackedInt64Buffer(float acceptableOverheadRatio)
             : this(16, 1024, acceptableOverheadRatio)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/AppendingPackedLongBuffer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/AppendingPackedLongBuffer.cs b/src/Lucene.Net/Util/Packed/AppendingPackedLongBuffer.cs
index 2fa6402..6652d15 100644
--- a/src/Lucene.Net/Util/Packed/AppendingPackedLongBuffer.cs
+++ b/src/Lucene.Net/Util/Packed/AppendingPackedLongBuffer.cs
@@ -20,28 +20,28 @@ namespace Lucene.Net.Util.Packed
      */
 
     /// <summary>
-    /// Utility class to buffer a list of signed longs in memory. this class only
-    /// supports appending and is optimized for non-negative numbers with a uniform distribution over a fixed (limited) range
+    /// Utility class to buffer a list of signed longs in memory. This class only
+    /// supports appending and is optimized for non-negative numbers with a uniform distribution over a fixed (limited) range.
     /// <para/>
     /// NOTE: This was AppendingPackedLongBuffer in Lucene
-    /// 
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public sealed class AppendingPackedInt64Buffer : AbstractAppendingInt64Buffer
     {
         /// <summary>
-        ///<seealso cref="AppendingPackedInt64Buffer"/> </summary>
-        /// <param name="initialPageCount">        the initial number of pages </param>
-        /// <param name="pageSize">                the size of a single page </param>
-        /// <param name="acceptableOverheadRatio"> an acceptable overhead ratio per value </param>
+        /// Initialize an <see cref="AppendingPackedInt64Buffer"/>. </summary>
+        /// <param name="initialPageCount">        The initial number of pages. </param>
+        /// <param name="pageSize">                The size of a single page. </param>
+        /// <param name="acceptableOverheadRatio"> An acceptable overhead ratio per value. </param>
         public AppendingPackedInt64Buffer(int initialPageCount, int pageSize, float acceptableOverheadRatio)
             : base(initialPageCount, pageSize, acceptableOverheadRatio)
         {
         }
 
         /// <summary>
-        /// Create an <seealso cref="AppendingPackedInt64Buffer"/> with initialPageCount=16,
-        /// pageSize=1024 and acceptableOverheadRatio=<seealso cref="PackedInt32s#DEFAULT"/>
+        /// Create an <see cref="AppendingPackedInt64Buffer"/> with initialPageCount=16,
+        /// pageSize=1024 and acceptableOverheadRatio=<see cref="PackedInt32s.DEFAULT"/>.
         /// </summary>
         public AppendingPackedInt64Buffer()
             : this(16, 1024, PackedInt32s.DEFAULT)
@@ -49,8 +49,8 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Create an <seealso cref="AppendingPackedInt64Buffer"/> with initialPageCount=16,
-        /// pageSize=1024
+        /// Create an <see cref="AppendingPackedInt64Buffer"/> with initialPageCount=16,
+        /// pageSize=1024.
         /// </summary>
         public AppendingPackedInt64Buffer(float acceptableOverheadRatio)
             : this(16, 1024, acceptableOverheadRatio)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/BlockPackedReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/BlockPackedReader.cs b/src/Lucene.Net/Util/Packed/BlockPackedReader.cs
index 3e09a94..dfbd515 100644
--- a/src/Lucene.Net/Util/Packed/BlockPackedReader.cs
+++ b/src/Lucene.Net/Util/Packed/BlockPackedReader.cs
@@ -22,7 +22,8 @@ namespace Lucene.Net.Util.Packed
      */
 
     /// <summary>
-    /// Provides random access to a stream written with <seealso cref="BlockPackedWriter"/>.
+    /// Provides random access to a stream written with <see cref="BlockPackedWriter"/>.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public sealed class BlockPackedReader : Int64Values
@@ -89,7 +90,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Returns approximate RAM bytes used </summary>
+        /// Returns approximate RAM bytes used. </summary>
         public long RamBytesUsed()
         {
             long size = 0;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/BlockPackedReaderIterator.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/BlockPackedReaderIterator.cs b/src/Lucene.Net/Util/Packed/BlockPackedReaderIterator.cs
index 83c792b..79ff289 100644
--- a/src/Lucene.Net/Util/Packed/BlockPackedReaderIterator.cs
+++ b/src/Lucene.Net/Util/Packed/BlockPackedReaderIterator.cs
@@ -25,9 +25,11 @@ namespace Lucene.Net.Util.Packed
     using IndexInput = Lucene.Net.Store.IndexInput;
 
     /// <summary>
-    /// Reader for sequences of longs written with <seealso cref="BlockPackedWriter"/>. </summary>
-    /// <seealso cref= BlockPackedWriter
-    /// @lucene.internal </seealso>
+    /// Reader for sequences of <see cref="long"/>s written with <see cref="BlockPackedWriter"/>. 
+    /// <para/>
+    /// @lucene.internal
+    /// </summary>
+    /// <seealso cref="BlockPackedWriter"/>
     public sealed class BlockPackedReaderIterator
     {
         internal static long ZigZagDecode(long n)
@@ -37,7 +39,7 @@ namespace Lucene.Net.Util.Packed
 
         // same as DataInput.ReadVInt64 but supports negative values
         /// <summary>
-        /// NOTE: This was readVLong() in Lucene
+        /// NOTE: This was readVLong() in Lucene.
         /// </summary>
         internal static long ReadVInt64(DataInput @in)
         {
@@ -106,9 +108,9 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// Sole constructor. </summary>
-        /// <param name="blockSize"> the number of values of a block, must be equal to the
-        ///                  block size of the <seealso cref="BlockPackedWriter"/> which has
-        ///                  been used to write the stream </param>
+        /// <param name="blockSize"> The number of values of a block, must be equal to the
+        ///                  block size of the <see cref="BlockPackedWriter"/> which has
+        ///                  been used to write the stream. </param>
         public BlockPackedReaderIterator(DataInput @in, int packedIntsVersion, int blockSize, long valueCount)
         {
             PackedInt32s.CheckBlockSize(blockSize, AbstractBlockPackedWriter.MIN_BLOCK_SIZE, AbstractBlockPackedWriter.MAX_BLOCK_SIZE);
@@ -120,8 +122,8 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Reset the current reader to wrap a stream of <code>valueCount</code>
-        /// values contained in <code>in</code>. The block size remains unchanged.
+        /// Reset the current reader to wrap a stream of <paramref name="valueCount"/>
+        /// values contained in <paramref name="in"/>. The block size remains unchanged.
         /// </summary>
         public void Reset(DataInput @in, long valueCount)
         {
@@ -133,7 +135,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Skip exactly <code>count</code> values. </summary>
+        /// Skip exactly <paramref name="count"/> values. </summary>
         public void Skip(long count)
         {
             Debug.Assert(count >= 0);
@@ -224,7 +226,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Read between <tt>1</tt> and <code>count</code> values. </summary>
+        /// Read between <c>1</c> and <paramref name="count"/> values. </summary>
         public Int64sRef Next(int count)
         {
             Debug.Assert(count > 0);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/BlockPackedWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/BlockPackedWriter.cs b/src/Lucene.Net/Util/Packed/BlockPackedWriter.cs
index 96e4bd8..149c66f 100644
--- a/src/Lucene.Net/Util/Packed/BlockPackedWriter.cs
+++ b/src/Lucene.Net/Util/Packed/BlockPackedWriter.cs
@@ -24,37 +24,39 @@ namespace Lucene.Net.Util.Packed
 
     /// <summary>
     /// A writer for large sequences of longs.
-    /// <p>
+    /// <para/>
     /// The sequence is divided into fixed-size blocks and for each block, the
     /// difference between each value and the minimum value of the block is encoded
     /// using as few bits as possible. Memory usage of this class is proportional to
     /// the block size. Each block has an overhead between 1 and 10 bytes to store
     /// the minimum value and the number of bits per value of the block.
-    /// <p>
+    /// <para/>
     /// Format:
-    /// <ul>
-    /// <li>&lt;BLock&gt;<sup>BlockCount</sup>
-    /// <li>BlockCount: &lceil; ValueCount / BlockSize &rceil;
-    /// <li>Block: &lt;Header, (Ints)&gt;
-    /// <li>Header: &lt;Token, (MinValue)&gt;
-    /// <li>Token: a <seealso cref="DataOutput#writeByte(byte) byte"/>, first 7 bits are the
-    ///     number of bits per value (<tt>bitsPerValue</tt>). If the 8th bit is 1,
-    ///     then MinValue (see next) is <tt>0</tt>, otherwise MinValue and needs to
-    ///     be decoded
-    /// <li>MinValue: a
+    /// <list type="bullet">
+    /// <item><description>&lt;Block&gt;<sup>BlockCount</sup></description></item>
+    /// <item><description>BlockCount: &#8968; ValueCount / BlockSize &#8969;</description></item>
+    /// <item><description>Block: &lt;Header, (Ints)&gt;</description></item>
+    /// <item><description>Header: &lt;Token, (MinValue)&gt;</description></item>
+    /// <item><description>Token: a byte (<see cref="DataOutput.WriteByte(byte)"/>), first 7 bits are the
+    ///     number of bits per value (<c>bitsPerValue</c>). If the 8th bit is 1,
+    ///     then MinValue (see next) is <c>0</c>, otherwise MinValue and needs to
+    ///     be decoded</description></item>
+    /// <item><description>MinValue: a
     ///     <a href="https://developers.google.com/protocol-buffers/docs/encoding#types">zigzag-encoded</a>
-    ///     <seealso cref="DataOutput#writeVLong(long) variable-length long"/> whose value
+    ///      variable-length <see cref="long"/> (<see cref="DataOutput.WriteVInt64(long)"/>) whose value
     ///     should be added to every int from the block to restore the original
-    ///     values
-    /// <li>Ints: If the number of bits per value is <tt>0</tt>, then there is
+    ///     values</description></item>
+    /// <item><description>Ints: If the number of bits per value is <c>0</c>, then there is
     ///     nothing to decode and all ints are equal to MinValue. Otherwise: BlockSize
-    ///     <seealso cref="PackedInt32s packed ints"/> encoded on exactly <tt>bitsPerValue</tt>
+    ///     packed ints (<see cref="PackedInt32s"/>) encoded on exactly <c>bitsPerValue</c>
     ///     bits per value. They are the subtraction of the original values and
-    ///     MinValue
-    /// </ul> </summary>
-    /// <seealso cref= BlockPackedReaderIterator </seealso>
-    /// <seealso cref= BlockPackedReader
-    /// @lucene.internal </seealso>
+    ///     MinValue</description></item>
+    /// </list>
+    /// <para/>
+    /// @lucene.internal
+    /// </summary>
+    /// <seealso cref="BlockPackedReaderIterator"/>
+    /// <seealso cref="BlockPackedReader"/>
     public sealed class BlockPackedWriter : AbstractBlockPackedWriter
     {
         /// <summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/BulkOperation.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/BulkOperation.cs b/src/Lucene.Net/Util/Packed/BulkOperation.cs
index 8ede6e2..df2ee00 100644
--- a/src/Lucene.Net/Util/Packed/BulkOperation.cs
+++ b/src/Lucene.Net/Util/Packed/BulkOperation.cs
@@ -48,12 +48,12 @@ namespace Lucene.Net.Util.Packed
         public abstract int ByteBlockCount { get; }
 
         /// <summary>
-        /// NOTE: This was longValueCount() in Lucene
+        /// NOTE: This was longValueCount() in Lucene.
         /// </summary>
         public abstract int Int64ValueCount { get; }
 
         /// <summary>
-        /// NOTE: This was longBlockCount() in Lucene
+        /// NOTE: This was longBlockCount() in Lucene.
         /// </summary>
         public abstract int Int64BlockCount { get; }
 
@@ -179,7 +179,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// NOTE: This was writeLong() in Lucene
+        /// NOTE: This was writeLong() in Lucene.
         /// </summary>
         protected virtual int WriteInt64(long block, byte[] blocks, int blocksOffset)
         {
@@ -199,14 +199,14 @@ namespace Lucene.Net.Util.Packed
         ///  - 50 bits per value -> b=25, v=4
         ///  - 63 bits per value -> b=63, v=8
         ///  - ...
-        ///
-        /// A bulk read consists in copying <code>iterations*v</code> values that are
-        /// contained in <code>iterations*b</code> blocks into a <code>long[]</code>
-        /// (higher values of <code>iterations</code> are likely to yield a better
+        /// <para/>
+        /// A bulk read consists of copying <c>iterations*v</c> values that are
+        /// contained in <c>iterations*b</c> blocks into a <c>long[]</c>
+        /// (higher values of <c>iterations</c> are likely to yield a better
         /// throughput) => this requires n * (b + 8v) bytes of memory.
-        ///
-        /// this method computes <code>iterations</code> as
-        /// <code>ramBudget / (b + 8v)</code> (since a long is 8 bytes).
+        /// <para/>
+        /// This method computes <c>iterations</c> as
+        /// <c>ramBudget / (b + 8v)</c> (since a long is 8 bytes).
         /// </summary>
         public int ComputeIterations(int valueCount, int ramBudget)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/BulkOperationPacked.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/BulkOperationPacked.cs b/src/Lucene.Net/Util/Packed/BulkOperationPacked.cs
index 32330fe..dcf36b3 100644
--- a/src/Lucene.Net/Util/Packed/BulkOperationPacked.cs
+++ b/src/Lucene.Net/Util/Packed/BulkOperationPacked.cs
@@ -20,7 +20,7 @@ namespace Lucene.Net.Util.Packed
      */
 
     /// <summary>
-    /// Non-specialized <seealso cref="BulkOperation"/> for <seealso cref="PackedInt32s.Format#PACKED"/>.
+    /// Non-specialized <see cref="BulkOperation"/> for <see cref="PackedInt32s.Format.PACKED"/>.
     /// </summary>
     internal class BulkOperationPacked : BulkOperation
     {
@@ -65,7 +65,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// NOTE: This was longBlockCount() in Lucene
+        /// NOTE: This was longBlockCount() in Lucene.
         /// </summary>
         public override int Int64BlockCount
         {
@@ -73,7 +73,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// NOTE: This was longValueCount() in Lucene
+        /// NOTE: This was longValueCount() in Lucene.
         /// </summary>
         public override int Int64ValueCount
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/BulkOperationPackedSingleBlock.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/BulkOperationPackedSingleBlock.cs b/src/Lucene.Net/Util/Packed/BulkOperationPackedSingleBlock.cs
index 976784c..c5cf689 100644
--- a/src/Lucene.Net/Util/Packed/BulkOperationPackedSingleBlock.cs
+++ b/src/Lucene.Net/Util/Packed/BulkOperationPackedSingleBlock.cs
@@ -18,7 +18,7 @@ namespace Lucene.Net.Util.Packed
      */
 
     /// <summary>
-    /// Non-specialized <seealso cref="BulkOperation"/> for <seealso cref="PackedInt32s.Format#PACKED_SINGLE_BLOCK"/>.
+    /// Non-specialized <see cref="BulkOperation"/> for <see cref="PackedInt32s.Format.PACKED_SINGLE_BLOCK"/>.
     /// </summary>
     internal sealed class BulkOperationPackedSingleBlock : BulkOperation
     {
@@ -36,7 +36,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// NOTE: This was longBlockCount() in Lucene
+        /// NOTE: This was longBlockCount() in Lucene.
         /// </summary>
         public override sealed int Int64BlockCount
         {
@@ -49,7 +49,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// NOTE: This was longValueCount() in Lucene
+        /// NOTE: This was longValueCount() in Lucene.
         /// </summary>
         public override int Int64ValueCount
         {
@@ -62,7 +62,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// NOTE: This was readLong() in Lucene
+        /// NOTE: This was readLong() in Lucene.
         /// </summary>
         private static long ReadInt64(byte[] blocks, int blocksOffset)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/Direct16.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/Direct16.cs b/src/Lucene.Net/Util/Packed/Direct16.cs
index a6088ac..42df7e3 100644
--- a/src/Lucene.Net/Util/Packed/Direct16.cs
+++ b/src/Lucene.Net/Util/Packed/Direct16.cs
@@ -27,6 +27,7 @@ namespace Lucene.Net.Util.Packed
 
     /// <summary>
     /// Direct wrapping of 16-bits values to a backing array.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     internal sealed class Direct16 : PackedInt32s.MutableImpl

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/Direct32.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/Direct32.cs b/src/Lucene.Net/Util/Packed/Direct32.cs
index 6ecb585..4c7da41 100644
--- a/src/Lucene.Net/Util/Packed/Direct32.cs
+++ b/src/Lucene.Net/Util/Packed/Direct32.cs
@@ -27,6 +27,7 @@ namespace Lucene.Net.Util.Packed
 
     /// <summary>
     /// Direct wrapping of 32-bits values to a backing array.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     internal sealed class Direct32 : PackedInt32s.MutableImpl

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/Direct64.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/Direct64.cs b/src/Lucene.Net/Util/Packed/Direct64.cs
index b48c8d7..f4b0b2f 100644
--- a/src/Lucene.Net/Util/Packed/Direct64.cs
+++ b/src/Lucene.Net/Util/Packed/Direct64.cs
@@ -27,6 +27,7 @@ namespace Lucene.Net.Util.Packed
 
     /// <summary>
     /// Direct wrapping of 64-bits values to a backing array.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     internal sealed class Direct64 : PackedInt32s.MutableImpl

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/Direct8.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/Direct8.cs b/src/Lucene.Net/Util/Packed/Direct8.cs
index d2345dc..903ac4e 100644
--- a/src/Lucene.Net/Util/Packed/Direct8.cs
+++ b/src/Lucene.Net/Util/Packed/Direct8.cs
@@ -27,6 +27,7 @@ namespace Lucene.Net.Util.Packed
 
     /// <summary>
     /// Direct wrapping of 8-bits values to a backing array.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     internal sealed class Direct8 : PackedInt32s.MutableImpl

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/EliasFanoDecoder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/EliasFanoDecoder.cs b/src/Lucene.Net/Util/Packed/EliasFanoDecoder.cs
index 0e62206..83bbe9d 100644
--- a/src/Lucene.Net/Util/Packed/EliasFanoDecoder.cs
+++ b/src/Lucene.Net/Util/Packed/EliasFanoDecoder.cs
@@ -22,13 +22,14 @@ namespace Lucene.Net.Util.Packed
      */
 
     /// <summary>
-    /// A decoder for an <seealso cref="EliasFanoEncoder"/>.
+    /// A decoder for an <see cref="Packed.EliasFanoEncoder"/>.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public class EliasFanoDecoder
     {
         /// <summary>
-        /// NOTE: This was LOG2_LONG_SIZE in Lucene
+        /// NOTE: This was LOG2_LONG_SIZE in Lucene.
         /// </summary>
         private static readonly int LOG2_INT64_SIZE = Number.NumberOfTrailingZeros((sizeof(long) * 8));
 
@@ -43,7 +44,7 @@ namespace Lucene.Net.Util.Packed
         private readonly long indexMask;
 
         /// <summary>
-        /// Construct a decoder for a given <seealso cref="EliasFanoEncoder"/>.
+        /// Construct a decoder for a given <see cref="Packed.EliasFanoEncoder"/>.
         /// The decoding index is set to just before the first encoded value.
         /// </summary>
         public EliasFanoDecoder(EliasFanoEncoder efEncoder)
@@ -73,13 +74,13 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// The current decoding index.
-        /// The first value encoded by <seealso cref="EliasFanoEncoder#encodeNext"/> has index 0.
+        /// The first value encoded by <see cref="EliasFanoEncoder.EncodeNext(long)"/> has index 0.
         /// Only valid directly after
-        /// <seealso cref="#nextValue"/>, <seealso cref="#advanceToValue"/>,
-        /// <seealso cref="#previousValue"/>, or <seealso cref="#backToValue"/>
-        /// returned another value than <seealso cref="#NO_MORE_VALUES"/>,
-        /// or <seealso cref="#advanceToIndex"/> returned true. </summary>
-        /// <returns> The decoding index of the last decoded value, or as last set by <seealso cref="#advanceToIndex"/>. </returns>
+        /// <see cref="NextValue()"/>, <see cref="AdvanceToValue(long)"/>,
+        /// <see cref="PreviousValue()"/>, or <see cref="BackToValue(long)"/>
+        /// returned another value than <see cref="NO_MORE_VALUES"/>,
+        /// or <see cref="AdvanceToIndex(long)"/> returned <c>true</c>. </summary>
+        /// <returns> The decoding index of the last decoded value, or as last set by <see cref="AdvanceToIndex(long)"/>. </returns>
         public virtual long CurrentIndex()
         {
             if (efIndex < 0)
@@ -95,9 +96,10 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// The value at the current decoding index.
-        /// Only valid when <seealso cref="#currentIndex"/> would return a valid result.
-        /// <br>this is only intended for use after <seealso cref="#advanceToIndex"/> returned true. </summary>
-        /// <returns> The value encoded at <seealso cref="#currentIndex"/>. </returns>
+        /// Only valid when <see cref="CurrentIndex()"/> would return a valid result.
+        /// <para/>
+        /// This is only intended for use after <see cref="AdvanceToIndex(long)"/> returned <c>true</c>. </summary>
+        /// <returns> The value encoded at <see cref="CurrentIndex()"/>. </returns>
         public virtual long CurrentValue()
         {
             return CombineHighLowValues(CurrentHighValue(), CurrentLowValue());
@@ -110,7 +112,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// See also <seealso cref="EliasFanoEncoder#packValue"/> </summary>
+        /// See also <see cref="EliasFanoEncoder.PackValue(long, long[], int, long)"/> </summary>
         private static long UnPackValue(long[] longArray, int numBits, long packIndex, long bitsMask)
         {
             if (numBits == 0)
@@ -136,8 +138,8 @@ namespace Lucene.Net.Util.Packed
             return UnPackValue(efEncoder.lowerLongs, efEncoder.numLowBits, efIndex, efEncoder.lowerBitsMask);
         }
 
-        ///  <returns> The given highValue shifted left by the number of low bits from by the EliasFanoSequence,
-        ///           logically OR-ed with the given lowValue. </returns>
+        ///  <returns> The given <paramref name="highValue"/> shifted left by the number of low bits from the EliasFanoSequence,
+        ///           logically OR-ed with the given <paramref name="lowValue"/>. </returns>
         private long CombineHighLowValues(long highValue, long lowValue)
         {
             return (highValue << efEncoder.numLowBits) | lowValue;
@@ -168,7 +170,7 @@ namespace Lucene.Net.Util.Packed
             setBitForIndex = -1;
         }
 
-        /// <returns> the number of bits in a long after (setBitForIndex modulo Long.SIZE) </returns>
+        /// <returns> The number of bits in a <see cref="long"/> after (<see cref="setBitForIndex"/> modulo <c>sizeof(long)</c>). </returns>
         private int CurrentRightShift
         {
             get
@@ -179,9 +181,9 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Increment efIndex and setBitForIndex and
-        /// shift curHighLong so that it does not contain the high bits before setBitForIndex. </summary>
-        /// <returns> true iff efIndex still smaller than numEncoded. </returns>
+        /// Increment <see cref="efIndex"/> and <see cref="setBitForIndex"/> and
+        /// shift <see cref="curHighLong"/> so that it does not contain the high bits before <see cref="setBitForIndex"/>. </summary>
+        /// <returns> <c>true</c> if <see cref="efIndex"/> is still smaller than <see cref="numEncoded"/>. </returns>
         private bool ToAfterCurrentHighBit()
         {
             efIndex += 1;
@@ -197,9 +199,9 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// The current high long has been determined to not contain the set bit that is needed.
-        /// Increment setBitForIndex to the next high long and set curHighLong accordingly.
+        /// Increment <see cref="setBitForIndex"/> to the next high long and set <see cref="curHighLong"/> accordingly.
         /// <para/>
-        /// NOTE: this was toNextHighLong() in Lucene
+        /// NOTE: This was toNextHighLong() in Lucene.
         /// </summary>
         private void ToNextHighInt64()
         {
@@ -210,8 +212,8 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// setBitForIndex and efIndex have just been incremented, scan to the next high set bit
-        ///  by incrementing setBitForIndex, and by setting curHighLong accordingly.
+        /// <see cref="setBitForIndex"/> and <see cref="efIndex"/> have just been incremented, scan to the next high set bit
+        /// by incrementing <see cref="setBitForIndex"/>, and by setting <see cref="curHighLong"/> accordingly.
         /// </summary>
         private void ToNextHighValue()
         {
@@ -223,9 +225,9 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// setBitForIndex and efIndex have just been incremented, scan to the next high set bit
-        ///  by incrementing setBitForIndex, and by setting curHighLong accordingly. </summary>
-        ///  <returns> the next encoded high value. </returns>
+        /// <see cref="setBitForIndex"/> and <see cref="efIndex"/> have just been incremented, scan to the next high set bit
+        /// by incrementing <see cref="setBitForIndex"/>, and by setting <see cref="curHighLong"/> accordingly. </summary>
+        /// <returns> The next encoded high value. </returns>
         private long NextHighValue()
         {
             ToNextHighValue();
@@ -234,7 +236,7 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// If another value is available after the current decoding index, return this value and
-        /// and increase the decoding index by 1. Otherwise return <seealso cref="#NO_MORE_VALUES"/>.
+        /// and increase the decoding index by 1. Otherwise return <see cref="NO_MORE_VALUES"/>.
         /// </summary>
         public virtual long NextValue()
         {
@@ -247,11 +249,11 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Advance the decoding index to a given index.
-        /// and return <code>true</code> iff it is available.
-        /// <br>See also <seealso cref="#currentValue"/>.
-        /// <br>The current implementation does not use the index on the upper bit zero bit positions.
-        /// <br>Note: there is currently no implementation of <code>backToIndex</code>.
+        /// Advance the decoding index to a given <paramref name="index"/>
+        /// and return <c>true</c> iff it is available.
+        /// <para/>See also <see cref="CurrentValue()"/>.
+        /// <para/>The current implementation does not use the index on the upper bit zero bit positions.
+        /// <para/>Note: there is currently no implementation of <c>BackToIndex()</c>.
         /// </summary>
         public virtual bool AdvanceToIndex(long index)
         {
@@ -289,9 +291,10 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Given a target value, advance the decoding index to the first bigger or equal value
-        /// and return it if it is available. Otherwise return <seealso cref="#NO_MORE_VALUES"/>.
-        /// <br>The current implementation uses the index on the upper zero bit positions.
+        /// Given a <paramref name="target"/> value, advance the decoding index to the first bigger or equal value
+        /// and return it if it is available. Otherwise return <see cref="NO_MORE_VALUES"/>.
+        /// <para/>
+        /// The current implementation uses the index on the upper zero bit positions.
         /// </summary>
         public virtual long AdvanceToValue(long target)
         {
@@ -425,7 +428,7 @@ namespace Lucene.Net.Util.Packed
             setBitForIndex = ((long)((ulong)efEncoder.lastEncoded >> efEncoder.numLowBits)) + numEncoded;
         }
 
-        /// <returns> the number of bits in a long before (setBitForIndex modulo Long.SIZE) </returns>
+        /// <returns> the number of bits in a long before (<see cref="setBitForIndex"/> modulo <c>sizeof(long) * 8</c>) </returns>
         private int CurrentLeftShift
         {
             get
@@ -436,9 +439,9 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Decrement efindex and setBitForIndex and
-        /// shift curHighLong so that it does not contain the high bits after setBitForIndex. </summary>
-        /// <returns> true iff efindex still >= 0 </returns>
+        /// Decrement <see cref="efIndex"/> and <see cref="setBitForIndex"/> and
+        /// shift <see cref="curHighLong"/> so that it does not contain the high bits after <see cref="setBitForIndex"/>. </summary>
+        /// <returns> <c>true</c> if <see cref="efIndex"/> still >= 0. </returns>
         private bool ToBeforeCurrentHighBit()
         {
             efIndex -= 1;
@@ -454,9 +457,9 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// The current high long has been determined to not contain the set bit that is needed.
-        /// Decrement setBitForIndex to the previous high long and set curHighLong accordingly.
+        /// Decrement <see cref="setBitForIndex"/> to the previous high long and set <see cref="curHighLong"/> accordingly.
         /// <para/>
-        /// NOTE: this was toPreviousHighLong() in Lucene
+        /// NOTE: this was toPreviousHighLong() in Lucene.
         /// </summary>
         private void ToPreviousHighInt64()
         {
@@ -467,9 +470,9 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// setBitForIndex and efIndex have just been decremented, scan to the previous high set bit
-        ///  by decrementing setBitForIndex and by setting curHighLong accordingly. </summary>
-        ///  <returns> the previous encoded high value. </returns>
+        /// <see cref="setBitForIndex"/> and <see cref="efIndex"/> have just been decremented, scan to the previous high set bit
+        /// by decrementing <see cref="setBitForIndex"/> and by setting <see cref="curHighLong"/> accordingly. </summary>
+        /// <returns> The previous encoded high value. </returns>
         private long PreviousHighValue()
         {
             while (curHighLong == 0L)
@@ -482,7 +485,7 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// If another value is available before the current decoding index, return this value
-        /// and decrease the decoding index by 1. Otherwise return <seealso cref="#NO_MORE_VALUES"/>.
+        /// and decrease the decoding index by 1. Otherwise return <see cref="NO_MORE_VALUES"/>.
         /// </summary>
         public virtual long PreviousValue()
         {
@@ -495,11 +498,13 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// setBitForIndex and efIndex have just been decremented, scan backward to the high set bit
-        ///  of at most a given high value
-        ///  by decrementing setBitForIndex and by setting curHighLong accordingly.
-        /// <br>The current implementation does not use the index on the upper zero bit positions. </summary>
-        ///  <returns> the largest encoded high value that is at most the given one. </returns>
+        /// <see cref="setBitForIndex"/> and <see cref="efIndex"/> have just been decremented, scan backward to the high set bit
+        /// of at most a given high value
+        /// by decrementing <see cref="setBitForIndex"/> and by setting <see cref="curHighLong"/> accordingly.
+        /// <para/>
+        /// The current implementation does not use the index on the upper zero bit positions. 
+        /// </summary>
+        /// <returns> The largest encoded high value that is at most the given one. </returns>
         private long BackToHighValue(long highTarget)
         {
             /* CHECKME: Add using the index as in advanceToHighValue */
@@ -534,8 +539,9 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// Given a target value, go back to the first smaller or equal value
-        /// and return it if it is available. Otherwise return <seealso cref="#NO_MORE_VALUES"/>.
-        /// <br>The current implementation does not use the index on the upper zero bit positions.
+        /// and return it if it is available. Otherwise return <see cref="NO_MORE_VALUES"/>.
+        /// <para/>
+        /// The current implementation does not use the index on the upper zero bit positions.
         /// </summary>
         public virtual long BackToValue(long target)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/EliasFanoDocIdSet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/EliasFanoDocIdSet.cs b/src/Lucene.Net/Util/Packed/EliasFanoDocIdSet.cs
index 05e93e7..9f6700f 100644
--- a/src/Lucene.Net/Util/Packed/EliasFanoDocIdSet.cs
+++ b/src/Lucene.Net/Util/Packed/EliasFanoDocIdSet.cs
@@ -22,6 +22,7 @@ namespace Lucene.Net.Util.Packed
 
     /// <summary>
     /// A DocIdSet in Elias-Fano encoding.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public class EliasFanoDocIdSet : DocIdSet
@@ -38,11 +39,11 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Provide an indication that is better to use an <seealso cref="EliasFanoDocIdSet"/> than a <seealso cref="FixedBitSet"/>
-        ///  to encode document identifiers. </summary>
-        ///  <param name="numValues"> The number of document identifiers that is to be encoded. Should be non negative. </param>
-        ///  <param name="upperBound"> The maximum possible value for a document identifier. Should be at least <code>numValues</code>. </param>
-        ///  <returns> See <seealso cref="EliasFanoEncoder#sufficientlySmallerThanBitSet(long, long)"/> </returns>
+        /// Provide an indication that is better to use an <see cref="EliasFanoDocIdSet"/> than a <see cref="FixedBitSet"/>
+        /// to encode document identifiers. </summary>
+        /// <param name="numValues"> The number of document identifiers that is to be encoded. Should be non negative. </param>
+        /// <param name="upperBound"> The maximum possible value for a document identifier. Should be at least <paramref name="numValues"/>. </param>
+        /// <returns> See <see cref="EliasFanoEncoder.SufficientlySmallerThanBitSet(long, long)"/> </returns>
         public static bool SufficientlySmallerThanBitSet(long numValues, long upperBound)
         {
             return EliasFanoEncoder.SufficientlySmallerThanBitSet(numValues, upperBound);
@@ -50,8 +51,8 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// Encode the document ids from a DocIdSetIterator. </summary>
-        ///  <param name="disi"> this DocIdSetIterator should provide document ids that are consistent
-        ///              with <code>numValues</code> and <code>upperBound</code> as provided to the constructor.   </param>
+        /// <param name="disi"> This DocIdSetIterator should provide document ids that are consistent
+        ///              with <c>numValues</c> and <c>upperBound</c> as provided to the constructor.   </param>
         public virtual void EncodeFromDisi(DocIdSetIterator disi)
         {
             while (efEncoder.numEncoded < efEncoder.numValues)
@@ -66,7 +67,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Provides a <seealso cref="DocIdSetIterator"/> to access encoded document ids.
+        /// Provides a <see cref="DocIdSetIterator"/> to access encoded document ids.
         /// </summary>
         public override DocIdSetIterator GetIterator()
         {
@@ -119,8 +120,8 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// this DocIdSet implementation is cacheable. </summary>
-        /// <returns> <code>true</code> </returns>
+        /// This DocIdSet implementation is cacheable. </summary>
+        /// <returns> <c>true</c> </returns>
         public override bool IsCacheable
         {
             get

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/EliasFanoEncoder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/EliasFanoEncoder.cs b/src/Lucene.Net/Util/Packed/EliasFanoEncoder.cs
index 1ef3f3f..ec17dd5 100644
--- a/src/Lucene.Net/Util/Packed/EliasFanoEncoder.cs
+++ b/src/Lucene.Net/Util/Packed/EliasFanoEncoder.cs
@@ -26,63 +26,64 @@ namespace Lucene.Net.Util.Packed
     /// <summary>
     /// Encode a non decreasing sequence of non negative whole numbers in the Elias-Fano encoding
     /// that was introduced in the 1970's by Peter Elias and Robert Fano.
-    /// <p>
+    /// <para/>
     /// The Elias-Fano encoding is a high bits / low bits representation of
-    /// a monotonically increasing sequence of <code>numValues > 0</code> natural numbers <code>x[i]</code>
-    /// <p>
-    /// <code>0 <= x[0] <= x[1] <= ... <= x[numValues-2] <= x[numValues-1] <= upperBound</code>
-    /// <p>
-    /// where <code>upperBound > 0</code> is an upper bound on the last value.
-    /// <br>
+    /// a monotonically increasing sequence of <c>numValues > 0</c> natural numbers <c>x[i]</c>
+    /// <para/>
+    /// <c>0 &lt;= x[0] &lt;= x[1] &lt;= ... &lt;= x[numValues-2] &lt;= x[numValues-1] &lt;= upperBound</c>
+    /// <para/>
+    /// where <c>upperBound > 0</c> is an upper bound on the last value.
+    /// <para/>
     /// The Elias-Fano encoding uses less than half a bit per encoded number more
     /// than the smallest representation
     /// that can encode any monotone sequence with the same bounds.
-    /// <p>
-    /// The lower <code>L</code> bits of each <code>x[i]</code> are stored explicitly and contiguously
-    /// in the lower-bits array, with <code>L</code> chosen as (<code>log()</code> base 2):
-    /// <p>
-    /// <code>L = max(0, floor(log(upperBound/numValues)))</code>
-    /// <p>
-    /// The upper bits are stored in the upper-bits array as a sequence of unary-coded gaps (<code>x[-1] = 0</code>):
-    /// <p>
-    /// <code>(x[i]/2**L) - (x[i-1]/2**L)</code>
-    /// <p>
-    /// The unary code encodes a natural number <code>n</code> by <code>n</code> 0 bits followed by a 1 bit:
-    /// <code>0...01</code>. <br>
-    /// In the upper bits the total the number of 1 bits is <code>numValues</code>
-    /// and the total number of 0 bits is:<p>
-    /// <code>floor(x[numValues-1]/2**L) <= upperBound/(2**max(0, floor(log(upperBound/numValues)))) <= 2*numValues</code>
-    /// <p>
+    /// <para/>
+    /// The lower <c>L</c> bits of each <c>x[i]</c> are stored explicitly and contiguously
+    /// in the lower-bits array, with <c>L</c> chosen as (<c>Log()</c> base 2):
+    /// <para/>
+    /// <c>L = max(0, floor(log(upperBound/numValues)))</c>
+    /// <para/>
+    /// The upper bits are stored in the upper-bits array as a sequence of unary-coded gaps (<c>x[-1] = 0</c>):
+    /// <para/>
+    /// <c>(x[i]/2**L) - (x[i-1]/2**L)</c>
+    /// <para/>
+    /// The unary code encodes a natural number <c>n</c> by <c>n</c> 0 bits followed by a 1 bit:
+    /// <c>0...01</c>. 
+    /// <para/>
+    /// In the upper bits the total number of 1 bits is <c>numValues</c>
+    /// and the total number of 0 bits is:
+    /// <para/>
+    /// <c>floor(x[numValues-1]/2**L) &lt;= upperBound/(2**max(0, floor(log(upperBound/numValues)))) &lt;= 2*numValues</c>
+    /// <para/>
     /// The Elias-Fano encoding uses at most
-    /// <p>
-    /// <code>2 + ceil(log(upperBound/numValues))</code>
-    /// <p>
-    /// bits per encoded number. With <code>upperBound</code> in these bounds (<code>p</code> is an integer):
-    /// <p>
-    /// <code>2**p < x[numValues-1] <= upperBound <= 2**(p+1)</code>
-    /// <p>
+    /// <para/>
+    /// <c>2 + Ceil(Log(upperBound/numValues))</c>
+    /// <para/>
+    /// bits per encoded number. With <c>upperBound</c> in these bounds (<c>p</c> is an integer):
+    /// <para/>
+    /// <c>2**p &lt; x[numValues-1] &lt;= upperBound &lt;= 2**(p+1)</c>
+    /// <para/>
     /// the number of bits per encoded number is minimized.
-    /// <p>
-    /// In this implementation the values in the sequence can be given as <code>long</code>,
-    /// <code>numValues = 0</code> and <code>upperBound = 0</code> are allowed,
-    /// and each of the upper and lower bit arrays should fit in a <code>long[]</code>.
-    /// <br>
+    /// <para/>
+    /// In this implementation the values in the sequence can be given as <c>long</c>,
+    /// <c>numValues = 0</c> and <c>upperBound = 0</c> are allowed,
+    /// and each of the upper and lower bit arrays should fit in a <c>long[]</c>.
+    /// <para/>
     /// An index of positions of zero's in the upper bits is also built.
-    /// <p>
+    /// <para/>
    /// This implementation is based on this article:
-    /// <br>
+    /// <para/>
     /// Sebastiano Vigna, "Quasi Succinct Indices", June 19, 2012, sections 3, 4 and 9.
     /// Retrieved from http://arxiv.org/pdf/1206.4300 .
     ///
-    /// <p>The articles originally describing the Elias-Fano representation are:
-    /// <br>Peter Elias, "Efficient storage and retrieval by content and address of static files",
+    /// <para/>The articles originally describing the Elias-Fano representation are:
+    /// <para/>Peter Elias, "Efficient storage and retrieval by content and address of static files",
    /// J. Assoc. Comput. Mach., 21(2):246&#8211;260, 1974.
-    /// <br>Robert M. Fano, "On the number of bits required to implement an associative memory",
+    /// <para/>Robert M. Fano, "On the number of bits required to implement an associative memory",
     ///  Memorandum 61, Computer Structures Group, Project MAC, MIT, Cambridge, Mass., 1971.
-    ///
+    /// <para/>
     /// @lucene.internal
     /// </summary>
-
     public class EliasFanoEncoder
     {
         internal readonly long numValues;
@@ -93,7 +94,7 @@ namespace Lucene.Net.Util.Packed
         internal readonly long[] lowerLongs;
 
         /// <summary>
-        /// NOTE: This was LOG2_LONG_SIZE in Lucene
+        /// NOTE: This was LOG2_LONG_SIZE in Lucene.
         /// </summary>
         private static readonly int LOG2_INT64_SIZE = Number.NumberOfTrailingZeros(sizeof(long) * 8);
 
@@ -110,7 +111,7 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// upperZeroBitPositionIndex[i] (filled using packValue) will contain the bit position
-        ///  just after the zero bit ((i+1) * indexInterval) in the upper bits.
+        /// just after the zero bit ((i+1) * indexInterval) in the upper bits.
         /// </summary>
         internal readonly long[] upperZeroBitPositionIndex;
 
@@ -118,30 +119,31 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// Construct an Elias-Fano encoder.
-        /// After construction, call <seealso cref="#encodeNext"/> <code>numValues</code> times to encode
-        /// a non decreasing sequence of non negative numbers. </summary>
+        /// After construction, call <see cref="EncodeNext(long)"/> <paramref name="numValues"/> times to encode
+        /// a non decreasing sequence of non negative numbers. 
+        /// </summary>
         /// <param name="numValues"> The number of values that is to be encoded. </param>
         /// <param name="upperBound">  At least the highest value that will be encoded.
         ///                For space efficiency this should not exceed the power of two that equals
         ///                or is the first higher than the actual maximum.
-        ///                <br>When <code>numValues >= (upperBound/3)</code>
-        ///                a <seealso cref="FixedBitSet"/> will take less space. </param>
+        ///                <para/>When <c>numValues >= (upperBound/3)</c>
+        ///                a <see cref="FixedBitSet"/> will take less space. </param>
         /// <param name="indexInterval"> The number of high zero bits for which a single index entry is built.
-        ///                The index will have at most <code>2 * numValues / indexInterval</code> entries
-        ///                and each index entry will use at most <code>ceil(log2(3 * numValues))</code> bits,
-        ///                see <seealso cref="EliasFanoEncoder"/>. </param>
-        /// <exception cref="IllegalArgumentException"> when:
-        ///         <ul>
-        ///         <li><code>numValues</code> is negative, or
-        ///         <li><code>numValues</code> is non negative and <code>upperBound</code> is negative, or
-        ///         <li>the low bits do not fit in a <code>long[]</code>:
-        ///             <code>(L * numValues / 64) > System.Int32.MaxValue</code>, or
-        ///         <li>the high bits do not fit in a <code>long[]</code>:
-        ///             <code>(2 * numValues / 64) > System.Int32.MaxValue</code>, or
-        ///         <li><code>indexInterval < 2</code>,
-        ///         <li>the index bits do not fit in a <code>long[]</code>:
-        ///             <code>(numValues / indexInterval * ceil(2log(3 * numValues)) / 64) > System.Int32.MaxValue</code>.
-        ///         </ul> </exception>
+        ///                The index will have at most <c>2 * numValues / indexInterval</c> entries
+        ///                and each index entry will use at most <c>Ceil(Log2(3 * numValues))</c> bits,
+        ///                see <see cref="EliasFanoEncoder"/>. </param>
+        /// <exception cref="ArgumentException"> when:
+        ///         <list type="bullet">
+        ///         <item><description><paramref name="numValues"/> is negative, or</description></item>
+        ///         <item><description><paramref name="numValues"/> is non negative and <paramref name="upperBound"/> is negative, or</description></item>
+        ///         <item><description>the low bits do not fit in a <c>long[]</c>:
+        ///             <c>(L * numValues / 64) > System.Int32.MaxValue</c>, or</description></item>
+        ///         <item><description>the high bits do not fit in a <c>long[]</c>:
+        ///             <c>(2 * numValues / 64) > System.Int32.MaxValue</c>, or</description></item>
+        ///         <item><description><c>indexInterval &lt; 2</c>,</description></item>
+        ///         <item><description>the index bits do not fit in a <c>long[]</c>:
+        ///             <c>(numValues / indexInterval * ceil(2log(3 * numValues)) / 64) > System.Int32.MaxValue</c>.</description></item>
+        ///         </list> </exception>
         public EliasFanoEncoder(long numValues, long upperBound, long indexInterval)
         {
             if (numValues < 0L)
@@ -204,7 +206,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Construct an Elias-Fano encoder using <seealso cref="#DEFAULT_INDEX_INTERVAL"/>.
+        /// Construct an Elias-Fano encoder using <see cref="DEFAULT_INDEX_INTERVAL"/>.
         /// </summary>
         public EliasFanoEncoder(long numValues, long upperBound)
             : this(numValues, upperBound, DEFAULT_INDEX_INTERVAL)
@@ -212,7 +214,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// NOTE: This was numLongsForBits() in Lucene
+        /// NOTE: This was numLongsForBits() in Lucene.
         /// </summary>
         private static long NumInt64sForBits(long numBits) // Note: int version in FixedBitSet.bits2words()
         {
@@ -221,14 +223,14 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Call at most <code>numValues</code> times to encode a non decreasing sequence of non negative numbers. </summary>
+        /// Call at most <see cref="numValues"/> times to encode a non decreasing sequence of non negative numbers. </summary>
         /// <param name="x"> The next number to be encoded. </param>
-        /// <exception cref="IllegalStateException"> when called more than <code>numValues</code> times. </exception>
-        /// <exception cref="IllegalArgumentException"> when:
-        ///         <ul>
-        ///         <li><code>x</code> is smaller than an earlier encoded value, or
-        ///         <li><code>x</code> is larger than <code>upperBound</code>.
-        ///         </ul> </exception>
+        /// <exception cref="InvalidOperationException"> when called more than <see cref="numValues"/> times. </exception>
+        /// <exception cref="ArgumentException"> when:
+        ///         <list type="bullet">
+        ///         <item><description><paramref name="x"/> is smaller than an earlier encoded value, or</description></item>
+        ///         <item><description><paramref name="x"/> is larger than <see cref="upperBound"/>.</description></item>
+        ///         </list> </exception>
         public virtual void EncodeNext(long x)
         {
             if (numEncoded >= numValues)
@@ -286,17 +288,18 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Provide an indication that it is better to use an <seealso cref="EliasFanoEncoder"/> than a <seealso cref="FixedBitSet"/>
-        ///  to encode document identifiers.
-        ///  this indication is not precise and may change in the future.
-        ///  <br>An EliasFanoEncoder is favoured when the size of the encoding by the EliasFanoEncoder
-        ///  (including some space for its index) is at most about 5/6 of the size of the FixedBitSet,
-        ///  this is the same as comparing estimates of the number of bits accessed by a pair of FixedBitSets and
-        ///  by a pair of non indexed EliasFanoDocIdSets when determining the intersections of the pairs.
-        ///  <br>A bit set is preferred when <code>upperbound <= 256</code>.
-        ///  <br>It is assumed that <seealso cref="#DEFAULT_INDEX_INTERVAL"/> is used. </summary>
-        ///  <param name="numValues"> The number of document identifiers that is to be encoded. Should be non negative. </param>
-        ///  <param name="upperBound"> The maximum possible value for a document identifier. Should be at least <code>numValues</code>. </param>
+        /// Provide an indication that it is better to use an <see cref="EliasFanoEncoder"/> than a <see cref="FixedBitSet"/>
+        /// to encode document identifiers.
+        /// This indication is not precise and may change in the future.
+        /// <para/>An <see cref="EliasFanoEncoder"/> is favored when the size of the encoding by the <see cref="EliasFanoEncoder"/>
+        /// (including some space for its index) is at most about 5/6 of the size of the <see cref="FixedBitSet"/>,
+        /// this is the same as comparing estimates of the number of bits accessed by a pair of <see cref="FixedBitSet"/>s and
+        /// by a pair of non indexed <see cref="EliasFanoDocIdSet"/>s when determining the intersections of the pairs.
+    /// <para/>A bit set is preferred when <c>upperBound &lt;= 256</c>.
+        /// <para/>It is assumed that <see cref="DEFAULT_INDEX_INTERVAL"/> is used. 
+        /// </summary>
+        /// <param name="numValues"> The number of document identifiers that is to be encoded. Should be non negative. </param>
+        /// <param name="upperBound"> The maximum possible value for a document identifier. Should be at least <paramref name="numValues"/>. </param>
         public static bool SufficientlySmallerThanBitSet(long numValues, long upperBound)
         {
             /* When (upperBound / 6) == numValues,
@@ -310,8 +313,8 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Returns an <seealso cref="EliasFanoDecoder"/> to access the encoded values.
-        /// Perform all calls to <seealso cref="#encodeNext"/> before calling <seealso cref="#getDecoder"/>.
+        /// Returns an <see cref="EliasFanoDecoder"/> to access the encoded values.
+        /// Perform all calls to <see cref="EncodeNext(long)"/> before calling <see cref="GetDecoder()"/>.
         /// </summary>
         public virtual EliasFanoDecoder GetDecoder()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/GrowableWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/GrowableWriter.cs b/src/Lucene.Net/Util/Packed/GrowableWriter.cs
index 34da357..9e8130c 100644
--- a/src/Lucene.Net/Util/Packed/GrowableWriter.cs
+++ b/src/Lucene.Net/Util/Packed/GrowableWriter.cs
@@ -23,12 +23,12 @@ namespace Lucene.Net.Util.Packed
     using DataOutput = Lucene.Net.Store.DataOutput;
 
     /// <summary>
-    /// Implements <seealso cref="PackedInt32s.Mutable"/>, but grows the
+    /// Implements <see cref="PackedInt32s.Mutable"/>, but grows the
     /// bit count of the underlying packed ints on-demand.
-    /// <p>Beware that this class will accept to set negative values but in order
+    /// <para/>Beware that this class will accept to set negative values but in order
     /// to do this, it will grow the number of bits per value to 64.
-    ///
-    /// <p>@lucene.internal</p>
+    /// <para/>
+    /// @lucene.internal
     /// </summary>
     public class GrowableWriter : PackedInt32s.Mutable
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/MonotonicAppendingLongBuffer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/MonotonicAppendingLongBuffer.cs b/src/Lucene.Net/Util/Packed/MonotonicAppendingLongBuffer.cs
index bfbaee3..7a7415c 100644
--- a/src/Lucene.Net/Util/Packed/MonotonicAppendingLongBuffer.cs
+++ b/src/Lucene.Net/Util/Packed/MonotonicAppendingLongBuffer.cs
@@ -26,8 +26,8 @@ namespace Lucene.Net.Util.Packed
     /// case where the sequence is monotonic, although it can encode any sequence of
     /// arbitrary longs. It only supports appending.
     /// <para/>
-    /// NOTE: This was MonotonicAppendingLongBuffer in Lucene
-    /// 
+    /// NOTE: This was MonotonicAppendingLongBuffer in Lucene.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public sealed class MonotonicAppendingInt64Buffer : AbstractAppendingInt64Buffer
@@ -45,9 +45,9 @@ namespace Lucene.Net.Util.Packed
         internal float[] averages;
         internal long[] minValues;
 
-        /// <param name="initialPageCount">        the initial number of pages </param>
-        /// <param name="pageSize">                the size of a single page </param>
-        /// <param name="acceptableOverheadRatio"> an acceptable overhead ratio per value </param>
+        /// <param name="initialPageCount">        The initial number of pages. </param>
+        /// <param name="pageSize">                The size of a single page. </param>
+        /// <param name="acceptableOverheadRatio"> An acceptable overhead ratio per value. </param>
         public MonotonicAppendingInt64Buffer(int initialPageCount, int pageSize, float acceptableOverheadRatio)
             : base(initialPageCount, pageSize, acceptableOverheadRatio)
         {
@@ -56,8 +56,8 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Create an <seealso cref="MonotonicAppendingInt64Buffer"/> with initialPageCount=16,
-        /// pageSize=1024 and acceptableOverheadRatio=<seealso cref="PackedInt32s#DEFAULT"/>
+        /// Create an <see cref="MonotonicAppendingInt64Buffer"/> with initialPageCount=16,
+        /// pageSize=1024 and acceptableOverheadRatio=<see cref="PackedInt32s.DEFAULT"/>.
         /// </summary>
         public MonotonicAppendingInt64Buffer()
             : this(16, 1024, PackedInt32s.DEFAULT)
@@ -65,8 +65,8 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Create an <seealso cref="AppendingDeltaPackedInt64Buffer"/> with initialPageCount=16,
-        /// pageSize=1024
+        /// Create an <see cref="AppendingDeltaPackedInt64Buffer"/> with initialPageCount=16,
+        /// pageSize=1024.
         /// </summary>
         public MonotonicAppendingInt64Buffer(float acceptableOverheadRatio)
             : this(16, 1024, acceptableOverheadRatio)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/MonotonicBlockPackedReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/MonotonicBlockPackedReader.cs b/src/Lucene.Net/Util/Packed/MonotonicBlockPackedReader.cs
index 45c1b8f..f93f414 100644
--- a/src/Lucene.Net/Util/Packed/MonotonicBlockPackedReader.cs
+++ b/src/Lucene.Net/Util/Packed/MonotonicBlockPackedReader.cs
@@ -24,7 +24,8 @@ namespace Lucene.Net.Util.Packed
 
     /// <summary>
     /// Provides random access to a stream written with
-    /// <seealso cref="MonotonicBlockPackedWriter"/>.
+    /// <see cref="MonotonicBlockPackedWriter"/>.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public sealed class MonotonicBlockPackedReader : Int64Values
@@ -87,6 +88,7 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// Returns the number of values.
+        /// <para/>
         /// NOTE: This was size() in Lucene.
         /// </summary>
         public long Count
@@ -95,7 +97,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Returns the approximate RAM bytes used </summary>
+        /// Returns the approximate RAM bytes used. </summary>
         public long RamBytesUsed()
         {
             long sizeInBytes = 0;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/MonotonicBlockPackedWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/MonotonicBlockPackedWriter.cs b/src/Lucene.Net/Util/Packed/MonotonicBlockPackedWriter.cs
index 01d6982..fb31aa2 100644
--- a/src/Lucene.Net/Util/Packed/MonotonicBlockPackedWriter.cs
+++ b/src/Lucene.Net/Util/Packed/MonotonicBlockPackedWriter.cs
@@ -24,39 +24,41 @@ namespace Lucene.Net.Util.Packed
     using DataOutput = Lucene.Net.Store.DataOutput;
 
     /// <summary>
-    /// A writer for large monotonically increasing sequences of positive longs.
-    /// <p>
+    /// A writer for large monotonically increasing sequences of positive <see cref="long"/>s.
+    /// <para/>
     /// The sequence is divided into fixed-size blocks and for each block, values
-    /// are modeled after a linear function f: x &rarr; A &times; x + B. The block
+    /// are modeled after a linear function f: x &#8594; A &#215; x + B. The block
     /// encodes deltas from the expected values computed from this function using as
     /// few bits as possible. Each block has an overhead between 6 and 14 bytes.
-    /// <p>
+    /// <para/>
     /// Format:
-    /// <ul>
-    /// <li>&lt;BLock&gt;<sup>BlockCount</sup>
-    /// <li>BlockCount: &lceil; ValueCount / BlockSize &rceil;
-    /// <li>Block: &lt;Header, (Ints)&gt;
-    /// <li>Header: &lt;B, A, BitsPerValue&gt;
-    /// <li>B: the B from f: x &rarr; A &times; x + B using a
-    ///     <seealso cref="DataOutput#writeVLong(long) variable-length long"/>
-    /// <li>A: the A from f: x &rarr; A &times; x + B encoded using
-    ///     <seealso cref="Float#floatToIntBits(float)"/> on
-    ///     <seealso cref="DataOutput#writeInt(int) 4 bytes"/>
-    /// <li>BitsPerValue: a <seealso cref="DataOutput#writeVInt(int) variable-length int"/>
-    /// <li>Ints: if BitsPerValue is <tt>0</tt>, then there is nothing to read and
+    /// <list type="bullet">
+    /// <item><description>&lt;Block&gt;<sup>BlockCount</sup></description></item>
+    /// <item><description>BlockCount: &#8968; ValueCount / BlockSize &#8969;</description></item>
+    /// <item><description>Block: &lt;Header, (Ints)&gt;</description></item>
+    /// <item><description>Header: &lt;B, A, BitsPerValue&gt;</description></item>
+    /// <item><description>B: the B from f: x &#8594; A &#215; x + B using a
+    ///     variable-length <see cref="long"/> (<see cref="DataOutput.WriteVInt64(long)"/>)</description></item>
+    /// <item><description>A: the A from f: x &#8594; A &#215; x + B encoded using
+    ///     <see cref="Support.Number.SingleToInt32Bits(float)"/> on
+    ///     4 bytes (<see cref="DataOutput.WriteVInt32(int)"/>)</description></item>
+    /// <item><description>BitsPerValue: a variable-length <see cref="int"/> (<see cref="DataOutput.WriteVInt32(int)"/>)</description></item>
+    /// <item><description>Ints: if BitsPerValue is <c>0</c>, then there is nothing to read and
     ///     all values perfectly match the result of the function. Otherwise, these
     ///     are the
     ///     <a href="https://developers.google.com/protocol-buffers/docs/encoding#types">zigzag-encoded</a>
-    ///     <seealso cref="PackedInt32s packed"/> deltas from the expected value (computed from
-    ///     the function) using exaclty BitsPerValue bits per value
-    /// </ul> </summary>
-    /// <seealso cref= MonotonicBlockPackedReader
-    /// @lucene.internal </seealso>
+    ///     packed (<see cref="PackedInt32s"/>) deltas from the expected value (computed from
+    ///     the function) using exactly BitsPerValue bits per value</description></item>
+    /// </list> 
+    /// <para/>
+    /// @lucene.internal
+    /// </summary>
+    /// <seealso cref="MonotonicBlockPackedReader"/>
     public sealed class MonotonicBlockPackedWriter : AbstractBlockPackedWriter
     {
         /// <summary>
         /// Sole constructor. </summary>
-        /// <param name="blockSize"> the number of values of a single block, must be a power of 2 </param>
+        /// <param name="blockSize"> The number of values of a single block, must be a power of 2. </param>
         public MonotonicBlockPackedWriter(DataOutput @out, int blockSize)
             : base(@out, blockSize)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/Packed16ThreeBlocks.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/Packed16ThreeBlocks.cs b/src/Lucene.Net/Util/Packed/Packed16ThreeBlocks.cs
index af0d9e9..a152c90 100644
--- a/src/Lucene.Net/Util/Packed/Packed16ThreeBlocks.cs
+++ b/src/Lucene.Net/Util/Packed/Packed16ThreeBlocks.cs
@@ -27,6 +27,7 @@ namespace Lucene.Net.Util.Packed
 
     /// <summary>
     /// Packs integers into 3 shorts (48 bits per value).
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     internal sealed class Packed16ThreeBlocks : PackedInt32s.MutableImpl

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/Packed64.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/Packed64.cs b/src/Lucene.Net/Util/Packed/Packed64.cs
index 301d26e..107e894 100644
--- a/src/Lucene.Net/Util/Packed/Packed64.cs
+++ b/src/Lucene.Net/Util/Packed/Packed64.cs
@@ -26,11 +26,11 @@ namespace Lucene.Net.Util.Packed
     /// <summary>
     /// Space optimized random access capable array of values with a fixed number of
     /// bits/value. Values are packed contiguously.
-    /// </p><p>
+    /// <para/>
     /// The implementation strives to perform as fast as possible under the
-    /// constraint of contiguous bits, by avoiding expensive operations. this comes
+    /// constraint of contiguous bits, by avoiding expensive operations. This comes
     /// at the cost of code clarity.
-    /// </p><p>
+    /// <para/>
     /// Technical details: this implementation is a refinement of a non-branching
     /// version. The non-branching get and set methods meant that 2 or 4 atomics in
     /// the underlying array were always accessed, even for the cases where only
@@ -40,7 +40,6 @@ namespace Lucene.Net.Util.Packed
     /// and masks, which also proved to be a bit slower than calculating the shifts
     /// and masks on the fly.
     /// See https://issues.apache.org/jira/browse/LUCENE-4062 for details.
-    ///
     /// </summary>
     public class Packed64 : PackedInt32s.MutableImpl
     {
@@ -54,20 +53,20 @@ namespace Lucene.Net.Util.Packed
         private readonly long[] blocks;
 
         /// <summary>
-        /// A right-aligned mask of width BitsPerValue used by <seealso cref="#get(int)"/>.
+        /// A right-aligned mask of width BitsPerValue used by <see cref="Get(int)"/>.
         /// </summary>
         private readonly long maskRight;
 
         /// <summary>
-        /// Optimization: Saves one lookup in <seealso cref="#get(int)"/>.
+        /// Optimization: Saves one lookup in <see cref="Get(int)"/>.
         /// </summary>
         private readonly int bpvMinusBlockSize;
 
         /// <summary>
         /// Creates an array with the internal structures adjusted for the given
         /// limits and initialized to 0. </summary>
-        /// <param name="valueCount">   the number of elements. </param>
-        /// <param name="bitsPerValue"> the number of bits available for any given value. </param>
+        /// <param name="valueCount">   The number of elements. </param>
+        /// <param name="bitsPerValue"> The number of bits available for any given value. </param>
         public Packed64(int valueCount, int bitsPerValue)
             : base(valueCount, bitsPerValue)
         {
@@ -89,11 +88,11 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Creates an array with content retrieved from the given DataInput. </summary>
-        /// <param name="in">       a DataInput, positioned at the start of Packed64-content. </param>
-        /// <param name="valueCount">  the number of elements. </param>
-        /// <param name="bitsPerValue"> the number of bits available for any given value. </param>
-        /// <exception cref="java.io.IOException"> if the values for the backing array could not
+        /// Creates an array with content retrieved from the given <see cref="DataInput"/>. </summary>
+        /// <param name="in">       A <see cref="DataInput"/>, positioned at the start of Packed64-content. </param>
+        /// <param name="valueCount">  The number of elements. </param>
+        /// <param name="bitsPerValue"> The number of bits available for any given value. </param>
+        /// <exception cref="System.IO.IOException"> If the values for the backing array could not
         ///                             be retrieved. </exception>
         public Packed64(int packedIntsVersion, DataInput @in, int valueCount, int bitsPerValue)
             : base(valueCount, bitsPerValue)
@@ -122,8 +121,8 @@ namespace Lucene.Net.Util.Packed
             bpvMinusBlockSize = bitsPerValue - BLOCK_SIZE;
         }
 
-        /// <param name="index"> the position of the value. </param>
-        /// <returns> the value at the given index. </returns>
+        /// <param name="index"> The position of the value. </param>
+        /// <returns> The value at the given index. </returns>
         public override long Get(int index)
         {
             // The abstract index in a bit stream

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/Packed64SingleBlock.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/Packed64SingleBlock.cs b/src/Lucene.Net/Util/Packed/Packed64SingleBlock.cs
index ce4f256..94fff8b 100644
--- a/src/Lucene.Net/Util/Packed/Packed64SingleBlock.cs
+++ b/src/Lucene.Net/Util/Packed/Packed64SingleBlock.cs
@@ -27,7 +27,7 @@ namespace Lucene.Net.Util.Packed
     using DataInput = Lucene.Net.Store.DataInput;
 
     /// <summary>
-    /// this class is similar to <seealso cref="Packed64"/> except that it trades space for
+    /// This class is similar to <see cref="Packed64"/> except that it trades space for
     /// speed by ensuring that a single block needs to be read/written in order to
     /// read/write a value.
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/Packed8ThreeBlocks.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/Packed8ThreeBlocks.cs b/src/Lucene.Net/Util/Packed/Packed8ThreeBlocks.cs
index 9643a60..5fe7079 100644
--- a/src/Lucene.Net/Util/Packed/Packed8ThreeBlocks.cs
+++ b/src/Lucene.Net/Util/Packed/Packed8ThreeBlocks.cs
@@ -27,6 +27,7 @@ namespace Lucene.Net.Util.Packed
 
     /// <summary>
     /// Packs integers into 3 bytes (24 bits per value).
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     internal sealed class Packed8ThreeBlocks : PackedInt32s.MutableImpl

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/PackedDataInput.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/PackedDataInput.cs b/src/Lucene.Net/Util/Packed/PackedDataInput.cs
index 6dd5694..0764ec7 100644
--- a/src/Lucene.Net/Util/Packed/PackedDataInput.cs
+++ b/src/Lucene.Net/Util/Packed/PackedDataInput.cs
@@ -23,11 +23,13 @@ namespace Lucene.Net.Util.Packed
     using DataInput = Lucene.Net.Store.DataInput;
 
     /// <summary>
-    /// A <seealso cref="DataInput"/> wrapper to read unaligned, variable-length packed
-    /// integers. this API is much slower than the <seealso cref="PackedInt32s"/> fixed-length
-    /// API but can be convenient to save space. </summary>
-    /// <seealso cref= PackedDataOutput
-    /// @lucene.internal </seealso>
+    /// A <see cref="DataInput"/> wrapper to read unaligned, variable-length packed
+    /// integers. This API is much slower than the <see cref="PackedInt32s"/> fixed-length
+    /// API but can be convenient to save space. 
+    /// <para/>
+    /// @lucene.internal
+    /// </summary>
+    /// <seealso cref="PackedDataOutput"/>
     public sealed class PackedDataInput
     {
         internal readonly DataInput @in;
@@ -35,7 +37,7 @@ namespace Lucene.Net.Util.Packed
         internal int remainingBits;
 
         /// <summary>
-        /// Create a new instance that wraps <code>in</code>.
+        /// Create a new instance that wraps <paramref name="in"/>.
         /// </summary>
         public PackedDataInput(DataInput @in)
         {
@@ -44,9 +46,9 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Read the next long using exactly <code>bitsPerValue</code> bits.
+        /// Read the next <see cref="long"/> using exactly <paramref name="bitsPerValue"/> bits.
         /// <para/>
-        /// NOTE: This was readLong() in Lucene
+        /// NOTE: This was readLong() in Lucene.
         /// </summary>
         public long ReadInt64(int bitsPerValue)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/PackedDataOutput.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/PackedDataOutput.cs b/src/Lucene.Net/Util/Packed/PackedDataOutput.cs
index 109fae3..9f23f32 100644
--- a/src/Lucene.Net/Util/Packed/PackedDataOutput.cs
+++ b/src/Lucene.Net/Util/Packed/PackedDataOutput.cs
@@ -23,10 +23,12 @@ namespace Lucene.Net.Util.Packed
     using DataOutput = Lucene.Net.Store.DataOutput;
 
     /// <summary>
-    /// A <seealso cref="DataOutput"/> wrapper to write unaligned, variable-length packed
-    /// integers. </summary>
-    /// <seealso cref= PackedDataInput
-    /// @lucene.internal </seealso>
+    /// A <see cref="DataOutput"/> wrapper to write unaligned, variable-length packed
+    /// integers.
+    /// <para/>
+    /// @lucene.internal
+    /// </summary>
+    /// <seealso cref="PackedDataInput"/>
     public sealed class PackedDataOutput
     {
         internal readonly DataOutput @out;
@@ -34,7 +36,7 @@ namespace Lucene.Net.Util.Packed
         internal int remainingBits;
 
         /// <summary>
-        /// Create a new instance that wraps <code>out</code>.
+        /// Create a new instance that wraps <paramref name="out"/>.
         /// </summary>
         public PackedDataOutput(DataOutput @out)
         {
@@ -44,9 +46,9 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Write a value using exactly <code>bitsPerValue</code> bits.
+        /// Write a value using exactly <paramref name="bitsPerValue"/> bits.
         /// <para/>
-        /// NOTE: This was writeLong() in Lucene
+        /// NOTE: This was writeLong() in Lucene.
         /// </summary>
         public void WriteInt64(long value, int bitsPerValue)
         {
@@ -67,7 +69,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Flush pending bits to the underlying <seealso cref="DataOutput"/>.
+        /// Flush pending bits to the underlying <see cref="DataOutput"/>.
         /// </summary>
         public void Flush()
         {


[37/48] lucenenet git commit: Lucene.Net.Codecs.Lucene45: Fixed XML documentation comment warnings

Posted by ni...@apache.org.
Lucene.Net.Codecs.Lucene45: Fixed XML documentation comment warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/b27d10c3
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/b27d10c3
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/b27d10c3

Branch: refs/heads/master
Commit: b27d10c3b70b0136e45a9387a5cdab5777180211
Parents: 5478f1b
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Mon Jun 5 08:33:37 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Tue Jun 6 06:58:40 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |   3 +-
 src/Lucene.Net/Codecs/Lucene45/Lucene45Codec.cs |  23 +-
 .../Lucene45/Lucene45DocValuesConsumer.cs       | 162 +-------------
 .../Codecs/Lucene45/Lucene45DocValuesFormat.cs  | 221 ++++++++++---------
 .../Lucene45/Lucene45DocValuesProducer.cs       |  49 ++--
 5 files changed, 156 insertions(+), 302 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b27d10c3/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 0d350a9..ce132b9 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -56,8 +56,7 @@ helpers to help with that, see for examples see our [Java style methods to avoid
    3. Codecs.Lucene40 (namespace)
    4. Codecs.Lucene41 (namespace)
    5. Codecs.Lucene42 (namespace)
-   6. Codecs.Lucene45 (namespace)
-   7. Util.Packed (namespace)
+   6. Util.Packed (namespace)
 2. Lucene.Net.Codecs (project)
    1. Appending (namespace)
    2. BlockTerms (namespace)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b27d10c3/src/Lucene.Net/Codecs/Lucene45/Lucene45Codec.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene45/Lucene45Codec.cs b/src/Lucene.Net/Codecs/Lucene45/Lucene45Codec.cs
index 8214bd6..b2ccca2 100644
--- a/src/Lucene.Net/Codecs/Lucene45/Lucene45Codec.cs
+++ b/src/Lucene.Net/Codecs/Lucene45/Lucene45Codec.cs
@@ -31,13 +31,14 @@ namespace Lucene.Net.Codecs.Lucene45
     /// <summary>
     /// Implements the Lucene 4.5 index format, with configurable per-field postings
     /// and docvalues formats.
-    /// <p>
+    /// <para/>
     /// If you want to reuse functionality of this codec in another codec, extend
-    /// <seealso cref="FilterCodec"/>.
+    /// <see cref="FilterCodec"/>.
+    /// <para/>
+    /// See <see cref="Lucene.Net.Codecs.Lucene45"/> package documentation for file format details.
+    /// <para/>
+    /// @lucene.experimental 
     /// </summary>
-    /// <seealso cref= Lucene.Net.Codecs.Lucene45 package documentation for file format details.
-    /// @lucene.experimental </seealso>
-    /// @deprecated Only for reading old 4.3-4.5 segments
     // NOTE: if we make largish changes in a minor release, easier to just make Lucene46Codec or whatever
     // if they are backwards compatible or smallish we can probably do the backwards in the postingsreader
     // (it writes a minor version, etc).
@@ -126,9 +127,9 @@ namespace Lucene.Net.Codecs.Lucene45
 
         /// <summary>
         /// Returns the postings format that should be used for writing
-        ///  new segments of <code>field</code>.
-        ///
-        ///  The default implementation always returns "Lucene41"
+        /// new segments of <paramref name="field"/>.
+        /// <para/>
+        /// The default implementation always returns "Lucene41"
         /// </summary>
         public virtual PostingsFormat GetPostingsFormatForField(string field)
         {
@@ -137,9 +138,9 @@ namespace Lucene.Net.Codecs.Lucene45
 
         /// <summary>
         /// Returns the docvalues format that should be used for writing
-        ///  new segments of <code>field</code>.
-        ///
-        ///  The default implementation always returns "Lucene45"
+        /// new segments of <paramref name="field"/>.
+        /// <para/>
+        /// The default implementation always returns "Lucene45"
         /// </summary>
         public virtual DocValuesFormat GetDocValuesFormatForField(string field)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b27d10c3/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesConsumer.cs b/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesConsumer.cs
index 76ebffa..9907b28 100644
--- a/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesConsumer.cs
+++ b/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesConsumer.cs
@@ -37,7 +37,7 @@ namespace Lucene.Net.Codecs.Lucene45
     using StringHelper = Lucene.Net.Util.StringHelper;
 
     /// <summary>
-    /// writer for <seealso cref="Lucene45DocValuesFormat"/> </summary>
+    /// Writer for <see cref="Lucene45DocValuesFormat"/> </summary>
     public class Lucene45DocValuesConsumer : DocValuesConsumer, IDisposable
     {
         internal static readonly int BLOCK_SIZE = 16384;
@@ -45,7 +45,7 @@ namespace Lucene.Net.Codecs.Lucene45
         internal static readonly long MISSING_ORD = BitConverter.DoubleToInt64Bits(-1);
 
         /// <summary>
-        /// Compressed using packed blocks of ints. </summary>
+        /// Compressed using packed blocks of <see cref="int"/>s. </summary>
         public const int DELTA_COMPRESSED = 0;
 
         /// <summary>
@@ -70,13 +70,13 @@ namespace Lucene.Net.Codecs.Lucene45
 
         /// <summary>
         /// Standard storage for sorted set values with 1 level of indirection:
-        ///  docId -> address -> ord.
+        /// docId -> address -> ord.
         /// </summary>
         public static readonly int SORTED_SET_WITH_ADDRESSES = 0;
 
         /// <summary>
         /// Single-valued sorted set values, encoded as sorted values, so no level
-        ///  of indirection: docId -> ord.
+        /// of indirection: docId -> ord.
         /// </summary>
         public static readonly int SORTED_SET_SINGLE_VALUED_SORTED = 1;
 
@@ -84,7 +84,7 @@ namespace Lucene.Net.Codecs.Lucene45
         internal readonly int maxDoc;
 
         /// <summary>
-        /// expert: Creates a new writer </summary>
+        /// Expert: Creates a new writer. </summary>
         public Lucene45DocValuesConsumer(SegmentWriteState state, string dataCodec, string dataExtension, string metaCodec, string metaExtension)
         {
             bool success = false;
@@ -353,7 +353,7 @@ namespace Lucene.Net.Codecs.Lucene45
         }
 
         /// <summary>
-        /// expert: writes a value dictionary for a sorted/sortedset field </summary>
+        /// Expert: writes a value dictionary for a sorted/sortedset field. </summary>
         protected internal virtual void AddTermsDict(FieldInfo field, IEnumerable<BytesRef> values)
         {
             // first check if its a "fixed-length" terms dict
@@ -498,156 +498,6 @@ namespace Lucene.Net.Codecs.Lucene45
             Debug.Assert(!ordsIter.MoveNext());
         }
 
-        /*
-      private class IterableAnonymousInnerClassHelper : IEnumerable<int>
-	  {
-		  private readonly Lucene45DocValuesConsumer OuterInstance;
-
-		  private IEnumerable<int> DocToOrdCount;
-		  private IEnumerable<long> Ords;
-
-		  public IterableAnonymousInnerClassHelper(IEnumerable<int> docToOrdCount, IEnumerable<long> ords)
-		  {
-			  //this.OuterInstance = outerInstance;
-			  this.DocToOrdCount = docToOrdCount;
-			  this.Ords = ords;
-		  }
-
-          public virtual IEnumerator<BytesRef> GetEnumerator()
-		  {
-			*/
-        /*IEnumerator<Number> docToOrdCountIt = DocToOrdCount.GetEnumerator();
-      IEnumerator<Number> ordsIt = Ords.GetEnumerator();
-      return new IteratorAnonymousInnerClassHelper(this, docToOrdCountIt, ordsIt);*/
-        /*
-return new SortedSetIterator(DocToOrdCount.GetEnumerator(), Ords.GetEnumerator());
-}
-
-System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator()
-{
-return GetEnumerator();
-}
-
-private class SortedSetIterator : IEnumerator<BytesRef>
-{
-internal byte[] buffer = new byte[10]; //Initial size, will grow if needed
-internal ByteArrayDataOutput output = new ByteArrayDataOutput();
-internal BytesRef bytesRef = new BytesRef();
-
-internal IEnumerator<int> counts;
-internal IEnumerator<long> ords;
-
-internal SortedSetIterator(IEnumerator<int> counts, IEnumerator<long> ords)
-{
-this.counts = counts;
-this.ords = ords;
-}
-
-public BytesRef Current
-{
-get
-{
-return bytesRef;
-}
-}
-
-public void Dispose()
-{
-counts.Dispose();
-ords.Dispose();
-}
-
-object System.Collections.IEnumerator.Current
-{
-get { return bytesRef;  }
-}
-
-public bool MoveNext()
-{
-if (!counts.MoveNext())
-return false;
-
-int count = counts.Current;
-int maxSize = count * 9;//worst case
-if (maxSize > buffer.Length)
-buffer = ArrayUtil.Grow(buffer, maxSize);
-
-try
-{
-EncodeValues(count);
-}
-catch (System.IO.IOException)
-{
-throw;
-}
-
-bytesRef.Bytes = buffer;
-bytesRef.Offset = 0;
-bytesRef.Length = output.Position;
-
-return true;
-}
-
-private void EncodeValues(int count)
-{
-output.Reset(buffer);
-long lastOrd = 0;
-for (int i = 0; i < count; i++)
-{
-ords.MoveNext();
-long ord = ords.Current;
-output.WriteVLong(ord - lastOrd);
-lastOrd = ord;
-}
-}
-
-public void Reset()
-{
-throw new NotImplementedException();
-}
-}*/
-
-        /*private class IteratorAnonymousInnerClassHelper : IEnumerator<Number>
-        {
-            private readonly IterableAnonymousInnerClassHelper OuterInstance;
-
-            private IEnumerator<Number> DocToOrdCountIt;
-            private IEnumerator<Number> OrdsIt;
-
-            public IteratorAnonymousInnerClassHelper(IterableAnonymousInnerClassHelper outerInstance, IEnumerator<Number> docToOrdCountIt, IEnumerator<Number> ordsIt)
-            {
-                this.OuterInstance = outerInstance;
-                this.DocToOrdCountIt = docToOrdCountIt;
-                this.OrdsIt = ordsIt;
-            }
-
-            public virtual bool HasNext()
-            {
-              return DocToOrdCountIt.HasNext();
-            }
-
-            public virtual Number Next()
-            {
-              Number ordCount = DocToOrdCountIt.next();
-              if ((long)ordCount == 0)
-              {
-                return MISSING_ORD;
-              }
-              else
-              {
-                Debug.Assert((long)ordCount == 1);
-                return OrdsIt.next();
-              }
-            }
-
-            public virtual void Remove()
-            {
-              throw new System.NotSupportedException();
-            }
-        }*/
-
-        //}
-
         protected override void Dispose(bool disposing)
         {
             if (disposing)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b27d10c3/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesFormat.cs b/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesFormat.cs
index 30cae45..780a2a1 100644
--- a/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesFormat.cs
@@ -22,130 +22,131 @@ namespace Lucene.Net.Codecs.Lucene45
 
     /// <summary>
     /// Lucene 4.5 DocValues format.
-    /// <p>
+    /// <para/>
     /// Encodes the four per-document value types (Numeric,Binary,Sorted,SortedSet) with these strategies:
-    /// <p>
-    /// <seealso cref="DocValuesType#NUMERIC NUMERIC"/>:
-    /// <ul>
-    ///    <li>Delta-compressed: per-document integers written in blocks of 16k. For each block
+    /// <para/>
+    /// <see cref="Index.DocValuesType.NUMERIC"/>:
+    /// <list type="bullet">
+    ///    <item><description>Delta-compressed: per-document integers written in blocks of 16k. For each block
     ///        the minimum value in that block is encoded, and each entry is a delta from that
     ///        minimum value. Each block of deltas is compressed with bitpacking. For more
-    ///        information, see <seealso cref="BlockPackedWriter"/>.
-    ///    <li>Table-compressed: when the number of unique values is very small (&lt; 256), and
-    ///        when there are unused "gaps" in the range of values used (such as <seealso cref="SmallFloat"/>),
+    ///        information, see <see cref="Util.Packed.BlockPackedWriter"/>.</description></item>
+    ///    <item><description>Table-compressed: when the number of unique values is very small (&lt; 256), and
+    ///        when there are unused "gaps" in the range of values used (such as <see cref="Util.SmallSingle"/>),
     ///        a lookup table is written instead. Each per-document entry is instead the ordinal
-    ///        to this table, and those ordinals are compressed with bitpacking (<seealso cref="PackedInts"/>).
-    ///    <li>GCD-compressed: when all numbers share a common divisor, such as dates, the greatest
-    ///        common denominator (GCD) is computed, and quotients are stored using Delta-compressed Numerics.
-    /// </ul>
-    /// <p>
-    /// <seealso cref="DocValuesType#BINARY BINARY"/>:
-    /// <ul>
-    ///    <li>Fixed-width Binary: one large concatenated byte[] is written, along with the fixed length.
-    ///        Each document's value can be addressed directly with multiplication ({@code docID * length}).
-    ///    <li>Variable-width Binary: one large concatenated byte[] is written, along with end addresses
+    ///        to this table, and those ordinals are compressed with bitpacking (<see cref="Util.Packed.PackedInt32s"/>).</description></item>
+    ///    <item><description>GCD-compressed: when all numbers share a common divisor, such as dates, the greatest
+    ///        common denominator (GCD) is computed, and quotients are stored using Delta-compressed Numerics.</description></item>
+    /// </list>
+    /// <para/>
+    /// <see cref="Index.DocValuesType.BINARY"/>:
+    /// <list type="bullet">
+    ///    <item><description>Fixed-width Binary: one large concatenated <see cref="T:byte[]"/> is written, along with the fixed length.
+    ///        Each document's value can be addressed directly with multiplication (<c>docID * length</c>).</description></item>
+    ///    <item><description>Variable-width Binary: one large concatenated <see cref="T:byte[]"/> is written, along with end addresses
     ///        for each document. The addresses are written in blocks of 16k, with the current absolute
     ///        start for the block, and the average (expected) delta per entry. For each document the
-    ///        deviation from the delta (actual - expected) is written.
-    ///    <li>Prefix-compressed Binary: values are written in chunks of 16, with the first value written
-    ///        completely and other values sharing prefixes. chunk addresses are written in blocks of 16k,
+    ///        deviation from the delta (actual - expected) is written.</description></item>
+    ///    <item><description>Prefix-compressed Binary: values are written in chunks of 16, with the first value written
+    ///        completely and other values sharing prefixes. Chunk addresses are written in blocks of 16k,
     ///        with the current absolute start for the block, and the average (expected) delta per entry.
-    ///        For each chunk the deviation from the delta (actual - expected) is written.
-    /// </ul>
-    /// <p>
-    /// <seealso cref="DocValuesType#SORTED SORTED"/>:
-    /// <ul>
-    ///    <li>Sorted: a mapping of ordinals to deduplicated terms is written as Prefix-Compressed Binary,
-    ///        along with the per-document ordinals written using one of the numeric strategies above.
-    /// </ul>
-    /// <p>
-    /// <seealso cref="DocValuesType#SORTED_SET SORTED_SET"/>:
-    /// <ul>
-    ///    <li>SortedSet: a mapping of ordinals to deduplicated terms is written as Prefix-Compressed Binary,
+    ///        For each chunk the deviation from the delta (actual - expected) is written.</description></item>
+    /// </list>
+    /// <para/>
+    /// <see cref="Index.DocValuesType.SORTED"/>:
+    /// <list type="bullet">
+    ///    <item><description>Sorted: a mapping of ordinals to deduplicated terms is written as Prefix-Compressed Binary,
+    ///        along with the per-document ordinals written using one of the numeric strategies above.</description></item>
+    /// </list>
+    /// <para/>
+    /// <see cref="Index.DocValuesType.SORTED_SET"/>:
+    /// <list type="bullet">
+    ///    <item><description>SortedSet: a mapping of ordinals to deduplicated terms is written as Prefix-Compressed Binary,
     ///        an ordinal list and per-document index into this list are written using the numeric strategies
-    ///        above.
-    /// </ul>
-    /// <p>
+    ///        above.</description></item>
+    /// </list>
+    /// <para/>
     /// Files:
-    /// <ol>
-    ///   <li><tt>.dvd</tt>: DocValues data</li>
-    ///   <li><tt>.dvm</tt>: DocValues metadata</li>
-    /// </ol>
-    /// <ol>
-    ///   <li><a name="dvm" id="dvm"></a>
-    ///   <p>The DocValues metadata or .dvm file.</p>
-    ///   <p>For DocValues field, this stores metadata, such as the offset into the
-    ///      DocValues data (.dvd)</p>
-    ///   <p>DocValues metadata (.dvm) --&gt; Header,&lt;Entry&gt;<sup>NumFields</sup>,Footer</p>
-    ///   <ul>
-    ///     <li>Entry --&gt; NumericEntry | BinaryEntry | SortedEntry | SortedSetEntry</li>
-    ///     <li>NumericEntry --&gt; GCDNumericEntry | TableNumericEntry | DeltaNumericEntry</li>
-    ///     <li>GCDNumericEntry --&gt; NumericHeader,MinValue,GCD</li>
-    ///     <li>TableNumericEntry --&gt; NumericHeader,TableSize,<seealso cref="DataOutput#writeLong Int64"/><sup>TableSize</sup></li>
-    ///     <li>DeltaNumericEntry --&gt; NumericHeader</li>
-    ///     <li>NumericHeader --&gt; FieldNumber,EntryType,NumericType,MissingOffset,PackedVersion,DataOffset,Count,BlockSize</li>
-    ///     <li>BinaryEntry --&gt; FixedBinaryEntry | VariableBinaryEntry | PrefixBinaryEntry</li>
-    ///     <li>FixedBinaryEntry --&gt; BinaryHeader</li>
-    ///     <li>VariableBinaryEntry --&gt; BinaryHeader,AddressOffset,PackedVersion,BlockSize</li>
-    ///     <li>PrefixBinaryEntry --&gt; BinaryHeader,AddressInterval,AddressOffset,PackedVersion,BlockSize</li>
-    ///     <li>BinaryHeader --&gt; FieldNumber,EntryType,BinaryType,MissingOffset,MinLength,MaxLength,DataOffset</li>
-    ///     <li>SortedEntry --&gt; FieldNumber,EntryType,BinaryEntry,NumericEntry</li>
-    ///     <li>SortedSetEntry --&gt; EntryType,BinaryEntry,NumericEntry,NumericEntry</li>
-    ///     <li>FieldNumber,PackedVersion,MinLength,MaxLength,BlockSize,ValueCount --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///     <li>EntryType,CompressionType --&gt; <seealso cref="DataOutput#writeByte Byte"/></li>
-    ///     <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///     <li>MinValue,GCD,MissingOffset,AddressOffset,DataOffset --&gt; <seealso cref="DataOutput#writeLong Int64"/></li>
-    ///     <li>TableSize --&gt; <seealso cref="DataOutput#writeVInt vInt"/></li>
-    ///     <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    ///   </ul>
-    ///   <p>Sorted fields have two entries: a BinaryEntry with the value metadata,
-    ///      and an ordinary NumericEntry for the document-to-ord metadata.</p>
-    ///   <p>SortedSet fields have three entries: a BinaryEntry with the value metadata,
-    ///      and two NumericEntries for the document-to-ord-index and ordinal list metadata.</p>
-    ///   <p>FieldNumber of -1 indicates the end of metadata.</p>
-    ///   <p>EntryType is a 0 (NumericEntry) or 1 (BinaryEntry)</p>
-    ///   <p>DataOffset is the pointer to the start of the data in the DocValues data (.dvd)</p>
-    ///   <p>NumericType indicates how Numeric values will be compressed:
-    ///      <ul>
-    ///         <li>0 --&gt; delta-compressed. For each block of 16k integers, every integer is delta-encoded
-    ///             from the minimum value within the block.
-    ///         <li>1 --&gt, gcd-compressed. When all integers share a common divisor, only quotients are stored
-    ///             using blocks of delta-encoded ints.
-    ///         <li>2 --&gt; table-compressed. When the number of unique numeric values is small and it would save space,
-    ///             a lookup table of unique values is written, followed by the ordinal for each document.
-    ///      </ul>
-    ///   <p>BinaryType indicates how Binary values will be stored:
-    ///      <ul>
-    ///         <li>0 --&gt; fixed-width. All values have the same length, addressing by multiplication.
-    ///         <li>1 --&gt, variable-width. An address for each value is stored.
-    ///         <li>2 --&gt; prefix-compressed. An address to the start of every interval'th value is stored.
-    ///      </ul>
-    ///   <p>MinLength and MaxLength represent the min and max byte[] value lengths for Binary values.
+    /// <list type="number">
+    ///   <item><description><c>.dvd</c>: DocValues data</description></item>
+    ///   <item><description><c>.dvm</c>: DocValues metadata</description></item>
+    /// </list>
+    /// <list type="number">
+    ///   <item><description><a name="dvm" id="dvm"></a>
+    ///   <para>The DocValues metadata or .dvm file.</para>
+    ///   <para>For DocValues field, this stores metadata, such as the offset into the
+    ///      DocValues data (.dvd)</para>
+    ///   <para>DocValues metadata (.dvm) --&gt; Header,&lt;Entry&gt;<sup>NumFields</sup>,Footer</para>
+    ///   <list type="bullet">
+    ///     <item><description>Entry --&gt; NumericEntry | BinaryEntry | SortedEntry | SortedSetEntry</description></item>
+    ///     <item><description>NumericEntry --&gt; GCDNumericEntry | TableNumericEntry | DeltaNumericEntry</description></item>
+    ///     <item><description>GCDNumericEntry --&gt; NumericHeader,MinValue,GCD</description></item>
+    ///     <item><description>TableNumericEntry --&gt; NumericHeader,TableSize,Int64 (<see cref="Store.DataOutput.WriteInt64(long)"/>) <sup>TableSize</sup></description></item>
+    ///     <item><description>DeltaNumericEntry --&gt; NumericHeader</description></item>
+    ///     <item><description>NumericHeader --&gt; FieldNumber,EntryType,NumericType,MissingOffset,PackedVersion,DataOffset,Count,BlockSize</description></item>
+    ///     <item><description>BinaryEntry --&gt; FixedBinaryEntry | VariableBinaryEntry | PrefixBinaryEntry</description></item>
+    ///     <item><description>FixedBinaryEntry --&gt; BinaryHeader</description></item>
+    ///     <item><description>VariableBinaryEntry --&gt; BinaryHeader,AddressOffset,PackedVersion,BlockSize</description></item>
+    ///     <item><description>PrefixBinaryEntry --&gt; BinaryHeader,AddressInterval,AddressOffset,PackedVersion,BlockSize</description></item>
+    ///     <item><description>BinaryHeader --&gt; FieldNumber,EntryType,BinaryType,MissingOffset,MinLength,MaxLength,DataOffset</description></item>
+    ///     <item><description>SortedEntry --&gt; FieldNumber,EntryType,BinaryEntry,NumericEntry</description></item>
+    ///     <item><description>SortedSetEntry --&gt; EntryType,BinaryEntry,NumericEntry,NumericEntry</description></item>
+    ///     <item><description>FieldNumber,PackedVersion,MinLength,MaxLength,BlockSize,ValueCount --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/></description></item>
+    ///     <item><description>EntryType,CompressionType --&gt; Byte (<see cref="Store.DataOutput.WriteByte(byte)"/></description></item>
+    ///     <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///     <item><description>MinValue,GCD,MissingOffset,AddressOffset,DataOffset --&gt; Int64 (<see cref="Store.DataOutput.WriteInt64(long)"/>) </description></item>
+    ///     <item><description>TableSize --&gt; vInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///     <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(Store.IndexOutput)"/>) </description></item>
+    ///   </list>
+    ///   <para>Sorted fields have two entries: a <see cref="Lucene45DocValuesProducer.BinaryEntry"/> with the value metadata,
+    ///      and an ordinary <see cref="Lucene45DocValuesProducer.NumericEntry"/> for the document-to-ord metadata.</para>
+    ///   <para>SortedSet fields have three entries: a <see cref="Lucene45DocValuesProducer.BinaryEntry"/> with the value metadata,
+    ///      and two <see cref="Lucene45DocValuesProducer.NumericEntry"/>s for the document-to-ord-index and ordinal list metadata.</para>
+    ///   <para>FieldNumber of -1 indicates the end of metadata.</para>
+    ///   <para>EntryType is a 0 (<see cref="Lucene45DocValuesProducer.NumericEntry"/>) or 1 (<see cref="Lucene45DocValuesProducer.BinaryEntry"/>)</para>
+    ///   <para>DataOffset is the pointer to the start of the data in the DocValues data (.dvd)</para>
+    ///   <para/>NumericType indicates how Numeric values will be compressed:
+    ///      <list type="bullet">
+    ///         <item><description>0 --&gt; delta-compressed. For each block of 16k integers, every integer is delta-encoded
+    ///             from the minimum value within the block.</description></item>
+    ///         <item><description>1 --&gt; gcd-compressed. When all integers share a common divisor, only quotients are stored
+    ///             using blocks of delta-encoded ints.</description></item>
+    ///         <item><description>2 --&gt; table-compressed. When the number of unique numeric values is small and it would save space,
+    ///             a lookup table of unique values is written, followed by the ordinal for each document.</description></item>
+    ///      </list>
+    ///   <para/>BinaryType indicates how Binary values will be stored:
+    ///      <list type="bullet">
+    ///         <item><description>0 --&gt; fixed-width. All values have the same length, addressing by multiplication.</description></item>
+    ///         <item><description>1 --&gt; variable-width. An address for each value is stored.</description></item>
+    ///         <item><description>2 --&gt; prefix-compressed. An address to the start of every interval'th value is stored.</description></item>
+    ///      </list>
+    ///   <para/>MinLength and MaxLength represent the min and max byte[] value lengths for Binary values.
     ///      If they are equal, then all values are of a fixed size, and can be addressed as DataOffset + (docID * length).
     ///      Otherwise, the binary values are of variable size, and packed integer metadata (PackedVersion,BlockSize)
     ///      is written for the addresses.
-    ///   <p>MissingOffset points to a byte[] containing a bitset of all documents that had a value for the field.
+    ///   <para/>MissingOffset points to a <see cref="T:byte[]"/> containing a bitset of all documents that had a value for the field.
     ///      If its -1, then there are no missing values.
-    ///   <p>Checksum contains the CRC32 checksum of all bytes in the .dvm file up
+    ///   <para/>Checksum contains the CRC32 checksum of all bytes in the .dvm file up
     ///      until the checksum. this is used to verify integrity of the file on opening the
-    ///      index.
-    ///   <li><a name="dvd" id="dvd"></a>
-    ///   <p>The DocValues data or .dvd file.</p>
-    ///   <p>For DocValues field, this stores the actual per-document data (the heavy-lifting)</p>
-    ///   <p>DocValues data (.dvd) --&gt; Header,&lt;NumericData | BinaryData | SortedData&gt;<sup>NumFields</sup>,Footer</p>
-    ///   <ul>
-    ///     <li>NumericData --&gt; DeltaCompressedNumerics | TableCompressedNumerics | GCDCompressedNumerics</li>
-    ///     <li>BinaryData --&gt;  <seealso cref="DataOutput#writeByte Byte"/><sup>DataLength</sup>,Addresses</li>
-    ///     <li>SortedData --&gt; <seealso cref="FST FST&lt;Int64&gt;"/></li>
-    ///     <li>DeltaCompressedNumerics --&gt; <seealso cref="BlockPackedWriter BlockPackedInts(blockSize=16k)"/></li>
-    ///     <li>TableCompressedNumerics --&gt; <seealso cref="PackedInts PackedInts"/></li>
-    ///     <li>GCDCompressedNumerics --&gt; <seealso cref="BlockPackedWriter BlockPackedInts(blockSize=16k)"/></li>
-    ///     <li>Addresses --&gt; <seealso cref="MonotonicBlockPackedWriter MonotonicBlockPackedInts(blockSize=16k)"/></li>
-    ///     <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    ///   </ul>
-    ///   <p>SortedSet entries store the list of ordinals in their BinaryData as a
-    ///      sequences of increasing <seealso cref="DataOutput#writeVLong vLong"/>s, delta-encoded.</p>
-    /// </ol>
+    ///      index.</description></item>
+    ///   <item><description><a name="dvd" id="dvd"></a>
+    ///   <para>The DocValues data or .dvd file.</para>
+    ///   <para>For DocValues field, this stores the actual per-document data (the heavy-lifting)</para>
+    ///   <para>DocValues data (.dvd) --&gt; Header,&lt;NumericData | BinaryData | SortedData&gt;<sup>NumFields</sup>,Footer</para>
+    ///   <list type="bullet">
+    ///     <item><description>NumericData --&gt; DeltaCompressedNumerics | TableCompressedNumerics | GCDCompressedNumerics</description></item>
+    ///     <item><description>BinaryData --&gt;  Byte (<see cref="Store.DataOutput.WriteByte(byte)"/>) <sup>DataLength</sup>,Addresses</description></item>
+    ///     <item><description>SortedData --&gt; FST&lt;Int64&gt; (<see cref="Util.Fst.FST{T}"/>) </description></item>
+    ///     <item><description>DeltaCompressedNumerics --&gt; BlockPackedInts(blockSize=16k) (<see cref="Util.Packed.BlockPackedWriter"/>) </description></item>
+    ///     <item><description>TableCompressedNumerics --&gt; PackedInts (<see cref="Util.Packed.PackedInt32s"/>) </description></item>
+    ///     <item><description>GCDCompressedNumerics --&gt; BlockPackedInts(blockSize=16k) (<see cref="Util.Packed.BlockPackedWriter"/>) </description></item>
+    ///     <item><description>Addresses --&gt; MonotonicBlockPackedInts(blockSize=16k) (<see cref="Util.Packed.MonotonicBlockPackedWriter"/>) </description></item>
+    ///     <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(Store.IndexOutput)"/>) </description></item>
+    ///   </list>
+    ///   <para>SortedSet entries store the list of ordinals in their BinaryData as a
+    ///      sequences of increasing vLongs (<see cref="Store.DataOutput.WriteVInt64(long)"/>), delta-encoded.</para></description></item>
+    /// </list>
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     [DocValuesFormatName("Lucene45")] // LUCENENET specific - using DocValuesFormatName attribute to ensure the default name passed from subclasses is the same as this class name

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b27d10c3/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesProducer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesProducer.cs b/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesProducer.cs
index 025af22..300b1cf 100644
--- a/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesProducer.cs
+++ b/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesProducer.cs
@@ -49,7 +49,7 @@ namespace Lucene.Net.Codecs.Lucene45
     using TermsEnum = Lucene.Net.Index.TermsEnum;
 
     /// <summary>
-    /// reader for <seealso cref="Lucene45DocValuesFormat"/> </summary>
+    /// Reader for <see cref="Lucene45DocValuesFormat"/>. </summary>
     public class Lucene45DocValuesProducer : DocValuesProducer, IDisposable
     {
         private readonly IDictionary<int, NumericEntry> numerics;
@@ -68,7 +68,7 @@ namespace Lucene.Net.Codecs.Lucene45
         private readonly IDictionary<int, MonotonicBlockPackedReader> ordIndexInstances = new Dictionary<int, MonotonicBlockPackedReader>();
 
         /// <summary>
-        /// expert: instantiates a new reader </summary>
+        /// Expert: instantiates a new reader. </summary>
         protected internal Lucene45DocValuesProducer(SegmentReadState state, string dataCodec, string dataExtension, string metaCodec, string metaExtension)
         {
             string metaName = IndexFileNames.SegmentFileName(state.SegmentInfo.Name, state.SegmentSuffix, metaExtension);
@@ -502,8 +502,9 @@ namespace Lucene.Net.Codecs.Lucene45
         }
 
         /// <summary>
-        /// returns an address instance for variable-length binary values.
-        ///  @lucene.internal
+        /// Returns an address instance for variable-length binary values.
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
         protected internal virtual MonotonicBlockPackedReader GetAddressInstance(IndexInput data, FieldInfo field, BinaryEntry bytes)
         {
@@ -572,7 +573,8 @@ namespace Lucene.Net.Codecs.Lucene45
         }
 
         /// <summary>
-        /// returns an address instance for prefix-compressed binary values.
+        /// Returns an address instance for prefix-compressed binary values.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         protected internal virtual MonotonicBlockPackedReader GetIntervalInstance(IndexInput data, FieldInfo field, BinaryEntry bytes)
@@ -684,7 +686,8 @@ namespace Lucene.Net.Codecs.Lucene45
         }
 
         /// <summary>
-        /// returns an address instance for sortedset ordinal lists
+        /// Returns an address instance for sortedset ordinal lists.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         protected internal virtual MonotonicBlockPackedReader GetOrdIndexInstance(IndexInput data, FieldInfo field, NumericEntry entry)
@@ -895,7 +898,7 @@ namespace Lucene.Net.Codecs.Lucene45
         }
 
         /// <summary>
-        /// metadata entry for a numeric docvalues field </summary>
+        /// Metadata entry for a numeric docvalues field. </summary>
         protected internal class NumericEntry
         {
             internal NumericEntry()
@@ -903,28 +906,28 @@ namespace Lucene.Net.Codecs.Lucene45
             }
 
             /// <summary>
-            /// offset to the bitset representing docsWithField, or -1 if no documents have missing values </summary>
+            /// Offset to the bitset representing docsWithField, or -1 if no documents have missing values. </summary>
             internal long missingOffset;
 
             /// <summary>
-            /// offset to the actual numeric values </summary>
+            /// Offset to the actual numeric values. </summary>
             public long Offset { get; set; }
 
             internal int format;
 
             /// <summary>
-            /// packed ints version used to encode these numerics 
+            /// Packed <see cref="int"/>s version used to encode these numerics. 
             /// <para/>
             /// NOTE: This was packedIntsVersion (field) in Lucene
             /// </summary>
             public int PackedInt32sVersion { get; set; }
 
             /// <summary>
-            /// count of values written </summary>
+            /// Count of values written. </summary>
             public long Count { get; set; }
 
             /// <summary>
-            /// packed ints blocksize </summary>
+            /// Packed <see cref="int"/>s blocksize. </summary>
             public int BlockSize { get; set; }
 
             internal long minValue;
@@ -933,7 +936,7 @@ namespace Lucene.Net.Codecs.Lucene45
         }
 
         /// <summary>
-        /// metadata entry for a binary docvalues field </summary>
+        /// Metadata entry for a binary docvalues field. </summary>
         protected internal class BinaryEntry
         {
             internal BinaryEntry()
@@ -941,44 +944,44 @@ namespace Lucene.Net.Codecs.Lucene45
             }
 
             /// <summary>
-            /// offset to the bitset representing docsWithField, or -1 if no documents have missing values </summary>
+            /// Offset to the bitset representing docsWithField, or -1 if no documents have missing values. </summary>
             internal long missingOffset;
 
             /// <summary>
-            /// offset to the actual binary values </summary>
+            /// Offset to the actual binary values. </summary>
             internal long offset;
 
             internal int format;
 
             /// <summary>
-            /// count of values written </summary>
+            /// Count of values written. </summary>
             public long Count { get; set; }
 
             internal int minLength;
             internal int maxLength;
 
             /// <summary>
-            /// offset to the addressing data that maps a value to its slice of the byte[] </summary>
+            /// Offset to the addressing data that maps a value to its slice of the <see cref="T:byte[]"/>. </summary>
             public long AddressesOffset { get; set; }
 
             /// <summary>
-            /// interval of shared prefix chunks (when using prefix-compressed binary) </summary>
+            /// Interval of shared prefix chunks (when using prefix-compressed binary). </summary>
             public long AddressInterval { get; set; }
 
             /// <summary>
-            /// packed ints version used to encode addressing information 
+            /// Packed ints version used to encode addressing information.
             /// <para/>
-            /// NOTE: This was packedIntsVersion (field) in Lucene
+            /// NOTE: This was packedIntsVersion (field) in Lucene.
             /// </summary>
             public int PackedInt32sVersion { get; set; }
 
             /// <summary>
-            /// packed ints blocksize </summary>
+            /// Packed ints blocksize. </summary>
             public int BlockSize { get; set; }
         }
 
         /// <summary>
-        /// metadata entry for a sorted-set docvalues field </summary>
+        /// Metadata entry for a sorted-set docvalues field. </summary>
         protected internal class SortedSetEntry
         {
             internal SortedSetEntry()
@@ -990,7 +993,7 @@ namespace Lucene.Net.Codecs.Lucene45
 
         // internally we compose complex dv (sorted/sortedset) from other ones
         /// <summary>
-        /// NOTE: This was LongBinaryDocValues in Lucene
+        /// NOTE: This was LongBinaryDocValues in Lucene.
         /// </summary>
         internal abstract class Int64BinaryDocValues : BinaryDocValues
         {


[47/48] lucenenet git commit: Lucene.Net.Util.Packed: Fixed XML documentation comment warnings

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/PackedInts.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/PackedInts.cs b/src/Lucene.Net/Util/Packed/PackedInts.cs
index 49f6101..fcd2adc 100644
--- a/src/Lucene.Net/Util/Packed/PackedInts.cs
+++ b/src/Lucene.Net/Util/Packed/PackedInts.cs
@@ -34,8 +34,8 @@ namespace Lucene.Net.Util.Packed
     /// values are stored as packed ints, with each value
     /// consuming a fixed number of bits.
     /// <para/>
-    /// NOTE: This was PackedInts in Lucene
-    ///
+    /// NOTE: This was PackedInts in Lucene.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public class PackedInt32s
@@ -85,147 +85,6 @@ namespace Lucene.Net.Util.Packed
             }
         }
 
-        /// <summary>
-        /// A format to write packed ints.
-        ///
-        /// @lucene.internal
-        /// </summary>
-        //public enum Format
-        //{
-        /// <summary>
-        /// Compact format, all bits are written contiguously.
-        /// </summary>
-
-        /// <summary>
-        /// A format that may insert padding bits to improve encoding and decoding
-        /// speed. Since this format doesn't support all possible bits per value, you
-        /// should never use it directly, but rather use
-        /// <seealso cref="PackedInt32s#fastestFormatAndBits(int, int, float)"/> to find the
-        /// format that best suits your needs.
-        /// </summary>
-
-        /// <summary>
-        /// Get a format according to its ID.
-        /// </summary>
-        //	{
-        //	  for (Format format : Format.values())
-        //	  {
-        //		if (format.getId() == id)
-        //		{
-        //		  return format;
-        //		}
-        //	  }
-        //	  throw new IllegalArgumentException("Unknown format id: " + id);
-        //	}
-
-        //	{
-        //	  this.id = id;
-        //	}
-
-        /// <summary>
-        /// Returns the ID of the format.
-        /// </summary>
-
-        /// <summary>
-        /// Computes how many byte blocks are needed to store <code>values</code>
-        /// values of size <code>bitsPerValue</code>.
-        /// </summary>
-
-        /// <summary>
-        /// Computes how many long blocks are needed to store <code>values</code>
-        /// values of size <code>bitsPerValue</code>.
-        /// </summary>
-
-        /// <summary>
-        /// Tests whether the provided number of bits per value is supported by the
-        /// format.
-        /// </summary>
-
-        /// <summary>
-        /// Returns the overhead per value, in bits.
-        /// </summary>
-
-        /// <summary>
-        /// Returns the overhead ratio (<code>overhead per value / bits per value</code>).
-        /// </summary>
-        //}
-        /*	public static partial class EnumExtensionMethods
-            {
-                internal PACKED(this Format instance, 0)
-                {
-                  public long outerInstance.ByteCount(int packedIntsVersion, int valueCount, int bitsPerValue)
-                  {
-                    if (packedIntsVersion < VERSION_BYTE_ALIGNED)
-                    {
-                      return 8L * (long) Math.Ceiling((double) valueCount * bitsPerValue / 64);
-                    }
-                    else
-                    {
-                      return (long) Math.Ceiling((double) valueCount * bitsPerValue / 8);
-                    }
-                  }
-                },
-                outerInstance.PACKED_SINGLE_BLOCK(this Format instance, 1)
-                {
-                  public int outerInstance.LongCount(int packedIntsVersion, int valueCount, int bitsPerValue)
-                  {
-                    int valuesPerBlock = 64 / bitsPerValue;
-                    return (int) Math.Ceiling((double) valueCount / valuesPerBlock);
-                  }
-
-                  public bool outerInstance.IsSupported(int bitsPerValue)
-                  {
-                    return Packed64SingleBlock.IsSupported(bitsPerValue);
-                  }
-
-                  public float outerInstance.OverheadPerValue(int bitsPerValue)
-                  {
-                    Debug.Assert(outerInstance.IsSupported(bitsPerValue));
-                    int valuesPerBlock = 64 / bitsPerValue;
-                    int overhead = 64 % bitsPerValue;
-                    return (float) overhead / valuesPerBlock;
-                  }
-                }
-                public static int outerInstance.Id //Tangible note: extension parameterthis Format instance
-                {
-                  return outerInstance.Id_Renamed;
-                }
-                public static long outerInstance.ByteCount(this Format instance, int packedIntsVersion, int valueCount, int bitsPerValue)
-                {
-                  Debug.Assert(bitsPerValue >= 0 && bitsPerValue <= 64, bitsPerValue);
-                  // assume long-aligned
-                  return 8L * outerInstance.LongCount(packedIntsVersion, valueCount, bitsPerValue);
-                }
-                public static int outerInstance.LongCount(this Format instance, int packedIntsVersion, int valueCount, int bitsPerValue)
-                {
-                  Debug.Assert(bitsPerValue >= 0 && bitsPerValue <= 64, bitsPerValue);
-                  long byteCount = outerInstance.ByteCount(packedIntsVersion, valueCount, bitsPerValue);
-                  Debug.Assert(byteCount < 8L * int.MaxValue);
-                  if ((byteCount % 8) == 0)
-                  {
-                    return (int)(byteCount / 8);
-                  }
-                  else
-                  {
-                    return (int)(byteCount / 8 + 1);
-                  }
-                }
-                public static bool outerInstance.IsSupported(this Format instance, int bitsPerValue)
-                {
-                  return bitsPerValue >= 1 && bitsPerValue <= 64;
-                }
-                public static float outerInstance.OverheadPerValue(this Format instance, int bitsPerValue)
-                {
-                  Debug.Assert(outerInstance.IsSupported(bitsPerValue));
-                  return 0f;
-                }
-                public static final float outerInstance.OverheadRatio(this Format instance, int bitsPerValue)
-                {
-                  Debug.Assert(outerInstance.IsSupported(bitsPerValue));
-                  return outerInstance.OverheadPerValue(bitsPerValue) / bitsPerValue;
-                }
-            }*/
-
         private sealed class PackedFormat : Format
         {
             public PackedFormat()
@@ -233,6 +92,10 @@ namespace Lucene.Net.Util.Packed
             {
             }
 
+            /// <summary>
+            /// Computes how many <see cref="byte"/> blocks are needed to store <paramref name="valueCount"/>
+            /// values of size <paramref name="bitsPerValue"/>.
+            /// </summary>
             public override long ByteCount(int packedIntsVersion, int valueCount, int bitsPerValue)
             {
                 if (packedIntsVersion < VERSION_BYTE_ALIGNED)
@@ -251,7 +114,10 @@ namespace Lucene.Net.Util.Packed
             }
 
             /// <summary>
-            /// NOTE: This was longCount() in Lucene
+            /// Computes how many <see cref="long"/> blocks are needed to store <paramref name="valueCount"/>
+            /// values of size <paramref name="bitsPerValue"/>.
+            /// <para/>
+            /// NOTE: This was longCount() in Lucene.
             /// </summary>
             public override int Int64Count(int packedIntsVersion, int valueCount, int bitsPerValue)
             {
@@ -259,11 +125,18 @@ namespace Lucene.Net.Util.Packed
                 return (int)Math.Ceiling((double)valueCount / valuesPerBlock);
             }
 
+            /// <summary>
+            /// Tests whether the provided number of bits per value is supported by the
+            /// format.
+            /// </summary>
             public override bool IsSupported(int bitsPerValue)
             {
                 return Packed64SingleBlock.IsSupported(bitsPerValue);
             }
 
+            /// <summary>
+            /// Returns the overhead per value, in bits.
+            /// </summary>
             public override float OverheadPerValue(int bitsPerValue)
             {
                 int valuesPerBlock = 64 / bitsPerValue;
@@ -273,19 +146,37 @@ namespace Lucene.Net.Util.Packed
             }
         }
 
+        /// <summary>
+        /// A format to write packed <see cref="int"/>s.
+        /// <para/>
+        /// @lucene.internal
+        /// </summary>
         public class Format
         {
+            /// <summary>
+            /// Compact format, all bits are written contiguously.
+            /// </summary>
             public static readonly Format PACKED = new PackedFormat();
 
+            /// <summary>
+            /// A format that may insert padding bits to improve encoding and decoding
+            /// speed. Since this format doesn't support all possible bits per value, you
+            /// should never use it directly, but rather use
+            /// <see cref="PackedInt32s.FastestFormatAndBits(int, int, float)"/> to find the
+            /// format that best suits your needs.
+            /// </summary>
             public static readonly Format PACKED_SINGLE_BLOCK = new PackedSingleBlockFormat();
 
             private static readonly Format[] values = new Format[] { PACKED, PACKED_SINGLE_BLOCK };
 
-            public static IEnumerable<Format> Values()
+            public static IEnumerable<Format> Values() // LUCENENET TODO: API - make property
             {
                 return values;
             }
 
+            /// <summary>
+            /// Get a format according to its ID.
+            /// </summary>
             public static Format ById(int id)
             {
                 foreach (Format format in Values())
@@ -305,11 +196,18 @@ namespace Lucene.Net.Util.Packed
 
             private int id; // LUCENENET specific - made private, since it is already exposed through public property
 
+            /// <summary>
+            /// Returns the ID of the format.
+            /// </summary>
             public int Id
             {
                 get { return id; }
             }
 
+            /// <summary>
+            /// Computes how many <see cref="byte"/> blocks are needed to store <paramref name="valueCount"/>
+            /// values of size <paramref name="bitsPerValue"/>.
+            /// </summary>
             public virtual long ByteCount(int packedIntsVersion, int valueCount, int bitsPerValue)
             {
                 // assume long-aligned
@@ -317,7 +215,10 @@ namespace Lucene.Net.Util.Packed
             }
 
             /// <summary>
-            /// NOTE: This was longCount() in Lucene
+            /// Computes how many <see cref="long"/> blocks are needed to store <paramref name="valueCount"/>
+            /// values of size <paramref name="bitsPerValue"/>.
+            /// <para/>
+            /// NOTE: This was longCount() in Lucene.
             /// </summary>
             public virtual int Int64Count(int packedIntsVersion, int valueCount, int bitsPerValue)
             {
@@ -333,17 +234,27 @@ namespace Lucene.Net.Util.Packed
                 }
             }
 
+            /// <summary>
+            /// Tests whether the provided number of bits per value is supported by the
+            /// format.
+            /// </summary>
             public virtual bool IsSupported(int bitsPerValue)
             {
                 return bitsPerValue >= 1 && bitsPerValue <= 64;
             }
 
+            /// <summary>
+            /// Returns the overhead per value, in bits.
+            /// </summary>
             public virtual float OverheadPerValue(int bitsPerValue)
             {
                 Debug.Assert(IsSupported(bitsPerValue));
                 return 0f;
             }
 
+            /// <summary>
+            /// Returns the overhead ratio (<c>overhead per value / bits per value</c>).
+            /// </summary>
             public virtual float OverheadRatio(int bitsPerValue)
             {
                 Debug.Assert(IsSupported(bitsPerValue));
@@ -372,17 +283,17 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Try to find the <seealso cref="Format"/> and number of bits per value that would
+        /// Try to find the <see cref="Format"/> and number of bits per value that would
         /// restore from disk the fastest reader whose overhead is less than
-        /// <code>acceptableOverheadRatio</code>.
-        /// </p><p>
-        /// The <code>acceptableOverheadRatio</code> parameter makes sense for
-        /// random-access <seealso cref="Reader"/>s. In case you only plan to perform
+        /// <paramref name="acceptableOverheadRatio"/>.
+        /// <para/>
+        /// The <paramref name="acceptableOverheadRatio"/> parameter makes sense for
+        /// random-access <see cref="Reader"/>s. In case you only plan to perform
         /// sequential access on this stream later on, you should probably use
-        /// <seealso cref="PackedInt32s#COMPACT"/>.
-        /// </p><p>
+        /// <see cref="PackedInt32s.COMPACT"/>.
+        /// <para/>
         /// If you don't know how many values you are going to write, use
-        /// <code>valueCount = -1</code>.
+        /// <c><paramref name="valueCount"/> = -1</c>.
         /// </summary>
         public static FormatAndBits FastestFormatAndBits(int valueCount, int bitsPerValue, float acceptableOverheadRatio)
         {
@@ -455,79 +366,79 @@ namespace Lucene.Net.Util.Packed
         public interface IDecoder
         {
             /// <summary>
-            /// The minimum number of long blocks to encode in a single iteration, when
+            /// The minimum number of <see cref="long"/> blocks to decode in a single iteration, when
             /// using long encoding.
             /// <para/>
-            /// NOTE: This was longBlockCount() in Lucene
+            /// NOTE: This was longBlockCount() in Lucene.
             /// </summary>
             int Int64BlockCount { get; }
 
             /// <summary>
-            /// The number of values that can be stored in <seealso cref="#longBlockCount()"/> long
+            /// The number of values that can be stored in <see cref="Int64BlockCount"/> <see cref="long"/>
             /// blocks.
             /// <para/>
-            /// NOTE: This was longValueCount() in Lucene
+            /// NOTE: This was longValueCount() in Lucene.
             /// </summary>
             int Int64ValueCount { get; }
 
             /// <summary>
-            /// The minimum number of byte blocks to encode in a single iteration, when
+            /// The minimum number of <see cref="byte"/> blocks to decode in a single iteration, when
             /// using byte encoding.
             /// </summary>
             int ByteBlockCount { get; }
 
             /// <summary>
-            /// The number of values that can be stored in <seealso cref="#byteBlockCount()"/> byte
+            /// The number of values that can be stored in <see cref="ByteBlockCount"/> <see cref="byte"/>
             /// blocks.
             /// </summary>
             int ByteValueCount { get; }
 
             /// <summary>
-            /// Read <code>iterations * blockCount()</code> blocks from <code>blocks</code>,
-            /// decode them and write <code>iterations * valueCount()</code> values into
-            /// <code>values</code>.
+            /// Read <c>iterations * BlockCount</c> blocks from <paramref name="blocks"/>,
+            /// decode them and write <c>iterations * ValueCount</c> values into
+            /// <paramref name="values"/>.
             /// </summary>
-            /// <param name="blocks">       the long blocks that hold packed integer values </param>
-            /// <param name="blocksOffset"> the offset where to start reading blocks </param>
-            /// <param name="values">       the values buffer </param>
-            /// <param name="valuesOffset"> the offset where to start writing values </param>
-            /// <param name="iterations">   controls how much data to decode </param>
+            /// <param name="blocks">       The long blocks that hold packed integer values. </param>
+            /// <param name="blocksOffset"> The offset where to start reading blocks. </param>
+            /// <param name="values">       The values buffer. </param>
+            /// <param name="valuesOffset"> The offset where to start writing values. </param>
+            /// <param name="iterations">   Controls how much data to decode. </param>
             void Decode(long[] blocks, int blocksOffset, long[] values, int valuesOffset, int iterations);
 
             /// <summary>
-            /// Read <code>8 * iterations * blockCount()</code> blocks from <code>blocks</code>,
-            /// decode them and write <code>iterations * valueCount()</code> values into
-            /// <code>values</code>.
+            /// Read <c>8 * iterations * BlockCount</c> blocks from <paramref name="blocks"/>,
+            /// decode them and write <c>iterations * ValueCount</c> values into
+            /// <paramref name="values"/>.
             /// </summary>
-            /// <param name="blocks">       the long blocks that hold packed integer values </param>
-            /// <param name="blocksOffset"> the offset where to start reading blocks </param>
-            /// <param name="values">       the values buffer </param>
-            /// <param name="valuesOffset"> the offset where to start writing values </param>
-            /// <param name="iterations">   controls how much data to decode </param>
+            /// <param name="blocks">       The long blocks that hold packed integer values. </param>
+            /// <param name="blocksOffset"> The offset where to start reading blocks. </param>
+            /// <param name="values">       The values buffer. </param>
+            /// <param name="valuesOffset"> The offset where to start writing values. </param>
+            /// <param name="iterations">   Controls how much data to decode. </param>
             void Decode(byte[] blocks, int blocksOffset, long[] values, int valuesOffset, int iterations);
 
             /// <summary>
-            /// Read <code>iterations * blockCount()</code> blocks from <code>blocks</code>,
-            /// decode them and write <code>iterations * valueCount()</code> values into
-            /// <code>values</code>.
+            /// Read <c>iterations * BlockCount</c> blocks from <paramref name="blocks"/>,
+            /// decode them and write <c>iterations * ValueCount</c> values into
+            /// <paramref name="values"/>.
             /// </summary>
-            /// <param name="blocks">       the long blocks that hold packed integer values </param>
-            /// <param name="blocksOffset"> the offset where to start reading blocks </param>
-            /// <param name="values">       the values buffer </param>
-            /// <param name="valuesOffset"> the offset where to start writing values </param>
-            /// <param name="iterations">   controls how much data to decode </param>
+            /// <param name="blocks">       The long blocks that hold packed integer values. </param>
+            /// <param name="blocksOffset"> The offset where to start reading blocks. </param>
+            /// <param name="values">       The values buffer. </param>
+            /// <param name="valuesOffset"> The offset where to start writing values. </param>
+            /// <param name="iterations">   Controls how much data to decode. </param>
             void Decode(long[] blocks, int blocksOffset, int[] values, int valuesOffset, int iterations);
 
             /// <summary>
-            /// Read <code>8 * iterations * blockCount()</code> blocks from <code>blocks</code>,
-            /// decode them and write <code>iterations * valueCount()</code> values into
-            /// <code>values</code>.
+            /// Read <c>8 * iterations * BlockCount</c> blocks from <paramref name="blocks"/>,
+            /// decode them and write <c>iterations * ValueCount</c> values into
+            /// <paramref name="values"/>.
             /// </summary>
-            /// <param name="blocks">       the long blocks that hold packed integer values </param>
-            /// <param name="blocksOffset"> the offset where to start reading blocks </param>
-            /// <param name="values">       the values buffer </param>
-            /// <param name="valuesOffset"> the offset where to start writing values </param>
-            /// <param name="iterations">   controls how much data to decode </param>
+            /// <param name="blocks">       The long blocks that hold packed integer values. </param>
+            /// <param name="blocksOffset"> The offset where to start reading blocks. </param>
+            /// <param name="values">       The values buffer. </param>
+            /// <param name="valuesOffset"> The offset where to start writing values. </param>
+            /// <param name="iterations">   Controls how much data to decode. </param>
             void Decode(byte[] blocks, int blocksOffset, int[] values, int valuesOffset, int iterations);
         }
 
@@ -545,7 +456,7 @@ namespace Lucene.Net.Util.Packed
             int Int64BlockCount { get; }
 
             /// <summary>
-            /// The number of values that can be stored in <seealso cref="#longBlockCount()"/> long
+            /// The number of values that can be stored in <see cref="Int64BlockCount"/> <see cref="long"/>
             /// blocks.
             /// <para/>
             /// NOTE: This was longValueCount() in Lucene
@@ -559,69 +470,70 @@ namespace Lucene.Net.Util.Packed
             int ByteBlockCount { get; }
 
             /// <summary>
-            /// The number of values that can be stored in <seealso cref="#byteBlockCount()"/> byte
+            /// The number of values that can be stored in <see cref="ByteBlockCount"/> <see cref="byte"/>
             /// blocks.
             /// </summary>
             int ByteValueCount { get; }
 
             /// <summary>
-            /// Read <code>iterations * valueCount()</code> values from <code>values</code>,
-            /// encode them and write <code>iterations * blockCount()</code> blocks into
-            /// <code>blocks</code>.
+            /// Read <c>iterations * ValueCount</c> values from <paramref name="values"/>,
+            /// encode them and write <c>iterations * BlockCount</c> blocks into
+            /// <paramref name="blocks"/>.
             /// </summary>
-            /// <param name="blocks">       the long blocks that hold packed integer values </param>
-            /// <param name="blocksOffset"> the offset where to start writing blocks </param>
-            /// <param name="values">       the values buffer </param>
-            /// <param name="valuesOffset"> the offset where to start reading values </param>
-            /// <param name="iterations">   controls how much data to encode </param>
+            /// <param name="blocks">       The long blocks that hold packed integer values. </param>
+            /// <param name="blocksOffset"> The offset where to start writing blocks. </param>
+            /// <param name="values">       The values buffer. </param>
+            /// <param name="valuesOffset"> The offset where to start reading values. </param>
+            /// <param name="iterations">   Controls how much data to encode. </param>
             void Encode(long[] values, int valuesOffset, long[] blocks, int blocksOffset, int iterations);
 
             /// <summary>
-            /// Read <code>iterations * valueCount()</code> values from <code>values</code>,
-            /// encode them and write <code>8 * iterations * blockCount()</code> blocks into
-            /// <code>blocks</code>.
+            /// Read <c>iterations * ValueCount</c> values from <paramref name="values"/>,
+            /// encode them and write <c>8 * iterations * BlockCount</c> blocks into
+            /// <paramref name="blocks"/>.
             /// </summary>
-            /// <param name="blocks">       the long blocks that hold packed integer values </param>
-            /// <param name="blocksOffset"> the offset where to start writing blocks </param>
-            /// <param name="values">       the values buffer </param>
-            /// <param name="valuesOffset"> the offset where to start reading values </param>
-            /// <param name="iterations">   controls how much data to encode </param>
+            /// <param name="blocks">       The long blocks that hold packed integer values. </param>
+            /// <param name="blocksOffset"> The offset where to start writing blocks. </param>
+            /// <param name="values">       The values buffer. </param>
+            /// <param name="valuesOffset"> The offset where to start reading values. </param>
+            /// <param name="iterations">   Controls how much data to encode. </param>
             void Encode(long[] values, int valuesOffset, byte[] blocks, int blocksOffset, int iterations);
 
             /// <summary>
-            /// Read <code>iterations * valueCount()</code> values from <code>values</code>,
-            /// encode them and write <code>iterations * blockCount()</code> blocks into
-            /// <code>blocks</code>.
+            /// Read <c>iterations * ValueCount</c> values from <paramref name="values"/>,
+            /// encode them and write <c>iterations * BlockCount</c> blocks into
+            /// <paramref name="blocks"/>.
             /// </summary>
-            /// <param name="blocks">       the long blocks that hold packed integer values </param>
-            /// <param name="blocksOffset"> the offset where to start writing blocks </param>
-            /// <param name="values">       the values buffer </param>
-            /// <param name="valuesOffset"> the offset where to start reading values </param>
-            /// <param name="iterations">   controls how much data to encode </param>
+            /// <param name="blocks">       The long blocks that hold packed integer values. </param>
+            /// <param name="blocksOffset"> The offset where to start writing blocks. </param>
+            /// <param name="values">       The values buffer. </param>
+            /// <param name="valuesOffset"> The offset where to start reading values. </param>
+            /// <param name="iterations">   Controls how much data to encode. </param>
             void Encode(int[] values, int valuesOffset, long[] blocks, int blocksOffset, int iterations);
 
             /// <summary>
-            /// Read <code>iterations * valueCount()</code> values from <code>values</code>,
-            /// encode them and write <code>8 * iterations * blockCount()</code> blocks into
-            /// <code>blocks</code>.
+            /// Read <c>iterations * ValueCount</c> values from <paramref name="values"/>,
+            /// encode them and write <c>8 * iterations * BlockCount</c> blocks into
+            /// <paramref name="blocks"/>.
             /// </summary>
-            /// <param name="blocks">       the long blocks that hold packed integer values </param>
-            /// <param name="blocksOffset"> the offset where to start writing blocks </param>
-            /// <param name="values">       the values buffer </param>
-            /// <param name="valuesOffset"> the offset where to start reading values </param>
-            /// <param name="iterations">   controls how much data to encode </param>
+            /// <param name="blocks">       The long blocks that hold packed integer values. </param>
+            /// <param name="blocksOffset"> The offset where to start writing blocks. </param>
+            /// <param name="values">       The values buffer. </param>
+            /// <param name="valuesOffset"> The offset where to start reading values. </param>
+            /// <param name="iterations">   Controls how much data to encode. </param>
             void Encode(int[] values, int valuesOffset, byte[] blocks, int blocksOffset, int iterations);
         }
 
         /// <summary>
         /// A read-only random access array of positive integers.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public abstract class Reader : NumericDocValues
         {
             /// <summary>
-            /// Bulk get: read at least one and at most <code>len</code> longs starting
-            /// from <code>index</code> into <code>arr[off:off+len]</code> and return
+            /// Bulk get: read at least one and at most <paramref name="len"/> longs starting
+            /// from <paramref name="index"/> into <c>arr[off:off+len]</c> and return
             /// the actual number of values that have been read.
             /// </summary>
             public virtual int Get(int index, long[] arr, int off, int len)
@@ -638,14 +550,15 @@ namespace Lucene.Net.Util.Packed
                 return gets;
             }
 
-            /// <returns> the number of bits used to store any given value.
+            /// <returns> The number of bits used to store any given value.
             ///         Note: this does not imply that memory usage is
-            ///         {@code bitsPerValue * #values} as implementations are free to
+            ///         <c>bitsPerValue * Count</c> as implementations are free to
             ///         use non-space-optimal packing of bits. </returns>
             public abstract int BitsPerValue { get; }
 
             /// <summary>
-            /// the number of values. 
+            /// The number of values.
+            /// <para/>
             /// NOTE: This was size() in Lucene.
             /// </summary>
             public abstract int Count { get; }
@@ -657,12 +570,12 @@ namespace Lucene.Net.Util.Packed
 
             /// <summary>
             /// Expert: if the bit-width of this reader matches one of
-            /// java's native types, returns the underlying array
+            /// .NET's native types, returns the underlying array
             /// (ie, byte[], short[], int[], long[]); else, returns
-            /// null.  Note that when accessing the array you must
+            /// <c>null</c>.  Note that when accessing the array you must
             /// upgrade the type (bitwise AND with all ones), to
             /// interpret the full value as unsigned.  Ie,
-            /// bytes[idx]&0xFF, shorts[idx]&0xFFFF, etc.
+            /// bytes[idx]&amp;0xFF, shorts[idx]&amp;0xFFFF, etc.
             /// </summary>
             public virtual object GetArray()
             {
@@ -671,10 +584,10 @@ namespace Lucene.Net.Util.Packed
             }
 
             /// <summary>
-            /// Returns true if this implementation is backed by a
-            /// native java array.
+            /// Returns <c>true</c> if this implementation is backed by a
+            /// native .NET array.
             /// </summary>
-            /// <seealso cref= #getArray </seealso>
+            /// <seealso cref="GetArray"/>
             public virtual bool HasArray
             {
                 get { return false; }
@@ -682,32 +595,33 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Run-once iterator interface, to decode previously saved PackedInts.
+        /// Run-once iterator interface, to decode previously saved <see cref="PackedInt32s"/>.
         /// </summary>
         public interface IReaderIterator
         {
             /// <summary>
-            /// Returns next value </summary>
+            /// Returns next value. </summary>
             long Next();
 
             /// <summary>
-            /// Returns at least 1 and at most <code>count</code> next values,
-            /// the returned ref MUST NOT be modified
+            /// Returns at least 1 and at most <paramref name="count"/> next values,
+            /// the returned ref MUST NOT be modified.
             /// </summary>
             Int64sRef Next(int count);
 
             /// <summary>
-            /// Returns number of bits per value </summary>
+            /// Returns number of bits per value. </summary>
             int BitsPerValue { get; }
 
             /// <summary>
             /// Returns number of values.
+            /// <para/>
             /// NOTE: This was size() in Lucene.
             /// </summary>
             int Count { get; }
 
             /// <summary>
-            /// Returns the current position </summary>
+            /// Returns the current position. </summary>
             int Ord { get; }
         }
 
@@ -755,20 +669,21 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// A packed integer array that can be modified.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public abstract class Mutable : Reader
         {
             /// <summary>
             /// Set the value at the given index in the array. </summary>
-            /// <param name="index"> where the value should be positioned. </param>
-            /// <param name="value"> a value conforming to the constraints set by the array. </param>
+            /// <param name="index"> Where the value should be positioned. </param>
+            /// <param name="value"> A value conforming to the constraints set by the array. </param>
             public abstract void Set(int index, long value);
 
             /// <summary>
-            /// Bulk set: set at least one and at most <code>len</code> longs starting
-            /// at <code>off</code> in <code>arr</code> into this mutable, starting at
-            /// <code>index</code>. Returns the actual number of values that have been
+            /// Bulk set: set at least one and at most <paramref name="len"/> longs starting
+            /// at <paramref name="off"/> in <paramref name="arr"/> into this mutable, starting at
+            /// <paramref name="index"/>. Returns the actual number of values that have been
             /// set.
             /// </summary>
             public virtual int Set(int index, long[] arr, int off, int len)
@@ -786,8 +701,8 @@ namespace Lucene.Net.Util.Packed
             }
 
             /// <summary>
-            /// Fill the mutable from <code>fromIndex</code> (inclusive) to
-            /// <code>toIndex</code> (exclusive) with <code>val</code>.
+            /// Fill the mutable from <paramref name="fromIndex"/> (inclusive) to
+            /// <paramref name="toIndex"/> (exclusive) with <paramref name="val"/>.
             /// </summary>
             public virtual void Fill(int fromIndex, int toIndex, long val)
             {
@@ -808,7 +723,7 @@ namespace Lucene.Net.Util.Packed
             }
 
             /// <summary>
-            /// Save this mutable into <code>out</code>. Instantiating a reader from
+            /// Save this mutable into <paramref name="out"/>. Instantiating a reader from
             /// the generated data will return a reader with the same number of bits
             /// per value.
             /// </summary>
@@ -835,7 +750,8 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// A simple base for Readers that keeps track of valueCount and bitsPerValue.
+        /// A simple base for <see cref="Reader"/>s that keeps track of valueCount and bitsPerValue.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         internal abstract class ReaderImpl : Reader
@@ -893,7 +809,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// A <seealso cref="Reader"/> which has all its values equal to 0 (bitsPerValue = 0). </summary>
+        /// A <see cref="Reader"/> which has all its values equal to 0 (bitsPerValue = 0). </summary>
         public sealed class NullReader : Reader
         {
             private readonly int valueCount;
@@ -940,6 +856,7 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// A write-once Writer.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public abstract class Writer
@@ -993,12 +910,12 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Get a <seealso cref="IDecoder"/>.
+        /// Get a <see cref="IDecoder"/>.
         /// </summary>
-        /// <param name="format">         the format used to store packed ints </param>
-        /// <param name="version">        the compatibility version </param>
-        /// <param name="bitsPerValue">   the number of bits per value </param>
-        /// <returns> a decoder </returns>
+        /// <param name="format">         The format used to store packed <see cref="int"/>s. </param>
+        /// <param name="version">        The compatibility version. </param>
+        /// <param name="bitsPerValue">   The number of bits per value. </param>
+        /// <returns> A decoder. </returns>
         public static IDecoder GetDecoder(Format format, int version, int bitsPerValue)
         {
             CheckVersion(version);
@@ -1006,12 +923,12 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Get an <seealso cref="IEncoder"/>.
+        /// Get an <see cref="IEncoder"/>.
         /// </summary>
-        /// <param name="format">         the format used to store packed ints </param>
-        /// <param name="version">        the compatibility version </param>
-        /// <param name="bitsPerValue">   the number of bits per value </param>
-        /// <returns> an encoder </returns>
+        /// <param name="format">         The format used to store packed <see cref="int"/>s. </param>
+        /// <param name="version">        The compatibility version. </param>
+        /// <param name="bitsPerValue">   The number of bits per value. </param>
+        /// <returns> An encoder. </returns>
         public static IEncoder GetEncoder(Format format, int version, int bitsPerValue)
         {
             CheckVersion(version);
@@ -1019,20 +936,21 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Expert: Restore a <seealso cref="Reader"/> from a stream without reading metadata at
-        /// the beginning of the stream. this method is useful to restore data from
+        /// Expert: Restore a <see cref="Reader"/> from a stream without reading metadata at
+        /// the beginning of the stream. This method is useful to restore data from
         /// streams which have been created using
-        /// <seealso cref="PackedInt32s#getWriterNoHeader(DataOutput, Format, int, int, int)"/>.
+        /// <see cref="PackedInt32s.GetWriterNoHeader(DataOutput, Format, int, int, int)"/>.
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
-        /// <param name="in">           the stream to read data from, positioned at the beginning of the packed values </param>
-        /// <param name="format">       the format used to serialize </param>
-        /// <param name="version">      the version used to serialize the data </param>
-        /// <param name="valueCount">   how many values the stream holds </param>
-        /// <param name="bitsPerValue"> the number of bits per value </param>
-        /// <returns>             a Reader </returns>
-        /// <exception cref="IOException"> If there is a low-level I/O error </exception>
-        /// <seealso cref= PackedInt32s#getWriterNoHeader(DataOutput, Format, int, int, int)
-        /// @lucene.internal </seealso>
+        /// <param name="in">           The stream to read data from, positioned at the beginning of the packed values. </param>
+        /// <param name="format">       The format used to serialize. </param>
+        /// <param name="version">      The version used to serialize the data. </param>
+        /// <param name="valueCount">   How many values the stream holds. </param>
+        /// <param name="bitsPerValue"> The number of bits per value. </param>
+        /// <returns>             A <see cref="Reader"/>. </returns>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error. </exception>
+        /// <seealso cref="PackedInt32s.GetWriterNoHeader(DataOutput, Format, int, int, int)"/>
         public static Reader GetReaderNoHeader(DataInput @in, Format format, int version, int valueCount, int bitsPerValue)
         {
             CheckVersion(version);
@@ -1080,28 +998,30 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Expert: Restore a <seealso cref="Reader"/> from a stream without reading metadata at
+        /// Expert: Restore a <see cref="Reader"/> from a stream without reading metadata at
         /// the beginning of the stream. this method is useful to restore data when
-        /// metadata has been previously read using <seealso cref="#readHeader(DataInput)"/>.
+        /// metadata has been previously read using <see cref="ReadHeader(DataInput)"/>.
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
-        /// <param name="in">           the stream to read data from, positioned at the beginning of the packed values </param>
-        /// <param name="header">       metadata result from <code>readHeader()</code> </param>
-        /// <returns>             a Reader </returns>
-        /// <exception cref="IOException"> If there is a low-level I/O error </exception>
-        /// <seealso cref= #readHeader(DataInput)
-        /// @lucene.internal </seealso>
+        /// <param name="in">           The stream to read data from, positioned at the beginning of the packed values. </param>
+        /// <param name="header">       Metadata result from <see cref="ReadHeader(DataInput)"/>. </param>
+        /// <returns>             A <see cref="Reader"/>. </returns>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error. </exception>
+        /// <seealso cref="ReadHeader(DataInput)"/>
         public static Reader GetReaderNoHeader(DataInput @in, Header header)
         {
             return GetReaderNoHeader(@in, header.format, header.version, header.valueCount, header.bitsPerValue);
         }
 
         /// <summary>
-        /// Restore a <seealso cref="Reader"/> from a stream.
+        /// Restore a <see cref="Reader"/> from a stream.
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
-        /// <param name="in">           the stream to read data from </param>
-        /// <returns>             a Reader </returns>
-        /// <exception cref="IOException"> If there is a low-level I/O error
-        /// @lucene.internal </exception>
+        /// <param name="in">           The stream to read data from. </param>
+        /// <returns>             A <see cref="Reader"/>. </returns>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error. </exception>
         public static Reader GetReader(DataInput @in)
         {
             int version = CodecUtil.CheckHeader(@in, CODEC_NAME, VERSION_START, VERSION_CURRENT);
@@ -1114,20 +1034,21 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Expert: Restore a <seealso cref="IReaderIterator"/> from a stream without reading
-        /// metadata at the beginning of the stream. this method is useful to restore
+        /// Expert: Restore a <see cref="IReaderIterator"/> from a stream without reading
+        /// metadata at the beginning of the stream. This method is useful to restore
         /// data from streams which have been created using
-        /// <seealso cref="PackedInt32s#getWriterNoHeader(DataOutput, Format, int, int, int)"/>.
+        /// <see cref="PackedInt32s.GetWriterNoHeader(DataOutput, Format, int, int, int)"/>.
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
-        /// <param name="in">           the stream to read data from, positioned at the beginning of the packed values </param>
-        /// <param name="format">       the format used to serialize </param>
-        /// <param name="version">      the version used to serialize the data </param>
-        /// <param name="valueCount">   how many values the stream holds </param>
-        /// <param name="bitsPerValue"> the number of bits per value </param>
-        /// <param name="mem">          how much memory the iterator is allowed to use to read-ahead (likely to speed up iteration) </param>
-        /// <returns>             a ReaderIterator </returns>
-        /// <seealso cref= PackedInt32s#getWriterNoHeader(DataOutput, Format, int, int, int)
-        /// @lucene.internal </seealso>
+        /// <param name="in">           The stream to read data from, positioned at the beginning of the packed values. </param>
+        /// <param name="format">       The format used to serialize. </param>
+        /// <param name="version">      The version used to serialize the data. </param>
+        /// <param name="valueCount">   How many values the stream holds. </param>
+        /// <param name="bitsPerValue"> The number of bits per value. </param>
+        /// <param name="mem">          How much memory the iterator is allowed to use to read-ahead (likely to speed up iteration). </param>
+        /// <returns>             A <see cref="IReaderIterator"/>. </returns>
+        /// <seealso cref="PackedInt32s.GetWriterNoHeader(DataOutput, Format, int, int, int)"/>
         public static IReaderIterator GetReaderIteratorNoHeader(DataInput @in, Format format, int version, int valueCount, int bitsPerValue, int mem)
         {
             CheckVersion(version);
@@ -1135,12 +1056,14 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Retrieve PackedInts as a <seealso cref="IReaderIterator"/> </summary>
-        /// <param name="in"> positioned at the beginning of a stored packed int structure. </param>
-        /// <param name="mem"> how much memory the iterator is allowed to use to read-ahead (likely to speed up iteration) </param>
-        /// <returns> an iterator to access the values </returns>
-        /// <exception cref="IOException"> if the structure could not be retrieved.
-        /// @lucene.internal </exception>
+        /// Retrieve <see cref="PackedInt32s"/> as an <see cref="IReaderIterator"/>.
+        /// <para/>
+        /// @lucene.internal
+        /// </summary>
+        /// <param name="in"> Positioned at the beginning of a stored packed int structure. </param>
+        /// <param name="mem"> How much memory the iterator is allowed to use to read-ahead (likely to speed up iteration). </param>
+        /// <returns> An iterator to access the values. </returns>
+        /// <exception cref="System.IO.IOException"> If the structure could not be retrieved. </exception>
         public static IReaderIterator GetReaderIterator(DataInput @in, int mem)
         {
             int version = CodecUtil.CheckHeader(@in, CODEC_NAME, VERSION_START, VERSION_CURRENT);
@@ -1152,21 +1075,22 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Expert: Construct a direct <seealso cref="Reader"/> from a stream without reading
-        /// metadata at the beginning of the stream. this method is useful to restore
+        /// Expert: Construct a direct <see cref="Reader"/> from a stream without reading
+        /// metadata at the beginning of the stream. This method is useful to restore
         /// data from streams which have been created using
-        /// <seealso cref="PackedInt32s#getWriterNoHeader(DataOutput, Format, int, int, int)"/>.
-        /// </p><p>
+        /// <see cref="PackedInt32s.GetWriterNoHeader(DataOutput, Format, int, int, int)"/>.
+        /// <para/>
         /// The returned reader will have very little memory overhead, but every call
-        /// to <seealso cref="Reader#get(int)"/> is likely to perform a disk seek.
+        /// to <see cref="NumericDocValues.Get(int)"/> is likely to perform a disk seek.
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
-        /// <param name="in">           the stream to read data from </param>
-        /// <param name="format">       the format used to serialize </param>
-        /// <param name="version">      the version used to serialize the data </param>
-        /// <param name="valueCount">   how many values the stream holds </param>
-        /// <param name="bitsPerValue"> the number of bits per value </param>
-        /// <returns> a direct Reader
-        /// @lucene.internal </returns>
+        /// <param name="in">           The stream to read data from. </param>
+        /// <param name="format">       The format used to serialize. </param>
+        /// <param name="version">      The version used to serialize the data. </param>
+        /// <param name="valueCount">   How many values the stream holds. </param>
+        /// <param name="bitsPerValue"> The number of bits per value. </param>
+        /// <returns> A direct <see cref="Reader"/>. </returns>
         public static Reader GetDirectReaderNoHeader(IndexInput @in, Format format, int version, int valueCount, int bitsPerValue)
         {
             CheckVersion(version);
@@ -1233,34 +1157,36 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Expert: Construct a direct <seealso cref="Reader"/> from an <seealso cref="IndexInput"/>
+        /// Expert: Construct a direct <see cref="Reader"/> from an <see cref="IndexInput"/>
         /// without reading metadata at the beginning of the stream. this method is
         /// useful to restore data when metadata has been previously read using
-        /// <seealso cref="#readHeader(DataInput)"/>.
+        /// <see cref="ReadHeader(DataInput)"/>.
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
-        /// <param name="in">           the stream to read data from, positioned at the beginning of the packed values </param>
-        /// <param name="header">       metadata result from <code>readHeader()</code> </param>
-        /// <returns>             a Reader </returns>
-        /// <exception cref="IOException"> If there is a low-level I/O error </exception>
-        /// <seealso cref= #readHeader(DataInput)
-        /// @lucene.internal </seealso>
+        /// <param name="in">           The stream to read data from, positioned at the beginning of the packed values. </param>
+        /// <param name="header">       Metadata result from <see cref="ReadHeader(DataInput)"/>. </param>
+        /// <returns>             A <see cref="Reader"/>. </returns>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error. </exception>
+        /// <seealso cref="ReadHeader(DataInput)"/>
         public static Reader GetDirectReaderNoHeader(IndexInput @in, Header header)
         {
             return GetDirectReaderNoHeader(@in, header.format, header.version, header.valueCount, header.bitsPerValue);
         }
 
         /// <summary>
-        /// Construct a direct <seealso cref="Reader"/> from an <seealso cref="IndexInput"/>. this method
+        /// Construct a direct <see cref="Reader"/> from an <see cref="IndexInput"/>. This method
         /// is useful to restore data from streams which have been created using
-        /// <seealso cref="PackedInt32s#getWriter(DataOutput, int, int, float)"/>.
-        /// </p><p>
+        /// <see cref="PackedInt32s.GetWriter(DataOutput, int, int, float)"/>.
+        /// <para/>
         /// The returned reader will have very little memory overhead, but every call
-        /// to <seealso cref="Reader#get(int)"/> is likely to perform a disk seek.
+        /// to <see cref="NumericDocValues.Get(int)"/> is likely to perform a disk seek.
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
-        /// <param name="in">           the stream to read data from </param>
-        /// <returns> a direct Reader </returns>
-        /// <exception cref="IOException"> If there is a low-level I/O error
-        /// @lucene.internal </exception>
+        /// <param name="in">           The stream to read data from. </param>
+        /// <returns> A direct <see cref="Reader"/>. </returns>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error. </exception>
         public static Reader GetDirectReader(IndexInput @in)
         {
             int version = CodecUtil.CheckHeader(@in, CODEC_NAME, VERSION_START, VERSION_CURRENT);
@@ -1273,22 +1199,23 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// Create a packed integer array with the given amount of values initialized
-        /// to 0. the valueCount and the bitsPerValue cannot be changed after creation.
+        /// to 0. The <paramref name="valueCount"/> and the <paramref name="bitsPerValue"/> cannot be changed after creation.
         /// All Mutables known by this factory are kept fully in RAM.
-        /// </p><p>
-        /// Positive values of <code>acceptableOverheadRatio</code> will trade space
+        /// <para/>
+        /// Positive values of <paramref name="acceptableOverheadRatio"/> will trade space
         /// for speed by selecting a faster but potentially less memory-efficient
-        /// implementation. An <code>acceptableOverheadRatio</code> of
-        /// <seealso cref="PackedInt32s#COMPACT"/> will make sure that the most memory-efficient
-        /// implementation is selected whereas <seealso cref="PackedInt32s#FASTEST"/> will make sure
+        /// implementation. An <paramref name="acceptableOverheadRatio"/> of
+        /// <see cref="PackedInt32s.COMPACT"/> will make sure that the most memory-efficient
+        /// implementation is selected whereas <see cref="PackedInt32s.FASTEST"/> will make sure
         /// that the fastest implementation is selected.
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
-        /// <param name="valueCount">   the number of elements </param>
-        /// <param name="bitsPerValue"> the number of bits available for any given value </param>
-        /// <param name="acceptableOverheadRatio"> an acceptable overhead
-        ///        ratio per value </param>
-        /// <returns> a mutable packed integer array
-        /// @lucene.internal </returns>
+        /// <param name="valueCount">   The number of elements. </param>
+        /// <param name="bitsPerValue"> The number of bits available for any given value. </param>
+        /// <param name="acceptableOverheadRatio"> An acceptable overhead
+        ///        ratio per value. </param>
+        /// <returns> A mutable packed integer array. </returns>
         public static Mutable GetMutable(int valueCount, int bitsPerValue, float acceptableOverheadRatio)
         {
             FormatAndBits formatAndBits = FastestFormatAndBits(valueCount, bitsPerValue, acceptableOverheadRatio);
@@ -1296,9 +1223,10 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Same as <seealso cref="#getMutable(int, int, float)"/> with a pre-computed number
-        ///  of bits per value and format.
-        ///  @lucene.internal
+        /// Same as <see cref="GetMutable(int, int, float)"/> with a pre-computed number
+        /// of bits per value and format.
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
         public static Mutable GetMutable(int valueCount, int bitsPerValue, PackedInt32s.Format format)
         {
@@ -1349,45 +1277,46 @@ namespace Lucene.Net.Util.Packed
         /// <summary>
         /// Expert: Create a packed integer array writer for the given output, format,
         /// value count, and number of bits per value.
-        /// </p><p>
+        /// <para/>
         /// The resulting stream will be long-aligned. this means that depending on
         /// the format which is used, up to 63 bits will be wasted. An easy way to
-        /// make sure that no space is lost is to always use a <code>valueCount</code>
+        /// make sure that no space is lost is to always use a <paramref name="valueCount"/>
         /// that is a multiple of 64.
-        /// </p><p>
-        /// this method does not write any metadata to the stream, meaning that it is
+        /// <para/>
+        /// This method does not write any metadata to the stream, meaning that it is
         /// your responsibility to store it somewhere else in order to be able to
         /// recover data from the stream later on:
-        /// <ul>
-        ///   <li><code>format</code> (using <seealso cref="Format#getId()"/>),</li>
-        ///   <li><code>valueCount</code>,</li>
-        ///   <li><code>bitsPerValue</code>,</li>
-        ///   <li><seealso cref="#VERSION_CURRENT"/>.</li>
-        /// </ul>
-        /// </p><p>
+        /// <list type="bullet">
+        ///   <item><description><paramref name="format"/> (using <see cref="Format.Id"/>),</description></item>
+        ///   <item><description><paramref name="valueCount"/>,</description></item>
+        ///   <item><description><paramref name="bitsPerValue"/>,</description></item>
+        ///   <item><description><see cref="VERSION_CURRENT"/>.</description></item>
+        /// </list>
+        /// <para/>
         /// It is possible to start writing values without knowing how many of them you
-        /// are actually going to write. To do this, just pass <code>-1</code> as
-        /// <code>valueCount</code>. On the other hand, for any positive value of
-        /// <code>valueCount</code>, the returned writer will make sure that you don't
+        /// are actually going to write. To do this, just pass <c>-1</c> as
+        /// <paramref name="valueCount"/>. On the other hand, for any positive value of
+        /// <paramref name="valueCount"/>, the returned writer will make sure that you don't
         /// write more values than expected and pad the end of stream with zeros in
-        /// case you have written less than <code>valueCount</code> when calling
-        /// <seealso cref="Writer#finish()"/>.
-        /// </p><p>
-        /// The <code>mem</code> parameter lets you control how much memory can be used
+        /// case you have written less than <paramref name="valueCount"/> when calling
+        /// <see cref="Writer.Finish()"/>.
+        /// <para/>
+        /// The <paramref name="mem"/> parameter lets you control how much memory can be used
         /// to buffer changes in memory before flushing to disk. High values of
-        /// <code>mem</code> are likely to improve throughput. On the other hand, if
-        /// speed is not that important to you, a value of <code>0</code> will use as
+        /// <paramref name="mem"/> are likely to improve throughput. On the other hand, if
+        /// speed is not that important to you, a value of <c>0</c> will use as
         /// little memory as possible and should already offer reasonable throughput.
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
-        /// <param name="out">          the data output </param>
-        /// <param name="format">       the format to use to serialize the values </param>
-        /// <param name="valueCount">   the number of values </param>
-        /// <param name="bitsPerValue"> the number of bits per value </param>
-        /// <param name="mem">          how much memory (in bytes) can be used to speed up serialization </param>
-        /// <returns>             a Writer </returns>
-        /// <seealso cref= PackedInt32s#getReaderIteratorNoHeader(DataInput, Format, int, int, int, int) </seealso>
-        /// <seealso cref= PackedInts#getReaderNoHeader(DataInput, Format, int, int, int)
-        /// @lucene.internal </seealso>
+        /// <param name="out">          The data output. </param>
+        /// <param name="format">       The format to use to serialize the values. </param>
+        /// <param name="valueCount">   The number of values. </param>
+        /// <param name="bitsPerValue"> The number of bits per value. </param>
+        /// <param name="mem">          How much memory (in bytes) can be used to speed up serialization. </param>
+        /// <returns>             A <see cref="Writer"/>. </returns>
+        /// <seealso cref="PackedInt32s.GetReaderIteratorNoHeader(DataInput, Format, int, int, int, int)"/>
+        /// <seealso cref="PackedInt32s.GetReaderNoHeader(DataInput, Format, int, int, int)"/>
         public static Writer GetWriterNoHeader(DataOutput @out, Format format, int valueCount, int bitsPerValue, int mem)
         {
             return new PackedWriter(format, @out, valueCount, bitsPerValue, mem);
@@ -1396,35 +1325,36 @@ namespace Lucene.Net.Util.Packed
         /// <summary>
         /// Create a packed integer array writer for the given output, format, value
         /// count, and number of bits per value.
-        /// </p><p>
+        /// <para/>
         /// The resulting stream will be long-aligned. this means that depending on
         /// the format which is used under the hoods, up to 63 bits will be wasted.
         /// An easy way to make sure that no space is lost is to always use a
-        /// <code>valueCount</code> that is a multiple of 64.
-        /// </p><p>
-        /// this method writes metadata to the stream, so that the resulting stream is
-        /// sufficient to restore a <seealso cref="Reader"/> from it. You don't need to track
-        /// <code>valueCount</code> or <code>bitsPerValue</code> by yourself. In case
+        /// <paramref name="valueCount"/> that is a multiple of 64.
+        /// <para/>
+        /// This method writes metadata to the stream, so that the resulting stream is
+        /// sufficient to restore a <see cref="Reader"/> from it. You don't need to track
+        /// <paramref name="valueCount"/> or <paramref name="bitsPerValue"/> by yourself. In case
         /// this is a problem, you should probably look at
-        /// <seealso cref="#getWriterNoHeader(DataOutput, Format, int, int, int)"/>.
-        /// </p><p>
-        /// The <code>acceptableOverheadRatio</code> parameter controls how
+        /// <see cref="GetWriterNoHeader(DataOutput, Format, int, int, int)"/>.
+        /// <para/>
+        /// The <paramref name="acceptableOverheadRatio"/> parameter controls how
         /// readers that will be restored from this stream trade space
         /// for speed by selecting a faster but potentially less memory-efficient
-        /// implementation. An <code>acceptableOverheadRatio</code> of
-        /// <seealso cref="PackedInt32s#COMPACT"/> will make sure that the most memory-efficient
-        /// implementation is selected whereas <seealso cref="PackedInt32s#FASTEST"/> will make sure
+        /// implementation. An <paramref name="acceptableOverheadRatio"/> of
+        /// <see cref="PackedInt32s.COMPACT"/> will make sure that the most memory-efficient
+        /// implementation is selected whereas <see cref="PackedInt32s.FASTEST"/> will make sure
         /// that the fastest implementation is selected. In case you are only interested
         /// in reading this stream sequentially later on, you should probably use
-        /// <seealso cref="PackedInt32s#COMPACT"/>.
+        /// <see cref="PackedInt32s.COMPACT"/>.
+        /// <para/>
+        /// @lucene.internal
         /// </summary>
-        /// <param name="out">          the data output </param>
-        /// <param name="valueCount">   the number of values </param>
-        /// <param name="bitsPerValue"> the number of bits per value </param>
-        /// <param name="acceptableOverheadRatio"> an acceptable overhead ratio per value </param>
-        /// <returns>             a Writer </returns>
-        /// <exception cref="IOException"> If there is a low-level I/O error
-        /// @lucene.internal </exception>
+        /// <param name="out">          The data output. </param>
+        /// <param name="valueCount">   The number of values. </param>
+        /// <param name="bitsPerValue"> The number of bits per value. </param>
+        /// <param name="acceptableOverheadRatio"> An acceptable overhead ratio per value. </param>
+        /// <returns>             A <see cref="Writer"/>. </returns>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error. </exception>
         public static Writer GetWriter(DataOutput @out, int valueCount, int bitsPerValue, float acceptableOverheadRatio)
         {
             Debug.Assert(valueCount >= 0);
@@ -1437,10 +1367,12 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// Returns how many bits are required to hold values up
-        ///  to and including maxValue </summary>
-        /// <param name="maxValue"> the maximum value that should be representable. </param>
-        /// <returns> the amount of bits needed to represent values from 0 to maxValue.
-        /// @lucene.internal </returns>
+        /// to and including <paramref name="maxValue"/>. 
+        /// <para/>
+        /// @lucene.internal
+        /// </summary>
+        /// <param name="maxValue"> The maximum value that should be representable. </param>
+        /// <returns> The amount of bits needed to represent values from 0 to <paramref name="maxValue"/>. </returns>
         public static int BitsRequired(long maxValue)
         {
             if (maxValue < 0)
@@ -1452,18 +1384,20 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// Calculates the maximum unsigned long that can be expressed with the given
-        /// number of bits. </summary>
-        /// <param name="bitsPerValue"> the number of bits available for any given value. </param>
-        /// <returns> the maximum value for the given bits.
-        /// @lucene.internal </returns>
+        /// number of bits. 
+        /// <para/>
+        /// @lucene.internal
+        /// </summary>
+        /// <param name="bitsPerValue"> The number of bits available for any given value. </param>
+        /// <returns> The maximum value for the given bits. </returns>
         public static long MaxValue(int bitsPerValue)
         {
             return bitsPerValue == 64 ? long.MaxValue : ~(~0L << bitsPerValue);
         }
 
         /// <summary>
-        /// Copy <code>src[srcPos:srcPos+len]</code> into
-        /// <code>dest[destPos:destPos+len]</code> using at most <code>mem</code>
+        /// Copy <c>src[srcPos:srcPos+len]</c> into
+        /// <c>dest[destPos:destPos+len]</c> using at most <paramref name="mem"/>
         /// bytes.
         /// </summary>
         public static void Copy(Reader src, int srcPos, Mutable dest, int destPos, int len, int mem)
@@ -1487,7 +1421,7 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Same as <seealso cref="#copy(Reader, int, Mutable, int, int, int)"/> but using a pre-allocated buffer. </summary>
+        /// Same as <see cref="Copy(Reader, int, Mutable, int, int, int)"/> but using a pre-allocated buffer. </summary>
         internal static void Copy(Reader src, int srcPos, Mutable dest, int destPos, int len, long[] buf)
         {
             Debug.Assert(buf.Length > 0);
@@ -1518,15 +1452,15 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Expert: reads only the metadata from a stream. this is useful to later
+        /// Expert: reads only the metadata from a stream. This is useful to later
         /// restore a stream or open a direct reader via
-        /// <seealso cref="#getReaderNoHeader(DataInput, Header)"/>
-        /// or <seealso cref="#getDirectReaderNoHeader(IndexInput, Header)"/>. </summary>
-        /// <param name="in"> the stream to read data </param>
-        /// <returns>   packed integer metadata. </returns>
-        /// <exception cref="IOException"> If there is a low-level I/O error </exception>
-        /// <seealso cref= #getReaderNoHeader(DataInput, Header) </seealso>
-        /// <seealso cref= #getDirectReaderNoHeader(IndexInput, Header) </seealso>
+        /// <see cref="GetReaderNoHeader(DataInput, Header)"/>
+        /// or <see cref="GetDirectReaderNoHeader(IndexInput, Header)"/>. </summary>
+        /// <param name="in"> The stream to read data. </param>
+        /// <returns>   Packed integer metadata. </returns>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error. </exception>
+        /// <seealso cref="GetReaderNoHeader(DataInput, Header)"/>
+        /// <seealso cref="GetDirectReaderNoHeader(IndexInput, Header)"/>
         public static Header ReadHeader(DataInput @in)
         {
             int version = CodecUtil.CheckHeader(@in, CODEC_NAME, VERSION_START, VERSION_CURRENT);
@@ -1557,7 +1491,7 @@ namespace Lucene.Net.Util.Packed
 
         /// <summary>
         /// Check that the block size is a power of 2, in the right bounds, and return
-        ///  its log in base 2.
+        /// its log in base 2.
         /// </summary>
         internal static int CheckBlockSize(int blockSize, int minBlockSize, int maxBlockSize)
         {
@@ -1573,8 +1507,8 @@ namespace Lucene.Net.Util.Packed
         }
 
         /// <summary>
-        /// Return the number of blocks required to store <code>size</code> values on
-        ///  <code>blockSize</code>.
+        /// Return the number of blocks required to store <paramref name="size"/> values on
+        /// <paramref name="blockSize"/>.
         /// </summary>
         internal static int NumBlocks(long size, int blockSize)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/PagedGrowableWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/PagedGrowableWriter.cs b/src/Lucene.Net/Util/Packed/PagedGrowableWriter.cs
index f05202b..fbc85ff 100644
--- a/src/Lucene.Net/Util/Packed/PagedGrowableWriter.cs
+++ b/src/Lucene.Net/Util/Packed/PagedGrowableWriter.cs
@@ -20,11 +20,13 @@ namespace Lucene.Net.Util.Packed
     using Mutable = Lucene.Net.Util.Packed.PackedInt32s.Mutable;
 
     /// <summary>
-    /// A <seealso cref="PagedGrowableWriter"/>. this class slices data into fixed-size blocks
+    /// A <see cref="PagedGrowableWriter"/>. This class slices data into fixed-size blocks
     /// which have independent numbers of bits per value and grow on-demand.
-    /// <p>You should use this class instead of the <seealso cref="AbstractAppendingInt64Buffer"/> related ones only when
+    /// <para/>
+    /// You should use this class instead of the <see cref="AbstractAppendingInt64Buffer"/> related ones only when
     /// you need random write-access. Otherwise this class will likely be slower and
     /// less memory-efficient.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public sealed class PagedGrowableWriter : AbstractPagedMutable<PagedGrowableWriter>
@@ -32,12 +34,12 @@ namespace Lucene.Net.Util.Packed
         internal readonly float acceptableOverheadRatio;
 
         /// <summary>
-        /// Create a new <seealso cref="PagedGrowableWriter"/> instance.
+        /// Create a new <see cref="PagedGrowableWriter"/> instance.
         /// </summary>
-        /// <param name="size"> the number of values to store. </param>
-        /// <param name="pageSize"> the number of values per page </param>
-        /// <param name="startBitsPerValue"> the initial number of bits per value </param>
-        /// <param name="acceptableOverheadRatio"> an acceptable overhead ratio </param>
+        /// <param name="size"> The number of values to store. </param>
+        /// <param name="pageSize"> The number of values per page. </param>
+        /// <param name="startBitsPerValue"> The initial number of bits per value. </param>
+        /// <param name="acceptableOverheadRatio"> An acceptable overhead ratio. </param>
         public PagedGrowableWriter(long size, int pageSize, int startBitsPerValue, float acceptableOverheadRatio)
             : this(size, pageSize, startBitsPerValue, acceptableOverheadRatio, true)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6f22b5ab/src/Lucene.Net/Util/Packed/PagedMutable.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Packed/PagedMutable.cs b/src/Lucene.Net/Util/Packed/PagedMutable.cs
index d7912df..d662e5e 100644
--- a/src/Lucene.Net/Util/Packed/PagedMutable.cs
+++ b/src/Lucene.Net/Util/Packed/PagedMutable.cs
@@ -22,9 +22,10 @@ namespace Lucene.Net.Util.Packed
     using Mutable = Lucene.Net.Util.Packed.PackedInt32s.Mutable;
 
     /// <summary>
-    /// A <seealso cref="PagedMutable"/>. this class slices data into fixed-size blocks
+    /// A <see cref="PagedMutable"/>. This class slices data into fixed-size blocks
     /// which have the same number of bits per value. It can be a useful replacement
-    /// for <seealso cref="PackedInt32s.Mutable"/> to store more than 2B values.
+    /// for <see cref="PackedInt32s.Mutable"/> to store more than 2B values.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public sealed class PagedMutable : AbstractPagedMutable<PagedMutable>
@@ -32,12 +33,12 @@ namespace Lucene.Net.Util.Packed
         internal readonly PackedInt32s.Format format;
 
         /// <summary>
-        /// Create a new <seealso cref="PagedMutable"/> instance.
+        /// Create a new <see cref="PagedMutable"/> instance.
         /// </summary>
-        /// <param name="size"> the number of values to store. </param>
-        /// <param name="pageSize"> the number of values per page </param>
-        /// <param name="bitsPerValue"> the number of bits per value </param>
-        /// <param name="acceptableOverheadRatio"> an acceptable overhead ratio </param>
+        /// <param name="size"> The number of values to store. </param>
+        /// <param name="pageSize"> The number of values per page. </param>
+        /// <param name="bitsPerValue"> The number of bits per value. </param>
+        /// <param name="acceptableOverheadRatio"> An acceptable overhead ratio. </param>
         public PagedMutable(long size, int pageSize, int bitsPerValue, float acceptableOverheadRatio)
             : this(size, pageSize, PackedInt32s.FastestFormatAndBits(pageSize, bitsPerValue, acceptableOverheadRatio))
         {


[45/48] lucenenet git commit: Lucene.Net.Codecs: Fixed XML documentation warnings

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/FSTPulsing41PostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/FSTPulsing41PostingsFormat.cs b/src/Lucene.Net.Codecs/Memory/FSTPulsing41PostingsFormat.cs
index 14d3de9..839588a 100644
--- a/src/Lucene.Net.Codecs/Memory/FSTPulsing41PostingsFormat.cs
+++ b/src/Lucene.Net.Codecs/Memory/FSTPulsing41PostingsFormat.cs
@@ -26,8 +26,9 @@
 
     /// <summary>
     /// FST + Pulsing41, test only, since
-    ///  FST does no delta encoding here!
-    ///  @lucene.experimental 
+    /// FST does no delta encoding here!
+    /// <para/>
+    /// @lucene.experimental 
     /// </summary>
     [PostingsFormatName("FSTPulsing41")] // LUCENENET specific - using PostingsFormatName attribute to ensure the default name passed from subclasses is the same as this class name
     public class FSTPulsing41PostingsFormat : PostingsFormat

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/FSTTermOutputs.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/FSTTermOutputs.cs b/src/Lucene.Net.Codecs/Memory/FSTTermOutputs.cs
index 040b999..82d39ac 100644
--- a/src/Lucene.Net.Codecs/Memory/FSTTermOutputs.cs
+++ b/src/Lucene.Net.Codecs/Memory/FSTTermOutputs.cs
@@ -28,8 +28,8 @@ namespace Lucene.Net.Codecs.Memory
 
     /// <summary>
     /// An FST implementation for 
-    /// <seealso cref="FSTTermsWriter"/>.
-    /// 
+    /// <see cref="FSTTermsWriter"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 
@@ -117,13 +117,12 @@ namespace Lucene.Net.Codecs.Memory
 
         /// <summary>
         /// The return value will be the smaller one, when these two are 
-        /// 'comparable', i.e. 
-        /// 1. every value in t1 is not larger than in t2, or
-        /// 2. every value in t1 is not smaller than t2.
+        /// 'comparable', i.e.
+        /// <list type="number">
+        ///     <item><description>every value in t1 is not larger than in t2, or</description></item>
+        ///     <item><description>every value in t1 is not smaller than t2.</description></item>
+        /// </list>
         /// </summary>
-        /// <param name="t1"></param>
-        /// <param name="t2"></param>
-        /// <returns></returns>
         public override TermData Common(TermData t1, TermData t2)
         {
             if (Equals(t1, NO_OUTPUT) || Equals(t2, NO_OUTPUT))
@@ -371,7 +370,7 @@ namespace Lucene.Net.Codecs.Memory
         }
 
         /// <summary>
-        /// NOTE: This was longsEqual() in Lucene
+        /// NOTE: This was longsEqual() in Lucene.
         /// </summary>
         private static bool Int64sEqual(TermData t1, TermData t2)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/FSTTermsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/FSTTermsReader.cs b/src/Lucene.Net.Codecs/Memory/FSTTermsReader.cs
index 5141776..6a19546 100644
--- a/src/Lucene.Net.Codecs/Memory/FSTTermsReader.cs
+++ b/src/Lucene.Net.Codecs/Memory/FSTTermsReader.cs
@@ -50,10 +50,10 @@ namespace Lucene.Net.Codecs.Memory
 
     /// <summary>
     /// FST-based terms dictionary reader.
-    /// 
+    /// <para/>
     /// The FST directly maps each term and its metadata, 
     /// it is memory resident.
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class FSTTermsReader : FieldsProducer
@@ -287,18 +287,18 @@ namespace Lucene.Net.Codecs.Memory
             {
                 private readonly FSTTermsReader.TermsReader outerInstance;
 
-                /// <summary>Current term, null when enum ends or unpositioned</summary>
+                /// <summary>Current term, null when enum ends or unpositioned.</summary>
                 internal BytesRef term_Renamed;
 
-                /// <summary>Current term stats + decoded metadata (customized by PBF)</summary>
+                /// <summary>Current term stats + decoded metadata (customized by PBF).</summary>
                 internal readonly BlockTermState state;
 
-                /// <summary>Current term stats + undecoded metadata (long[] & byte[])</summary>
+                /// <summary>Current term stats + undecoded metadata (long[] &amp; byte[]).</summary>
                 internal FSTTermOutputs.TermData meta;
                 internal ByteArrayDataInput bytesReader;
 
                 /// <summary>
-                /// Decodes metadata into customized term state </summary>
+                /// Decodes metadata into customized term state. </summary>
                 internal abstract void DecodeMetaData();
 
                 internal BaseTermsEnum(FSTTermsReader.TermsReader outerInstance)
@@ -366,10 +366,10 @@ namespace Lucene.Net.Codecs.Memory
 
                 private readonly BytesRefFSTEnum<FSTTermOutputs.TermData> fstEnum;
 
-                /// <summary>True when current term's metadata is decoded</summary>
+                /// <summary>True when current term's metadata is decoded.</summary>
                 private bool decoded;
 
-                /// <summary>True when current enum is 'positioned' by seekExact(TermState)</summary>
+                /// <summary>True when current enum is 'positioned' by <see cref="SeekExact(BytesRef, TermState)"/>.</summary>
                 private bool seekPending;
 
                 internal SegmentTermsEnum(FSTTermsReader.TermsReader outerInstance) 
@@ -469,43 +469,43 @@ namespace Lucene.Net.Codecs.Memory
             {
                 private readonly FSTTermsReader.TermsReader outerInstance;
 
-                /// <summary>True when current term's metadata is decoded</summary>
+                /// <summary>True when current term's metadata is decoded.</summary>
                 private bool decoded;
 
-                /// <summary>True when there is pending term when calling Next()</summary>
+                /// <summary>True when there is pending term when calling <see cref="Next()"/>.</summary>
                 private bool pending;
      
                 /// <summary>
                 /// stack to record how current term is constructed,
                 /// used to accumulate metadata or rewind term:
                 ///   level == term.Length + 1,
-                ///     == 0 when term is null */
+                ///     == 0 when term is null
                 /// </summary>
                 private Frame[] stack;
                 private int level;
 
                 /// <summary>
-                /// to which level the metadata is accumulated
-                /// so that we can accumulate metadata lazily
+                /// To which level the metadata is accumulated
+                /// so that we can accumulate metadata lazily.
                 /// </summary>
                 private int metaUpto;
 
-                /// <summary>term dict fst</summary>
+                /// <summary>Term dict fst.</summary>
                 private readonly FST<FSTTermOutputs.TermData> fst;
                 private readonly FST.BytesReader fstReader;
                 private readonly Outputs<FSTTermOutputs.TermData> fstOutputs;
 
-                /// <summary>query automaton to intersect with</summary>
+                /// <summary>Query automaton to intersect with.</summary>
                 private readonly ByteRunAutomaton fsa;
 
                 internal sealed class Frame
                 {
                     private readonly FSTTermsReader.TermsReader.IntersectTermsEnum outerInstance;
 
-                    /// <summary>fst stats</summary>
+                    /// <summary>Fst stats.</summary>
                     internal FST.Arc<FSTTermOutputs.TermData> fstArc;
 
-                    /// <summary>automaton stats</summary>
+                    /// <summary>Automaton stats.</summary>
                     internal int fsaState;
 
                     internal Frame(FSTTermsReader.TermsReader.IntersectTermsEnum outerInstance)
@@ -581,7 +581,7 @@ namespace Lucene.Net.Codecs.Memory
                 }
 
                 /// <summary>
-                /// Lazily accumulate meta data, when we got a accepted term </summary>
+                /// Lazily accumulate meta data, when we got a accepted term. </summary>
                 /// <exception cref="System.IO.IOException"/>
                 internal void LoadMetaData()
                 {
@@ -708,7 +708,7 @@ namespace Lucene.Net.Codecs.Memory
                     return null;
                 }
 
-                /// <summary> Virtual frame, never pop </summary>
+                /// <summary> Virtual frame, never pop. </summary>
                 private Frame LoadVirtualFrame(Frame frame)
                 {
                     frame.fstArc.Output = fstOutputs.NoOutput;
@@ -717,7 +717,7 @@ namespace Lucene.Net.Codecs.Memory
                     return frame;
                 }
 
-                /// <summary> Load frame for start arc(node) on fst </summary>
+                /// <summary> Load frame for start arc(node) on fst. </summary>
                 private Frame LoadFirstFrame(Frame frame)
                 {
                     frame.fstArc = fst.GetFirstArc(frame.fstArc);
@@ -726,7 +726,7 @@ namespace Lucene.Net.Codecs.Memory
                 }
 
                 /// <summary>
-                /// Load frame for target arc(node) on fst </summary>
+                /// Load frame for target arc(node) on fst. </summary>
                 private Frame LoadExpandFrame(Frame top, Frame frame)
                 {
                     if (!CanGrow(top))
@@ -743,7 +743,7 @@ namespace Lucene.Net.Codecs.Memory
                     return frame;
                 }
 
-                /// <summary> Load frame for sibling arc(node) on fst </summary>
+                /// <summary> Load frame for sibling arc(node) on fst. </summary>
                 private Frame LoadNextFrame(Frame top, Frame frame)
                 {
                     if (!CanRewind(frame))
@@ -769,7 +769,7 @@ namespace Lucene.Net.Codecs.Memory
 
                 /// <summary>
                 /// Load frame for target arc(node) on fst, so that 
-                ///  arc.label >= label and !fsa.reject(arc.label) 
+                /// arc.label >= label and !fsa.reject(arc.label) 
                 /// </summary>
                 private Frame LoadCeilFrame(int label, Frame top, Frame frame)
                 {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/FSTTermsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/FSTTermsWriter.cs b/src/Lucene.Net.Codecs/Memory/FSTTermsWriter.cs
index 2f2b7d6..8077e37 100644
--- a/src/Lucene.Net.Codecs/Memory/FSTTermsWriter.cs
+++ b/src/Lucene.Net.Codecs/Memory/FSTTermsWriter.cs
@@ -36,19 +36,20 @@ namespace Lucene.Net.Codecs.Memory
 
     /// <summary>
     /// FST-based term dict, using metadata as FST output.
-    /// 
+    /// <para/>
     /// The FST directly holds the mapping between &lt;term, metadata&gt;.
-    /// 
+    /// <para/>
     /// Term metadata consists of three parts:
-    /// 1. term statistics: docFreq, totalTermFreq;
-    /// 2. monotonic long[], e.g. the pointer to the postings list for that term;
-    /// 3. generic byte[], e.g. other information need by postings reader.
-    /// 
+    /// <list type="number">
+    ///     <item><description>term statistics: docFreq, totalTermFreq;</description></item>
+    ///     <item><description>monotonic long[], e.g. the pointer to the postings list for that term;</description></item>
+    ///     <item><description>generic byte[], e.g. other information need by postings reader.</description></item>
+    /// </list>
     /// <para>
     /// File:
-    /// <ul>
-    ///   <li><tt>.tst</tt>: <a href="#Termdictionary">Term Dictionary</a></li>
-    /// </ul>
+    /// <list type="bullet">
+    ///   <item><description><c>.tst</c>: <a href="#Termdictionary">Term Dictionary</a></description></item>
+    /// </list>
     /// </para>
     /// <para>
     /// 
@@ -62,53 +63,53 @@ namespace Lucene.Net.Codecs.Memory
     ///  to postings list).
     /// </para>
     /// <para>
-    ///  Typically the metadata is separated into two parts:
-    ///  <ul>
-    ///   <li>
+    /// Typically the metadata is separated into two parts:
+    /// <list type="bullet">
+    ///   <item><description>
     ///    Monotonical long array: Some metadata will always be ascending in order
     ///    with the corresponding term. This part is used by FST to share outputs between arcs.
-    ///   </li>
-    ///   <li>
+    ///   </description></item>
+    ///   <item><description>
     ///    Generic byte array: Used to store non-monotonic metadata.
-    ///   </li>
-    ///  </ul>
+    ///   </description></item>
+    /// </list>
     /// </para>
     /// 
     /// File format:
-    /// <ul>
-    ///  <li>TermsDict(.tst) --&gt; Header, <i>PostingsHeader</i>, FieldSummary, DirOffset</li>
-    ///  <li>FieldSummary --&gt; NumFields, &lt;FieldNumber, NumTerms, SumTotalTermFreq?, 
-    ///                                      SumDocFreq, DocCount, LongsSize, TermFST &gt;<sup>NumFields</sup></li>
-    ///  <li>TermFST TermData
-    ///  <li>TermData --&gt; Flag, BytesSize?, LongDelta<sup>LongsSize</sup>?, Byte<sup>BytesSize</sup>?, 
-    ///                      &lt; DocFreq[Same?], (TotalTermFreq-DocFreq) &gt; ? </li>
-    ///  <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///  <li>DirOffset --&gt; <seealso cref="DataOutput#writeLong Uint64"/></li>
-    ///  <li>DocFreq, LongsSize, BytesSize, NumFields,
-    ///        FieldNumber, DocCount --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///  <li>TotalTermFreq, NumTerms, SumTotalTermFreq, SumDocFreq, LongDelta --&gt; 
-    ///        <seealso cref="DataOutput#writeVLong VLong"/></li>
-    /// </ul>
+    /// <list type="bullet">
+    ///  <item><description>TermsDict(.tst) --&gt; Header, <i>PostingsHeader</i>, FieldSummary, DirOffset</description></item>
+    ///  <item><description>FieldSummary --&gt; NumFields, &lt;FieldNumber, NumTerms, SumTotalTermFreq?, 
+    ///                                      SumDocFreq, DocCount, LongsSize, TermFST &gt;<sup>NumFields</sup></description></item>
+    ///  <item><description>TermFST TermData</description></item>
+    ///  <item><description>TermData --&gt; Flag, BytesSize?, LongDelta<sup>LongsSize</sup>?, Byte<sup>BytesSize</sup>?, 
+    ///                      &lt; DocFreq[Same?], (TotalTermFreq-DocFreq) &gt; ? </description></item>
+    ///  <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///  <item><description>DirOffset --&gt; Uint64 (<see cref="Store.DataOutput.WriteInt64(long)"/>) </description></item>
+    ///  <item><description>DocFreq, LongsSize, BytesSize, NumFields,
+    ///        FieldNumber, DocCount --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///  <item><description>TotalTermFreq, NumTerms, SumTotalTermFreq, SumDocFreq, LongDelta --&gt; 
+    ///        VLong (<see cref="Store.DataOutput.WriteVInt64(long)"/>) </description></item>
+    /// </list>
     /// <para>Notes:</para>
-    /// <ul>
-    ///  <li>
+    /// <list type="bullet">
+    ///  <item><description>
     ///   The format of PostingsHeader and generic meta bytes are customized by the specific postings implementation:
     ///   they contain arbitrary per-file data (such as parameters or versioning information), and per-term data
     ///   (non-monotonic ones like pulsed postings data).
-    ///  </li>
-    ///  <li>
+    ///  </description></item>
+    ///  <item><description>
     ///   The format of TermData is determined by FST, typically monotonic metadata will be dense around shallow arcs,
     ///   while in deeper arcs only generic bytes and term statistics exist.
-    ///  </li>
-    ///  <li>
+    ///  </description></item>
+    ///  <item><description>
     ///   The byte Flag is used to indicate which part of metadata exists on current arc. Specially the monotonic part
     ///   is omitted when it is an array of 0s.
-    ///  </li>
-    ///  <li>
+    ///  </description></item>
+    ///  <item><description>
     ///   Since LongsSize is per-field fixed, it is only written once in field summary.
-    ///  </li>
-    /// </ul>
-    /// 
+    ///  </description></item>
+    /// </list>
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class FSTTermsWriter : FieldsConsumer
@@ -213,7 +214,7 @@ namespace Lucene.Net.Codecs.Memory
             public long SumDocFreq { get; private set; }
             public int DocCount { get; private set; }
             /// <summary>
-            /// NOTE: This was longsSize (field) in Lucene
+            /// NOTE: This was longsSize (field) in Lucene.
             /// </summary>
             public int Int64sSize { get; private set; }
             public FST<FSTTermOutputs.TermData> Dict { get; private set; }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/MemoryDocValuesConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/MemoryDocValuesConsumer.cs b/src/Lucene.Net.Codecs/Memory/MemoryDocValuesConsumer.cs
index 0a798cb..4aef40c 100644
--- a/src/Lucene.Net.Codecs/Memory/MemoryDocValuesConsumer.cs
+++ b/src/Lucene.Net.Codecs/Memory/MemoryDocValuesConsumer.cs
@@ -43,7 +43,7 @@ namespace Lucene.Net.Codecs.Memory
     using Util = Util.Fst.Util;
 
     /// <summary>
-    /// Writer for <seealso cref="MemoryDocValuesFormat"/>
+    /// Writer for <see cref="MemoryDocValuesFormat"/>.
     /// </summary>
     internal class MemoryDocValuesConsumer : DocValuesConsumer
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/MemoryDocValuesFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/MemoryDocValuesFormat.cs b/src/Lucene.Net.Codecs/Memory/MemoryDocValuesFormat.cs
index 3dad21c..4df853c 100644
--- a/src/Lucene.Net.Codecs/Memory/MemoryDocValuesFormat.cs
+++ b/src/Lucene.Net.Codecs/Memory/MemoryDocValuesFormat.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Codecs.Memory
      */
 
     /// <summary>
-    /// In-memory docvalues format </summary>
+    /// In-memory docvalues format. </summary>
     [DocValuesFormatName("Memory")] // LUCENENET specific - using DocValuesFormatName attribute to ensure the default name passed from subclasses is the same as this class name
     public class MemoryDocValuesFormat : DocValuesFormat
     {
@@ -31,8 +31,8 @@ namespace Lucene.Net.Codecs.Memory
         internal readonly float acceptableOverheadRatio;
 
         /// <summary>
-        /// Calls {@link #MemoryDocValuesFormat(float) 
-        /// MemoryDocValuesFormat(PackedInts.DEFAULT)} 
+        /// Calls <c>MemoryDocValuesFormat(PackedInts.DEFAULT)</c> 
+        /// (<see cref="MemoryDocValuesFormat(float)"/>)
         /// </summary>
         public MemoryDocValuesFormat() 
             : this(PackedInt32s.DEFAULT)
@@ -40,12 +40,13 @@ namespace Lucene.Net.Codecs.Memory
         }
 
         /// <summary>
-        /// Creates a new MemoryDocValuesFormat with the specified
-        /// <code>acceptableOverheadRatio</code> for NumericDocValues. </summary>
-        /// <param name="acceptableOverheadRatio"> compression parameter for numerics. 
-        ///        Currently this is only used when the number of unique values is small.
-        ///        
-        /// @lucene.experimental </param>
+        /// Creates a new <see cref="MemoryDocValuesFormat"/> with the specified
+        /// <paramref name="acceptableOverheadRatio"/> for <see cref="NumericDocValues"/>. 
+        /// <para/>
+        /// @lucene.experimental
+        /// </summary>
+        /// <param name="acceptableOverheadRatio"> Compression parameter for numerics. 
+        ///        Currently this is only used when the number of unique values is small. </param>
         public MemoryDocValuesFormat(float acceptableOverheadRatio) 
             : base()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/MemoryDocValuesProducer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/MemoryDocValuesProducer.cs b/src/Lucene.Net.Codecs/Memory/MemoryDocValuesProducer.cs
index b26bf2b..9e32c5a 100644
--- a/src/Lucene.Net.Codecs/Memory/MemoryDocValuesProducer.cs
+++ b/src/Lucene.Net.Codecs/Memory/MemoryDocValuesProducer.cs
@@ -31,7 +31,7 @@ namespace Lucene.Net.Codecs.Memory
     using Util = Lucene.Net.Util.Fst.Util;
 
     /// <summary>
-    /// TextReader for <seealso cref="MemoryDocValuesFormat"/>
+    /// TextReader for <see cref="MemoryDocValuesFormat"/>.
     /// </summary>
     internal class MemoryDocValuesProducer : DocValuesProducer
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/MemoryPostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/MemoryPostingsFormat.cs b/src/Lucene.Net.Codecs/Memory/MemoryPostingsFormat.cs
index 9336c7b..eba1941 100644
--- a/src/Lucene.Net.Codecs/Memory/MemoryPostingsFormat.cs
+++ b/src/Lucene.Net.Codecs/Memory/MemoryPostingsFormat.cs
@@ -57,16 +57,15 @@ namespace Lucene.Net.Codecs.Memory
     // to disk.
 
     /// <summary>
-    /// Stores terms & postings (docs, positions, payloads) in
-    ///  RAM, using an FST.
+    /// Stores terms &amp; postings (docs, positions, payloads) in
+    /// RAM, using an FST.
     /// 
     /// <para>Note that this codec implements advance as a linear
     /// scan!  This means if you store large fields in here,
     /// queries that rely on advance will (AND BooleanQuery,
     /// PhraseQuery) will be relatively slow!
-    /// 
-    /// @lucene.experimental 
     /// </para>
+    /// @lucene.experimental 
     /// </summary>
 
     // TODO: Maybe name this 'Cached' or something to reflect
@@ -84,10 +83,10 @@ namespace Lucene.Net.Codecs.Memory
         }
 
         /// <summary>
-        /// Create MemoryPostingsFormat, specifying advanced FST options. </summary>
-        /// <param name="doPackFST"> true if a packed FST should be built.
+        /// Create <see cref="MemoryPostingsFormat"/>, specifying advanced FST options. </summary>
+        /// <param name="doPackFST"> <c>true</c> if a packed FST should be built.
         ///        NOTE: packed FSTs are limited to ~2.1 GB of postings. </param>
-        /// <param name="acceptableOverheadRatio"> allowable overhead for packed ints
+        /// <param name="acceptableOverheadRatio"> Allowable overhead for packed <see cref="int"/>s
         ///        during FST construction. </param>
         public MemoryPostingsFormat(bool doPackFST, float acceptableOverheadRatio) 
             : base()

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Pulsing/Pulsing41PostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Pulsing/Pulsing41PostingsFormat.cs b/src/Lucene.Net.Codecs/Pulsing/Pulsing41PostingsFormat.cs
index dc1958d..5424996 100644
--- a/src/Lucene.Net.Codecs/Pulsing/Pulsing41PostingsFormat.cs
+++ b/src/Lucene.Net.Codecs/Pulsing/Pulsing41PostingsFormat.cs
@@ -20,8 +20,8 @@ namespace Lucene.Net.Codecs.Pulsing
      */
 
     /// <summary>
-    /// Concrete pulsing implementation over {@link Lucene41PostingsFormat}.
-    /// 
+    /// Concrete pulsing implementation over <see cref="Lucene41PostingsFormat"/>.
+    /// <para/>
     /// @lucene.experimental 
     /// </summary>
     [PostingsFormatName("Pulsing41")] // LUCENENET specific - using PostingsFormatName attribute to ensure the default name passed from subclasses is the same as this class name
@@ -33,13 +33,13 @@ namespace Lucene.Net.Codecs.Pulsing
         {
         }
 
-        /// <summary>Inlines docFreq=<code>freqCutoff</code> terms, otherwise uses the normal "Lucene41" format.</summary>
+        /// <summary>Inlines docFreq=<paramref name="freqCutoff"/> terms, otherwise uses the normal "Lucene41" format.</summary>
         public Pulsing41PostingsFormat(int freqCutoff) 
             : this(freqCutoff, BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE, BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE)
         {
         }
 
-        /// <summary>Inlines docFreq=<code>freqCutoff</code> terms, otherwise uses the normal "Lucene41" format.</summary>
+        /// <summary>Inlines docFreq=<paramref name="freqCutoff"/> terms, otherwise uses the normal "Lucene41" format.</summary>
         public Pulsing41PostingsFormat(int freqCutoff, int minBlockSize, int maxBlockSize) 
             : base(new Lucene41PostingsBaseFormat(), freqCutoff, minBlockSize, maxBlockSize)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Pulsing/PulsingPostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Pulsing/PulsingPostingsFormat.cs b/src/Lucene.Net.Codecs/Pulsing/PulsingPostingsFormat.cs
index 47a804d..550c422 100644
--- a/src/Lucene.Net.Codecs/Pulsing/PulsingPostingsFormat.cs
+++ b/src/Lucene.Net.Codecs/Pulsing/PulsingPostingsFormat.cs
@@ -25,6 +25,7 @@ namespace Lucene.Net.Codecs.Pulsing
     /// This postings format "inlines" the postings for terms that have
     /// low docFreq.  It wraps another postings format, which is used for
     /// writing the non-inlined terms.
+    /// <para/>
     /// @lucene.experimental 
     /// </summary>
     public abstract class PulsingPostingsFormat : PostingsFormat
@@ -40,7 +41,7 @@ namespace Lucene.Net.Codecs.Pulsing
         {
         }
 
-        /// <summary>Terms with freq less than or equal freqCutoff are inlined into terms dict.</summary>
+        /// <summary>Terms with freq less than or equal <paramref name="freqCutoff"/> are inlined into terms dict.</summary>
         public PulsingPostingsFormat(PostingsBaseFormat wrappedPostingsBaseFormat, int freqCutoff,
             int minBlockSize, int maxBlockSize) 
             : base()

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Pulsing/PulsingPostingsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Pulsing/PulsingPostingsReader.cs b/src/Lucene.Net.Codecs/Pulsing/PulsingPostingsReader.cs
index 1e8a23d..9b3627d 100644
--- a/src/Lucene.Net.Codecs/Pulsing/PulsingPostingsReader.cs
+++ b/src/Lucene.Net.Codecs/Pulsing/PulsingPostingsReader.cs
@@ -27,14 +27,13 @@ namespace Lucene.Net.Codecs.Pulsing
      */
 
     /// <summary>
-    /// Concrete class that reads the current doc/freq/skip postings format 
-    /// 
+    /// Concrete class that reads the current doc/freq/skip postings format.
+    /// <para/>
     /// @lucene.experimental
-    /// 
-    /// TODO: -- should we switch "hasProx" higher up?  and
-    /// create two separate docs readers, one that also reads
-    /// prox and one that doesn't?
     /// </summary>
+    // TODO: -- should we switch "hasProx" higher up?  and
+    // create two separate docs readers, one that also reads
+    // prox and one that doesn't?
     public class PulsingPostingsReader : PostingsReaderBase
     {
         // Fallback reader for non-pulsed terms:
@@ -651,11 +650,11 @@ namespace Lucene.Net.Codecs.Pulsing
         }
 
         /// <summary>
-        /// for a docsenum, gets the 'other' reused enum.
+        /// For a docsenum, gets the 'other' reused enum.
         /// Example: Pulsing(Standard).
-        /// when doing a term range query you are switching back and forth
-        /// between Pulsing and Standard
-        ///  
+        /// When doing a term range query you are switching back and forth
+        /// between Pulsing and Standard.
+        /// <para/>
         /// The way the reuse works is that Pulsing.other = Standard and
         /// Standard.other = Pulsing.
         /// </summary>
@@ -671,8 +670,8 @@ namespace Lucene.Net.Codecs.Pulsing
         }
 
         /// <summary>
-        /// for a docsenum, sets the 'other' reused enum.
-        /// see GetOther for an example.
+        /// For a docsenum, sets the 'other' reused enum.
+        /// See <see cref="GetOther(DocsEnum)"/> for an example.
         /// </summary>
         private DocsEnum SetOther(DocsEnum de, DocsEnum other)
         {
@@ -684,7 +683,7 @@ namespace Lucene.Net.Codecs.Pulsing
         /// A per-docsenum attribute that stores additional reuse information
         /// so that pulsing enums can keep a reference to their wrapped enums,
         /// and vice versa. this way we can always reuse.
-        /// 
+        /// <para/>
         /// @lucene.internal 
         /// </summary>
         public interface IPulsingEnumAttribute : IAttribute
@@ -693,9 +692,9 @@ namespace Lucene.Net.Codecs.Pulsing
         }
 
         /// <summary>
-        /// Implementation of {@link PulsingEnumAttribute} for reuse of
+        /// Implementation of <see cref="PulsingEnumAttribute"/> for reuse of
         /// wrapped postings readers underneath pulsing.
-        /// 
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public sealed class PulsingEnumAttribute : Util.Attribute, IPulsingEnumAttribute

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Pulsing/PulsingPostingsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Pulsing/PulsingPostingsWriter.cs b/src/Lucene.Net.Codecs/Pulsing/PulsingPostingsWriter.cs
index 3cae4d2..c515426 100644
--- a/src/Lucene.Net.Codecs/Pulsing/PulsingPostingsWriter.cs
+++ b/src/Lucene.Net.Codecs/Pulsing/PulsingPostingsWriter.cs
@@ -23,20 +23,20 @@ namespace Lucene.Net.Codecs.Pulsing
      * limitations under the License.
      */
 
+    // TODO: we now inline based on total TF of the term,
+    // but it might be better to inline by "net bytes used"
+    // so that a term that has only 1 posting but a huge
+    // payload would not be inlined.  Though this is
+    // presumably rare in practice...
+
     /// <summary>
-    /// TODO: we now inline based on total TF of the term,
-    /// but it might be better to inline by "net bytes used"
-    /// so that a term that has only 1 posting but a huge
-    /// payload would not be inlined.  Though this is
-    /// presumably rare in practice...
-    /// 
     /// Writer for the pulsing format. 
-    ///
+    /// <para/>
     /// Wraps another postings implementation and decides 
     /// (based on total number of occurrences), whether a terms 
     /// postings should be inlined into the term dictionary,
     /// or passed through to the wrapped writer.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public sealed class PulsingPostingsWriter : PostingsWriterBase
@@ -96,7 +96,7 @@ namespace Lucene.Net.Codecs.Pulsing
             internal int FieldNumber { get; private set; }
 
             /// <summary>
-            /// NOTE: This was longsSize (field) in Lucene
+            /// NOTE: This was longsSize (field) in Lucene.
             /// </summary>
             internal int Int64sSize { get; private set; }
 
@@ -115,8 +115,8 @@ namespace Lucene.Net.Codecs.Pulsing
 
         /// <summary>
         /// If the total number of positions (summed across all docs
-        /// for this term) is less than or equal maxPositions, then the postings are
-        /// inlined into terms dict
+        /// for this term) is less than or equal to <paramref name="maxPositions"/>, then the postings are
+        /// inlined into terms dict.
         /// </summary>
         public PulsingPostingsWriter(SegmentWriteState state, int maxPositions, PostingsWriterBase wrappedPostingsWriter)
         {
@@ -152,14 +152,13 @@ namespace Lucene.Net.Codecs.Pulsing
             Debug.Assert(_pendingCount == 0);
         }
 
+        // TODO: -- should we NOT reuse across fields?  would
+        // be cleaner
+
         /// <summary>
-        /// TODO: -- should we NOT reuse across fields?  would
-        /// be cleaner
         /// Currently, this instance is re-used across fields, so
-        /// our parent calls setField whenever the field changes
+        /// our parent calls setField whenever the field changes.
         /// </summary>
-        /// <param name="fieldInfo"></param>
-        /// <returns></returns>
         public override int SetField(FieldInfo fieldInfo)
         {
             _indexOptions = fieldInfo.IndexOptions;
@@ -261,9 +260,8 @@ namespace Lucene.Net.Codecs.Pulsing
         private readonly RAMOutputStream _buffer = new RAMOutputStream();
 
         /// <summary>
-        /// Called when we are done adding docs to this term
+        /// Called when we are done adding docs to this term.
         /// </summary>
-        /// <param name="state"></param>
         public override void FinishTerm(BlockTermState state)
         {
             var state2 = (PulsingTermState)state;
@@ -465,7 +463,9 @@ namespace Lucene.Net.Codecs.Pulsing
             }
         }
 
-        // Pushes pending positions to the wrapped codec
+        /// <summary>
+        /// Pushes pending positions to the wrapped codec.
+        /// </summary>
         private void Push()
         {
             Debug.Assert(_pendingCount == _pending.Length);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Sep/IntIndexInput.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Sep/IntIndexInput.cs b/src/Lucene.Net.Codecs/Sep/IntIndexInput.cs
index 9a3aabb..dbc343c 100644
--- a/src/Lucene.Net.Codecs/Sep/IntIndexInput.cs
+++ b/src/Lucene.Net.Codecs/Sep/IntIndexInput.cs
@@ -22,11 +22,10 @@ namespace Lucene.Net.Codecs.Sep
 
     /// <summary>
     /// Defines basic API for writing ints to an <see cref="IndexOutput"/>.
-    /// IntBlockCodec interacts with this API. @see
-    /// IntBlockReader
+    /// IntBlockCodec interacts with this API. See IntBlockReader.
     /// <para/>
     /// NOTE: This was IntIndexInput in Lucene
-    /// 
+    /// <para/>
     /// @lucene.experimental 
     /// </summary>
     public abstract class Int32IndexInput : IDisposable
@@ -44,12 +43,12 @@ namespace Lucene.Net.Codecs.Sep
         public abstract Index GetIndex();
 
         /// <summary>
-        /// Records a single skip-point in the <see cref="Int32IndexInput.GetReader"/>. </summary>
+        /// Records a single skip-point in the <see cref="Int32IndexInput.GetReader()"/>. </summary>
         public abstract class Index
         {
             public abstract void Read(DataInput indexIn, bool absolute);
 
-            /// <summary>Seeks primary stream to the last read offset </summary>
+            /// <summary>Seeks primary stream to the last read offset. </summary>
             public abstract void Seek(Reader stream);
 
             public abstract void CopyFrom(Index other);
@@ -57,10 +56,10 @@ namespace Lucene.Net.Codecs.Sep
             public abstract object Clone();
         }
 
-        /// <summary>Reads int values</summary>
+        /// <summary>Reads <see cref="int"/> values.</summary>
         public abstract class Reader
         {
-            /// <summary>Reads next single int</summary>
+            /// <summary>Reads next single <see cref="int"/>.</summary>
             public abstract int Next();
         }
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Sep/IntIndexOutput.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Sep/IntIndexOutput.cs b/src/Lucene.Net.Codecs/Sep/IntIndexOutput.cs
index 5d97def..81fb248 100644
--- a/src/Lucene.Net.Codecs/Sep/IntIndexOutput.cs
+++ b/src/Lucene.Net.Codecs/Sep/IntIndexOutput.cs
@@ -20,47 +20,46 @@ namespace Lucene.Net.Codecs.Sep
      * limitations under the License.
      */
 
+    // TODO: We may want tighter integration w/IndexOutput
+    // may give better performance
+
     /// <summary>
-    /// Defines basic API for writing ints to an IndexOutput.
-    /// IntBlockCodec interacts with this API. @see IntBlockReader.
-    /// 
+    /// Defines basic API for writing ints to an <see cref="IndexOutput"/>.
+    /// IntBlockCodec interacts with this API. See IntBlockReader.
+    /// <para/>
     /// NOTE: block sizes could be variable
     /// <para/>
     /// NOTE: This was IntIndexOutput in Lucene
-    /// 
+    /// <para/>
     /// @lucene.experimental 
     /// </summary>
-    /// <remarks>
-    /// TODO: We may want tighter integration w/IndexOutput
-    /// may give better performance
-    /// </remarks>
     public abstract class Int32IndexOutput : IDisposable
     {
         /// <summary>
-        /// Write an int to the primary file.  The value must be
+        /// Write an <see cref="int"/> to the primary file.  The value must be
         /// >= 0.  
         /// </summary>
         public abstract void Write(int v);
 
-        /// <summary>Records a single skip-point in the IndexOutput. </summary>
+        /// <summary>Records a single skip-point in the <see cref="IndexOutput"/>. </summary>
         public abstract class Index
         {
-            /// <summary>Internally records the current location </summary>
+            /// <summary>Internally records the current location. </summary>
             public abstract void Mark();
 
-            /// <summary>Copies index from other </summary>
+            /// <summary>Copies index from <paramref name="other"/>. </summary>
             public abstract void CopyFrom(Index other, bool copyLast);
 
             /// <summary>
             /// Writes "location" of current output pointer of primary
-            ///  output to different output (out) 
+            /// output to a different output (out).
             /// </summary>
             public abstract void Write(DataOutput indexOut, bool absolute);
         }
 
         /// <summary>
         /// If you are indexing the primary output file, call
-        ///  this and interact with the returned IndexWriter. 
+        /// this and interact with the returned IndexWriter. 
         /// </summary>
         public abstract Index GetIndex();
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Sep/IntStreamFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Sep/IntStreamFactory.cs b/src/Lucene.Net.Codecs/Sep/IntStreamFactory.cs
index 09f0915..78dd90c 100644
--- a/src/Lucene.Net.Codecs/Sep/IntStreamFactory.cs
+++ b/src/Lucene.Net.Codecs/Sep/IntStreamFactory.cs
@@ -20,21 +20,21 @@ namespace Lucene.Net.Codecs.Sep
      */
 
     /// <summary>
-    /// Provides int reader and writer to specified files.
+    /// Provides <see cref="int"/> reader and writer to specified files.
     /// <para/>
     /// NOTE: This was IntStreamFactory in Lucene
-    /// 
+    /// <para/>
     /// @lucene.experimental 
     /// </summary>
     public abstract class Int32StreamFactory
     {
         /// <summary>
-        /// Create an <seealso cref="Int32IndexInput"/> on the provided fileName. 
+        /// Create an <see cref="Int32IndexInput"/> on the provided fileName. 
         /// </summary>
         public abstract Int32IndexInput OpenInput(Directory dir, string fileName, IOContext context);
 
         /// <summary>
-        /// Create an <seealso cref="Int32IndexOutput"/> on the provided fileName. 
+        /// Create an <see cref="Int32IndexOutput"/> on the provided fileName. 
         /// </summary>
         public abstract Int32IndexOutput CreateOutput(Directory dir, string fileName, IOContext context);
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Sep/SepPostingsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Sep/SepPostingsReader.cs b/src/Lucene.Net.Codecs/Sep/SepPostingsReader.cs
index 0ae52cf..90fb85e 100644
--- a/src/Lucene.Net.Codecs/Sep/SepPostingsReader.cs
+++ b/src/Lucene.Net.Codecs/Sep/SepPostingsReader.cs
@@ -22,17 +22,16 @@ namespace Lucene.Net.Codecs.Sep
      * limitations under the License.
      */
 
+    // TODO: -- should we switch "hasProx" higher up?  and
+    // create two separate docs readers, one that also reads
+    // prox and one that doesn't?
+
     /// <summary>
     /// Concrete class that reads the current doc/freq/skip
     /// postings format.    
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
-    /// <remarks>
-    /// TODO: -- should we switch "hasProx" higher up?  and
-    /// create two separate docs readers, one that also reads
-    /// prox and one that doesn't?
-    /// </remarks>
     public class SepPostingsReader : PostingsReaderBase
     {
         private readonly Int32IndexInput freqIn;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Sep/SepPostingsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Sep/SepPostingsWriter.cs b/src/Lucene.Net.Codecs/Sep/SepPostingsWriter.cs
index dbfe65e..9a1e895 100644
--- a/src/Lucene.Net.Codecs/Sep/SepPostingsWriter.cs
+++ b/src/Lucene.Net.Codecs/Sep/SepPostingsWriter.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Codecs.Sep
     /// <summary>
     /// Writes frq to .frq, docs to .doc, pos to .pos, payloads
     /// to .pyl, skip data to .skp
-    /// 
+    /// <para/>
     /// @lucene.experimental 
     /// </summary>
     public sealed class SepPostingsWriter : PostingsWriterBase
@@ -59,7 +59,7 @@ namespace Lucene.Net.Codecs.Sep
 
         /// <summary>
         /// Expert: The fraction of TermDocs entries stored in skip tables,
-        /// used to accelerate <seealso cref="DocsEnum#advance(int)"/>.  Larger values result in
+        /// used to accelerate <see cref="Lucene.Net.Search.DocIdSetIterator.Advance(int)"/>.  Larger values result in
         /// smaller indexes, greater acceleration, but fewer accelerable cases, while
         /// smaller values result in bigger indexes, less acceleration and more
         /// accelerable cases. More detailed experiments would be useful here. 
@@ -68,7 +68,7 @@ namespace Lucene.Net.Codecs.Sep
         private static readonly int DEFAULT_SKIP_INTERVAL = 16;
 
         /// <summary>
-        /// Expert: minimum docFreq to write any skip data at all
+        /// Expert: minimum docFreq to write any skip data at all.
         /// </summary>
         private readonly int skipMinimum;
 
@@ -225,8 +225,8 @@ namespace Lucene.Net.Codecs.Sep
         }
 
         /// <summary>
-        /// Adds a new doc in this term.  If this returns null
-        ///  then we just skip consuming positions/payloads. 
+        /// Adds a new doc in this term.  If this returns <c>null</c>
+        /// then we just skip consuming positions/payloads. 
         /// </summary>
         public override void StartDoc(int docID, int termDocFreq)
         {
@@ -257,7 +257,7 @@ namespace Lucene.Net.Codecs.Sep
         }
 
         /// <summary>
-        /// Add a new position &amp; payload </summary>
+        /// Add a new position &amp; payload. </summary>
         public override void AddPosition(int position, BytesRef payload, int startOffset, int endOffset)
         {
             Debug.Assert(indexOptions == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
@@ -295,7 +295,7 @@ namespace Lucene.Net.Codecs.Sep
             lastPosition = position;
         }
 
-        /// <summary>Called when we are done adding positions & payloads </summary>
+        /// <summary>Called when we are done adding positions &amp; payloads. </summary>
         public override void FinishDoc()
         {
             lastPosition = 0;
@@ -310,7 +310,7 @@ namespace Lucene.Net.Codecs.Sep
             public long SkipFP { get; set; }
         }
 
-        /// <summary>Called when we are done adding docs to this term </summary>
+        /// <summary>Called when we are done adding docs to this term. </summary>
         public override void FinishTerm(BlockTermState state)
         {
             SepTermState state_ = (SepTermState)state;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Sep/SepSkipListReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Sep/SepSkipListReader.cs b/src/Lucene.Net.Codecs/Sep/SepSkipListReader.cs
index 1b2a737..5cc9df4 100644
--- a/src/Lucene.Net.Codecs/Sep/SepSkipListReader.cs
+++ b/src/Lucene.Net.Codecs/Sep/SepSkipListReader.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Codecs.Sep
     /// <summary>
     /// Implements the skip list reader for the default posting list format
     /// that stores positions and payloads.
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
 
@@ -126,7 +126,7 @@ namespace Lucene.Net.Codecs.Sep
 
         /// <summary>
         /// Returns the payload length of the payload stored just before 
-        /// the doc to which the last call of <seealso cref="MultiLevelSkipListReader#skipTo(int)"/> 
+        /// the doc to which the last call of <see cref="MultiLevelSkipListReader.SkipTo(int)"/> 
         /// has skipped.  
         /// </summary>
         internal virtual int PayloadLength

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Sep/SepSkipListWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Sep/SepSkipListWriter.cs b/src/Lucene.Net.Codecs/Sep/SepSkipListWriter.cs
index 21c6209..5d3acb5 100644
--- a/src/Lucene.Net.Codecs/Sep/SepSkipListWriter.cs
+++ b/src/Lucene.Net.Codecs/Sep/SepSkipListWriter.cs
@@ -22,16 +22,15 @@ namespace Lucene.Net.Codecs.Sep
      * limitations under the License.
      */
 
+    // TODO: -- skip data should somehow be more local to the particular stream 
+    // (doc, freq, pos, payload)
+
     /// <summary>
     /// Implements the skip list writer for the default posting list format
     /// that stores positions and payloads.
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
-    /// <remarks>
-    /// TODO: -- skip data should somehow be more local to the particular stream 
-    /// (doc, freq, pos, payload)
-    /// </remarks>
     internal class SepSkipListWriter : MultiLevelSkipListWriter
     {
         private readonly int[] _lastSkipDoc;
@@ -108,7 +107,7 @@ namespace Lucene.Net.Codecs.Sep
 
         /// <summary>
         /// Sets the values for the current skip data. 
-        /// Called @ every index interval (every 128th (by default) doc)
+        /// Called @ every index interval (every 128th (by default) doc).
         /// </summary>
         internal virtual void SetSkipData(int doc, bool storePayloads, int payloadLength)
         {
@@ -122,7 +121,7 @@ namespace Lucene.Net.Codecs.Sep
         }
 
         /// <summary>
-        /// Called @ start of new term
+        /// Called @ start of new term.
         /// </summary>
         protected internal virtual void ResetSkip(Int32IndexOutput.Index topDocIndex, Int32IndexOutput.Index topFreqIndex,
             Int32IndexOutput.Index topPosIndex)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextCodec.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextCodec.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextCodec.cs
index 875e519..8d9573a 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextCodec.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextCodec.cs
@@ -18,11 +18,11 @@
      */
 
     /// <summary>
-    /// plain text index format.
+    /// Plain text index format.
     /// <para>
-    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
-    /// @lucene.experimental
+    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
     /// </para>
+    /// @lucene.experimental
     /// </summary>
     [CodecName("SimpleText")] // LUCENENET specific - using CodecName attribute to ensure the default name passed from subclasses is the same as this class name
     public sealed class SimpleTextCodec : Codec

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextDocValuesFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextDocValuesFormat.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextDocValuesFormat.cs
index a247080..e77b038 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextDocValuesFormat.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextDocValuesFormat.cs
@@ -21,14 +21,14 @@
 	using SegmentWriteState = Index.SegmentWriteState;
 
     /// <summary>
-    /// plain text doc values format.
+    /// Plain text doc values format.
     /// <para>
-    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
+    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
     /// </para>
     /// <para>
-    /// the .dat file contains the data.
-    ///  for numbers this is a "fixed-width" file, for example a single byte range:
-    ///  <pre>
+    /// The .dat file contains the data.
+    /// For numbers this is a "fixed-width" file, for example a single byte range:
+    /// <code>
     ///  field myField
     ///    type NUMERIC
     ///    minvalue 0
@@ -40,13 +40,13 @@
     ///  123
     ///  T
     ///  ...
-    ///  </pre>
-    ///  so a document's value (delta encoded from minvalue) can be retrieved by 
-    ///  seeking to startOffset + (1+pattern.length()+2)*docid. The extra 1 is the newline. 
-    ///  The extra 2 is another newline and 'T' or 'F': true if the value is real, false if missing.
+    /// </code>
+    /// So a document's value (delta encoded from minvalue) can be retrieved by 
+    /// seeking to startOffset + (1+pattern.length()+2)*docid. The extra 1 is the newline. 
+    /// The extra 2 is another newline and 'T' or 'F': true if the value is real, false if missing.
     ///  
-    ///  for bytes this is also a "fixed-width" file, for example:
-    ///  <pre>
+    /// For bytes this is also a "fixed-width" file, for example:
+    /// <code>
     ///  field myField
     ///    type BINARY
     ///    maxlength 6
@@ -58,13 +58,13 @@
     ///  baz[space][space][space][space][space]
     ///  T
     ///  ...
-    ///  </pre>
-    ///  so a doc's value can be retrieved by seeking to startOffset + (9+pattern.length+maxlength+2)*doc
-    ///  the extra 9 is 2 newlines, plus "length " itself.
-    ///  the extra 2 is another newline and 'T' or 'F': true if the value is real, false if missing.
+    /// </code>
+    /// So a doc's value can be retrieved by seeking to startOffset + (9+pattern.length+maxlength+2)*doc.
+    /// The extra 9 is 2 newlines, plus "length " itself.
+    /// The extra 2 is another newline and 'T' or 'F': true if the value is real, false if missing.
     ///  
-    ///  for sorted bytes this is a fixed-width file, for example:
-    ///  <pre>
+    /// For sorted bytes this is a fixed-width file, for example:
+    /// <code>
     ///  field myField
     ///    type SORTED
     ///    numvalues 10
@@ -81,13 +81,13 @@
     ///  01
     ///  10
     ///  ...
-    ///  </pre>
-    ///  so the "ord section" begins at startOffset + (9+pattern.length+maxlength)*numValues.
-    ///  a document's ord can be retrieved by seeking to "ord section" + (1+ordpattern.length())*docid
-    ///  an ord's value can be retrieved by seeking to startOffset + (9+pattern.length+maxlength)*ord
+    /// </code>
+    /// So the "ord section" begins at startOffset + (9+pattern.length+maxlength)*numValues.
+    /// A document's ord can be retrieved by seeking to "ord section" + (1+ordpattern.length())*docid
+    /// an ord's value can be retrieved by seeking to startOffset + (9+pattern.length+maxlength)*ord
     ///  
-    ///  for sorted set this is a fixed-width file very similar to the SORTED case, for example:
-    ///  <pre>
+    /// For sorted set this is a fixed-width file very similar to the SORTED case, for example:
+    /// <code>
     ///  field myField
     ///    type SORTED_SET
     ///    numvalues 10
@@ -104,16 +104,17 @@
     ///  
     ///  10
     ///  ...
-    ///  </pre>
-    ///  so the "ord section" begins at startOffset + (9+pattern.length+maxlength)*numValues.
-    ///  a document's ord list can be retrieved by seeking to "ord section" + (1+ordpattern.length())*docid
-    ///  this is a comma-separated list, and its padded with spaces to be fixed width. so trim() and split() it.
-    ///  and beware the empty string!
-    ///  an ord's value can be retrieved by seeking to startOffset + (9+pattern.length+maxlength)*ord
-    ///   
-    ///  the reader can just scan this file when it opens, skipping over the data blocks
-    ///  and saving the offset/etc for each field. 
-    ///  @lucene.experimental
+    /// </code>
+    /// So the "ord section" begins at startOffset + (9+pattern.length+maxlength)*numValues.
+    /// A document's ord list can be retrieved by seeking to "ord section" + (1+ordpattern.length())*docid
+    /// this is a comma-separated list, and its padded with spaces to be fixed width. so trim() and split() it.
+    /// and beware the empty string!
+    /// An ord's value can be retrieved by seeking to startOffset + (9+pattern.length+maxlength)*ord
+    /// <para/> 
+    /// The reader can just scan this file when it opens, skipping over the data blocks
+    /// and saving the offset/etc for each field.
+    /// <para/>
+    /// @lucene.experimental
     /// </para>
     /// </summary>
     [DocValuesFormatName("SimpleText")] // LUCENENET specific - using DocValuesFormatName attribute to ensure the default name passed from subclasses is the same as this class name

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextDocValuesWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextDocValuesWriter.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextDocValuesWriter.cs
index e319d6d..1a01db9 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextDocValuesWriter.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextDocValuesWriter.cs
@@ -406,7 +406,7 @@ namespace Lucene.Net.Codecs.SimpleText
             }
         }
 
-        /// <summary>Write the header for this field </summary>
+        /// <summary>Write the header for this field. </summary>
         private void WriteFieldEntry(FieldInfo field, DocValuesType type)
         {
             SimpleTextUtil.Write(data, FIELD);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldInfosFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldInfosFormat.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldInfosFormat.cs
index 50ca57d..ea2ee27 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldInfosFormat.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldInfosFormat.cs
@@ -18,11 +18,11 @@
      */
 
     /// <summary>
-    /// plaintext field infos format
+    /// Plain text field infos format.
     /// <para>
-    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
-    /// @lucene.experimental
+    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
     /// </para>
+    /// @lucene.experimental
     /// </summary>
     public class SimpleTextFieldInfosFormat : FieldInfosFormat
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldInfosReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldInfosReader.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldInfosReader.cs
index 0d8d059..a53519a 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldInfosReader.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldInfosReader.cs
@@ -35,11 +35,11 @@ namespace Lucene.Net.Codecs.SimpleText
     using StringHelper = Util.StringHelper;
 
     /// <summary>
-    /// reads plaintext field infos files
+    /// Reads plain text field infos files.
     /// <para>
-    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
-    /// @lucene.experimental
+    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
     /// </para>
+    /// @lucene.experimental
     /// </summary>
     public class SimpleTextFieldInfosReader : FieldInfosReader
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldInfosWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldInfosWriter.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldInfosWriter.cs
index 75979c8..4e3eee1 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldInfosWriter.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldInfosWriter.cs
@@ -32,16 +32,16 @@ namespace Lucene.Net.Codecs.SimpleText
     using IOUtils = Util.IOUtils;
 
     /// <summary>
-    /// writes plaintext field infos files
+    /// Writes plain text field infos files.
     /// <para>
-    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
-    /// @lucene.experimental
+    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
     /// </para>
+    /// @lucene.experimental
     /// </summary>
     public class SimpleTextFieldInfosWriter : FieldInfosWriter
     {
         /// <summary>
-        /// Extension of field infos </summary>
+        /// Extension of field infos. </summary>
         internal const string FIELD_INFOS_EXTENSION = "inf";
 
         internal static readonly BytesRef NUMFIELDS = new BytesRef("number of fields ");

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldsReader.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldsReader.cs
index 5d57d4f..a7c33f8 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldsReader.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextFieldsReader.cs
@@ -673,7 +673,7 @@ namespace Lucene.Net.Codecs.SimpleText
             
             }
 
-            /// <summary>Returns approximate RAM bytes used</summary>
+            /// <summary>Returns approximate RAM bytes used.</summary>
             public virtual long RamBytesUsed()
             {
                 return (_fst != null) ? _fst.GetSizeInBytes() : 0;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextLiveDocsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextLiveDocsFormat.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextLiveDocsFormat.cs
index e1d9cfd..ef1c6d2 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextLiveDocsFormat.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextLiveDocsFormat.cs
@@ -40,11 +40,11 @@ namespace Lucene.Net.Codecs.SimpleText
     using UnicodeUtil = Util.UnicodeUtil;
 
     /// <summary>
-    /// reads/writes plaintext live docs
+    /// Reads/writes plain text live docs.
     /// <para>
-    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
-    /// @lucene.experimental
+    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
     /// </para>
+    /// @lucene.experimental
     /// </summary>
     public class SimpleTextLiveDocsFormat : LiveDocsFormat
     {
@@ -113,7 +113,7 @@ namespace Lucene.Net.Codecs.SimpleText
         }
 
         /// <summary>
-        /// NOTE: This was parseIntAt() in Lucene
+        /// NOTE: This was parseIntAt() in Lucene.
         /// </summary>
         private int ParseInt32At(BytesRef bytes, int offset, CharsRef scratch)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextNormsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextNormsFormat.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextNormsFormat.cs
index 3336938..fdc5864 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextNormsFormat.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextNormsFormat.cs
@@ -21,12 +21,11 @@
     using SegmentWriteState = Index.SegmentWriteState;
 
     /// <summary>
-    /// plain-text norms format.
+    /// Plain-text norms format.
     /// <para>
-    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
-    /// 
-    /// @lucene.experimental
+    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
     /// </para>
+    /// @lucene.experimental
     /// </summary>
     public class SimpleTextNormsFormat : NormsFormat
     {
@@ -45,10 +44,9 @@
         /// <summary>
         /// Reads plain-text norms.
         /// <para>
-        /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
-        /// 
-        /// @lucene.experimental
+        /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
         /// </para>
+        /// @lucene.experimental
         /// </summary>
         public class SimpleTextNormsProducer : SimpleTextDocValuesReader
         {
@@ -63,10 +61,9 @@
         /// <summary>
         /// Writes plain-text norms.
         /// <para>
-        /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
-        /// 
-        /// @lucene.experimental
+        /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
         /// </para>
+        /// @lucene.experimental
         /// </summary>
         public class SimpleTextNormsConsumer : SimpleTextDocValuesWriter
         {
@@ -78,5 +75,4 @@
             }
         }
     }
-
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextPostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextPostingsFormat.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextPostingsFormat.cs
index da42168..413dffa 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextPostingsFormat.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextPostingsFormat.cs
@@ -23,14 +23,13 @@
 
     /// <summary>
     /// For debugging, curiosity, transparency only!!  Do not
-    ///  use this codec in production.
+    /// use this codec in production.
     /// 
-    ///  <para>This codec stores all postings data in a single
-    ///  human-readable text file (_N.pst).  You can view this in
-    ///  any text editor, and even edit it to alter your index.
-    /// 
-    ///  @lucene.experimental 
+    /// <para>This codec stores all postings data in a single
+    /// human-readable text file (_N.pst).  You can view this in
+    /// any text editor, and even edit it to alter your index.
     /// </para>
+    /// @lucene.experimental 
     /// </summary>
     [PostingsFormatName("SimpleText")] // LUCENENET specific - using PostingsFormatName attribute to ensure the default name passed from subclasses is the same as this class name
     public sealed class SimpleTextPostingsFormat : PostingsFormat
@@ -51,7 +50,7 @@
         }
 
         /// <summary>
-        /// Extension of freq postings file </summary>
+        /// Extension of freq postings file. </summary>
         internal const string POSTINGS_EXTENSION = "pst";
 
         internal static string GetPostingsFileName(string segment, string segmentSuffix)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextSegmentInfoFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextSegmentInfoFormat.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextSegmentInfoFormat.cs
index ef3cc4c..8db197c 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextSegmentInfoFormat.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextSegmentInfoFormat.cs
@@ -18,11 +18,11 @@
      */
 
     /// <summary>
-    /// plain text segments file format.
+    /// Plain text segments file format.
     /// <para>
-    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
-    /// @lucene.experimental
+    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
     /// </para>
+    /// @lucene.experimental
     /// </summary>
     public class SimpleTextSegmentInfoFormat : SegmentInfoFormat
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextSegmentInfoReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextSegmentInfoReader.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextSegmentInfoReader.cs
index 000f212..6e7a072 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextSegmentInfoReader.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextSegmentInfoReader.cs
@@ -33,11 +33,11 @@ namespace Lucene.Net.Codecs.SimpleText
     using StringHelper = Util.StringHelper;
 
     /// <summary>
-    /// reads plaintext segments files
+    /// Reads plaintext segments files.
     /// <para>
-    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
-    /// @lucene.experimental
+    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
     /// </para>
+    /// @lucene.experimental
     /// </summary>
     public class SimpleTextSegmentInfoReader : SegmentInfoReader
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextSegmentInfoWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextSegmentInfoWriter.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextSegmentInfoWriter.cs
index d12df2f..e966a7c 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextSegmentInfoWriter.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextSegmentInfoWriter.cs
@@ -30,11 +30,11 @@ namespace Lucene.Net.Codecs.SimpleText
     using SegmentInfo = Index.SegmentInfo;
 
     /// <summary>
-    /// writes plaintext segments files
+    /// Writes plain text segments files.
     /// <para>
-    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
-    /// @lucene.experimental
+    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
     /// </para>
+    /// @lucene.experimental
     /// </summary>
     public class SimpleTextSegmentInfoWriter : SegmentInfoWriter
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsFormat.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsFormat.cs
index 5a89edd..7143992 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsFormat.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsFormat.cs
@@ -23,11 +23,11 @@
     using IOContext = Store.IOContext;
 
     /// <summary>
-    /// plain text stored fields format.
+    /// Plain text stored fields format.
     /// <para>
-    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
-    /// @lucene.experimental
+    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
     /// </para>
+    /// @lucene.experimental
     /// </summary>
     public class SimpleTextStoredFieldsFormat : StoredFieldsFormat
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsReader.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsReader.cs
index 032c9e4..9131d31 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsReader.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsReader.cs
@@ -41,11 +41,11 @@ namespace Lucene.Net.Codecs.SimpleText
     using UnicodeUtil = Util.UnicodeUtil;
 
     /// <summary>
-    /// reads plaintext stored fields
+    /// Reads plain text stored fields.
     /// <para>
-    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
-    /// @lucene.experimental
+    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
     /// </para>
+    /// @lucene.experimental
     /// </summary>
     public class SimpleTextStoredFieldsReader : StoredFieldsReader
     {
@@ -84,7 +84,7 @@ namespace Lucene.Net.Codecs.SimpleText
             ReadIndex(si.DocCount);
         }
 
-        /// <remarks>Used by clone</remarks>
+        /// <remarks>Used by clone.</remarks>
         internal SimpleTextStoredFieldsReader(long[] offsets, IndexInput input, FieldInfos fieldInfos)
         {
             _offsets = offsets;
@@ -93,7 +93,7 @@ namespace Lucene.Net.Codecs.SimpleText
         }
 
         /// <remarks>
-        /// we don't actually write a .fdx-like index, instead we read the 
+        /// We don't actually write a .fdx-like index, instead we read the 
         /// stored fields file in entirety up-front and save the offsets 
         /// so we can seek to the documents later.
         /// </remarks>
@@ -250,7 +250,7 @@ namespace Lucene.Net.Codecs.SimpleText
         }
 
         /// <summary>
-        /// NOTE: This was parseIntAt() in Lucene
+        /// NOTE: This was parseIntAt() in Lucene.
         /// </summary>
         private int ParseInt32At(int offset)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsWriter.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsWriter.cs
index 67c5cc5..ae67cc3 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsWriter.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsWriter.cs
@@ -33,9 +33,9 @@ namespace Lucene.Net.Codecs.SimpleText
     /// <summary>
     /// Writes plain-text stored fields.
     /// <para>
-    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
-    /// @lucene.experimental
+    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
     /// </para>
+    /// @lucene.experimental
     /// </summary>
     public class SimpleTextStoredFieldsWriter : StoredFieldsWriter
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsFormat.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsFormat.cs
index a8d38fe..9782e1a 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsFormat.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsFormat.cs
@@ -23,11 +23,11 @@
     using IOContext = Store.IOContext;
 
     /// <summary>
-    /// plain text term vectors format.
+    /// Plain text term vectors format.
     /// <para>
-    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
-    /// @lucene.experimental
+    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
     /// </para>
+    /// @lucene.experimental
     /// </summary>
     public class SimpleTextTermVectorsFormat : TermVectorsFormat
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsReader.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsReader.cs
index 95c50d6..a4d7fc5 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsReader.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsReader.cs
@@ -48,9 +48,9 @@ namespace Lucene.Net.Codecs.SimpleText
     /// <summary>
     /// Reads plain-text term vectors.
     /// <para>
-    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
-    /// @lucene.experimental
+    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
     /// </para>
+    /// @lucene.experimental
     /// </summary>
     public class SimpleTextTermVectorsReader : TermVectorsReader
     {
@@ -259,7 +259,7 @@ namespace Lucene.Net.Codecs.SimpleText
         }
 
         /// <summary>
-        /// NOTE: This was parseIntAt() in Lucene
+        /// NOTE: This was parseIntAt() in Lucene.
         /// </summary>
         private int ParseInt32At(int offset)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsWriter.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsWriter.cs
index b9d0bb6..c376c46 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsWriter.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsWriter.cs
@@ -34,9 +34,9 @@ namespace Lucene.Net.Codecs.SimpleText
     /// <summary>
     /// Writes plain-text term vectors.
     /// <para>
-    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
-    /// @lucene.experimental
+    /// <b><font color="red">FOR RECREATIONAL USE ONLY</font></b>
     /// </para>
+    /// @lucene.experimental
     /// </summary>
     public class SimpleTextTermVectorsWriter : TermVectorsWriter
     {


[36/48] lucenenet git commit: Lucene.Net.Codecs.Lucene46: Fixed XML documentation comment warnings

Posted by ni...@apache.org.
Lucene.Net.Codecs.Lucene46: Fixed XML documentation comment warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/5478f1bb
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/5478f1bb
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/5478f1bb

Branch: refs/heads/master
Commit: 5478f1bbeadbca0d48ff0a2e97723126438a5682
Parents: 8214105
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Mon Jun 5 08:32:51 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Tue Jun 6 06:58:40 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |   3 +-
 src/Lucene.Net/Codecs/Lucene46/Lucene46Codec.cs |  22 ++--
 .../Codecs/Lucene46/Lucene46FieldInfosFormat.cs | 112 +++++++++----------
 .../Codecs/Lucene46/Lucene46FieldInfosReader.cs |   7 +-
 .../Codecs/Lucene46/Lucene46FieldInfosWriter.cs |   5 +-
 .../Lucene46/Lucene46SegmentInfoFormat.cs       |  68 ++++++-----
 .../Lucene46/Lucene46SegmentInfoReader.cs       |   7 +-
 .../Lucene46/Lucene46SegmentInfoWriter.cs       |   7 +-
 8 files changed, 116 insertions(+), 115 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5478f1bb/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 21694de..0d350a9 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -57,8 +57,7 @@ helpers to help with that, see for examples see our [Java style methods to avoid
    4. Codecs.Lucene41 (namespace)
    5. Codecs.Lucene42 (namespace)
    6. Codecs.Lucene45 (namespace)
-   7. Codecs.Lucene46 (namespace)
-   8. Util.Packed (namespace)
+   7. Util.Packed (namespace)
 2. Lucene.Net.Codecs (project)
    1. Appending (namespace)
    2. BlockTerms (namespace)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5478f1bb/src/Lucene.Net/Codecs/Lucene46/Lucene46Codec.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene46/Lucene46Codec.cs b/src/Lucene.Net/Codecs/Lucene46/Lucene46Codec.cs
index 960138f..d8f9e0e 100644
--- a/src/Lucene.Net/Codecs/Lucene46/Lucene46Codec.cs
+++ b/src/Lucene.Net/Codecs/Lucene46/Lucene46Codec.cs
@@ -27,12 +27,14 @@ namespace Lucene.Net.Codecs.Lucene46
     /// <summary>
     /// Implements the Lucene 4.6 index format, with configurable per-field postings
     /// and docvalues formats.
-    /// <p>
+    /// <para/>
     /// If you want to reuse functionality of this codec in another codec, extend
-    /// <seealso cref="FilterCodec"/>.
+    /// <see cref="FilterCodec"/>.
+    /// <para/>
+    /// See <see cref="Lucene.Net.Codecs.Lucene46"/> package documentation for file format details.
+    /// <para/>
+    /// @lucene.experimental 
     /// </summary>
-    /// <seealso cref= Lucene.Net.Codecs.Lucene46 package documentation for file format details.
-    /// @lucene.experimental </seealso>
     // NOTE: if we make largish changes in a minor release, easier to just make Lucene46Codec or whatever
     // if they are backwards compatible or smallish we can probably do the backwards in the postingsreader
     // (it writes a minor version, etc).
@@ -120,9 +122,9 @@ namespace Lucene.Net.Codecs.Lucene46
 
         /// <summary>
         /// Returns the postings format that should be used for writing
-        ///  new segments of <code>field</code>.
-        ///
-        ///  The default implementation always returns "Lucene41"
+        /// new segments of <paramref name="field"/>.
+        /// <para/>
+        /// The default implementation always returns "Lucene41"
         /// </summary>
         public virtual PostingsFormat GetPostingsFormatForField(string field)
         {
@@ -131,9 +133,9 @@ namespace Lucene.Net.Codecs.Lucene46
 
         /// <summary>
         /// Returns the docvalues format that should be used for writing
-        ///  new segments of <code>field</code>.
-        ///
-        ///  The default implementation always returns "Lucene45"
+        /// new segments of <paramref name="field"/>.
+        /// <para/>
+        /// The default implementation always returns "Lucene45"
         /// </summary>
         public virtual DocValuesFormat GetDocValuesFormatForField(string field)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5478f1bb/src/Lucene.Net/Codecs/Lucene46/Lucene46FieldInfosFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene46/Lucene46FieldInfosFormat.cs b/src/Lucene.Net/Codecs/Lucene46/Lucene46FieldInfosFormat.cs
index 40c681c..4b1755a 100644
--- a/src/Lucene.Net/Codecs/Lucene46/Lucene46FieldInfosFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene46/Lucene46FieldInfosFormat.cs
@@ -19,65 +19,65 @@ namespace Lucene.Net.Codecs.Lucene46
 
     /// <summary>
     /// Lucene 4.6 Field Infos format.
-    /// <p>
-    /// <p>Field names are stored in the field info file, with suffix <tt>.fnm</tt>.</p>
-    /// <p>FieldInfos (.fnm) --&gt; Header,FieldsCount, &lt;FieldName,FieldNumber,
-    /// FieldBits,DocValuesBits,DocValuesGen,Attributes&gt; <sup>FieldsCount</sup>,Footer</p>
-    /// <p>Data types:
-    /// <ul>
-    ///   <li>Header --&gt; <seealso cref="CodecUtil#checkHeader CodecHeader"/></li>
-    ///   <li>FieldsCount --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>FieldName --&gt; <seealso cref="DataOutput#writeString String"/></li>
-    ///   <li>FieldBits, DocValuesBits --&gt; <seealso cref="DataOutput#writeByte Byte"/></li>
-    ///   <li>FieldNumber --&gt; <seealso cref="DataOutput#writeInt VInt"/></li>
-    ///   <li>Attributes --&gt; <seealso cref="DataOutput#writeStringStringMap Map&lt;String,String&gt;"/></li>
-    ///   <li>DocValuesGen --&gt; <seealso cref="DataOutput#writeLong(long) Int64"/></li>
-    ///   <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    /// </ul>
-    /// </p>
+    /// <para/>
+    /// <para>Field names are stored in the field info file, with suffix <c>.fnm</c>.</para>
+    /// <para>FieldInfos (.fnm) --&gt; Header,FieldsCount, &lt;FieldName,FieldNumber,
+    /// FieldBits,DocValuesBits,DocValuesGen,Attributes&gt; <sup>FieldsCount</sup>,Footer</para>
+    /// <para>Data types:
+    /// <list type="bullet">
+    ///   <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>)</description></item>
+    ///   <item><description>FieldsCount --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>)</description></item>
+    ///   <item><description>FieldName --&gt; String (<see cref="Store.DataOutput.WriteString(string)"/>)</description></item>
+    ///   <item><description>FieldBits, DocValuesBits --&gt; Byte (<see cref="Store.DataOutput.WriteByte(byte)"/>)</description></item>
+    ///   <item><description>FieldNumber --&gt; VInt (<see cref="Store.DataOutput.WriteInt32(int)"/>)</description></item>
+    ///   <item><description>Attributes --&gt; IDictionary&lt;String,String&gt; (<see cref="Store.DataOutput.WriteStringStringMap(System.Collections.Generic.IDictionary{string, string})"/>)</description></item>
+    ///   <item><description>DocValuesGen --&gt; Int64 (<see cref="Store.DataOutput.WriteInt64(long)"/>)</description></item>
+    ///   <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(Store.IndexOutput)"/>)</description></item>
+    /// </list>
+    /// </para>
     /// Field Descriptions:
-    /// <ul>
-    ///   <li>FieldsCount: the number of fields in this file.</li>
-    ///   <li>FieldName: name of the field as a UTF-8 String.</li>
-    ///   <li>FieldNumber: the field's number. Note that unlike previous versions of
+    /// <list type="bullet">
+    ///   <item><description>FieldsCount: the number of fields in this file.</description></item>
+    ///   <item><description>FieldName: name of the field as a UTF-8 string.</description></item>
+    ///   <item><description>FieldNumber: the field's number. Note that unlike previous versions of
     ///       Lucene, the fields are not numbered implicitly by their order in the
-    ///       file, instead explicitly.</li>
-    ///   <li>FieldBits: a byte containing field options.
-    ///       <ul>
-    ///         <li>The low-order bit is one for indexed fields, and zero for non-indexed
-    ///             fields.</li>
-    ///         <li>The second lowest-order bit is one for fields that have term vectors
-    ///             stored, and zero for fields without term vectors.</li>
-    ///         <li>If the third lowest order-bit is set (0x4), offsets are stored into
-    ///             the postings list in addition to positions.</li>
-    ///         <li>Fourth bit is unused.</li>
-    ///         <li>If the fifth lowest-order bit is set (0x10), norms are omitted for the
-    ///             indexed field.</li>
-    ///         <li>If the sixth lowest-order bit is set (0x20), payloads are stored for the
-    ///             indexed field.</li>
-    ///         <li>If the seventh lowest-order bit is set (0x40), term frequencies and
-    ///             positions omitted for the indexed field.</li>
-    ///         <li>If the eighth lowest-order bit is set (0x80), positions are omitted for the
-    ///             indexed field.</li>
-    ///       </ul>
-    ///    </li>
-    ///    <li>DocValuesBits: a byte containing per-document value types. The type
+    ///       file, instead explicitly.</description></item>
+    ///   <item><description>FieldBits: a <see cref="byte"/> containing field options.
+    ///       <list type="bullet">
+    ///         <item><description>The low-order bit is one for indexed fields, and zero for non-indexed
+    ///             fields.</description></item>
+    ///         <item><description>The second lowest-order bit is one for fields that have term vectors
+    ///             stored, and zero for fields without term vectors.</description></item>
+    ///         <item><description>If the third lowest order-bit is set (0x4), offsets are stored into
+    ///             the postings list in addition to positions.</description></item>
+    ///         <item><description>Fourth bit is unused.</description></item>
+    ///         <item><description>If the fifth lowest-order bit is set (0x10), norms are omitted for the
+    ///             indexed field.</description></item>
+    ///         <item><description>If the sixth lowest-order bit is set (0x20), payloads are stored for the
+    ///             indexed field.</description></item>
+    ///         <item><description>If the seventh lowest-order bit is set (0x40), term frequencies and
+    ///             positions omitted for the indexed field.</description></item>
+    ///         <item><description>If the eighth lowest-order bit is set (0x80), positions are omitted for the
+    ///             indexed field.</description></item>
+    ///       </list>
+    ///    </description></item>
+    ///    <item><description>DocValuesBits: a <see cref="byte"/> containing per-document value types. The type
     ///        recorded as two four-bit integers, with the high-order bits representing
-    ///        <code>norms</code> options, and the low-order bits representing
-    ///        {@code DocValues} options. Each four-bit integer can be decoded as such:
-    ///        <ul>
-    ///          <li>0: no DocValues for this field.</li>
-    ///          <li>1: NumericDocValues. (<seealso cref="DocValuesType#NUMERIC"/>)</li>
-    ///          <li>2: BinaryDocValues. ({@code DocValuesType#BINARY})</li>
-    ///          <li>3: SortedDocValues. ({@code DocValuesType#SORTED})</li>
-    ///        </ul>
-    ///    </li>
-    ///    <li>DocValuesGen is the generation count of the field's DocValues. If this is -1,
-    ///        there are no DocValues updates to that field. Anything above zero means there
-    ///        are updates stored by <seealso cref="DocValuesFormat"/>.</li>
-    ///    <li>Attributes: a key-value map of codec-private attributes.</li>
-    /// </ul>
-    ///
+    ///        <c>norms</c> options, and the low-order bits representing
+    ///        <see cref="Index.DocValues"/> options. Each four-bit integer can be decoded as such:
+    ///        <list type="bullet">
+    ///          <item><description>0: no DocValues for this field.</description></item>
+    ///          <item><description>1: <see cref="Index.NumericDocValues"/>. (<see cref="Index.DocValuesType.NUMERIC"/>)</description></item>
+    ///          <item><description>2: <see cref="Index.BinaryDocValues"/>. (<see cref="Index.DocValuesType.BINARY"/>)</description></item>
+    ///          <item><description>3: <see cref="Index.SortedDocValues"/>. (<see cref="Index.DocValuesType.SORTED"/>)</description></item>
+    ///        </list>
+    ///    </description></item>
+    ///    <item><description>DocValuesGen is the generation count of the field's <see cref="Index.DocValues"/>. If this is -1,
+    ///        there are no <see cref="Index.DocValues"/> updates to that field. Anything above zero means there
+    ///        are updates stored by <see cref="DocValuesFormat"/>.</description></item>
+    ///    <item><description>Attributes: a key-value map of codec-private attributes.</description></item>
+    /// </list>
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public sealed class Lucene46FieldInfosFormat : FieldInfosFormat

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5478f1bb/src/Lucene.Net/Codecs/Lucene46/Lucene46FieldInfosReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene46/Lucene46FieldInfosReader.cs b/src/Lucene.Net/Codecs/Lucene46/Lucene46FieldInfosReader.cs
index 0a3bacc..e46df2d 100644
--- a/src/Lucene.Net/Codecs/Lucene46/Lucene46FieldInfosReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene46/Lucene46FieldInfosReader.cs
@@ -34,9 +34,10 @@ namespace Lucene.Net.Codecs.Lucene46
 
     /// <summary>
     /// Lucene 4.6 FieldInfos reader.
-    ///
-    /// @lucene.experimental </summary>
-    /// <seealso cref= Lucene46FieldInfosFormat </seealso>
+    /// <para/>
+    /// @lucene.experimental 
+    /// </summary>
+    /// <seealso cref="Lucene46FieldInfosFormat"/>
     internal sealed class Lucene46FieldInfosReader : FieldInfosReader
     {
         /// <summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5478f1bb/src/Lucene.Net/Codecs/Lucene46/Lucene46FieldInfosWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene46/Lucene46FieldInfosWriter.cs b/src/Lucene.Net/Codecs/Lucene46/Lucene46FieldInfosWriter.cs
index a2fcf85..eafa70d 100644
--- a/src/Lucene.Net/Codecs/Lucene46/Lucene46FieldInfosWriter.cs
+++ b/src/Lucene.Net/Codecs/Lucene46/Lucene46FieldInfosWriter.cs
@@ -32,9 +32,10 @@ namespace Lucene.Net.Codecs.Lucene46
 
     /// <summary>
     /// Lucene 4.6 FieldInfos writer.
+    /// <para/>
+    /// @lucene.experimental 
     /// </summary>
-    /// <seealso cref= Lucene46FieldInfosFormat
-    /// @lucene.experimental </seealso>
+    /// <seealso cref="Lucene46FieldInfosFormat"/>
     internal sealed class Lucene46FieldInfosWriter : FieldInfosWriter
     {
         /// <summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5478f1bb/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoFormat.cs b/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoFormat.cs
index f50fecc..65d3e88 100644
--- a/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoFormat.cs
@@ -17,50 +17,46 @@ namespace Lucene.Net.Codecs.Lucene46
      * limitations under the License.
      */
 
-    // javadocs
-    using SegmentInfo = Lucene.Net.Index.SegmentInfo; // javadocs
-
-    // javadocs
-    // javadocs
+    using SegmentInfo = Lucene.Net.Index.SegmentInfo;
 
     /// <summary>
     /// Lucene 4.6 Segment info format.
-    /// <p>
+    /// <para>
     /// Files:
-    /// <ul>
-    ///   <li><tt>.si</tt>: Header, SegVersion, SegSize, IsCompoundFile, Diagnostics, Files, Footer
-    /// </ul>
-    /// </p>
+    /// <list type="bullet">
+    ///   <item><description><c>.si</c>: Header, SegVersion, SegSize, IsCompoundFile, Diagnostics, Files, Footer</description></item>
+    /// </list>
+    /// </para>
     /// Data types:
-    /// <p>
-    /// <ul>
-    ///   <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///   <li>SegSize --&gt; <seealso cref="DataOutput#writeInt Int32"/></li>
-    ///   <li>SegVersion --&gt; <seealso cref="DataOutput#writeString String"/></li>
-    ///   <li>Files --&gt; <seealso cref="DataOutput#writeStringSet Set&lt;String&gt;"/></li>
-    ///   <li>Diagnostics --&gt; <seealso cref="DataOutput#writeStringStringMap Map&lt;String,String&gt;"/></li>
-    ///   <li>IsCompoundFile --&gt; <seealso cref="DataOutput#writeByte Int8"/></li>
-    ///   <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    /// </ul>
-    /// </p>
+    /// <para>
+    /// <list type="bullet">
+    ///   <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///   <item><description>SegSize --&gt; Int32 (<see cref="Store.DataOutput.WriteInt32(int)"/>) </description></item>
+    ///   <item><description>SegVersion --&gt; String (<see cref="Store.DataOutput.WriteString(string)"/>) </description></item>
+    ///   <item><description>Files --&gt; ISet&lt;String&gt; (<see cref="Store.DataOutput.WriteStringSet(System.Collections.Generic.ISet{string})"/>) </description></item>
+    ///   <item><description>Diagnostics --&gt; IDictionary&lt;String,String&gt; (<see cref="Store.DataOutput.WriteStringStringMap(System.Collections.Generic.IDictionary{string, string})"/>) </description></item>
+    ///   <item><description>IsCompoundFile --&gt; Int8 (<see cref="Store.DataOutput.WriteByte(byte)"/>) </description></item>
+    ///   <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(Store.IndexOutput)"/>) </description></item>
+    /// </list>
+    /// </para>
     /// Field Descriptions:
-    /// <p>
-    /// <ul>
-    ///   <li>SegVersion is the code version that created the segment.</li>
-    ///   <li>SegSize is the number of documents contained in the segment index.</li>
-    ///   <li>IsCompoundFile records whether the segment is written as a compound file or
+    /// <para>
+    /// <list type="bullet">
+    ///   <item><description>SegVersion is the code version that created the segment.</description></item>
+    ///   <item><description>SegSize is the number of documents contained in the segment index.</description></item>
+    ///   <item><description>IsCompoundFile records whether the segment is written as a compound file or
     ///       not. If this is -1, the segment is not a compound file. If it is 1, the segment
-    ///       is a compound file.</li>
-    ///   <li>The Diagnostics Map is privately written by <seealso cref="IndexWriter"/>, as a debugging aid,
+    ///       is a compound file.</description></item>
+    ///   <item><description>The Diagnostics Map is privately written by <see cref="Index.IndexWriter"/>, as a debugging aid,
     ///       for each segment it creates. It includes metadata like the current Lucene
-    ///       version, OS, Java version, why the segment was created (merge, flush,
-    ///       addIndexes), etc.</li>
-    ///   <li>Files is a list of files referred to by this segment.</li>
-    /// </ul>
-    /// </p>
+    ///       version, OS, .NET/Java version, why the segment was created (merge, flush,
+    ///       addIndexes), etc.</description></item>
+    ///   <item><description>Files is a list of files referred to by this segment.</description></item>
+    /// </list>
+    /// </para>
+    /// @lucene.experimental 
     /// </summary>
-    /// <seealso cref= SegmentInfos
-    /// @lucene.experimental </seealso>
+    /// <seealso cref="Index.SegmentInfos"/>
     public class Lucene46SegmentInfoFormat : SegmentInfoFormat
     {
         private readonly SegmentInfoReader reader = new Lucene46SegmentInfoReader();
@@ -89,7 +85,7 @@ namespace Lucene.Net.Codecs.Lucene46
         }
 
         /// <summary>
-        /// File extension used to store <seealso cref="SegmentInfo"/>. </summary>
+        /// File extension used to store <see cref="SegmentInfo"/>. </summary>
         public readonly static string SI_EXTENSION = "si";
 
         internal const string CODEC_NAME = "Lucene46SegmentInfo";

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5478f1bb/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoReader.cs b/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoReader.cs
index 6cb374e..19a856a 100644
--- a/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoReader.cs
@@ -28,10 +28,11 @@ namespace Lucene.Net.Codecs.Lucene46
     using SegmentInfo = Lucene.Net.Index.SegmentInfo;
 
     /// <summary>
-    /// Lucene 4.6 implementation of <seealso cref="SegmentInfoReader"/>.
+    /// Lucene 4.6 implementation of <see cref="SegmentInfoReader"/>.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// <seealso cref= Lucene46SegmentInfoFormat
-    /// @lucene.experimental </seealso>
+    /// <seealso cref="Lucene46SegmentInfoFormat"/>
     public class Lucene46SegmentInfoReader : SegmentInfoReader
     {
         /// <summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5478f1bb/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoWriter.cs b/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoWriter.cs
index 9cf8da9..a39be6d 100644
--- a/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoWriter.cs
+++ b/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoWriter.cs
@@ -26,10 +26,11 @@ namespace Lucene.Net.Codecs.Lucene46
     using SegmentInfo = Lucene.Net.Index.SegmentInfo;
 
     /// <summary>
-    /// Lucene 4.0 implementation of <seealso cref="SegmentInfoWriter"/>.
+    /// Lucene 4.0 implementation of <see cref="SegmentInfoWriter"/>.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// <seealso cref= Lucene46SegmentInfoFormat
-    /// @lucene.experimental </seealso>
+    /// <seealso cref="Lucene46SegmentInfoFormat"/>
     public class Lucene46SegmentInfoWriter : SegmentInfoWriter
     {
         /// <summary>


[15/48] lucenenet git commit: Lucene.Net.Util: Fixed up documentation comments, types beginning with A-G

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/BytesRefArray.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/BytesRefArray.cs b/src/Lucene.Net/Util/BytesRefArray.cs
index 309e9d8..c6f16ef 100644
--- a/src/Lucene.Net/Util/BytesRefArray.cs
+++ b/src/Lucene.Net/Util/BytesRefArray.cs
@@ -22,12 +22,11 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// A simple append only random-access <seealso cref="BytesRef"/> array that stores full
-    /// copies of the appended bytes in a <seealso cref="ByteBlockPool"/>.
-    ///
-    ///
+    /// A simple append only random-access <see cref="BytesRef"/> array that stores full
+    /// copies of the appended bytes in a <see cref="ByteBlockPool"/>.
+    /// <para/>
     /// <b>Note: this class is not Thread-Safe!</b>
-    ///
+    /// <para/>
     /// @lucene.internal
     /// @lucene.experimental
     /// </summary>
@@ -43,7 +42,7 @@ namespace Lucene.Net.Util
         private readonly Counter bytesUsed;
 
         /// <summary>
-        /// Creates a new <seealso cref="BytesRefArray"/> with a counter to track allocated bytes
+        /// Creates a new <see cref="BytesRefArray"/> with a counter to track allocated bytes
         /// </summary>
         public BytesRefArray(Counter bytesUsed)
         {
@@ -54,7 +53,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Clears this <seealso cref="BytesRefArray"/>
+        /// Clears this <see cref="BytesRefArray"/>
         /// </summary>
         public void Clear()
         {
@@ -65,9 +64,9 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Appends a copy of the given <seealso cref="BytesRef"/> to this <seealso cref="BytesRefArray"/>. </summary>
-        /// <param name="bytes"> the bytes to append </param>
-        /// <returns> the index of the appended bytes </returns>
+        /// Appends a copy of the given <see cref="BytesRef"/> to this <see cref="BytesRefArray"/>. </summary>
+        /// <param name="bytes"> The bytes to append </param>
+        /// <returns> The index of the appended bytes </returns>
         public int Append(BytesRef bytes)
         {
             if (lastElement >= offsets.Length)
@@ -84,19 +83,20 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Returns the current size of this <see cref="BytesRefArray"/>.
+        /// <para/>
         /// NOTE: This was size() in Lucene.
         /// </summary>
-        /// <returns> the current size of this <see cref="BytesRefArray"/> </returns>
+        /// <returns> The current size of this <see cref="BytesRefArray"/> </returns>
         public int Length
         {
             get { return lastElement; }
         }
 
         /// <summary>
-        /// Returns the <i>n'th</i> element of this <seealso cref="BytesRefArray"/> </summary>
-        /// <param name="spare"> a spare <seealso cref="BytesRef"/> instance </param>
-        /// <param name="index"> the elements index to retrieve </param>
-        /// <returns> the <i>n'th</i> element of this <seealso cref="BytesRefArray"/> </returns>
+        /// Returns the <i>n'th</i> element of this <see cref="BytesRefArray"/> </summary>
+        /// <param name="spare"> A spare <see cref="BytesRef"/> instance </param>
+        /// <param name="index"> The elements index to retrieve </param>
+        /// <returns> The <i>n'th</i> element of this <see cref="BytesRefArray"/> </returns>
         public BytesRef Get(BytesRef spare, int index)
         {
             if (lastElement > index)
@@ -171,7 +171,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// sugar for <seealso cref="#iterator(Comparer)"/> with a <code>null</code> comparer
+        /// Sugar for <see cref="GetIterator(IComparer{BytesRef})"/> with a <c>null</c> comparer
         /// </summary>
         public IBytesRefIterator GetIterator()
         {
@@ -179,18 +179,18 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// <p>
-        /// Returns a <seealso cref="IBytesRefIterator"/> with point in time semantics. The
-        /// iterator provides access to all so far appended <seealso cref="BytesRef"/> instances.
-        /// </p>
-        /// <p>
-        /// If a non <code>null</code> <seealso cref="Comparer"/> is provided the iterator will
+        /// <para>
+        /// Returns a <see cref="IBytesRefIterator"/> with point in time semantics. The
+        /// iterator provides access to all so far appended <see cref="BytesRef"/> instances.
+        /// </para>
+        /// <para>
+        /// If a non <c>null</c> <see cref="T:IComparer{BytesRef}"/> is provided the iterator will
         /// iterate the byte values in the order specified by the comparer. Otherwise
         /// the order is the same as the values were appended.
-        /// </p>
-        /// <p>
-        /// this is a non-destructive operation.
-        /// </p>
+        /// </para>
+        /// <para>
+        /// This is a non-destructive operation.
+        /// </para>
         /// </summary>
         public IBytesRefIterator GetIterator(IComparer<BytesRef> comp)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/BytesRefHash.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/BytesRefHash.cs b/src/Lucene.Net/Util/BytesRefHash.cs
index 6c8a839..4ad61d6 100644
--- a/src/Lucene.Net/Util/BytesRefHash.cs
+++ b/src/Lucene.Net/Util/BytesRefHash.cs
@@ -28,18 +28,18 @@ namespace Lucene.Net.Util
     using DirectAllocator = Lucene.Net.Util.ByteBlockPool.DirectAllocator;
 
     /// <summary>
-    /// <seealso cref="BytesRefHash"/> is a special purpose hash-map like data-structure
-    /// optimized for <seealso cref="BytesRef"/> instances. BytesRefHash maintains mappings of
+    /// <see cref="BytesRefHash"/> is a special purpose hash-map like data-structure
+    /// optimized for <see cref="BytesRef"/> instances. <see cref="BytesRefHash"/> maintains mappings of
     /// byte arrays to ids (Map&lt;BytesRef,int&gt;) storing the hashed bytes
     /// efficiently in continuous storage. The mapping to the id is
-    /// encapsulated inside <seealso cref="BytesRefHash"/> and is guaranteed to be increased
-    /// for each added <seealso cref="BytesRef"/>.
+    /// encapsulated inside <see cref="BytesRefHash"/> and is guaranteed to be increased
+    /// for each added <see cref="BytesRef"/>.
     ///
-    /// <p>
-    /// Note: The maximum capacity <seealso cref="BytesRef"/> instance passed to
-    /// <seealso cref="#add(BytesRef)"/> must not be longer than <seealso cref="ByteBlockPool#BYTE_BLOCK_SIZE"/>-2.
+    /// <para>
+    /// Note: The maximum capacity <see cref="BytesRef"/> instance passed to
+    /// <see cref="Add(BytesRef)"/> must not be longer than <see cref="ByteBlockPool.BYTE_BLOCK_SIZE"/>-2.
     /// The internal storage is limited to 2GB total byte storage.
-    /// </p>
+    /// </para>
     ///
     /// @lucene.internal
     /// </summary>
@@ -64,8 +64,8 @@ namespace Lucene.Net.Util
         private Counter bytesUsed;
 
         /// <summary>
-        /// Creates a new <seealso cref="BytesRefHash"/> with a <seealso cref="ByteBlockPool"/> using a
-        /// <seealso cref="DirectAllocator"/>.
+        /// Creates a new <see cref="BytesRefHash"/> with a <see cref="ByteBlockPool"/> using a
+        /// <see cref="DirectAllocator"/>.
         /// </summary>
         public BytesRefHash()
             : this(new ByteBlockPool(new DirectAllocator()))
@@ -73,7 +73,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a new <seealso cref="BytesRefHash"/>
+        /// Creates a new <see cref="BytesRefHash"/>
         /// </summary>
         public BytesRefHash(ByteBlockPool pool)
             : this(pool, DEFAULT_CAPACITY, new DirectBytesStartArray(DEFAULT_CAPACITY))
@@ -81,7 +81,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a new <seealso cref="BytesRefHash"/>
+        /// Creates a new <see cref="BytesRefHash"/>
         /// </summary>
         public BytesRefHash(ByteBlockPool pool, int capacity, BytesStartArray bytesStartArray)
         {
@@ -98,28 +98,29 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the number of <seealso cref="BytesRef"/> values in this <seealso cref="BytesRefHash"/>.
+        /// Returns the number of <see cref="BytesRef"/> values in this <see cref="BytesRefHash"/>.
+        /// <para/>
         /// NOTE: This was size() in Lucene.
         /// </summary>
-        /// <returns> the number of <seealso cref="BytesRef"/> values in this <seealso cref="BytesRefHash"/>. </returns>
+        /// <returns> The number of <see cref="BytesRef"/> values in this <see cref="BytesRefHash"/>. </returns>
         public int Count
         {
             get { return count; }
         }
 
         /// <summary>
-        /// Populates and returns a <seealso cref="BytesRef"/> with the bytes for the given
+        /// Populates and returns a <see cref="BytesRef"/> with the bytes for the given
         /// bytesID.
-        /// <p>
+        /// <para/>
         /// Note: the given bytesID must be a positive integer less than the current
-        /// size (<seealso cref="Count"/>)
+        /// size (<see cref="Count"/>)
         /// </summary>
         /// <param name="bytesID">
-        ///          the id </param>
+        ///          The id </param>
         /// <param name="ref">
-        ///          the <seealso cref="BytesRef"/> to populate
+        ///          The <see cref="BytesRef"/> to populate
         /// </param>
-        /// <returns> the given BytesRef instance populated with the bytes for the given
+        /// <returns> The given <see cref="BytesRef"/> instance populated with the bytes for the given
         ///         bytesID </returns>
         public BytesRef Get(int bytesID, BytesRef @ref)
         {
@@ -131,11 +132,11 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Returns the ids array in arbitrary order. Valid ids start at offset of 0
-        /// and end at a limit of <seealso cref="Count"/> - 1
-        /// <p>
-        /// Note: this is a destructive operation. <seealso cref="#clear()"/> must be called in
-        /// order to reuse this <seealso cref="BytesRefHash"/> instance.
-        /// </p>
+        /// and end at a limit of <see cref="Count"/> - 1
+        /// <para>
+        /// Note: this is a destructive operation. <see cref="Clear()"/> must be called in
+        /// order to reuse this <see cref="BytesRefHash"/> instance.
+        /// </para>
         /// </summary>
         public int[] Compact()
         {
@@ -161,13 +162,13 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Returns the values array sorted by the referenced byte values.
-        /// <p>
-        /// Note: this is a destructive operation. <seealso cref="#clear()"/> must be called in
-        /// order to reuse this <seealso cref="BytesRefHash"/> instance.
-        /// </p>
+        /// <para>
+        /// Note: this is a destructive operation. <see cref="Clear()"/> must be called in
+        /// order to reuse this <see cref="BytesRefHash"/> instance.
+        /// </para>
         /// </summary>
         /// <param name="comp">
-        ///          the <seealso cref="Comparer"/> used for sorting </param>
+        ///          The <see cref="T:IComparer{BytesRef}"/> used for sorting </param>
         public int[] Sort(IComparer<BytesRef> comp)
         {
             int[] compact = Compact();
@@ -254,7 +255,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Clears the <seealso cref="BytesRef"/> which maps to the given <seealso cref="BytesRef"/>
+        /// Clears the <see cref="BytesRef"/> which maps to the given <see cref="BytesRef"/>
         /// </summary>
         public void Clear(bool resetPool)
         {
@@ -279,7 +280,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Closes the BytesRefHash and releases all internally used memory
+        /// Closes the <see cref="BytesRefHash"/> and releases all internally used memory
         /// </summary>
         public void Dispose()
         {
@@ -289,18 +290,18 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Adds a new <seealso cref="BytesRef"/>
+        /// Adds a new <see cref="BytesRef"/>
         /// </summary>
         /// <param name="bytes">
-        ///          the bytes to hash </param>
-        /// <returns> the id the given bytes are hashed if there was no mapping for the
-        ///         given bytes, otherwise <code>(-(id)-1)</code>. this guarantees
+        ///          The bytes to hash </param>
+        /// <returns> The id the given bytes are hashed if there was no mapping for the
+        ///         given bytes, otherwise <c>(-(id)-1)</c>. This guarantees
         ///         that the return value will always be &gt;= 0 if the given bytes
         ///         haven't been hashed before.
         /// </returns>
         /// <exception cref="MaxBytesLengthExceededException">
         ///           if the given bytes are > 2 +
-        ///           <seealso cref="ByteBlockPool#BYTE_BLOCK_SIZE"/> </exception>
+        ///           <see cref="ByteBlockPool.BYTE_BLOCK_SIZE"/> </exception>
         public int Add(BytesRef bytes)
         {
             Debug.Assert(bytesStart != null, "Bytesstart is null - not initialized");
@@ -365,12 +366,12 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the id of the given <seealso cref="BytesRef"/>.
+        /// Returns the id of the given <see cref="BytesRef"/>.
         /// </summary>
         /// <param name="bytes">
-        ///          the bytes to look for
+        ///          The bytes to look for
         /// </param>
-        /// <returns> the id of the given bytes, or {@code -1} if there is no mapping for the
+        /// <returns> The id of the given bytes, or <c>-1</c> if there is no mapping for the
         ///         given bytes. </returns>
         public int Find(BytesRef bytes)
         {
@@ -402,12 +403,12 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Adds a "arbitrary" int offset instead of a BytesRef
-        ///  term.  this is used in the indexer to hold the hash for term
-        ///  vectors, because they do not redundantly store the byte[] term
-        ///  directly and instead reference the byte[] term
-        ///  already stored by the postings BytesRefHash.  See
-        ///  add(int textStart) in TermsHashPerField.
+        /// Adds an "arbitrary" int offset instead of a <see cref="BytesRef"/>
+        /// term.  This is used in the indexer to hold the hash for term
+        /// vectors, because they do not redundantly store the <see cref="T:byte[]"/> term
+        /// directly and instead reference the <see cref="T:byte[]"/> term
+        /// already stored by the postings <see cref="BytesRefHash"/>.  See
+        /// <see cref="Index.TermsHashPerField.Add(int)"/>.
         /// </summary>
         public int AddByPoolOffset(int offset)
         {
@@ -450,7 +451,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Called when hash is too small (> 50% occupied) or too large (< 20%
+        /// Called when hash is too small (&gt; 50% occupied) or too large (&lt; 20%
         /// occupied).
         /// </summary>
         private void Rehash(int newSize, bool hashOnData)
@@ -520,8 +521,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// reinitializes the <seealso cref="BytesRefHash"/> after a previous <seealso cref="#clear()"/>
-        /// call. If <seealso cref="#clear()"/> has not been called previously this method has no
+        /// Reinitializes the <see cref="BytesRefHash"/> after a previous <see cref="Clear()"/>
+        /// call. If <see cref="Clear()"/> has not been called previously this method has no
         /// effect.
         /// </summary>
         public void Reinit()
@@ -540,12 +541,12 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Returns the bytesStart offset into the internally used
-        /// <seealso cref="ByteBlockPool"/> for the given bytesID
+        /// <see cref="ByteBlockPool"/> for the given <paramref name="bytesID"/>
         /// </summary>
         /// <param name="bytesID">
-        ///          the id to look up </param>
-        /// <returns> the bytesStart offset into the internally used
-        ///         <seealso cref="ByteBlockPool"/> for the given id </returns>
+        ///          The id to look up </param>
+        /// <returns> The bytesStart offset into the internally used
+        ///         <see cref="ByteBlockPool"/> for the given id </returns>
         public int ByteStart(int bytesID)
         {
             Debug.Assert(bytesStart != null, "bytesStart is null - not initialized");
@@ -554,8 +555,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Thrown if a <seealso cref="BytesRef"/> exceeds the <seealso cref="BytesRefHash"/> limit of
-        /// <seealso cref="ByteBlockPool#BYTE_BLOCK_SIZE"/>-2.
+        /// Thrown if a <see cref="BytesRef"/> exceeds the <see cref="BytesRefHash"/> limit of
+        /// <see cref="ByteBlockPool.BYTE_BLOCK_SIZE"/>-2.
         /// </summary>
         // LUCENENET: All exeption classes should be marked serializable
 #if FEATURE_SERIALIZABLE
@@ -586,37 +587,37 @@ namespace Lucene.Net.Util
         public abstract class BytesStartArray
         {
             /// <summary>
-            /// Initializes the BytesStartArray. this call will allocate memory
+            /// Initializes the <see cref="BytesStartArray"/>. This call will allocate memory.
             /// </summary>
-            /// <returns> the initialized bytes start array </returns>
+            /// <returns> The initialized bytes start array. </returns>
             public abstract int[] Init();
 
             /// <summary>
-            /// Grows the <seealso cref="BytesStartArray"/>
+            /// Grows the <see cref="BytesStartArray"/>.
             /// </summary>
-            /// <returns> the grown array </returns>
+            /// <returns> The grown array. </returns>
             public abstract int[] Grow();
 
             /// <summary>
-            /// clears the <seealso cref="BytesStartArray"/> and returns the cleared instance.
+            /// Clears the <see cref="BytesStartArray"/> and returns the cleared instance.
             /// </summary>
-            /// <returns> the cleared instance, this might be <code>null</code> </returns>
+            /// <returns> The cleared instance, this might be <c>null</c>. </returns>
             public abstract int[] Clear();
 
             /// <summary>
-            /// A <seealso cref="Counter"/> reference holding the number of bytes used by this
-            /// <seealso cref="BytesStartArray"/>. The <seealso cref="BytesRefHash"/> uses this reference to
-            /// track it memory usage
+            /// A <see cref="Counter"/> reference holding the number of bytes used by this
+            /// <see cref="BytesStartArray"/>. The <see cref="BytesRefHash"/> uses this reference to
+            /// track its memory usage.
             /// </summary>
-            /// <returns> a <seealso cref="AtomicInt64"/> reference holding the number of bytes used
-            ///         by this <seealso cref="BytesStartArray"/>. </returns>
+            /// <returns> an <see cref="AtomicInt64"/> reference holding the number of bytes used
+            ///         by this <see cref="BytesStartArray"/>. </returns>
             public abstract Counter BytesUsed();
         }
 
         /// <summary>
-        /// A simple <seealso cref="BytesStartArray"/> that tracks
-        ///  memory allocation using a private <seealso cref="Counter"/>
-        ///  instance.
+        /// A simple <see cref="BytesStartArray"/> that tracks
+        /// memory allocation using a private <see cref="Counter"/>
+        /// instance.
         /// </summary>
         public class DirectBytesStartArray : BytesStartArray
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/BytesRefIterator.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/BytesRefIterator.cs b/src/Lucene.Net/Util/BytesRefIterator.cs
index 5671d8d..3d2e2bc 100644
--- a/src/Lucene.Net/Util/BytesRefIterator.cs
+++ b/src/Lucene.Net/Util/BytesRefIterator.cs
@@ -20,27 +20,27 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// A simple iterator interface for <seealso cref="BytesRef"/> iteration.
+    /// A simple iterator interface for <see cref="BytesRef"/> iteration.
     /// </summary>
     public interface IBytesRefIterator
     {
         /// <summary>
-        /// Increments the iteration to the next <seealso cref="BytesRef"/> in the iterator.
-        /// Returns the resulting <seealso cref="BytesRef"/> or <code>null</code> if the end of
-        /// the iterator is reached. The returned BytesRef may be re-used across calls
-        /// to next. After this method returns null, do not call it again: the results
+        /// Increments the iteration to the next <see cref="BytesRef"/> in the iterator.
+        /// Returns the resulting <see cref="BytesRef"/> or <c>null</c> if the end of
+        /// the iterator is reached. The returned <see cref="BytesRef"/> may be re-used across calls
+        /// to <see cref="Next()"/>. After this method returns <c>null</c>, do not call it again: the results
         /// are undefined.
         /// </summary>
-        /// <returns> the next <seealso cref="BytesRef"/> in the iterator or <code>null</code> if
+        /// <returns> The next <see cref="BytesRef"/> in the iterator or <c>null</c> if
         ///         the end of the iterator is reached. </returns>
-        /// <exception cref="IOException"> If there is a low-level I/O error. </exception>
+        /// <exception cref="System.IO.IOException"> If there is a low-level I/O error. </exception>
         BytesRef Next();
 
         /// <summary>
-        /// Return the <seealso cref="BytesRef"/> Comparer used to sort terms provided by the
-        /// iterator. this may return null if there are no items or the iterator is not
+        /// Return the <see cref="BytesRef"/> Comparer used to sort terms provided by the
+        /// iterator. This may return <c>null</c> if there are no items or the iterator is not
         /// sorted. Callers may invoke this method many times, so it's best to cache a
-        /// single instance & reuse it.
+        /// single instance &amp; reuse it.
         /// </summary>
         IComparer<BytesRef> Comparer { get; }
     }
@@ -56,6 +56,9 @@ namespace Lucene.Net.Util
     {
         private BytesRefIterator() { } // Disallow creation
 
+        /// <summary>
+        /// Singleton <see cref="BytesRefIterator"/> that iterates over 0 BytesRefs.
+        /// </summary>
         public static readonly IBytesRefIterator EMPTY = new EmptyBytesRefIterator();
 
         private class EmptyBytesRefIterator : IBytesRefIterator

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/CharsRef.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/CharsRef.cs b/src/Lucene.Net/Util/CharsRef.cs
index 8779afa..432d53f 100644
--- a/src/Lucene.Net/Util/CharsRef.cs
+++ b/src/Lucene.Net/Util/CharsRef.cs
@@ -24,9 +24,10 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// Represents char[], as a slice (offset + Length) into an existing char[].
-    /// The <seealso cref="#chars"/> member should never be null; use
-    /// <seealso cref="#EMPTY_CHARS"/> if necessary.
+    /// Represents <see cref="T:char[]"/>, as a slice (offset + Length) into an existing <see cref="T:char[]"/>.
+    /// The <see cref="Chars"/> property should never be <c>null</c>; use
+    /// <see cref="EMPTY_CHARS"/> if necessary.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
 #if FEATURE_SERIALIZABLE
@@ -39,7 +40,7 @@ namespace Lucene.Net.Util
         public static readonly char[] EMPTY_CHARS = new char[0];
 
         /// <summary>
-        /// The contents of the CharsRef. Should never be {@code null}.
+        /// The contents of the <see cref="CharsRef"/>. Should never be <c>null</c>.
         /// </summary>
         [WritableArray]
         [SuppressMessage("Microsoft.Performance", "CA1819", Justification = "Lucene's design requires some writable array properties")]
@@ -66,7 +67,7 @@ namespace Lucene.Net.Util
         public int Length { get; set; }
 
         /// <summary>
-        /// Creates a new <seealso cref="CharsRef"/> initialized an empty array zero-Length
+        /// Creates a new <see cref="CharsRef"/> initialized with an empty array (zero length)
         /// </summary>
         public CharsRef()
             : this(EMPTY_CHARS, 0, 0)
@@ -74,8 +75,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a new <seealso cref="CharsRef"/> initialized with an array of the given
-        /// capacity
+        /// Creates a new <see cref="CharsRef"/> initialized with an array of the given
+        /// <paramref name="capacity"/>.
         /// </summary>
         public CharsRef(int capacity)
         {
@@ -83,8 +84,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a new <seealso cref="CharsRef"/> initialized with the given array, offset and
-        /// Length
+        /// Creates a new <see cref="CharsRef"/> initialized with the given <paramref name="chars"/>, 
+        /// <paramref name="offset"/> and <paramref name="length"/>.
         /// </summary>
         public CharsRef(char[] chars, int offset, int length)
         {
@@ -95,8 +96,8 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a new <seealso cref="CharsRef"/> initialized with the given Strings character
-        /// array
+        /// Creates a new <see cref="CharsRef"/> initialized with the given <see cref="string"/> character
+        /// array.
         /// </summary>
         public CharsRef(string @string)
         {
@@ -110,7 +111,7 @@ namespace Lucene.Net.Util
         /// <b>not</b> copied and will be shared by both the returned object and this
         /// object.
         /// </summary>
-        /// <seealso cref= #deepCopyOf </seealso>
+        /// <seealso cref="DeepCopyOf(CharsRef)"/>
         public object Clone()
         {
             return new CharsRef(chars, Offset, Length);
@@ -165,7 +166,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Signed int order comparison </summary>
+        /// Signed <see cref="int"/> order comparison </summary>
         public int CompareTo(CharsRef other)
         {
             if (this == other)
@@ -199,10 +200,10 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Copies the given <seealso cref="CharsRef"/> referenced content into this instance.
+        /// Copies the given <see cref="CharsRef"/> referenced content into this instance.
         /// </summary>
         /// <param name="other">
-        ///          the <seealso cref="CharsRef"/> to copy </param>
+        ///          The <see cref="CharsRef"/> to copy. </param>
         public void CopyChars(CharsRef other)
         {
             CopyChars(other.chars, other.Offset, other.Length);
@@ -210,8 +211,9 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Used to grow the reference array.
-        ///
+        /// <para/>
         /// In general this should not be used as it does not take the offset into account.
+        /// <para/>
         /// @lucene.internal
         /// </summary>
         public void Grow(int newLength)
@@ -224,7 +226,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Copies the given array into this CharsRef.
+        /// Copies the given array into this <see cref="CharsRef"/>.
         /// </summary>
         public void CopyChars(char[] otherChars, int otherOffset, int otherLength)
         {
@@ -238,7 +240,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Appends the given array to this CharsRef
+        /// Appends the given array to this <see cref="CharsRef"/>.
         /// </summary>
         public void Append(char[] otherChars, int otherOffset, int otherLength)
         {
@@ -373,10 +375,10 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a new CharsRef that points to a copy of the chars from
-        /// <code>other</code>
-        /// <p>
-        /// The returned CharsRef will have a Length of other.Length
+        /// Creates a new <see cref="CharsRef"/> that points to a copy of the chars from
+        /// <paramref name="other"/>.
+        /// <para/>
+        /// The returned <see cref="CharsRef"/> will have a Length of <c>other.Length</c>
         /// and an offset of zero.
         /// </summary>
         public static CharsRef DeepCopyOf(CharsRef other)
@@ -388,7 +390,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Performs internal consistency checks.
-        /// Always returns true (or throws InvalidOperationException)
+        /// Always returns true (or throws <see cref="InvalidOperationException"/>)
         /// </summary>
         public bool IsValid()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/CloseableThreadLocal.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/CloseableThreadLocal.cs b/src/Lucene.Net/Util/CloseableThreadLocal.cs
index 239316c..0328a4a 100644
--- a/src/Lucene.Net/Util/CloseableThreadLocal.cs
+++ b/src/Lucene.Net/Util/CloseableThreadLocal.cs
@@ -24,31 +24,28 @@ namespace Lucene.Net.Util
 
     /// <summary>
     /// Java's builtin ThreadLocal has a serious flaw:
-    ///  it can take an arbitrarily long amount of time to
-    ///  dereference the things you had stored in it, even once the
-    ///  ThreadLocal instance itself is no longer referenced.
-    ///  this is because there is single, master map stored for
-    ///  each thread, which all ThreadLocals share, and that
-    ///  master map only periodically purges "stale" entries.
-    ///
-    ///  While not technically a memory leak, because eventually
-    ///  the memory will be reclaimed, it can take a long time
-    ///  and you can easily hit OutOfMemoryError because from the
-    ///  GC's standpoint the stale entries are not reclaimable.
-    ///
-    ///  this class works around that, by only enrolling
-    ///  WeakReference values into the ThreadLocal, and
-    ///  separately holding a hard reference to each stored
-    ///  value.  When you call <seealso cref="#close"/>, these hard
-    ///  references are cleared and then GC is freely able to
-    ///  reclaim space by objects stored in it.
-    ///
-    ///  We can not rely on <seealso cref="ThreadLocal#remove()"/> as it
-    ///  only removes the value for the caller thread, whereas
-    ///  <seealso cref="#close"/> takes care of all
-    ///  threads.  You should not call <seealso cref="#close"/> until all
-    ///  threads are done using the instance.
-    ///
+    /// it can take an arbitrarily long amount of time to
+    /// dereference the things you had stored in it, even once the
+    /// ThreadLocal instance itself is no longer referenced.
+    /// This is because there is a single, master map stored for
+    /// each thread, which all ThreadLocals share, and that
+    /// master map only periodically purges "stale" entries.
+    /// <para/>
+    /// While not technically a memory leak, because eventually
+    /// the memory will be reclaimed, it can take a long time
+    /// and you can easily hit <see cref="OutOfMemoryException"/> because from the
+    /// GC's standpoint the stale entries are not reclaimable.
+    /// <para/>
+    /// This class works around that, by only enrolling
+    /// WeakReference values into the ThreadLocal, and
+    /// separately holding a hard reference to each stored
+    /// value.  When you call <see cref="Dispose()"/>, these hard
+    /// references are cleared and then GC is freely able to
+    /// reclaim space by objects stored in it.
+    /// <para/>
+    /// You should not call <see cref="Dispose()"/> until all
+    /// threads are done using the instance.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public class DisposableThreadLocal<T> : IDisposable

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/CollectionUtil.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/CollectionUtil.cs b/src/Lucene.Net/Util/CollectionUtil.cs
index fd030d5..6b0f90f 100644
--- a/src/Lucene.Net/Util/CollectionUtil.cs
+++ b/src/Lucene.Net/Util/CollectionUtil.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Util
     /// Sort methods work directly on the supplied lists and don't copy to/from arrays
     /// before/after. For medium size collections as used in the Lucene indexer that is
     /// much more efficient.
-    ///
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public sealed class CollectionUtil
@@ -140,7 +140,7 @@ namespace Lucene.Net.Util
         /// This method uses the intro sort
         /// algorithm, but falls back to insertion sort for small lists. 
         /// </summary>
-        /// <param name="list">this <see cref="IList{T}"/></param>
+        /// <param name="list">This <see cref="IList{T}"/></param>
         /// <param name="comp">The <see cref="IComparer{T}"/> to use for the sort.</param>
         public static void IntroSort<T>(IList<T> list, IComparer<T> comp)
         {
@@ -157,7 +157,7 @@ namespace Lucene.Net.Util
         /// This method uses the intro sort
         /// algorithm, but falls back to insertion sort for small lists. 
         /// </summary>
-        /// <param name="list">this <see cref="IList{T}"/></param>
+        /// <param name="list">This <see cref="IList{T}"/></param>
         public static void IntroSort<T>(IList<T> list)
             //where T : IComparable<T> // LUCENENET specific: removing constraint because in .NET, it is not needed
         {
@@ -193,7 +193,7 @@ namespace Lucene.Net.Util
         /// Sorts the given <see cref="IList{T}"/> in natural order.
         /// This method uses the Tim sort
         /// algorithm, but falls back to binary sort for small lists. </summary>
-        /// <param name="list">this <see cref="IList{T}"/></param>
+        /// <param name="list">This <see cref="IList{T}"/></param>
         public static void TimSort<T>(IList<T> list)
             //where T : IComparable<T> // LUCENENET specific: removing constraint because in .NET, it is not needed
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/CommandLineUtil.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/CommandLineUtil.cs b/src/Lucene.Net/Util/CommandLineUtil.cs
index 49a000a..2d04e65 100644
--- a/src/Lucene.Net/Util/CommandLineUtil.cs
+++ b/src/Lucene.Net/Util/CommandLineUtil.cs
@@ -25,7 +25,6 @@ namespace Lucene.Net.Util
 
     /// <summary>
     /// Class containing some useful methods used by command line tools
-    ///
     /// </summary>
     public sealed class CommandLineUtil
     {
@@ -34,10 +33,10 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a specific FSDirectory instance starting from its class name </summary>
-        /// <param name="clazzName"> The name of the FSDirectory class to load </param>
-        /// <param name="file"> The file to be used as parameter constructor </param>
-        /// <returns> the new FSDirectory instance </returns>
+        /// Creates a specific <see cref="FSDirectory"/> instance starting from its class name. </summary>
+        /// <param name="clazzName"> The name of the <see cref="FSDirectory"/> class to load. </param>
+        /// <param name="dir"> The <see cref="DirectoryInfo"/> to be used as parameter constructor. </param>
+        /// <returns> The new <see cref="FSDirectory"/> instance </returns>
         public static FSDirectory NewFSDirectory(string clazzName, DirectoryInfo dir)
         {
             try
@@ -64,20 +63,20 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Loads a specific Directory implementation </summary>
-        /// <param name="clazzName"> The name of the Directory class to load </param>
-        /// <returns> The Directory class loaded </returns>
-        /// <exception cref="ClassNotFoundException"> If the specified class cannot be found. </exception>
+        /// Loads a specific <see cref="Directory"/> implementation. </summary>
+        /// <param name="clazzName"> The name of the <see cref="Directory"/> class to load. </param>
+        /// <returns> The <see cref="Directory"/> class loaded. </returns>
+        /// <exception cref="System.TypeLoadException"> If the specified class cannot be found. </exception>
         public static Type LoadDirectoryClass(string clazzName)
         {
             return Type.GetType(AdjustDirectoryClassName(clazzName));
         }
 
         /// <summary>
-        /// Loads a specific FSDirectory implementation </summary>
-        /// <param name="clazzName"> The name of the FSDirectory class to load </param>
-        /// <returns> The FSDirectory class loaded </returns>
-        /// <exception cref="ClassNotFoundException"> If the specified class cannot be found. </exception>
+        /// Loads a specific <see cref="FSDirectory"/> implementation. </summary>
+        /// <param name="clazzName"> The name of the <see cref="FSDirectory"/> class to load. </param>
+        /// <returns> The <see cref="FSDirectory"/> class loaded. </returns>
+        /// <exception cref="System.TypeLoadException"> If the specified class cannot be found. </exception>
         public static Type LoadFSDirectoryClass(string clazzName)
         {
             return Type.GetType(AdjustDirectoryClassName(clazzName));
@@ -98,14 +97,14 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a new specific FSDirectory instance </summary>
+        /// Creates a new specific <see cref="FSDirectory"/> instance. </summary>
         /// <param name="clazz"> The class of the object to be created </param>
-        /// <param name="file"> The file to be used as parameter constructor </param>
-        /// <returns> The new FSDirectory instance </returns>
-        /// <exception cref="NoSuchMethodException"> If the Directory does not have a constructor that takes <code>File</code>. </exception>
-        /// <exception cref="InstantiationException"> If the class is abstract or an interface. </exception>
-        /// <exception cref="IllegalAccessException"> If the constructor does not have public visibility. </exception>
-        /// <exception cref="InvocationTargetException"> If the constructor throws an exception </exception>
+        /// <param name="dir"> The <see cref="DirectoryInfo"/> to be used as parameter constructor </param>
+        /// <returns> The new <see cref="FSDirectory"/> instance. </returns>
+        /// <exception cref="MissingMethodException"> If the <see cref="Directory"/> does not have a constructor that takes <see cref="DirectoryInfo"/>. </exception>
+        /// <exception cref="MemberAccessException"> If the class is abstract or an interface. </exception>
+        /// <exception cref="TypeLoadException"> If the constructor does not have public visibility. </exception>
+        /// <exception cref="System.Reflection.TargetInvocationException"> If the constructor throws an exception </exception>
         public static FSDirectory NewFSDirectory(Type clazz, DirectoryInfo dir)
         {
             // Assuming every FSDirectory has a ctor(File):

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/Constants.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Constants.cs b/src/Lucene.Net/Util/Constants.cs
index 1086467..5dfd556 100644
--- a/src/Lucene.Net/Util/Constants.cs
+++ b/src/Lucene.Net/Util/Constants.cs
@@ -39,15 +39,19 @@ namespace Lucene.Net.Util
         // LUCENENET NOTE: IMPORTANT - this line must be placed before RUNTIME_VERSION so it can be parsed.
         private static Regex VERSION_PARSER = new Regex(@"(\d+\.\d+\.\d+\.\d+)", RegexOptions.Compiled);
 
-        /// <summary>
 #if NETSTANDARD
+        /// <summary>
         /// The value of the version parsed from <see cref="RuntimeInformation.FrameworkDescription"/>.
+        /// <para/>
+        /// NOTE: This was JAVA_VERSION in Lucene
+        /// </summary>
 #else
+        /// <summary>
         /// The value of <see cref="Environment.Version"/>.
-#endif
         /// <para/>
         /// NOTE: This was JAVA_VERSION in Lucene
         /// </summary>
+#endif
         public static readonly string RUNTIME_VERSION = GetEnvironmentVariable("RUNTIME_VERSION", "?");
 
 
@@ -173,10 +177,10 @@ namespace Lucene.Net.Util
         // We should never change index format with minor versions, so it should always be x.y or x.y.0.z for alpha/beta versions!
         /// <summary>
         /// this is the internal Lucene version, recorded into each segment.
-        /// NOTE: we track per-segment version as a String with the {@code "X.Y"} format
-        /// (no minor version), e.g. {@code "4.0", "3.1", "3.0"}.
-        /// <p>Alpha and Beta versions will have numbers like {@code "X.Y.0.Z"},
-        /// anything else is not allowed. this is done to prevent people from
+        /// NOTE: we track per-segment version as a <see cref="string"/> with the <c>"X.Y"</c> format
+        /// (no minor version), e.g. <c>"4.0", "3.1", "3.0"</c>.
+        /// <para/>Alpha and Beta versions will have numbers like <c>"X.Y.0.Z"</c>,
+        /// anything else is not allowed. This is done to prevent people from
         /// using indexes created with ALPHA/BETA versions with the released version.
         /// </summary>
         public static readonly string LUCENE_MAIN_VERSION = Ident("4.8");
@@ -186,7 +190,7 @@ namespace Lucene.Net.Util
         // it might make sense to change it when a major/minor/patch
         // port to Lucene is done).
         /// <summary>
-        /// this is the Lucene version for display purposes.
+        /// This is the Lucene version for display purposes.
         /// </summary>
         public static readonly string LUCENE_VERSION = "4.8.0";
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/Counter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Counter.cs b/src/Lucene.Net/Util/Counter.cs
index 099b8ee..93991f5 100644
--- a/src/Lucene.Net/Util/Counter.cs
+++ b/src/Lucene.Net/Util/Counter.cs
@@ -21,24 +21,24 @@ namespace Lucene.Net.Util
 
     /// <summary>
     /// Simple counter class
-    ///
+    /// <para/>
     /// @lucene.internal
     /// @lucene.experimental
     /// </summary>
     public abstract class Counter
     {
         /// <summary>
-        /// Adds the given delta to the counters current value
+        /// Adds the given delta to the counter's current value.
         /// </summary>
         /// <param name="delta">
-        ///          the delta to add </param>
-        /// <returns> the counters updated value </returns>
+        ///          The delta to add. </param>
+        /// <returns> The counter's updated value. </returns>
         public abstract long AddAndGet(long delta);
 
         /// <summary>
-        /// Returns the counters current value
+        /// Returns the counter's current value.
         /// </summary>
-        /// <returns> the counters current value </returns>
+        /// <returns> The counter's current value. </returns>
         public abstract long Get();
 
         /// <summary>
@@ -53,9 +53,9 @@ namespace Lucene.Net.Util
         /// Returns a new counter.
         /// </summary>
         /// <param name="threadSafe">
-        ///          <code>true</code> if the returned counter can be used by multiple
+        ///          <c>true</c> if the returned counter can be used by multiple
         ///          threads concurrently. </param>
-        /// <returns> a new counter. </returns>
+        /// <returns> A new counter. </returns>
         public static Counter NewCounter(bool threadSafe)
         {
             return threadSafe ? (Counter)new AtomicCounter() : new SerialCounter();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/DocIdBitSet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/DocIdBitSet.cs b/src/Lucene.Net/Util/DocIdBitSet.cs
index db1c04f..7531446 100644
--- a/src/Lucene.Net/Util/DocIdBitSet.cs
+++ b/src/Lucene.Net/Util/DocIdBitSet.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Util
     using DocIdSetIterator = Lucene.Net.Search.DocIdSetIterator;
 
     /// <summary>
-    /// Simple DocIdSet and DocIdSetIterator backed by a BitSet 
+    /// Simple <see cref="DocIdSet"/> and <see cref="DocIdSetIterator"/> backed by a <see cref="BitArray"/> 
     /// </summary>
 #if FEATURE_SERIALIZABLE
     [Serializable]
@@ -50,7 +50,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// this DocIdSet implementation is cacheable. </summary>
+        /// This DocIdSet implementation is cacheable. </summary>
         public override bool IsCacheable
         {
             get
@@ -60,7 +60,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the underlying BitSet.
+        /// Returns the underlying <see cref="BitArray"/>.
         /// </summary>
         public virtual BitArray BitSet
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/DoubleBarrelLRUCache.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/DoubleBarrelLRUCache.cs b/src/Lucene.Net/Util/DoubleBarrelLRUCache.cs
index 0bd5d2e..316fd9e 100644
--- a/src/Lucene.Net/Util/DoubleBarrelLRUCache.cs
+++ b/src/Lucene.Net/Util/DoubleBarrelLRUCache.cs
@@ -25,19 +25,19 @@ namespace Lucene.Net.Util
     /// Simple concurrent LRU cache, using a "double barrel"
     /// approach where two ConcurrentHashMaps record entries.
     ///
-    /// <p>At any given time, one hash is primary and the other
-    /// is secondary.  <seealso cref="#get"/> first checks primary, and if
+    /// <para>At any given time, one hash is primary and the other
+    /// is secondary.  <see cref="Get(TKey)"/> first checks primary, and if
     /// that's a miss, checks secondary.  If secondary has the
     /// entry, it's promoted to primary (<b>NOTE</b>: the key is
     /// cloned at this point).  Once primary is full, the
-    /// secondary is cleared and the two are swapped.</p>
+    /// secondary is cleared and the two are swapped.</para>
     ///
-    /// <p>this is not as space efficient as other possible
+    /// <para>This is not as space efficient as other possible
     /// concurrent approaches (see LUCENE-2075): to achieve
     /// perfect LRU(N) it requires 2*N storage.  But, this
     /// approach is relatively simple and seems in practice to
     /// not grow unbounded in size when under hideously high
-    /// load.</p>
+    /// load.</para>
     ///
     /// @lucene.internal
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/FieldCacheSanityChecker.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/FieldCacheSanityChecker.cs b/src/Lucene.Net/Util/FieldCacheSanityChecker.cs
index f42d53a..ca6d281 100644
--- a/src/Lucene.Net/Util/FieldCacheSanityChecker.cs
+++ b/src/Lucene.Net/Util/FieldCacheSanityChecker.cs
@@ -28,10 +28,11 @@ namespace Lucene.Net.Util
     using IndexReader = Lucene.Net.Index.IndexReader;
 
     /// <summary>
+    /// <para>
     /// Provides methods for sanity checking that entries in the FieldCache
     /// are not wasteful or inconsistent.
-    /// </p>
-    /// <p>
+    /// </para>
+    /// <para>
     /// Lucene 2.9 Introduced numerous enhancements into how the FieldCache
     /// is used by the low levels of Lucene searching (for Sorting and
     /// ValueSourceQueries) to improve both the speed for Sorting, as well
@@ -40,14 +41,15 @@ namespace Lucene.Net.Util
     /// MultiReader or DirectoryReader) down to the leaf level SegmentReaders.
     /// As a result, existing applications that directly access the FieldCache
     /// may find RAM usage increase significantly when upgrading to 2.9 or
-    /// Later.  this class provides an API for these applications (or their
+    /// later.  This class provides an API for these applications (or their
     /// Unit tests) to check at run time if the FieldCache contains "insane"
     /// usages of the FieldCache.
-    /// </p>
-    /// @lucene.experimental </summary>
-    /// <seealso cref= FieldCache </seealso>
-    /// <seealso cref= FieldCacheSanityChecker.Insanity </seealso>
-    /// <seealso cref= FieldCacheSanityChecker.InsanityType </seealso>
+    /// </para>
+    /// @lucene.experimental 
+    /// </summary>
+    /// <seealso cref="IFieldCache"/>
+    /// <seealso cref="FieldCacheSanityChecker.Insanity"/>
+    /// <seealso cref="FieldCacheSanityChecker.InsanityType"/>
     public sealed class FieldCacheSanityChecker
     {
         private bool estimateRam;
@@ -57,7 +59,7 @@ namespace Lucene.Net.Util
             /* NOOP */
         }
 
-        /// <param name="estimateRam">If set, estimate size for all CacheEntry objects will be calculated.</param>
+        /// <param name="estimateRam">If set, estimate size for all <see cref="FieldCache.CacheEntry"/> objects will be calculated.</param>
         // LUCENENET specific - added this constructor overload so we wouldn't need a (ridiculous) SetRamUsageEstimator() method
         public FieldCacheSanityChecker(bool estimateRam)
         {
@@ -75,7 +77,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Quick and dirty convenience method </summary>
-        /// <seealso cref= #check </seealso>
+        /// <seealso cref="Check(FieldCache.CacheEntry[])"/>
         public static Insanity[] CheckSanity(IFieldCache cache)
         {
             return CheckSanity(cache.GetCacheEntries());
@@ -83,8 +85,8 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Quick and dirty convenience method that instantiates an instance with
-        /// "good defaults" and uses it to test the CacheEntrys </summary>
-        /// <seealso cref= #check </seealso>
+        /// "good defaults" and uses it to test the <see cref="FieldCache.CacheEntry"/>s </summary>
+        /// <seealso cref="Check(FieldCache.CacheEntry[])"/>
         public static Insanity[] CheckSanity(params FieldCache.CacheEntry[] cacheEntries)
         {
             FieldCacheSanityChecker sanityChecker = new FieldCacheSanityChecker(estimateRam: true);
@@ -93,10 +95,10 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Tests a CacheEntry[] for indication of "insane" cache usage.
-        /// <p>
-        /// <B>NOTE:</b>FieldCache CreationPlaceholder objects are ignored.
+        /// <para>
+        /// <b>NOTE:</b> FieldCache CreationPlaceholder objects are ignored.
         /// (:TODO: is this a bad idea? are we masking a real problem?)
-        /// </p>
+        /// </para>
         /// </summary>
         public Insanity[] Check(params FieldCache.CacheEntry[] cacheEntries)
         {
@@ -164,10 +166,10 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Internal helper method used by check that iterates over
-        /// valMismatchKeys and generates a Collection of Insanity
-        /// instances accordingly.  The MapOfSets are used to populate
-        /// the Insanity objects. </summary>
-        /// <seealso cref= InsanityType#VALUEMISMATCH </seealso>
+        /// <paramref name="valMismatchKeys"/> and generates a <see cref="ICollection{T}"/> of <see cref="Insanity"/>
+        /// instances accordingly.  The <see cref="MapOfSets{TKey, TValue}"/> are used to populate
+        /// the <see cref="Insanity"/> objects. </summary>
+        /// <seealso cref="InsanityType.VALUEMISMATCH"/>
         private ICollection<Insanity> CheckValueMismatch(MapOfSets<int, FieldCache.CacheEntry> valIdToItems, MapOfSets<ReaderField, int> readerFieldToValIds, ISet<ReaderField> valMismatchKeys)
         {
             List<Insanity> insanity = new List<Insanity>(valMismatchKeys.Count * 3);
@@ -200,11 +202,11 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Internal helper method used by check that iterates over
-        /// the keys of readerFieldToValIds and generates a Collection
-        /// of Insanity instances whenever two (or more) ReaderField instances are
+        /// the keys of <paramref name="readerFieldToValIds"/> and generates a <see cref="ICollection{T}"/>
+        /// of <see cref="Insanity"/> instances whenever two (or more) <see cref="ReaderField"/> instances are
         /// found that have an ancestry relationships.
         /// </summary>
-        /// <seealso cref= InsanityType#SUBREADER </seealso>
+        /// <seealso cref="InsanityType.SUBREADER"/>
         private ICollection<Insanity> CheckSubreaders(MapOfSets<int, FieldCache.CacheEntry> valIdToItems, MapOfSets<ReaderField, int> readerFieldToValIds)
         {
             List<Insanity> insanity = new List<Insanity>(23);
@@ -281,9 +283,9 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Checks if the seed is an IndexReader, and if so will walk
+        /// Checks if the <paramref name="seed"/> is an <see cref="IndexReader"/>, and if so will walk
         /// the hierarchy of subReaders building up a list of the objects
-        /// returned by {@code seed.getCoreCacheKey()}
+        /// returned by <c>seed.CoreCacheKey</c>
         /// </summary>
         private IList<object> GetAllDescendantReaderKeys(object seed)
         {
@@ -359,9 +361,9 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Simple container for a collection of related CacheEntry objects that
+        /// Simple container for a collection of related <see cref="FieldCache.CacheEntry"/> objects that
         /// in conjunction with each other represent some "insane" usage of the
-        /// FieldCache.
+        /// <see cref="IFieldCache"/>.
         /// </summary>
         public sealed class Insanity
         {
@@ -396,7 +398,7 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Description of hte insane behavior
+            /// Description of the insane behavior
             /// </summary>
             public string Msg
             {
@@ -407,7 +409,7 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// CacheEntry objects which suggest a problem
+            /// <see cref="FieldCache.CacheEntry"/> objects which suggest a problem
             /// </summary>
             [WritableArray]
             [SuppressMessage("Microsoft.Performance", "CA1819", Justification = "Lucene's design requires some writable array properties")]
@@ -417,8 +419,8 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Multi-Line representation of this Insanity object, starting with
-            /// the Type and Msg, followed by each CacheEntry.toString() on it's
+            /// Multi-Line representation of this <see cref="Insanity"/> object, starting with
+            /// the Type and Msg, followed by each CacheEntry.ToString() on its
             /// own line prefaced by a tab character
             /// </summary>
             public override string ToString()
@@ -446,11 +448,11 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// An Enumeration of the different types of "insane" behavior that
-        /// may be detected in a FieldCache.
+        /// may be detected in a <see cref="IFieldCache"/>.
         /// </summary>
-        /// <seealso cref= InsanityType#SUBREADER </seealso>
-        /// <seealso cref= InsanityType#VALUEMISMATCH </seealso>
-        /// <seealso cref= InsanityType#EXPECTED </seealso>
+        /// <seealso cref="InsanityType.SUBREADER"/>
+        /// <seealso cref="InsanityType.VALUEMISMATCH"/>
+        /// <seealso cref="InsanityType.EXPECTED"/>
         public sealed class InsanityType
         {
             private readonly string label;
@@ -472,24 +474,24 @@ namespace Lucene.Net.Util
             public static readonly InsanityType SUBREADER = new InsanityType("SUBREADER");
 
             /// <summary>
-            /// <p>
+            /// <para>
             /// Indicates entries have the same reader+fieldname but
-            /// different cached values.  this can happen if different datatypes,
+            /// different cached values.  This can happen if different datatypes,
             /// or parsers are used -- and while it's not necessarily a bug
             /// it's typically an indication of a possible problem.
-            /// </p>
-            /// <p>
+            /// </para>
+            /// <para>
             /// <b>NOTE:</b> Only the reader, fieldname, and cached value are actually
             /// tested -- if two cache entries have different parsers or datatypes but
-            /// the cached values are the same Object (== not just equal()) this method
-            /// does not consider that a red flag.  this allows for subtle variations
+            /// the cached values are the same Object (== not just Equals()) this method
+            /// does not consider that a red flag.  This allows for subtle variations
             /// in the way a Parser is specified (null vs DEFAULT_INT64_PARSER, etc...)
-            /// </p>
+            /// </para>
             /// </summary>
             public static readonly InsanityType VALUEMISMATCH = new InsanityType("VALUEMISMATCH");
 
             /// <summary>
-            /// Indicates an expected bit of "insanity".  this may be useful for
+            /// Indicates an expected bit of "insanity".  This may be useful for
             /// clients that wish to preserve/log information about insane usage
             /// but indicate that it was expected.
             /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/FilterIterator.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/FilterIterator.cs b/src/Lucene.Net/Util/FilterIterator.cs
index f4c8743..4ede14d 100644
--- a/src/Lucene.Net/Util/FilterIterator.cs
+++ b/src/Lucene.Net/Util/FilterIterator.cs
@@ -22,8 +22,8 @@ namespace Lucene.Net.Util
      */
 
     /// <summary>
-    /// An <seealso cref="Iterator"/> implementation that filters elements with a boolean predicate. </summary>
-    /// <seealso cref= #predicateFunction </seealso>
+    /// An <see cref="IEnumerator{T}"/> implementation that filters elements with a boolean predicate. </summary>
+    /// <seealso cref="PredicateFunction(T)"/>
     public abstract class FilterIterator<T> : IEnumerator<T>
     {
         private readonly IEnumerator<T> iter;
@@ -32,7 +32,7 @@ namespace Lucene.Net.Util
         private T current = default(T);
 
         /// <summary>
-        /// returns true, if this element should be returned by <seealso cref="#next()"/>. </summary>
+        /// Returns <c>true</c> if this element should be set to <see cref="Current"/> by <see cref="SetNext()"/>. </summary>
         protected abstract bool PredicateFunction(T @object);
 
         public FilterIterator(IEnumerator<T> baseIterator)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/FixedBitSet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/FixedBitSet.cs b/src/Lucene.Net/Util/FixedBitSet.cs
index 0f4aa34..ff9b91d 100644
--- a/src/Lucene.Net/Util/FixedBitSet.cs
+++ b/src/Lucene.Net/Util/FixedBitSet.cs
@@ -25,18 +25,18 @@ namespace Lucene.Net.Util
     using DocIdSetIterator = Lucene.Net.Search.DocIdSetIterator;
 
     /// <summary>
-    /// BitSet of fixed length (numBits), backed by accessible (<seealso cref="#getBits"/>)
-    /// long[], accessed with an int index, implementing <seealso cref="GetBits"/> and
-    /// <seealso cref="DocIdSet"/>. If you need to manage more than 2.1B bits, use
-    /// <seealso cref="Int64BitSet"/>.
-    ///
+    /// BitSet of fixed length (numBits), backed by accessible (<see cref="GetBits()"/>)
+    /// long[], accessed with an int index, implementing <see cref="IBits"/> and
+    /// <see cref="DocIdSet"/>. If you need to manage more than 2.1B bits, use
+    /// <see cref="Int64BitSet"/>.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public sealed class FixedBitSet : DocIdSet, IBits
     {
         /// <summary>
-        /// A <seealso cref="DocIdSetIterator"/> which iterates over set bits in a
-        /// <seealso cref="FixedBitSet"/>.
+        /// A <see cref="DocIdSetIterator"/> which iterates over set bits in a
+        /// <see cref="FixedBitSet"/>.
         /// </summary>
         public sealed class FixedBitSetIterator : DocIdSetIterator
         {
@@ -45,7 +45,7 @@ namespace Lucene.Net.Util
             internal int doc = -1;
 
             /// <summary>
-            /// Creates an iterator over the given <seealso cref="FixedBitSet"/>. </summary>
+            /// Creates an iterator over the given <see cref="FixedBitSet"/>. </summary>
             public FixedBitSetIterator(FixedBitSet bits)
                 : this(bits.bits, bits.numBits, bits.numWords)
             {
@@ -126,14 +126,14 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// If the given <seealso cref="FixedBitSet"/> is large enough to hold {@code numBits},
-        /// returns the given bits, otherwise returns a new <seealso cref="FixedBitSet"/> which
+        /// If the given <see cref="FixedBitSet"/> is large enough to hold <paramref name="numBits"/>,
+        /// returns the given bits, otherwise returns a new <see cref="FixedBitSet"/> which
         /// can hold the requested number of bits.
         ///
-        /// <p>
-        /// <b>NOTE:</b> the returned bitset reuses the underlying {@code long[]} of
-        /// the given {@code bits} if possible. Also, calling <seealso cref="#length()"/> on the
-        /// returned bits may return a value greater than {@code numBits}.
+        /// <para/>
+        /// <b>NOTE:</b> the returned bitset reuses the underlying <see cref="T:long[]"/> of
+        /// the given <paramref name="bits"/> if possible. Also, calling <see cref="Length"/> on the
+        /// returned bits may return a value greater than <paramref name="numBits"/>.
         /// </summary>
         public static FixedBitSet EnsureCapacity(FixedBitSet bits, int numBits)
         {
@@ -154,7 +154,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// returns the number of 64 bit words it would take to hold numBits </summary>
+        /// Returns the number of 64 bit words it would take to hold <paramref name="numBits"/> </summary>
         public static int Bits2words(int numBits)
         {
             int numLong = (int)((uint)numBits >> 6);
@@ -244,7 +244,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// this DocIdSet implementation is cacheable. </summary>
+        /// This DocIdSet implementation is cacheable. </summary>
         public override bool IsCacheable
         {
             get
@@ -263,8 +263,8 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Returns number of set bits.  NOTE: this visits every
-        ///  long in the backing bits array, and the result is not
-        ///  internally cached!
+        /// <see cref="long"/> in the backing bits array, and the result is not
+        /// internally cached!
         /// </summary>
         public int Cardinality()
         {
@@ -324,7 +324,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Returns the index of the first set bit starting at the index specified.
-        ///  -1 is returned if there are no more set bits.
+        /// -1 is returned if there are no more set bits.
         /// </summary>
         public int NextSetBit(int index)
         {
@@ -352,7 +352,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Returns the index of the last set bit before or on the index specified.
-        ///  -1 is returned if there are no more set bits.
+        /// -1 is returned if there are no more set bits.
         /// </summary>
         public int PrevSetBit(int index)
         {
@@ -380,7 +380,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Does in-place OR of the bits provided by the
-        ///  iterator.
+        /// iterator.
         /// </summary>
         public void Or(DocIdSetIterator iter)
         {
@@ -455,7 +455,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Does in-place AND of the bits provided by the
-        ///  iterator.
+        /// iterator.
         /// </summary>
         public void And(DocIdSetIterator iter)
         {
@@ -496,7 +496,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// returns true if the sets have any elements in common </summary>
+        /// Returns <c>true</c> if the sets have any elements in common </summary>
         public bool Intersects(FixedBitSet other)
         {
             int pos = Math.Min(numWords, other.numWords);
@@ -533,7 +533,7 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Does in-place AND NOT of the bits provided by the
-        ///  iterator.
+        /// iterator.
         /// </summary>
         public void AndNot(DocIdSetIterator iter)
         {
@@ -587,8 +587,8 @@ namespace Lucene.Net.Util
         /// <summary>
         /// Flips a range of bits
         /// </summary>
-        /// <param name="startIndex"> lower index </param>
-        /// <param name="endIndex"> one-past the last bit to flip </param>
+        /// <param name="startIndex"> Lower index </param>
+        /// <param name="endIndex"> One-past the last bit to flip </param>
         public void Flip(int startIndex, int endIndex)
         {
             Debug.Assert(startIndex >= 0 && startIndex < numBits);
@@ -633,8 +633,8 @@ namespace Lucene.Net.Util
         /// <summary>
         /// Sets a range of bits
         /// </summary>
-        /// <param name="startIndex"> lower index </param>
-        /// <param name="endIndex"> one-past the last bit to set </param>
+        /// <param name="startIndex"> Lower index </param>
+        /// <param name="endIndex"> One-past the last bit to set </param>
         public void Set(int startIndex, int endIndex)
         {
             Debug.Assert(startIndex >= 0 && startIndex < numBits);
@@ -665,8 +665,8 @@ namespace Lucene.Net.Util
         /// <summary>
         /// Clears a range of bits.
         /// </summary>
-        /// <param name="startIndex"> lower index </param>
-        /// <param name="endIndex"> one-past the last bit to clear </param>
+        /// <param name="startIndex"> Lower index </param>
+        /// <param name="endIndex"> One-past the last bit to clear </param>
         public void Clear(int startIndex, int endIndex)
         {
             Debug.Assert(startIndex >= 0 && startIndex < numBits, "startIndex=" + startIndex + ", numBits=" + numBits);
@@ -707,7 +707,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// returns true if both sets have the same bits set </summary>
+        /// Returns <c>true</c> if both sets have the same bits set </summary>
         public override bool Equals(object o)
         {
             if (this == o)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/GrowableByteArrayDataOutput.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/GrowableByteArrayDataOutput.cs b/src/Lucene.Net/Util/GrowableByteArrayDataOutput.cs
index 208a14c..49c6b66 100644
--- a/src/Lucene.Net/Util/GrowableByteArrayDataOutput.cs
+++ b/src/Lucene.Net/Util/GrowableByteArrayDataOutput.cs
@@ -23,7 +23,8 @@ namespace Lucene.Net.Util
     using DataOutput = Lucene.Net.Store.DataOutput;
 
     /// <summary>
-    /// A <seealso cref="DataOutput"/> that can be used to build a byte[].
+    /// A <see cref="DataOutput"/> that can be used to build a <see cref="T:byte[]"/>.
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public sealed class GrowableByteArrayDataOutput : DataOutput
@@ -44,7 +45,7 @@ namespace Lucene.Net.Util
         public int Length { get; set; }
 
         /// <summary>
-        /// Create a <seealso cref="GrowableByteArrayDataOutput"/> with the given initial capacity. </summary>
+        /// Create a <see cref="GrowableByteArrayDataOutput"/> with the given initial capacity. </summary>
         public GrowableByteArrayDataOutput(int cp)
         {
             this.bytes = new byte[ArrayUtil.Oversize(cp, 1)];

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d7cb70c4/src/Lucene.Net/Util/IAttributeReflector.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/IAttributeReflector.cs b/src/Lucene.Net/Util/IAttributeReflector.cs
deleted file mode 100644
index 07019d3..0000000
--- a/src/Lucene.Net/Util/IAttributeReflector.cs
+++ /dev/null
@@ -1,29 +0,0 @@
-using System;
-
-namespace Lucene.Net.Util
-{
-    /*
-     * Licensed to the Apache Software Foundation (ASF) under one or more
-     * contributor license agreements.  See the NOTICE file distributed with
-     * this work for additional information regarding copyright ownership.
-     * The ASF licenses this file to You under the Apache License, Version 2.0
-     * (the "License"); you may not use this file except in compliance with
-     * the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    public interface IAttributeReflector
-    {
-        void Reflect<T>(string key, object value)
-            where T : IAttribute;
-
-        void Reflect(Type type, string key, object value);
-    }
-}
\ No newline at end of file


[44/48] lucenenet git commit: Lucene.Net: Fixed misc XML documentation comment warnings

Posted by ni...@apache.org.
Lucene.Net: Fixed misc XML documentation comment warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/5a0e4b68
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/5a0e4b68
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/5a0e4b68

Branch: refs/heads/master
Commit: 5a0e4b68a7822c86b6e0434c4a1875c98a98e034
Parents: 5dc5193
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Mon Jun 5 15:43:33 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Tue Jun 6 06:58:42 2017 +0700

----------------------------------------------------------------------
 src/Lucene.Net/Index/DirectoryReader.cs             | 13 ++++++-------
 src/Lucene.Net/Index/DocValuesFieldUpdates.cs       |  2 +-
 src/Lucene.Net/Index/DocumentsWriterStallControl.cs |  4 ++--
 src/Lucene.Net/Index/IndexDeletionPolicy.cs         |  3 +--
 src/Lucene.Net/Index/IndexWriter.cs                 |  2 +-
 src/Lucene.Net/Index/IndexWriterConfig.cs           |  2 +-
 src/Lucene.Net/Store/ByteBufferIndexInput.cs        |  2 +-
 src/Lucene.Net/Store/DataOutput.cs                  |  4 ++--
 src/Lucene.Net/Store/Lock.cs                        |  2 +-
 src/Lucene.Net/Util/Fst/Builder.cs                  |  2 +-
 src/Lucene.Net/Util/Fst/FST.cs                      | 10 +++++-----
 src/Lucene.Net/Util/Fst/PositiveIntOutputs.cs       |  4 ++--
 12 files changed, 24 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5a0e4b68/src/Lucene.Net/Index/DirectoryReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/DirectoryReader.cs b/src/Lucene.Net/Index/DirectoryReader.cs
index d3cf4de..7056ce0 100644
--- a/src/Lucene.Net/Index/DirectoryReader.cs
+++ b/src/Lucene.Net/Index/DirectoryReader.cs
@@ -38,8 +38,7 @@ namespace Lucene.Net.Index
     /// as documents are added to and deleted from an index.  Clients should thus not
     /// rely on a given document having the same number between sessions.
     ///
-    /// <para/>
-    /// <p><b>NOTE</b>:
+    /// <para/><b>NOTE</b>:
     /// <see cref="IndexReader"/> instances are completely thread
     /// safe, meaning multiple threads can call any of its methods,
     /// concurrently.  If your application requires external
@@ -127,8 +126,8 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>
-        /// Expert: returns an <see cref=""/>IndexReader reading the index in the given
-        ///  <seealso cref="IndexCommit"/> and <paramref name="termInfosIndexDivisor"/>. </summary>
+        /// Expert: returns an <see cref="IndexReader"/> reading the index in the given
+        /// <see cref="Index.IndexCommit"/> and <paramref name="termInfosIndexDivisor"/>. </summary>
         /// <param name="commit"> the commit point to open </param>
         /// <param name="termInfosIndexDivisor"> Subsamples which indexed
         /// terms are loaded into RAM. this has the same effect as setting
@@ -462,14 +461,14 @@ namespace Lucene.Net.Index
         /// Check whether any new changes have occurred to the
         /// index since this reader was opened.
         ///
-        /// <para>If this reader was created by calling <see cref="Open"/>,
+        /// <para>If this reader was created by calling an overload of <see cref="Open(Directory)"/>,
         /// then this method checks if any further commits
         /// (see <see cref="IndexWriter.Commit()"/>) have occurred in the
         /// directory.</para>
         ///
         /// <para>If instead this reader is a near real-time reader
         /// (ie, obtained by a call to 
-        /// <see cref="DirectoryReader.Open(IndexWriter, bool)"/>, or by calling <see cref="OpenIfChanged"/>
+        /// <see cref="DirectoryReader.Open(IndexWriter, bool)"/>, or by calling an overload of <see cref="OpenIfChanged(DirectoryReader)"/>
         /// on a near real-time reader), then this method checks if
         /// either a new commit has occurred, or any new
         /// uncommitted changes have taken place via the writer.
@@ -477,7 +476,7 @@ namespace Lucene.Net.Index
         /// merging, this method will still return <c>false</c>.</para>
         ///
         /// <para>In any event, if this returns <c>false</c>, you should call
-        /// <see cref="OpenIfChanged"/> to get a new reader that sees the
+        /// an overload of <see cref="OpenIfChanged(DirectoryReader)"/> to get a new reader that sees the
         /// changes.</para>
         /// </summary>
         /// <exception cref="IOException"> if there is a low-level IO error </exception>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5a0e4b68/src/Lucene.Net/Index/DocValuesFieldUpdates.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/DocValuesFieldUpdates.cs b/src/Lucene.Net/Index/DocValuesFieldUpdates.cs
index 81e5965..a16158f 100644
--- a/src/Lucene.Net/Index/DocValuesFieldUpdates.cs
+++ b/src/Lucene.Net/Index/DocValuesFieldUpdates.cs
@@ -63,7 +63,7 @@ namespace Lucene.Net.Index
 
             /// <summary>
             /// Reset the iterator's state. Should be called before <see cref="NextDoc()"/>
-            /// and <seealso cref="#value()"/>.
+            /// and <see cref="Value"/>.
             /// </summary>
             public abstract void Reset();
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5a0e4b68/src/Lucene.Net/Index/DocumentsWriterStallControl.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/DocumentsWriterStallControl.cs b/src/Lucene.Net/Index/DocumentsWriterStallControl.cs
index 7dd1581..a0c844c 100644
--- a/src/Lucene.Net/Index/DocumentsWriterStallControl.cs
+++ b/src/Lucene.Net/Index/DocumentsWriterStallControl.cs
@@ -34,9 +34,9 @@ namespace Lucene.Net.Index
     /// <para/>
     /// To prevent OOM Errors and ensure <see cref="IndexWriter"/>'s stability this class blocks
     /// incoming threads from indexing once 2 x number of available
-    /// <see cref="ThreadState"/>s in <see cref="DocumentsWriterPerThreadPool"/> is exceeded.
+    /// <see cref="DocumentsWriterPerThreadPool.ThreadState"/>s is exceeded.
     /// Once flushing catches up and the number of flushing DWPT is equal or lower
-    /// than the number of active <see cref="ThreadState"/>s threads are released and can
+    /// than the number of active <see cref="DocumentsWriterPerThreadPool.ThreadState"/>s, threads are released and can
     /// continue indexing.
     /// </summary>
 #if FEATURE_SERIALIZABLE

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5a0e4b68/src/Lucene.Net/Index/IndexDeletionPolicy.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/IndexDeletionPolicy.cs b/src/Lucene.Net/Index/IndexDeletionPolicy.cs
index 2c00bb7..d5daffe 100644
--- a/src/Lucene.Net/Index/IndexDeletionPolicy.cs
+++ b/src/Lucene.Net/Index/IndexDeletionPolicy.cs
@@ -70,8 +70,7 @@ namespace Lucene.Net.Index
         /// <para>The writer locates all index commits present in the
         /// index directory and calls this method.  The policy may
         /// choose to delete some of the commit points, doing so by
-        /// calling method <seealso cref="IndexCommit#delete delete()"/>
-        /// of <seealso cref="IndexCommit"/>.</para>
+        /// calling method <see cref="IndexCommit.Delete()"/>.</para>
         ///
         /// <para><u>Note:</u> the last CommitPoint is the most recent one,
         /// i.e. the "front index state". Be careful not to delete it,

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5a0e4b68/src/Lucene.Net/Index/IndexWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/IndexWriter.cs b/src/Lucene.Net/Index/IndexWriter.cs
index 618024c..0c81ccb 100644
--- a/src/Lucene.Net/Index/IndexWriter.cs
+++ b/src/Lucene.Net/Index/IndexWriter.cs
@@ -151,7 +151,7 @@ namespace Lucene.Net.Index
     /// <para><b>NOTE</b>: If you call
     /// <see cref="Thread.Interrupt()"/> on a thread that's within
     /// <see cref="IndexWriter"/>, <see cref="IndexWriter"/> will try to catch this (eg, if
-    /// it's in a Wait() or <see cref="Thread.Sleep()"/>), and will then throw
+    /// it's in a Wait() or <see cref="Thread.Sleep(int)"/>), and will then throw
     /// the unchecked exception <see cref="ThreadInterruptedException"/>
     /// and <b>clear</b> the interrupt status on the thread.</para>
     /// </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5a0e4b68/src/Lucene.Net/Index/IndexWriterConfig.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Index/IndexWriterConfig.cs b/src/Lucene.Net/Index/IndexWriterConfig.cs
index ffc21c2..b2ed2b8 100644
--- a/src/Lucene.Net/Index/IndexWriterConfig.cs
+++ b/src/Lucene.Net/Index/IndexWriterConfig.cs
@@ -155,7 +155,7 @@ namespace Lucene.Net.Index
         /// <summary>
         /// Gets or sets the <see cref="IndexWriter"/> this config is attached to.
         /// </summary>
-        /// <exception cref="Util.SetOnce{T}.AlreadySetException">
+        /// <exception cref="Util.AlreadySetException">
         ///           if this config is already attached to a writer. </exception>
         internal IndexWriterConfig SetIndexWriter(IndexWriter writer)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5a0e4b68/src/Lucene.Net/Store/ByteBufferIndexInput.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Store/ByteBufferIndexInput.cs b/src/Lucene.Net/Store/ByteBufferIndexInput.cs
index 629b5d0..9827473 100644
--- a/src/Lucene.Net/Store/ByteBufferIndexInput.cs
+++ b/src/Lucene.Net/Store/ByteBufferIndexInput.cs
@@ -312,7 +312,7 @@ namespace Lucene.Net.Store
 
         /// <summary>
         /// Returns a sliced view from a set of already-existing buffers:
-        /// the last buffer's <see cref="Support.Buffer.Limit"/> will be correct, but
+        /// the last buffer's <see cref="Support.IO.Buffer.Limit"/> will be correct, but
         /// you must deal with <paramref name="offset"/> separately (the first buffer will not be adjusted)
         /// </summary>
         private ByteBuffer[] BuildSlice(ByteBuffer[] buffers, long offset, long length)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5a0e4b68/src/Lucene.Net/Store/DataOutput.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Store/DataOutput.cs b/src/Lucene.Net/Store/DataOutput.cs
index 7fe27e2..940439f 100644
--- a/src/Lucene.Net/Store/DataOutput.cs
+++ b/src/Lucene.Net/Store/DataOutput.cs
@@ -43,7 +43,7 @@ namespace Lucene.Net.Store
         /// accessed as sequences of bytes. All other data types are defined
         /// as sequences of bytes, so file formats are byte-order independent.
         /// </summary>
-        /// <seealso cref="IndexInput.ReadByte()"/>
+        /// <seealso cref="DataInput.ReadByte()"/>
         public abstract void WriteByte(byte b);
 
         /// <summary>
@@ -215,7 +215,7 @@ namespace Lucene.Net.Store
         /// <para/>
         /// NOTE: this was writeLong() in Lucene
         /// </summary>
-        /// <seealso cref="DataInput.ReadLong()"/>
+        /// <seealso cref="DataInput.ReadInt64()"/>
         public virtual void WriteInt64(long i)
         {
             WriteInt32((int)(i >> 32));

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5a0e4b68/src/Lucene.Net/Store/Lock.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Store/Lock.cs b/src/Lucene.Net/Store/Lock.cs
index f09aee9..9f37acc 100644
--- a/src/Lucene.Net/Store/Lock.cs
+++ b/src/Lucene.Net/Store/Lock.cs
@@ -51,7 +51,7 @@ namespace Lucene.Net.Store
         public const long LOCK_OBTAIN_WAIT_FOREVER = -1;
 
         /// <summary>
-        /// Creates a new instance with the ability to specify the <see cref="With.DoBody()"/> method
+        /// Creates a new instance with the ability to specify the <see cref="With{T}.DoBody()"/> method
         /// through the <paramref name="doBody"/> argument
         /// <para/>
         /// Simple example:

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5a0e4b68/src/Lucene.Net/Util/Fst/Builder.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Fst/Builder.cs b/src/Lucene.Net/Util/Fst/Builder.cs
index 418f724..c7eb080 100644
--- a/src/Lucene.Net/Util/Fst/Builder.cs
+++ b/src/Lucene.Net/Util/Fst/Builder.cs
@@ -99,7 +99,7 @@ namespace Lucene.Net.Util.Fst
 
         /// <summary>
         /// Instantiates an FST/FSA builder without any pruning. A shortcut
-        /// to <see cref="Builder.Builder(FST.INPUT_TYPE, int, int, bool, bool, int, Outputs{T}, FreezeTail{T}, bool, float, bool, int)"/>
+        /// to <see cref="Builder{T}.Builder(FST.INPUT_TYPE, int, int, bool, bool, int, Outputs{T}, FreezeTail{T}, bool, float, bool, int)"/>
         /// with pruning options turned off.
         /// </summary>
         public Builder(FST.INPUT_TYPE inputType, Outputs<T> outputs)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5a0e4b68/src/Lucene.Net/Util/Fst/FST.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Fst/FST.cs b/src/Lucene.Net/Util/Fst/FST.cs
index dfefd63..e85af58 100644
--- a/src/Lucene.Net/Util/Fst/FST.cs
+++ b/src/Lucene.Net/Util/Fst/FST.cs
@@ -1069,9 +1069,9 @@ namespace Lucene.Net.Util.Fst
         }
 
         /// <summary>
-        /// Checks if <paramref name="arc"/>'s target state is in expanded (or vector) format.
+        /// Checks if arc's target state is in expanded (or vector) format.
         /// </summary>
-        /// <returns> Returns <c>true</c> if <paramref name="arc"/> points to a state in an
+        /// <returns> Returns <c>true</c> if arc points to a state in an
         /// expanded array format. </returns>
         internal bool IsExpandedTarget(FST.Arc<T> follow, FST.BytesReader @in)
         {
@@ -2105,17 +2105,17 @@ namespace Lucene.Net.Util.Fst
         internal const sbyte ARCS_AS_FIXED_ARRAY = BIT_ARC_HAS_FINAL_OUTPUT;
 
         /// <summary>
-        /// <see cref="Builder{T}.UnCompiledNode{S}"/>
+        /// <see cref="Builder.UnCompiledNode{S}"/>
         /// </summary>
         public const int FIXED_ARRAY_SHALLOW_DISTANCE = 3;
 
         /// <summary>
-        /// <see cref="Builder{T}.UnCompiledNode{S}"/>
+        /// <see cref="Builder.UnCompiledNode{S}"/>
         /// </summary>
         public const int FIXED_ARRAY_NUM_ARCS_SHALLOW = 5;
 
         /// <summary>
-        /// <see cref="Builder{T}.UnCompiledNode{S}"/>
+        /// <see cref="Builder.UnCompiledNode{S}"/>
         /// </summary>
         public const int FIXED_ARRAY_NUM_ARCS_DEEP = 10;
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/5a0e4b68/src/Lucene.Net/Util/Fst/PositiveIntOutputs.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/Fst/PositiveIntOutputs.cs b/src/Lucene.Net/Util/Fst/PositiveIntOutputs.cs
index 8897054..bf343ed 100644
--- a/src/Lucene.Net/Util/Fst/PositiveIntOutputs.cs
+++ b/src/Lucene.Net/Util/Fst/PositiveIntOutputs.cs
@@ -25,10 +25,10 @@ namespace Lucene.Net.Util.Fst
 
     /// <summary>
     /// An FST <see cref="Outputs{T}"/> implementation where each output
-    /// is a non-negative <see cref="long?"/> value.
+    /// is a non-negative <see cref="T:long?"/> value.
     /// <para/>
     /// NOTE: This was PositiveIntOutputs in Lucene
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public sealed class PositiveInt32Outputs : Outputs<long?>


[23/48] lucenenet git commit: Lucene.Net.ICU: Fixed XML documentation warnings

Posted by ni...@apache.org.
Lucene.Net.ICU: Fixed XML documentation warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/6267463f
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/6267463f
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/6267463f

Branch: refs/heads/master
Commit: 6267463f36151a3821cf4e7dc943a8977421e632
Parents: 33f31f5
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Sun Jun 4 04:24:39 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Sun Jun 4 04:24:39 2017 +0700

----------------------------------------------------------------------
 src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs   | 4 ++--
 .../PostingsHighlight/PostingsHighlighter.cs                 | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6267463f/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
index 5b84fde..4371a7b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Th/ThaiAnalyzer.cs
@@ -104,10 +104,10 @@ namespace Lucene.Net.Analysis.Th
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="ThaiWordFilter"/>, and
         ///         <see cref="StopFilter"/> </returns>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/6267463f/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs b/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
index 7562228..85d6925 100644
--- a/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
+++ b/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs
@@ -46,10 +46,10 @@ namespace Lucene.Net.Search.PostingsHighlight
     /// <para/>
     /// You can customize the behavior by subclassing this highlighter, some important hooks:
     /// <list type="bullet">
-    ///     <item><see cref="GetBreakIterator(string)"/>: Customize how the text is divided into passages.</description></item>
-    ///     <item><see cref="GetScorer(string)"/>: Customize how passages are ranked.</description></item>
-    ///     <item><see cref="GetFormatter(string)"/>: Customize how snippets are formatted.</description></item>
-    ///     <item><see cref="GetIndexAnalyzer(string)"/>: Enable highlighting of MultiTermQuerys such as <see cref="WildcardQuery"/>.</description></item>
+    ///     <item><description><see cref="GetBreakIterator(string)"/>: Customize how the text is divided into passages.</description></item>
+    ///     <item><description><see cref="GetScorer(string)"/>: Customize how passages are ranked.</description></item>
+    ///     <item><description><see cref="GetFormatter(string)"/>: Customize how snippets are formatted.</description></item>
+    ///     <item><description><see cref="GetIndexAnalyzer(string)"/>: Enable highlighting of MultiTermQuerys such as <see cref="WildcardQuery"/>.</description></item>
     /// </list>
     /// <para/>
     /// <b>WARNING</b>: The code is very new and probably still has some exciting bugs!


[41/48] lucenenet git commit: Lucene.Net.Codecs.Lucene40: Fixed XML documentation comment warnings

Posted by ni...@apache.org.
Lucene.Net.Codecs.Lucene40: Fixed XML documentation comment warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/27cdd048
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/27cdd048
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/27cdd048

Branch: refs/heads/master
Commit: 27cdd0480ae5d8f4c83ae73557e77fa8d589792c
Parents: 3221b63
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Mon Jun 5 14:10:17 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Tue Jun 6 06:58:41 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |   3 +-
 src/Lucene.Net/Codecs/Lucene40/BitVector.cs     |  67 +++---
 src/Lucene.Net/Codecs/Lucene40/Lucene40Codec.cs |  14 +-
 .../Codecs/Lucene40/Lucene40DocValuesFormat.cs  | 158 ++++++-------
 .../Codecs/Lucene40/Lucene40DocValuesReader.cs  |  17 +-
 .../Codecs/Lucene40/Lucene40FieldInfosFormat.cs | 126 +++++-----
 .../Codecs/Lucene40/Lucene40FieldInfosReader.cs |   8 +-
 .../Codecs/Lucene40/Lucene40LiveDocsFormat.cs   |  52 ++---
 .../Codecs/Lucene40/Lucene40NormsFormat.cs      |  18 +-
 .../Lucene40/Lucene40PostingsBaseFormat.cs      |   6 +-
 .../Codecs/Lucene40/Lucene40PostingsFormat.cs   | 232 +++++++++----------
 .../Codecs/Lucene40/Lucene40PostingsReader.cs   |  11 +-
 .../Lucene40/Lucene40SegmentInfoFormat.cs       |  75 +++---
 .../Lucene40/Lucene40SegmentInfoReader.cs       |   8 +-
 .../Lucene40/Lucene40SegmentInfoWriter.cs       |   7 +-
 .../Codecs/Lucene40/Lucene40SkipListReader.cs   |  11 +-
 .../Lucene40/Lucene40StoredFieldsFormat.cs      |  96 ++++----
 .../Lucene40/Lucene40StoredFieldsReader.cs      |  30 +--
 .../Lucene40/Lucene40StoredFieldsWriter.cs      |  21 +-
 .../Lucene40/Lucene40TermVectorsFormat.cs       | 139 ++++++-----
 .../Lucene40/Lucene40TermVectorsReader.cs       |  23 +-
 .../Lucene40/Lucene40TermVectorsWriter.cs       |   8 +-
 22 files changed, 559 insertions(+), 571 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5f422f8..c8a36fb 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -53,8 +53,7 @@ helpers to help with that, see for examples see our [Java style methods to avoid
 1. Lucene.Net.Core (project)
    1. Codecs.Compressing (namespace)
    2. Codecs.Lucene3x (namespace)
-   3. Codecs.Lucene40 (namespace)
-   4. Util.Packed (namespace)
+   3. Util.Packed (namespace)
 2. Lucene.Net.Codecs (project)
    1. Appending (namespace)
    2. BlockTerms (namespace)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/BitVector.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/BitVector.cs b/src/Lucene.Net/Codecs/Lucene40/BitVector.cs
index 57a7e14..eb1605a 100644
--- a/src/Lucene.Net/Codecs/Lucene40/BitVector.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/BitVector.cs
@@ -32,16 +32,16 @@ namespace Lucene.Net.Codecs.Lucene40
     using IMutableBits = Lucene.Net.Util.IMutableBits;
 
     /// <summary>
-    /// Optimized implementation of a vector of bits.  this is more-or-less like
-    ///  java.util.BitSet, but also includes the following:
-    ///  <ul>
-    ///  <li>a count() method, which efficiently computes the number of one bits;</li>
-    ///  <li>optimized read from and write to disk;</li>
-    ///  <li>inlinable get() method;</li>
-    ///  <li>store and load, as bit set or d-gaps, depending on sparseness;</li>
-    ///  </ul>
-    ///
-    ///  @lucene.internal
+    /// Optimized implementation of a vector of bits.  This is more-or-less like
+    /// <c>java.util.BitSet</c>, but also includes the following:
+    /// <list type="bullet">
+    ///     <item><description>a count() method, which efficiently computes the number of one bits;</description></item>
+    ///     <item><description>optimized read from and write to disk;</description></item>
+    ///     <item><description>inlinable get() method;</description></item>
+    ///     <item><description>store and load, as bit set or d-gaps, depending on sparseness;</description></item>
+    /// </list>
+    /// <para/>
+    /// @lucene.internal
     /// </summary>
     // pkg-private: if this thing is generally useful then it can go back in .util,
     // but the serialization must be here underneath the codec.
@@ -53,7 +53,7 @@ namespace Lucene.Net.Codecs.Lucene40
         private int version;
 
         /// <summary>
-        /// Constructs a vector capable of holding <code>n</code> bits. </summary>
+        /// Constructs a vector capable of holding <paramref name="n"/> bits. </summary>
         public BitVector(int n)
         {
             size = n;
@@ -88,7 +88,7 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// Sets the value of <code>bit</code> to one. </summary>
+        /// Sets the value of <paramref name="bit"/> to one. </summary>
         public void Set(int bit)
         {
             if (bit >= size)
@@ -100,8 +100,8 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// Sets the value of <code>bit</code> to true, and
-        ///  returns true if bit was already set
+        /// Sets the value of <paramref name="bit"/> to <c>true</c>, and
+        /// returns <c>true</c> if bit was already set.
         /// </summary>
         public bool GetAndSet(int bit)
         {
@@ -129,7 +129,7 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// Sets the value of <code>bit</code> to zero. </summary>
+        /// Sets the value of <paramref name="bit"/> to zero. </summary>
         public void Clear(int bit)
         {
             if (bit >= size)
@@ -166,8 +166,8 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// Returns <code>true</code> if <code>bit</code> is one and
-        ///  <code>false</code> if it is zero.
+        /// Returns <c>true</c> if <paramref name="bit"/> is one and
+        /// <c>false</c> if it is zero.
         /// </summary>
         public bool Get(int bit)
         {
@@ -186,8 +186,9 @@ namespace Lucene.Net.Codecs.Lucene40
         //}
 
         /// <summary>
-        /// Returns the number of bits in this vector.  this is also one greater than
+        /// Returns the number of bits in this vector.  This is also one greater than
         /// the number of the largest valid bit number.
+        /// <para/>
         /// This is the equivalent of either size() or length() in Lucene.
         /// </summary>
         public int Length
@@ -196,9 +197,9 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// Returns the total number of one bits in this vector.  this is efficiently
-        ///  computed and cached, so that, if the vector is not changed, no
-        ///  recomputation is done for repeated calls.
+        /// Returns the total number of one bits in this vector.  This is efficiently
+        /// computed and cached, so that, if the vector is not changed, no
+        /// recomputation is done for repeated calls.
         /// </summary>
         public int Count()
         {
@@ -257,9 +258,9 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// Writes this vector to the file <code>name</code> in Directory
-        ///  <code>d</code>, in a format that can be read by the constructor {@link
-        ///  #BitVector(Directory, String, IOContext)}.
+        /// Writes this vector to the file <paramref name="name"/> in Directory
+        /// <paramref name="d"/>, in a format that can be read by the constructor 
+        /// <see cref="BitVector(Directory, string, IOContext)"/>.
         /// </summary>
         public void Write(Directory d, string name, IOContext context)
         {
@@ -289,7 +290,7 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// Invert all bits </summary>
+        /// Invert all bits. </summary>
         public void InvertAll()
         {
             if (count != -1)
@@ -322,7 +323,7 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// Set all bits </summary>
+        /// Set all bits. </summary>
         public void SetAll()
         {
             Arrays.Fill(bits, (byte)0xff);
@@ -331,7 +332,7 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// Write as a bit set </summary>
+        /// Write as a bit set. </summary>
         private void WriteBits(IndexOutput output)
         {
             output.WriteInt32(Length); // write size
@@ -340,7 +341,7 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// Write as a d-gaps list </summary>
+        /// Write as a d-gaps list. </summary>
         private void WriteClearedDgaps(IndexOutput output)
         {
             output.WriteInt32(-1); // mark using d-gaps
@@ -412,8 +413,8 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// Constructs a bit vector from the file <code>name</code> in Directory
-        ///  <code>d</code>, as written by the <seealso cref="#write"/> method.
+        /// Constructs a bit vector from the file <paramref name="name"/> in Directory
+        /// <paramref name="d"/>, as written by the <see cref="Write(Directory, string, IOContext)"/> method.
         /// </summary>
         public BitVector(Directory d, string name, IOContext context)
         {
@@ -486,7 +487,7 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// Read as a bit set </summary>
+        /// Read as a bit set. </summary>
         private void ReadBits(IndexInput input)
         {
             count = input.ReadInt32(); // read count
@@ -495,7 +496,7 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// read as a d-gaps list </summary>
+        /// Read as a d-gaps list. </summary>
         private void ReadSetDgaps(IndexInput input)
         {
             size = input.ReadInt32(); // (re)read size
@@ -513,7 +514,7 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// read as a d-gaps cleared bits list </summary>
+        /// Read as a d-gaps cleared bits list. </summary>
         private void ReadClearedDgaps(IndexInput input)
         {
             size = input.ReadInt32(); // (re)read size

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40Codec.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40Codec.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40Codec.cs
index d0cc900..fd5ce7b 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40Codec.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40Codec.cs
@@ -23,12 +23,12 @@ namespace Lucene.Net.Codecs.Lucene40
 
     /// <summary>
     /// Implements the Lucene 4.0 index format, with configurable per-field postings formats.
-    /// <p>
+    /// <para/>
     /// If you want to reuse functionality of this codec in another codec, extend
-    /// <seealso cref="FilterCodec"/>.
+    /// <see cref="FilterCodec"/>.
+    /// <para/>
+    /// See <see cref="Lucene.Net.Codecs.Lucene40"/> package documentation for file format details.
     /// </summary>
-    /// <seealso cref= Lucene.Net.Codecs.Lucene40 package documentation for file format details. </seealso>
-    /// @deprecated Only for reading old 4.0 segments
     // NOTE: if we make largish changes in a minor release, easier to just make Lucene42Codec or whatever
     // if they are backwards compatible or smallish we can probably do the backwards in the postingsreader
     // (it writes a minor version, etc).
@@ -113,9 +113,9 @@ namespace Lucene.Net.Codecs.Lucene40
 
         /// <summary>
         /// Returns the postings format that should be used for writing
-        ///  new segments of <code>field</code>.
-        ///
-        ///  The default implementation always returns "Lucene40"
+        /// new segments of <paramref name="field"/>.
+        /// <para/>
+        /// The default implementation always returns "Lucene40".
         /// </summary>
         public virtual PostingsFormat GetPostingsFormatForField(string field)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40DocValuesFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40DocValuesFormat.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40DocValuesFormat.cs
index 5c658b4..93227e5 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40DocValuesFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40DocValuesFormat.cs
@@ -25,98 +25,98 @@ namespace Lucene.Net.Codecs.Lucene40
 
     /// <summary>
     /// Lucene 4.0 DocValues format.
-    /// <p>
+    /// <para/>
     /// Files:
-    /// <ul>
-    ///   <li><tt>.dv.cfs</tt>: <seealso cref="CompoundFileDirectory compound container"/></li>
-    ///   <li><tt>.dv.cfe</tt>: <seealso cref="CompoundFileDirectory compound entries"/></li>
-    /// </ul>
+    /// <list type="bullet">
+    ///   <item><description><c>.dv.cfs</c>: compound container (<see cref="Store.CompoundFileDirectory"/>)</description></item>
+    ///   <item><description><c>.dv.cfe</c>: compound entries (<see cref="Store.CompoundFileDirectory"/>)</description></item>
+    /// </list>
     /// Entries within the compound file:
-    /// <ul>
-    ///   <li><tt>&lt;segment&gt;_&lt;fieldNumber&gt;.dat</tt>: data values</li>
-    ///   <li><tt>&lt;segment&gt;_&lt;fieldNumber&gt;.idx</tt>: index into the .dat for DEREF types</li>
-    /// </ul>
-    /// <p>
-    /// There are several many types of {@code DocValues} with different encodings.
-    /// From the perspective of filenames, all types store their values in <tt>.dat</tt>
-    /// entries within the compound file. In the case of dereferenced/sorted types, the <tt>.dat</tt>
-    /// actually contains only the unique values, and an additional <tt>.idx</tt> file contains
+    /// <list type="bullet">
+    ///   <item><description><c>&lt;segment&gt;_&lt;fieldNumber&gt;.dat</c>: data values</description></item>
+    ///   <item><description><c>&lt;segment&gt;_&lt;fieldNumber&gt;.idx</c>: index into the .dat for DEREF types</description></item>
+    /// </list>
+    /// <para>
+    /// There are several types of <see cref="Index.DocValues"/> with different encodings.
+    /// From the perspective of filenames, all types store their values in <c>.dat</c>
+    /// entries within the compound file. In the case of dereferenced/sorted types, the <c>.dat</c>
+    /// actually contains only the unique values, and an additional <c>.idx</c> file contains
     /// pointers to these unique values.
-    /// </p>
+    /// </para>
     /// Formats:
-    /// <ul>
-    ///    <li>{@code VAR_INTS} .dat --&gt; Header, PackedType, MinValue,
-    ///        DefaultValue, PackedStream</li>
-    ///    <li>{@code FIXED_INTS_8} .dat --&gt; Header, ValueSize,
-    ///        <seealso cref="DataOutput#writeByte Byte"/><sup>maxdoc</sup></li>
-    ///    <li>{@code FIXED_INTS_16} .dat --&gt; Header, ValueSize,
-    ///        <seealso cref="DataOutput#writeShort Short"/><sup>maxdoc</sup></li>
-    ///    <li>{@code FIXED_INTS_32} .dat --&gt; Header, ValueSize,
-    ///        <seealso cref="DataOutput#writeInt Int32"/><sup>maxdoc</sup></li>
-    ///    <li>{@code FIXED_INTS_64} .dat --&gt; Header, ValueSize,
-    ///        <seealso cref="DataOutput#writeLong Int64"/><sup>maxdoc</sup></li>
-    ///    <li>{@code FLOAT_32} .dat --&gt; Header, ValueSize, Float32<sup>maxdoc</sup></li>
-    ///    <li>{@code FLOAT_64} .dat --&gt; Header, ValueSize, Float64<sup>maxdoc</sup></li>
-    ///    <li>{@code BYTES_FIXED_STRAIGHT} .dat --&gt; Header, ValueSize,
-    ///        (<seealso cref="DataOutput#writeByte Byte"/> * ValueSize)<sup>maxdoc</sup></li>
-    ///    <li>{@code BYTES_VAR_STRAIGHT} .idx --&gt; Header, TotalBytes, Addresses</li>
-    ///    <li>{@code BYTES_VAR_STRAIGHT} .dat --&gt; Header,
-    ///          (<seealso cref="DataOutput#writeByte Byte"/> * <i>variable ValueSize</i>)<sup>maxdoc</sup></li>
-    ///    <li>{@code BYTES_FIXED_DEREF} .idx --&gt; Header, NumValues, Addresses</li>
-    ///    <li>{@code BYTES_FIXED_DEREF} .dat --&gt; Header, ValueSize,
-    ///        (<seealso cref="DataOutput#writeByte Byte"/> * ValueSize)<sup>NumValues</sup></li>
-    ///    <li>{@code BYTES_VAR_DEREF} .idx --&gt; Header, TotalVarBytes, Addresses</li>
-    ///    <li>{@code BYTES_VAR_DEREF} .dat --&gt; Header,
-    ///        (LengthPrefix + <seealso cref="DataOutput#writeByte Byte"/> * <i>variable ValueSize</i>)<sup>NumValues</sup></li>
-    ///    <li>{@code BYTES_FIXED_SORTED} .idx --&gt; Header, NumValues, Ordinals</li>
-    ///    <li>{@code BYTES_FIXED_SORTED} .dat --&gt; Header, ValueSize,
-    ///        (<seealso cref="DataOutput#writeByte Byte"/> * ValueSize)<sup>NumValues</sup></li>
-    ///    <li>{@code BYTES_VAR_SORTED} .idx --&gt; Header, TotalVarBytes, Addresses, Ordinals</li>
-    ///    <li>{@code BYTES_VAR_SORTED} .dat --&gt; Header,
-    ///        (<seealso cref="DataOutput#writeByte Byte"/> * <i>variable ValueSize</i>)<sup>NumValues</sup></li>
-    /// </ul>
+    /// <list type="bullet">
+    ///    <item><description><see cref="LegacyDocValuesType.VAR_INTS"/> .dat --&gt; Header, PackedType, MinValue,
+    ///        DefaultValue, PackedStream</description></item>
+    ///    <item><description><see cref="LegacyDocValuesType.FIXED_INTS_8"/> .dat --&gt; Header, ValueSize,
+    ///        Byte (<see cref="Store.DataOutput.WriteByte(byte)"/>) <sup>maxdoc</sup></description></item>
+    ///    <item><description><see cref="LegacyDocValuesType.FIXED_INTS_16"/> .dat --&gt; Header, ValueSize,
+    ///        Short (<see cref="Store.DataOutput.WriteInt16(short)"/>) <sup>maxdoc</sup></description></item>
+    ///    <item><description><see cref="LegacyDocValuesType.FIXED_INTS_32"/> .dat --&gt; Header, ValueSize,
+    ///        Int32 (<see cref="Store.DataOutput.WriteInt32(int)"/>) <sup>maxdoc</sup></description></item>
+    ///    <item><description><see cref="LegacyDocValuesType.FIXED_INTS_64"/> .dat --&gt; Header, ValueSize,
+    ///        Int64 (<see cref="Store.DataOutput.WriteInt64(long)"/>) <sup>maxdoc</sup></description></item>
+    ///    <item><description><see cref="LegacyDocValuesType.FLOAT_32"/> .dat --&gt; Header, ValueSize, Float32<sup>maxdoc</sup></description></item>
+    ///    <item><description><see cref="LegacyDocValuesType.FLOAT_64"/> .dat --&gt; Header, ValueSize, Float64<sup>maxdoc</sup></description></item>
+    ///    <item><description><see cref="LegacyDocValuesType.BYTES_FIXED_STRAIGHT"/> .dat --&gt; Header, ValueSize,
+    ///        (Byte (<see cref="Store.DataOutput.WriteByte(byte)"/>) * ValueSize)<sup>maxdoc</sup></description></item>
+    ///    <item><description><see cref="LegacyDocValuesType.BYTES_VAR_STRAIGHT"/> .idx --&gt; Header, TotalBytes, Addresses</description></item>
+    ///    <item><description><see cref="LegacyDocValuesType.BYTES_VAR_STRAIGHT"/> .dat --&gt; Header,
+    ///          (Byte (<see cref="Store.DataOutput.WriteByte(byte)"/>) * <i>variable ValueSize</i>)<sup>maxdoc</sup></description></item>
+    ///    <item><description><see cref="LegacyDocValuesType.BYTES_FIXED_DEREF"/> .idx --&gt; Header, NumValues, Addresses</description></item>
+    ///    <item><description><see cref="LegacyDocValuesType.BYTES_FIXED_DEREF"/> .dat --&gt; Header, ValueSize,
+    ///        (Byte (<see cref="Store.DataOutput.WriteByte(byte)"/>) * ValueSize)<sup>NumValues</sup></description></item>
+    ///    <item><description><see cref="LegacyDocValuesType.BYTES_VAR_DEREF"/> .idx --&gt; Header, TotalVarBytes, Addresses</description></item>
+    ///    <item><description><see cref="LegacyDocValuesType.BYTES_VAR_DEREF"/> .dat --&gt; Header,
+    ///        (LengthPrefix + Byte (<see cref="Store.DataOutput.WriteByte(byte)"/>) * <i>variable ValueSize</i>)<sup>NumValues</sup></description></item>
+    ///    <item><description><see cref="LegacyDocValuesType.BYTES_FIXED_SORTED"/> .idx --&gt; Header, NumValues, Ordinals</description></item>
+    ///    <item><description><see cref="LegacyDocValuesType.BYTES_FIXED_SORTED"/> .dat --&gt; Header, ValueSize,
+    ///        (Byte (<see cref="Store.DataOutput.WriteByte(byte)"/>) * ValueSize)<sup>NumValues</sup></description></item>
+    ///    <item><description><see cref="LegacyDocValuesType.BYTES_VAR_SORTED"/> .idx --&gt; Header, TotalVarBytes, Addresses, Ordinals</description></item>
+    ///    <item><description><see cref="LegacyDocValuesType.BYTES_VAR_SORTED"/> .dat --&gt; Header,
+    ///        (Byte (<see cref="Store.DataOutput.WriteByte(byte)"/>) * <i>variable ValueSize</i>)<sup>NumValues</sup></description></item>
+    /// </list>
     /// Data Types:
-    /// <ul>
-    ///    <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///    <li>PackedType --&gt; <seealso cref="DataOutput#writeByte Byte"/></li>
-    ///    <li>MaxAddress, MinValue, DefaultValue --&gt; <seealso cref="DataOutput#writeLong Int64"/></li>
-    ///    <li>PackedStream, Addresses, Ordinals --&gt; <seealso cref="PackedInts"/></li>
-    ///    <li>ValueSize, NumValues --&gt; <seealso cref="DataOutput#writeInt Int32"/></li>
-    ///    <li>Float32 --&gt; 32-bit float encoded with <seealso cref="Float#floatToRawIntBits(float)"/>
-    ///                       then written as <seealso cref="DataOutput#writeInt Int32"/></li>
-    ///    <li>Float64 --&gt; 64-bit float encoded with <seealso cref="Double#doubleToRawLongBits(double)"/>
-    ///                       then written as <seealso cref="DataOutput#writeLong Int64"/></li>
-    ///    <li>TotalBytes --&gt; <seealso cref="DataOutput#writeVLong VLong"/></li>
-    ///    <li>TotalVarBytes --&gt; <seealso cref="DataOutput#writeLong Int64"/></li>
-    ///    <li>LengthPrefix --&gt; Length of the data value as <seealso cref="DataOutput#writeVInt VInt"/> (maximum
-    ///                       of 2 bytes)</li>
-    /// </ul>
+    /// <list type="bullet">
+    ///    <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///    <item><description>PackedType --&gt; Byte (<see cref="Store.DataOutput.WriteByte(byte)"/>)</description></item>
+    ///    <item><description>MaxAddress, MinValue, DefaultValue --&gt; Int64 (<see cref="Store.DataOutput.WriteInt64(long)"/>) </description></item>
+    ///    <item><description>PackedStream, Addresses, Ordinals --&gt; <see cref="Util.Packed.PackedInt32s"/></description></item>
+    ///    <item><description>ValueSize, NumValues --&gt; Int32 (<see cref="Store.DataOutput.WriteInt32(int)"/>) </description></item>
+    ///    <item><description>Float32 --&gt; 32-bit float encoded with <see cref="Support.Number.SingleToRawInt32Bits(float)"/>
+    ///                       then written as Int32 (<see cref="Store.DataOutput.WriteInt32(int)"/>) </description></item>
+    ///    <item><description>Float64 --&gt; 64-bit float encoded with <see cref="Support.Number.DoubleToRawInt64Bits(double)"/>
+    ///                       then written as Int64 (<see cref="Store.DataOutput.WriteInt64(long)"/>) </description></item>
+    ///    <item><description>TotalBytes --&gt; VLong (<see cref="Store.DataOutput.WriteVInt64(long)"/>) </description></item>
+    ///    <item><description>TotalVarBytes --&gt; Int64 (<see cref="Store.DataOutput.WriteInt64(long)"/>) </description></item>
+    ///    <item><description>LengthPrefix --&gt; Length of the data value as VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) (maximum
+    ///                       of 2 bytes)</description></item>
+    /// </list>
     /// Notes:
-    /// <ul>
-    ///    <li>PackedType is a 0 when compressed, 1 when the stream is written as 64-bit integers.</li>
-    ///    <li>Addresses stores pointers to the actual byte location (indexed by docid). In the VAR_STRAIGHT
+    /// <list type="bullet">
+    ///    <item><description>PackedType is a 0 when compressed, 1 when the stream is written as 64-bit integers.</description></item>
+    ///    <item><description>Addresses stores pointers to the actual byte location (indexed by docid). In the VAR_STRAIGHT
     ///        case, each entry can have a different length, so to determine the length, docid+1 is
     ///        retrieved. A sentinel address is written at the end for the VAR_STRAIGHT case, so the Addresses
     ///        stream contains maxdoc+1 indices. For the deduplicated VAR_DEREF case, each length
-    ///        is encoded as a prefix to the data itself as a <seealso cref="DataOutput#writeVInt VInt"/>
-    ///        (maximum of 2 bytes).</li>
-    ///    <li>Ordinals stores the term ID in sorted order (indexed by docid). In the FIXED_SORTED case,
+    ///        is encoded as a prefix to the data itself as a VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>)
+    ///        (maximum of 2 bytes).</description></item>
+    ///    <item><description>Ordinals stores the term ID in sorted order (indexed by docid). In the FIXED_SORTED case,
     ///        the address into the .dat can be computed from the ordinal as
-    ///        <code>Header+ValueSize+(ordinal*ValueSize)</code> because the byte length is fixed.
+    ///        <c>Header+ValueSize+(ordinal*ValueSize)</c> because the byte length is fixed.
     ///        In the VAR_SORTED case, there is double indirection (docid -> ordinal -> address), but
     ///        an additional sentinel ordinal+address is always written (so there are NumValues+1 ordinals). To
-    ///        determine the length, ord+1's address is looked up as well.</li>
-    ///    <li>{@code BYTES_VAR_STRAIGHT BYTES_VAR_STRAIGHT} in contrast to other straight
-    ///        variants uses a <tt>.idx</tt> file to improve lookup perfromance. In contrast to
-    ///        {@code BYTES_VAR_DEREF BYTES_VAR_DEREF} it doesn't apply deduplication of the document values.
-    ///    </li>
-    /// </ul>
-    /// <p>
+    ///        determine the length, ord+1's address is looked up as well.</description></item>
+    ///    <item><description><see cref="LegacyDocValuesType.BYTES_VAR_STRAIGHT"/> in contrast to other straight
+    ///        variants uses a <c>.idx</c> file to improve lookup performance. In contrast to
+    ///        <see cref="LegacyDocValuesType.BYTES_VAR_DEREF"/> it doesn't apply deduplication of the document values.
+    ///    </description></item>
+    /// </list>
+    /// <para/>
     /// Limitations:
-    /// <ul>
-    ///   <li> Binary doc values can be at most <seealso cref="#MAX_BINARY_FIELD_LENGTH"/> in length.
-    /// </ul> </summary>
-    /// @deprecated Only for reading old 4.0 and 4.1 segments
+    /// <list type="bullet">
+    ///   <item><description> Binary doc values can be at most <see cref="MAX_BINARY_FIELD_LENGTH"/> in length.</description></item>
+    /// </list> 
+    /// </summary>
     [Obsolete("Only for reading old 4.0 and 4.1 segments")]
     [DocValuesFormatName("Lucene40")] // LUCENENET specific - using DocValuesFormatName attribute to ensure the default name passed from subclasses is the same as this class name
     public class Lucene40DocValuesFormat : DocValuesFormat

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40DocValuesReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40DocValuesReader.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40DocValuesReader.cs
index 54d3511..bca9a0c 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40DocValuesReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40DocValuesReader.cs
@@ -42,9 +42,10 @@ namespace Lucene.Net.Codecs.Lucene40
     using SortedSetDocValues = Lucene.Net.Index.SortedSetDocValues;
 
     /// <summary>
-    /// Reads the 4.0 format of norms/docvalues
-    /// @lucene.experimental </summary>
-    /// @deprecated Only for reading old 4.0 and 4.1 segments
+    /// Reads the 4.0 format of norms/docvalues.
+    /// <para/>
+    /// @lucene.experimental 
+    /// </summary>
     [Obsolete("Only for reading old 4.0 and 4.1 segments")]
     internal sealed class Lucene40DocValuesReader : DocValuesProducer
     {
@@ -139,7 +140,7 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// NOTE: This was loadVarIntsField() in Lucene
+        /// NOTE: This was loadVarIntsField() in Lucene.
         /// </summary>
         private NumericDocValues LoadVarInt32sField(FieldInfo field, IndexInput input)
         {
@@ -243,7 +244,7 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// NOTE: This was loadShortField() in Lucene
+        /// NOTE: This was loadShortField() in Lucene.
         /// </summary>
         private NumericDocValues LoadInt16Field(FieldInfo field, IndexInput input)
         {
@@ -279,7 +280,7 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// NOTE: This was loadIntField() in Lucene
+        /// NOTE: This was loadIntField() in Lucene.
         /// </summary>
         private NumericDocValues LoadInt32Field(FieldInfo field, IndexInput input)
         {
@@ -315,7 +316,7 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// NOTE: This was loadLongField() in Lucene
+        /// NOTE: This was loadLongField() in Lucene.
         /// </summary>
         private NumericDocValues LoadInt64Field(FieldInfo field, IndexInput input)
         {
@@ -351,7 +352,7 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// NOTE: This was loadFloatField() in Lucene
+        /// NOTE: This was loadFloatField() in Lucene.
         /// </summary>
         private NumericDocValues LoadSingleField(FieldInfo field, IndexInput input)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40FieldInfosFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40FieldInfosFormat.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40FieldInfosFormat.cs
index a38dc52..49b5008 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40FieldInfosFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40FieldInfosFormat.cs
@@ -19,76 +19,74 @@ namespace Lucene.Net.Codecs.Lucene40
      * limitations under the License.
      */
 
-    // javadoc
-
     /// <summary>
     /// Lucene 4.0 Field Infos format.
-    /// <p>
-    /// <p>Field names are stored in the field info file, with suffix <tt>.fnm</tt>.</p>
-    /// <p>FieldInfos (.fnm) --&gt; Header,FieldsCount, &lt;FieldName,FieldNumber,
-    /// FieldBits,DocValuesBits,Attributes&gt; <sup>FieldsCount</sup></p>
-    /// <p>Data types:
-    /// <ul>
-    ///   <li>Header --&gt; <seealso cref="CodecUtil#checkHeader CodecHeader"/></li>
-    ///   <li>FieldsCount --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>FieldName --&gt; <seealso cref="DataOutput#writeString String"/></li>
-    ///   <li>FieldBits, DocValuesBits --&gt; <seealso cref="DataOutput#writeByte Byte"/></li>
-    ///   <li>FieldNumber --&gt; <seealso cref="DataOutput#writeInt VInt"/></li>
-    ///   <li>Attributes --&gt; <seealso cref="DataOutput#writeStringStringMap Map&lt;String,String&gt;"/></li>
-    /// </ul>
-    /// </p>
+    /// <para/>
+    /// <para>Field names are stored in the field info file, with suffix <tt>.fnm</tt>.</para>
+    /// <para>FieldInfos (.fnm) --&gt; Header,FieldsCount, &lt;FieldName,FieldNumber,
+    /// FieldBits,DocValuesBits,Attributes&gt; <sup>FieldsCount</sup></para>
+    /// <para>Data types:
+    /// <list type="bullet">
+    ///   <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///   <item><description>FieldsCount --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///   <item><description>FieldName --&gt; String (<see cref="Store.DataOutput.WriteString(string)"/>) </description></item>
+    ///   <item><description>FieldBits, DocValuesBits --&gt; Byte (<see cref="Store.DataOutput.WriteByte(byte)"/>) </description></item>
+    ///   <item><description>FieldNumber --&gt; VInt (<see cref="Store.DataOutput.WriteInt32(int)"/>) </description></item>
+    ///   <item><description>Attributes --&gt; IDictionary&lt;String,String&gt; (<see cref="Store.DataOutput.WriteStringStringMap(System.Collections.Generic.IDictionary{string, string})"/>) </description></item>
+    /// </list>
+    /// </para>
     /// Field Descriptions:
-    /// <ul>
-    ///   <li>FieldsCount: the number of fields in this file.</li>
-    ///   <li>FieldName: name of the field as a UTF-8 String.</li>
-    ///   <li>FieldNumber: the field's number. Note that unlike previous versions of
+    /// <list type="bullet">
+    ///   <item><description>FieldsCount: the number of fields in this file.</description></item>
+    ///   <item><description>FieldName: name of the field as a UTF-8 String.</description></item>
+    ///   <item><description>FieldNumber: the field's number. Note that unlike previous versions of
     ///       Lucene, the fields are not numbered implicitly by their order in the
-    ///       file, instead explicitly.</li>
-    ///   <li>FieldBits: a byte containing field options.
-    ///       <ul>
-    ///         <li>The low-order bit is one for indexed fields, and zero for non-indexed
-    ///             fields.</li>
-    ///         <li>The second lowest-order bit is one for fields that have term vectors
-    ///             stored, and zero for fields without term vectors.</li>
-    ///         <li>If the third lowest order-bit is set (0x4), offsets are stored into
-    ///             the postings list in addition to positions.</li>
-    ///         <li>Fourth bit is unused.</li>
-    ///         <li>If the fifth lowest-order bit is set (0x10), norms are omitted for the
-    ///             indexed field.</li>
-    ///         <li>If the sixth lowest-order bit is set (0x20), payloads are stored for the
-    ///             indexed field.</li>
-    ///         <li>If the seventh lowest-order bit is set (0x40), term frequencies and
-    ///             positions omitted for the indexed field.</li>
-    ///         <li>If the eighth lowest-order bit is set (0x80), positions are omitted for the
-    ///             indexed field.</li>
-    ///       </ul>
-    ///    </li>
-    ///    <li>DocValuesBits: a byte containing per-document value types. The type
+    ///       file, instead explicitly.</description></item>
+    ///   <item><description>FieldBits: a byte containing field options.
+    ///       <list type="bullet">
+    ///         <item><description>The low-order bit is one for indexed fields, and zero for non-indexed
+    ///             fields.</description></item>
+    ///         <item><description>The second lowest-order bit is one for fields that have term vectors
+    ///             stored, and zero for fields without term vectors.</description></item>
+    ///         <item><description>If the third lowest order-bit is set (0x4), offsets are stored into
+    ///             the postings list in addition to positions.</description></item>
+    ///         <item><description>Fourth bit is unused.</description></item>
+    ///         <item><description>If the fifth lowest-order bit is set (0x10), norms are omitted for the
+    ///             indexed field.</description></item>
+    ///         <item><description>If the sixth lowest-order bit is set (0x20), payloads are stored for the
+    ///             indexed field.</description></item>
+    ///         <item><description>If the seventh lowest-order bit is set (0x40), term frequencies and
+    ///             positions omitted for the indexed field.</description></item>
+    ///         <item><description>If the eighth lowest-order bit is set (0x80), positions are omitted for the
+    ///             indexed field.</description></item>
+    ///       </list>
+    ///    </description></item>
+    ///    <item><description>DocValuesBits: a byte containing per-document value types. The type
     ///        recorded as two four-bit integers, with the high-order bits representing
-    ///        <code>norms</code> options, and the low-order bits representing
-    ///        {@code DocValues} options. Each four-bit integer can be decoded as such:
-    ///        <ul>
-    ///          <li>0: no DocValues for this field.</li>
-    ///          <li>1: variable-width signed integers. ({@code Type#VAR_INTS VAR_INTS})</li>
-    ///          <li>2: 32-bit floating point values. ({@code Type#FLOAT_32 FLOAT_32})</li>
-    ///          <li>3: 64-bit floating point values. ({@code Type#FLOAT_64 FLOAT_64})</li>
-    ///          <li>4: fixed-length byte array values. ({@code Type#BYTES_FIXED_STRAIGHT BYTES_FIXED_STRAIGHT})</li>
-    ///          <li>5: fixed-length dereferenced byte array values. ({@code Type#BYTES_FIXED_DEREF BYTES_FIXED_DEREF})</li>
-    ///          <li>6: variable-length byte array values. ({@code Type#BYTES_VAR_STRAIGHT BYTES_VAR_STRAIGHT})</li>
-    ///          <li>7: variable-length dereferenced byte array values. ({@code Type#BYTES_VAR_DEREF BYTES_VAR_DEREF})</li>
-    ///          <li>8: 16-bit signed integers. ({@code Type#FIXED_INTS_16 FIXED_INTS_16})</li>
-    ///          <li>9: 32-bit signed integers. ({@code Type#FIXED_INTS_32 FIXED_INTS_32})</li>
-    ///          <li>10: 64-bit signed integers. ({@code Type#FIXED_INTS_64 FIXED_INTS_64})</li>
-    ///          <li>11: 8-bit signed integers. ({@code Type#FIXED_INTS_8 FIXED_INTS_8})</li>
-    ///          <li>12: fixed-length sorted byte array values. ({@code Type#BYTES_FIXED_SORTED BYTES_FIXED_SORTED})</li>
-    ///          <li>13: variable-length sorted byte array values. ({@code Type#BYTES_VAR_SORTED BYTES_VAR_SORTED})</li>
-    ///        </ul>
-    ///    </li>
-    ///    <li>Attributes: a key-value map of codec-private attributes.</li>
-    /// </ul>
+    ///        <c>norms</c> options, and the low-order bits representing
+    ///        <see cref="Index.DocValues"/> options. Each four-bit integer can be decoded as such:
+    ///        <list type="bullet">
+    ///          <item><description>0: no DocValues for this field.</description></item>
+    ///          <item><description>1: variable-width signed integers. (<see cref="LegacyDocValuesType.VAR_INTS"/>)</description></item>
+    ///          <item><description>2: 32-bit floating point values. (<see cref="LegacyDocValuesType.FLOAT_32"/>)</description></item>
+    ///          <item><description>3: 64-bit floating point values. (<see cref="LegacyDocValuesType.FLOAT_64"/>)</description></item>
+    ///          <item><description>4: fixed-length byte array values. (<see cref="LegacyDocValuesType.BYTES_FIXED_STRAIGHT"/>)</description></item>
+    ///          <item><description>5: fixed-length dereferenced byte array values. (<see cref="LegacyDocValuesType.BYTES_FIXED_DEREF"/>)</description></item>
+    ///          <item><description>6: variable-length byte array values. (<see cref="LegacyDocValuesType.BYTES_VAR_STRAIGHT"/>)</description></item>
+    ///          <item><description>7: variable-length dereferenced byte array values. (<see cref="LegacyDocValuesType.BYTES_VAR_DEREF"/>)</description></item>
+    ///          <item><description>8: 16-bit signed integers. (<see cref="LegacyDocValuesType.FIXED_INTS_16"/>)</description></item>
+    ///          <item><description>9: 32-bit signed integers. (<see cref="LegacyDocValuesType.FIXED_INTS_32"/>)</description></item>
+    ///          <item><description>10: 64-bit signed integers. (<see cref="LegacyDocValuesType.FIXED_INTS_64"/>)</description></item>
+    ///          <item><description>11: 8-bit signed integers. (<see cref="LegacyDocValuesType.FIXED_INTS_8"/>)</description></item>
+    ///          <item><description>12: fixed-length sorted byte array values. (<see cref="LegacyDocValuesType.BYTES_FIXED_SORTED"/>)</description></item>
+    ///          <item><description>13: variable-length sorted byte array values. (<see cref="LegacyDocValuesType.BYTES_VAR_SORTED"/>)</description></item>
+    ///        </list>
+    ///    </description></item>
+    ///    <item><description>Attributes: a key-value map of codec-private attributes.</description></item>
+    /// </list>
     ///
-    /// @lucene.experimental </summary>
-    /// @deprecated Only for reading old 4.0 and 4.1 segments
+    /// @lucene.experimental 
+    /// </summary>
     [Obsolete("Only for reading old 4.0 and 4.1 segments")]
     public class Lucene40FieldInfosFormat : FieldInfosFormat
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40FieldInfosReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40FieldInfosReader.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40FieldInfosReader.cs
index 4805a4b..3f14e3f 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40FieldInfosReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40FieldInfosReader.cs
@@ -34,10 +34,10 @@ namespace Lucene.Net.Codecs.Lucene40
 
     /// <summary>
     /// Lucene 4.0 FieldInfos reader.
-    ///
-    /// @lucene.experimental </summary>
-    /// <seealso cref= Lucene40FieldInfosFormat </seealso>
-    /// @deprecated Only for reading old 4.0 and 4.1 segments
+    /// <para/>
+    /// @lucene.experimental 
+    /// </summary>
+    /// <seealso cref="Lucene40FieldInfosFormat"/>
     [Obsolete("Only for reading old 4.0 and 4.1 segments")]
     internal class Lucene40FieldInfosReader : FieldInfosReader
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40LiveDocsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40LiveDocsFormat.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40LiveDocsFormat.cs
index cd81003..f26406e 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40LiveDocsFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40LiveDocsFormat.cs
@@ -31,35 +31,35 @@ namespace Lucene.Net.Codecs.Lucene40
 
     /// <summary>
     /// Lucene 4.0 Live Documents Format.
-    /// <p>
-    /// <p>The .del file is optional, and only exists when a segment contains
-    /// deletions.</p>
-    /// <p>Although per-segment, this file is maintained exterior to compound segment
-    /// files.</p>
-    /// <p>Deletions (.del) --&gt; Format,Header,ByteCount,BitCount, Bits | DGaps (depending
-    /// on Format)</p>
-    /// <ul>
-    ///   <li>Format,ByteSize,BitCount --&gt; <seealso cref="DataOutput#writeInt Uint32"/></li>
-    ///   <li>Bits --&gt; &lt;<seealso cref="DataOutput#writeByte Byte"/>&gt; <sup>ByteCount</sup></li>
-    ///   <li>DGaps --&gt; &lt;DGap,NonOnesByte&gt; <sup>NonzeroBytesCount</sup></li>
-    ///   <li>DGap --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>NonOnesByte --&gt; <seealso cref="DataOutput#writeByte Byte"/></li>
-    ///   <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    /// </ul>
-    /// <p>Format is 1: indicates cleared DGaps.</p>
-    /// <p>ByteCount indicates the number of bytes in Bits. It is typically
-    /// (SegSize/8)+1.</p>
-    /// <p>BitCount indicates the number of bits that are currently set in Bits.</p>
-    /// <p>Bits contains one bit for each document indexed. When the bit corresponding
+    /// <para/>
+    /// <para>The .del file is optional, and only exists when a segment contains
+    /// deletions.</para>
+    /// <para>Although per-segment, this file is maintained exterior to compound segment
+    /// files.</para>
+    /// <para>Deletions (.del) --&gt; Format,Header,ByteCount,BitCount, Bits | DGaps (depending
+    /// on Format)</para>
+    /// <list type="bullet">
+    ///   <item><description>Format,ByteSize,BitCount --&gt; Uint32 (<see cref="Store.DataOutput.WriteInt32(int)"/>) </description></item>
+    ///   <item><description>Bits --&gt; &lt; Byte (<see cref="Store.DataOutput.WriteByte(byte)"/>) &gt; <sup>ByteCount</sup></description></item>
+    ///   <item><description>DGaps --&gt; &lt;DGap,NonOnesByte&gt; <sup>NonzeroBytesCount</sup></description></item>
+    ///   <item><description>DGap --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///   <item><description>NonOnesByte --&gt;  Byte(<see cref="Store.DataOutput.WriteByte(byte)"/>) </description></item>
+    ///   <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    /// </list>
+    /// <para>Format is 1: indicates cleared DGaps.</para>
+    /// <para>ByteCount indicates the number of bytes in Bits. It is typically
+    /// (SegSize/8)+1.</para>
+    /// <para>BitCount indicates the number of bits that are currently set in Bits.</para>
+    /// <para>Bits contains one bit for each document indexed. When the bit corresponding
     /// to a document number is cleared, that document is marked as deleted. Bit ordering
     /// is from least to most significant. Thus, if Bits contains two bytes, 0x00 and
-    /// 0x02, then document 9 is marked as alive (not deleted).</p>
-    /// <p>DGaps represents sparse bit-vectors more efficiently than Bits. It is made
+    /// 0x02, then document 9 is marked as alive (not deleted).</para>
+    /// <para>DGaps represents sparse bit-vectors more efficiently than Bits. It is made
     /// of DGaps on indexes of nonOnes bytes in Bits, and the nonOnes bytes themselves.
-    /// The number of nonOnes bytes in Bits (NonOnesBytesCount) is not stored.</p>
-    /// <p>For example, if there are 8000 bits and only bits 10,12,32 are cleared, DGaps
-    /// would be used:</p>
-    /// <p>(VInt) 1 , (byte) 20 , (VInt) 3 , (Byte) 1</p>
+    /// The number of nonOnes bytes in Bits (NonOnesBytesCount) is not stored.</para>
+    /// <para>For example, if there are 8000 bits and only bits 10,12,32 are cleared, DGaps
+    /// would be used:</para>
+    /// <para>(VInt) 1 , (byte) 20 , (VInt) 3 , (Byte) 1</para>
     /// </summary>
     public class Lucene40LiveDocsFormat : LiveDocsFormat
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40NormsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40NormsFormat.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40NormsFormat.cs
index 02d97ed..424d63b 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40NormsFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40NormsFormat.cs
@@ -25,18 +25,18 @@ namespace Lucene.Net.Codecs.Lucene40
 
     /// <summary>
     /// Lucene 4.0 Norms Format.
-    /// <p>
+    /// <para/>
     /// Files:
-    /// <ul>
-    ///   <li><tt>.nrm.cfs</tt>: <seealso cref="CompoundFileDirectory compound container"/></li>
-    ///   <li><tt>.nrm.cfe</tt>: <seealso cref="CompoundFileDirectory compound entries"/></li>
-    /// </ul>
+    /// <list type="bullet">
+    ///   <item><description><c>.nrm.cfs</c>: compound container (<see cref="Store.CompoundFileDirectory"/>) </description></item>
+    ///   <item><description><c>.nrm.cfe</c>: compound entries (<see cref="Store.CompoundFileDirectory"/>) </description></item>
+    /// </list>
     /// Norms are implemented as DocValues, so other than file extension, norms are
-    /// written exactly the same way as <seealso cref="Lucene40DocValuesFormat DocValues"/>.
+    /// written exactly the same way as <see cref="Lucene40DocValuesFormat"/>.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// <seealso cref= Lucene40DocValuesFormat
-    /// @lucene.experimental </seealso>
-    /// @deprecated Only for reading old 4.0 and 4.1 segments
+    /// <seealso cref="Lucene40DocValuesFormat"/>
     [Obsolete("Only for reading old 4.0 and 4.1 segments")]
     public class Lucene40NormsFormat : NormsFormat
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40PostingsBaseFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40PostingsBaseFormat.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40PostingsBaseFormat.cs
index ecd85b9..8ecde5b 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40PostingsBaseFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40PostingsBaseFormat.cs
@@ -23,11 +23,9 @@ namespace Lucene.Net.Codecs.Lucene40
     using SegmentWriteState = Lucene.Net.Index.SegmentWriteState;
 
     /// <summary>
-    /// Provides a <seealso cref="PostingsReaderBase"/> and {@link
-    /// PostingsWriterBase}.
+    /// Provides a <see cref="Codecs.PostingsReaderBase"/> and
+    /// <see cref="Codecs.PostingsWriterBase"/>.
     /// </summary>
-    /// @deprecated Only for reading old 4.0 segments
-
     // TODO: should these also be named / looked up via SPI?
     [Obsolete("Only for reading old 4.0 segments")]
     public sealed class Lucene40PostingsBaseFormat : PostingsBaseFormat

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40PostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40PostingsFormat.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40PostingsFormat.cs
index 440003e..bc0829a 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40PostingsFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40PostingsFormat.cs
@@ -20,193 +20,187 @@ namespace Lucene.Net.Codecs.Lucene40
      * limitations under the License.
      */
 
-    // javadocs
-    // javadocs
-    // javadocs
     using SegmentReadState = Lucene.Net.Index.SegmentReadState;
     using SegmentWriteState = Lucene.Net.Index.SegmentWriteState;
 
-    // javadocs
-    // javadocs
-
     /// <summary>
     /// Lucene 4.0 Postings format.
-    /// <p>
+    /// <para>
     /// Files:
-    /// <ul>
-    ///   <li><tt>.tim</tt>: <a href="#Termdictionary">Term Dictionary</a></li>
-    ///   <li><tt>.tip</tt>: <a href="#Termindex">Term Index</a></li>
-    ///   <li><tt>.frq</tt>: <a href="#Frequencies">Frequencies</a></li>
-    ///   <li><tt>.prx</tt>: <a href="#Positions">Positions</a></li>
-    /// </ul>
-    /// </p>
-    /// <p>
+    /// <list type="bullet">
+    ///   <item><description><tt>.tim</tt>: <a href="#Termdictionary">Term Dictionary</a></description></item>
+    ///   <item><description><tt>.tip</tt>: <a href="#Termindex">Term Index</a></description></item>
+    ///   <item><description><tt>.frq</tt>: <a href="#Frequencies">Frequencies</a></description></item>
+    ///   <item><description><tt>.prx</tt>: <a href="#Positions">Positions</a></description></item>
+    /// </list>
+    /// </para>
+    /// <para/>
     /// <a name="Termdictionary" id="Termdictionary"></a>
     /// <h3>Term Dictionary</h3>
     ///
-    /// <p>The .tim file contains the list of terms in each
+    /// <para>The .tim file contains the list of terms in each
     /// field along with per-term statistics (such as docfreq)
     /// and pointers to the frequencies, positions and
     /// skip data in the .frq and .prx files.
-    /// See <seealso cref="BlockTreeTermsWriter"/> for more details on the format.
-    /// </p>
+    /// See <see cref="BlockTreeTermsWriter"/> for more details on the format.
+    /// </para>
     ///
-    /// <p>NOTE: The term dictionary can plug into different postings implementations:
+    /// <para>NOTE: The term dictionary can plug into different postings implementations:
     /// the postings writer/reader are actually responsible for encoding
-    /// and decoding the Postings Metadata and Term Metadata sections described here:</p>
-    /// <ul>
-    ///    <li>Postings Metadata --&gt; Header, SkipInterval, MaxSkipLevels, SkipMinimum</li>
-    ///    <li>Term Metadata --&gt; FreqDelta, SkipDelta?, ProxDelta?
-    ///    <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///    <li>SkipInterval,MaxSkipLevels,SkipMinimum --&gt; <seealso cref="DataOutput#writeInt Uint32"/></li>
-    ///    <li>SkipDelta,FreqDelta,ProxDelta --&gt; <seealso cref="DataOutput#writeVLong VLong"/></li>
-    /// </ul>
-    /// <p>Notes:</p>
-    /// <ul>
-    ///    <li>Header is a <seealso cref="CodecUtil#writeHeader CodecHeader"/> storing the version information
-    ///        for the postings.</li>
-    ///    <li>SkipInterval is the fraction of TermDocs stored in skip tables. It is used to accelerate
-    ///        <seealso cref="DocsEnum#advance(int)"/>. Larger values result in smaller indexes, greater
+    /// and decoding the Postings Metadata and Term Metadata sections described here:</para>
+    /// <list type="bullet">
+    ///    <item><description>Postings Metadata --&gt; Header, SkipInterval, MaxSkipLevels, SkipMinimum</description></item>
+    ///    <item><description>Term Metadata --&gt; FreqDelta, SkipDelta?, ProxDelta?</description></item>
+    ///    <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///    <item><description>SkipInterval,MaxSkipLevels,SkipMinimum --&gt; Uint32 (<see cref="Store.DataOutput.WriteInt32(int)"/>) </description></item>
+    ///    <item><description>SkipDelta,FreqDelta,ProxDelta --&gt; VLong (<see cref="Store.DataOutput.WriteVInt64(long)"/>) </description></item>
+    /// </list>
+    /// <para>Notes:</para>
+    /// <list type="bullet">
+    ///    <item><description>Header is a CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>)  storing the version information
+    ///        for the postings.</description></item>
+    ///    <item><description>SkipInterval is the fraction of TermDocs stored in skip tables. It is used to accelerate
+    ///        <see cref="Search.DocIdSetIterator.Advance(int)"/>. Larger values result in smaller indexes, greater
     ///        acceleration, but fewer accelerable cases, while smaller values result in bigger indexes,
     ///        less acceleration (in case of a small value for MaxSkipLevels) and more accelerable cases.
-    ///        </li>
-    ///    <li>MaxSkipLevels is the max. number of skip levels stored for each term in the .frq file. A
+    ///        </description></item>
+    ///    <item><description>MaxSkipLevels is the max. number of skip levels stored for each term in the .frq file. A
     ///        low value results in smaller indexes but less acceleration, a larger value results in
     ///        slightly larger indexes but greater acceleration. See format of .frq file for more
-    ///        information about skip levels.</li>
-    ///    <li>SkipMinimum is the minimum document frequency a term must have in order to write any
-    ///        skip data at all.</li>
-    ///    <li>FreqDelta determines the position of this term's TermFreqs within the .frq
+    ///        information about skip levels.</description></item>
+    ///    <item><description>SkipMinimum is the minimum document frequency a term must have in order to write any
+    ///        skip data at all.</description></item>
+    ///    <item><description>FreqDelta determines the position of this term's TermFreqs within the .frq
     ///        file. In particular, it is the difference between the position of this term's
     ///        data in that file and the position of the previous term's data (or zero, for
-    ///        the first term in the block).</li>
-    ///    <li>ProxDelta determines the position of this term's TermPositions within the
+    ///        the first term in the block).</description></item>
+    ///    <item><description>ProxDelta determines the position of this term's TermPositions within the
     ///        .prx file. In particular, it is the difference between the position of this
     ///        term's data in that file and the position of the previous term's data (or zero,
     ///        for the first term in the block. For fields that omit position data, this will
-    ///        be 0 since prox information is not stored.</li>
-    ///    <li>SkipDelta determines the position of this term's SkipData within the .frq
+    ///        be 0 since prox information is not stored.</description></item>
+    ///    <item><description>SkipDelta determines the position of this term's SkipData within the .frq
     ///        file. In particular, it is the number of bytes after TermFreqs that the
     ///        SkipData starts. In other words, it is the length of the TermFreq data.
-    ///        SkipDelta is only stored if DocFreq is not smaller than SkipMinimum.</li>
-    /// </ul>
+    ///        SkipDelta is only stored if DocFreq is not smaller than SkipMinimum.</description></item>
+    /// </list>
     /// <a name="Termindex" id="Termindex"></a>
     /// <h3>Term Index</h3>
-    /// <p>The .tip file contains an index into the term dictionary, so that it can be
-    /// accessed randomly.  See <seealso cref="BlockTreeTermsWriter"/> for more details on the format.</p>
+    /// <para>The .tip file contains an index into the term dictionary, so that it can be
+    /// accessed randomly.  See <see cref="BlockTreeTermsWriter"/> for more details on the format.</para>
     /// <a name="Frequencies" id="Frequencies"></a>
     /// <h3>Frequencies</h3>
-    /// <p>The .frq file contains the lists of documents which contain each term, along
+    /// <para>The .frq file contains the lists of documents which contain each term, along
     /// with the frequency of the term in that document (except when frequencies are
-    /// omitted: <seealso cref="IndexOptions#DOCS_ONLY"/>).</p>
-    /// <ul>
-    ///   <li>FreqFile (.frq) --&gt; Header, &lt;TermFreqs, SkipData?&gt; <sup>TermCount</sup></li>
-    ///   <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///   <li>TermFreqs --&gt; &lt;TermFreq&gt; <sup>DocFreq</sup></li>
-    ///   <li>TermFreq --&gt; DocDelta[, Freq?]</li>
-    ///   <li>SkipData --&gt; &lt;&lt;SkipLevelLength, SkipLevel&gt;
-    ///       <sup>NumSkipLevels-1</sup>, SkipLevel&gt; &lt;SkipDatum&gt;</li>
-    ///   <li>SkipLevel --&gt; &lt;SkipDatum&gt; <sup>DocFreq/(SkipInterval^(Level +
-    ///       1))</sup></li>
-    ///   <li>SkipDatum --&gt;
-    ///       DocSkip,PayloadLength?,OffsetLength?,FreqSkip,ProxSkip,SkipChildLevelPointer?</li>
-    ///   <li>DocDelta,Freq,DocSkip,PayloadLength,OffsetLength,FreqSkip,ProxSkip --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>SkipChildLevelPointer --&gt; <seealso cref="DataOutput#writeVLong VLong"/></li>
-    /// </ul>
-    /// <p>TermFreqs are ordered by term (the term is implicit, from the term dictionary).</p>
-    /// <p>TermFreq entries are ordered by increasing document number.</p>
-    /// <p>DocDelta: if frequencies are indexed, this determines both the document
+    /// omitted: <see cref="Index.IndexOptions.DOCS_ONLY"/>).</para>
+    /// <list type="bullet">
+    ///   <item><description>FreqFile (.frq) --&gt; Header, &lt;TermFreqs, SkipData?&gt; <sup>TermCount</sup></description></item>
+    ///   <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///   <item><description>TermFreqs --&gt; &lt;TermFreq&gt; <sup>DocFreq</sup></description></item>
+    ///   <item><description>TermFreq --&gt; DocDelta[, Freq?]</description></item>
+    ///   <item><description>SkipData --&gt; &lt;&lt;SkipLevelLength, SkipLevel&gt;
+    ///       <sup>NumSkipLevels-1</sup>, SkipLevel&gt; &lt;SkipDatum&gt;</description></item>
+    ///   <item><description>SkipLevel --&gt; &lt;SkipDatum&gt; <sup>DocFreq/(SkipInterval^(Level +
+    ///       1))</sup></description></item>
+    ///   <item><description>SkipDatum --&gt;
+    ///       DocSkip,PayloadLength?,OffsetLength?,FreqSkip,ProxSkip,SkipChildLevelPointer?</description></item>
+    ///   <item><description>DocDelta,Freq,DocSkip,PayloadLength,OffsetLength,FreqSkip,ProxSkip --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///   <item><description>SkipChildLevelPointer --&gt; VLong (<see cref="Store.DataOutput.WriteVInt64(long)"/>) </description></item>
+    /// </list>
+    /// <para>TermFreqs are ordered by term (the term is implicit, from the term dictionary).</para>
+    /// <para>TermFreq entries are ordered by increasing document number.</para>
+    /// <para>DocDelta: if frequencies are indexed, this determines both the document
     /// number and the frequency. In particular, DocDelta/2 is the difference between
     /// this document number and the previous document number (or zero when this is the
     /// first document in a TermFreqs). When DocDelta is odd, the frequency is one.
     /// When DocDelta is even, the frequency is read as another VInt. If frequencies
     /// are omitted, DocDelta contains the gap (not multiplied by 2) between document
-    /// numbers and no frequency information is stored.</p>
-    /// <p>For example, the TermFreqs for a term which occurs once in document seven
+    /// numbers and no frequency information is stored.</para>
+    /// <para>For example, the TermFreqs for a term which occurs once in document seven
     /// and three times in document eleven, with frequencies indexed, would be the
-    /// following sequence of VInts:</p>
-    /// <p>15, 8, 3</p>
-    /// <p>If frequencies were omitted (<seealso cref="IndexOptions#DOCS_ONLY"/>) it would be this
-    /// sequence of VInts instead:</p>
-    /// <p>7,4</p>
-    /// <p>DocSkip records the document number before every SkipInterval <sup>th</sup>
+    /// following sequence of VInts:</para>
+    /// <para>15, 8, 3</para>
+    /// <para>If frequencies were omitted (<see cref="Index.IndexOptions.DOCS_ONLY"/>) it would be this
+    /// sequence of VInts instead:</para>
+    /// <para>7,4</para>
+    /// <para>DocSkip records the document number before every SkipInterval <sup>th</sup>
     /// document in TermFreqs. If payloads and offsets are disabled for the term's field, then
     /// DocSkip represents the difference from the previous value in the sequence. If
     /// payloads and/or offsets are enabled for the term's field, then DocSkip/2 represents the
     /// difference from the previous value in the sequence. In this case when
     /// DocSkip is odd, then PayloadLength and/or OffsetLength are stored indicating the length of
-    /// the last payload/offset before the SkipInterval<sup>th</sup> document in TermPositions.</p>
-    /// <p>PayloadLength indicates the length of the last payload.</p>
-    /// <p>OffsetLength indicates the length of the last offset (endOffset-startOffset).</p>
-    /// <p>
+    /// the last payload/offset before the SkipInterval<sup>th</sup> document in TermPositions.</para>
+    /// <para>PayloadLength indicates the length of the last payload.</para>
+    /// <para>OffsetLength indicates the length of the last offset (endOffset-startOffset).</para>
+    /// <para>
     /// FreqSkip and ProxSkip record the position of every SkipInterval <sup>th</sup>
     /// entry in FreqFile and ProxFile, respectively. File positions are relative to
     /// the start of TermFreqs and Positions, to the previous SkipDatum in the
-    /// sequence.</p>
-    /// <p>For example, if DocFreq=35 and SkipInterval=16, then there are two SkipData
+    /// sequence.</para>
+    /// <para>For example, if DocFreq=35 and SkipInterval=16, then there are two SkipData
     /// entries, containing the 15 <sup>th</sup> and 31 <sup>st</sup> document numbers
     /// in TermFreqs. The first FreqSkip names the number of bytes after the beginning
     /// of TermFreqs that the 16 <sup>th</sup> SkipDatum starts, and the second the
     /// number of bytes after that that the 32 <sup>nd</sup> starts. The first ProxSkip
     /// names the number of bytes after the beginning of Positions that the 16
     /// <sup>th</sup> SkipDatum starts, and the second the number of bytes after that
-    /// that the 32 <sup>nd</sup> starts.</p>
-    /// <p>Each term can have multiple skip levels. The amount of skip levels for a
+    /// that the 32 <sup>nd</sup> starts.</para>
+    /// <para>Each term can have multiple skip levels. The amount of skip levels for a
     /// term is NumSkipLevels = Min(MaxSkipLevels,
     /// floor(log(DocFreq/log(SkipInterval)))). The number of SkipData entries for a
     /// skip level is DocFreq/(SkipInterval^(Level + 1)), whereas the lowest skip level
-    /// is Level=0.<br>
+    /// is Level=0.
+    /// <para/>
     /// Example: SkipInterval = 4, MaxSkipLevels = 2, DocFreq = 35. Then skip level 0
     /// has 8 SkipData entries, containing the 3<sup>rd</sup>, 7<sup>th</sup>,
     /// 11<sup>th</sup>, 15<sup>th</sup>, 19<sup>th</sup>, 23<sup>rd</sup>,
     /// 27<sup>th</sup>, and 31<sup>st</sup> document numbers in TermFreqs. Skip level
     /// 1 has 2 SkipData entries, containing the 15<sup>th</sup> and 31<sup>st</sup>
-    /// document numbers in TermFreqs.<br>
+    /// document numbers in TermFreqs.
+    /// <para/>
     /// The SkipData entries on all upper levels &gt; 0 contain a SkipChildLevelPointer
    /// referencing the corresponding SkipData entry in level-1. In the example,
    /// entry 15 on level 1 has a pointer to entry 15 on level 0, and entry 31 on level 1 has a
    /// pointer to entry 31 on level 0.
-    /// </p>
+    /// </para>
     /// <a name="Positions" id="Positions"></a>
     /// <h3>Positions</h3>
-    /// <p>The .prx file contains the lists of positions that each term occurs at
+    /// <para>The .prx file contains the lists of positions that each term occurs at
     /// within documents. Note that fields omitting positional data do not store
     /// anything into this file, and if all fields in the index omit positional data
-    /// then the .prx file will not exist.</p>
-    /// <ul>
-    ///   <li>ProxFile (.prx) --&gt; Header, &lt;TermPositions&gt; <sup>TermCount</sup></li>
-    ///   <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///   <li>TermPositions --&gt; &lt;Positions&gt; <sup>DocFreq</sup></li>
-    ///   <li>Positions --&gt; &lt;PositionDelta,PayloadLength?,OffsetDelta?,OffsetLength?,PayloadData?&gt; <sup>Freq</sup></li>
-    ///   <li>PositionDelta,OffsetDelta,OffsetLength,PayloadLength --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///   <li>PayloadData --&gt; <seealso cref="DataOutput#writeByte byte"/><sup>PayloadLength</sup></li>
-    /// </ul>
-    /// <p>TermPositions are ordered by term (the term is implicit, from the term dictionary).</p>
-    /// <p>Positions entries are ordered by increasing document number (the document
-    /// number is implicit from the .frq file).</p>
-    /// <p>PositionDelta is, if payloads are disabled for the term's field, the
+    /// then the .prx file will not exist.</para>
+    /// <list type="bullet">
+    ///   <item><description>ProxFile (.prx) --&gt; Header, &lt;TermPositions&gt; <sup>TermCount</sup></description></item>
+    ///   <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///   <item><description>TermPositions --&gt; &lt;Positions&gt; <sup>DocFreq</sup></description></item>
+    ///   <item><description>Positions --&gt; &lt;PositionDelta,PayloadLength?,OffsetDelta?,OffsetLength?,PayloadData?&gt; <sup>Freq</sup></description></item>
+    ///   <item><description>PositionDelta,OffsetDelta,OffsetLength,PayloadLength --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///   <item><description>PayloadData --&gt; byte (<see cref="Store.DataOutput.WriteByte(byte)"/>) <sup>PayloadLength</sup></description></item>
+    /// </list>
+    /// <para>TermPositions are ordered by term (the term is implicit, from the term dictionary).</para>
+    /// <para>Positions entries are ordered by increasing document number (the document
+    /// number is implicit from the .frq file).</para>
+    /// <para>PositionDelta is, if payloads are disabled for the term's field, the
     /// difference between the position of the current occurrence in the document and
     /// the previous occurrence (or zero, if this is the first occurrence in this
     /// document). If payloads are enabled for the term's field, then PositionDelta/2
     /// is the difference between the current and the previous position. If payloads
     /// are enabled and PositionDelta is odd, then PayloadLength is stored, indicating
-    /// the length of the payload at the current term position.</p>
-    /// <p>For example, the TermPositions for a term which occurs as the fourth term in
+    /// the length of the payload at the current term position.</para>
+    /// <para>For example, the TermPositions for a term which occurs as the fourth term in
     /// one document, and as the fifth and ninth term in a subsequent document, would
-    /// be the following sequence of VInts (payloads disabled):</p>
-    /// <p>4, 5, 4</p>
-    /// <p>PayloadData is metadata associated with the current term position. If
+    /// be the following sequence of VInts (payloads disabled):</para>
+    /// <para>4, 5, 4</para>
+    /// <para>PayloadData is metadata associated with the current term position. If
     /// PayloadLength is stored at the current position, then it indicates the length
     /// of this payload. If PayloadLength is not stored, then this payload has the same
-    /// length as the payload at the previous position.</p>
-    /// <p>OffsetDelta/2 is the difference between this position's startOffset from the
+    /// length as the payload at the previous position.</para>
+    /// <para>OffsetDelta/2 is the difference between this position's startOffset from the
     /// previous occurrence (or zero, if this is the first occurrence in this document).
     /// If OffsetDelta is odd, then the length (endOffset-startOffset) differs from the
     /// previous occurrence and an OffsetLength follows. Offset data is only written for
-    /// <seealso cref="IndexOptions#DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS"/>.</p>
+    /// <see cref="Index.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS"/>.</para>
     /// </summary>
-    ///  @deprecated Only for reading old 4.0 segments
-
     // TODO: this class could be created by wrapping
     // BlockTreeTermsDict around Lucene40PostingsBaseFormat; ie
     // we should not duplicate the code from that class here:
@@ -215,16 +209,16 @@ namespace Lucene.Net.Codecs.Lucene40
     public class Lucene40PostingsFormat : PostingsFormat
     {
         /// <summary>
-        /// minimum items (terms or sub-blocks) per block for BlockTree </summary>
+        /// Minimum items (terms or sub-blocks) per block for BlockTree. </summary>
         protected readonly int m_minBlockSize;
 
         /// <summary>
-        /// maximum items (terms or sub-blocks) per block for BlockTree </summary>
+        /// Maximum items (terms or sub-blocks) per block for BlockTree. </summary>
         protected readonly int m_maxBlockSize;
 
         /// <summary>
-        /// Creates {@code Lucene40PostingsFormat} with default
-        ///  settings.
+        /// Creates <see cref="Lucene40PostingsFormat"/> with default
+        /// settings.
         /// </summary>
         public Lucene40PostingsFormat()
             : this(BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE, BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE)
@@ -232,10 +226,10 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// Creates {@code Lucene40PostingsFormat} with custom
-        ///  values for {@code minBlockSize} and {@code
-        ///  maxBlockSize} passed to block terms dictionary. </summary>
-        ///  <seealso cref= BlockTreeTermsWriter#BlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int)  </seealso>
+        /// Creates <see cref="Lucene40PostingsFormat"/> with custom
+        /// values for <paramref name="minBlockSize"/> and 
+        /// <paramref name="maxBlockSize"/> passed to block terms dictionary. </summary>
+        ///  <seealso cref="BlockTreeTermsWriter.BlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int)"/>
         private Lucene40PostingsFormat(int minBlockSize, int maxBlockSize)
             : base()
         {
@@ -270,11 +264,11 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// Extension of freq postings file </summary>
+        /// Extension of freq postings file. </summary>
         internal static readonly string FREQ_EXTENSION = "frq";
 
         /// <summary>
-        /// Extension of prox postings file </summary>
+        /// Extension of prox postings file. </summary>
         internal static readonly string PROX_EXTENSION = "prx";
 
         public override string ToString()

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40PostingsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40PostingsReader.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40PostingsReader.cs
index 29516db..12fb35d 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40PostingsReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40PostingsReader.cs
@@ -42,8 +42,7 @@ namespace Lucene.Net.Codecs.Lucene40
     /// Concrete class that reads the 4.0 frq/prox
     /// postings format.
     /// </summary>
-    ///  <seealso cref= Lucene40PostingsFormat </seealso>
-    ///  @deprecated Only for reading old 4.0 segments
+    /// <seealso cref="Lucene40PostingsFormat"/>
     [Obsolete("Only for reading old 4.0 segments")]
     public class Lucene40PostingsReader : PostingsReaderBase
     {
@@ -958,8 +957,8 @@ namespace Lucene.Net.Codecs.Lucene40
             }
 
             /// <summary>
-            /// Returns the payload at this position, or null if no
-            ///  payload was indexed.
+            /// Returns the payload at this position, or <c>null</c> if no
+            /// payload was indexed.
             /// </summary>
             public override BytesRef GetPayload()
             {
@@ -1263,8 +1262,8 @@ namespace Lucene.Net.Codecs.Lucene40
             }
 
             /// <summary>
-            /// Returns the payload at this position, or null if no
-            ///  payload was indexed.
+            /// Returns the payload at this position, or <c>null</c> if no
+            /// payload was indexed.
             /// </summary>
             public override BytesRef GetPayload()
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40SegmentInfoFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40SegmentInfoFormat.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40SegmentInfoFormat.cs
index c3ce3c9..8fb9084 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40SegmentInfoFormat.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40SegmentInfoFormat.cs
@@ -19,54 +19,49 @@ namespace Lucene.Net.Codecs.Lucene40
      * limitations under the License.
      */
 
-    // javadocs
-    using SegmentInfo = Lucene.Net.Index.SegmentInfo; // javadocs
-
-    // javadocs
-    // javadocs
+    using SegmentInfo = Lucene.Net.Index.SegmentInfo;
 
     /// <summary>
     /// Lucene 4.0 Segment info format.
-    /// <p>
+    /// <para>
     /// Files:
-    /// <ul>
-    ///   <li><tt>.si</tt>: Header, SegVersion, SegSize, IsCompoundFile, Diagnostics, Attributes, Files
-    /// </ul>
-    /// </p>
+    /// <list type="bullet">
+    ///   <item><description><tt>.si</tt>: Header, SegVersion, SegSize, IsCompoundFile, Diagnostics, Attributes, Files</description></item>
+    /// </list>
+    /// </para>
     /// Data types:
-    /// <p>
-    /// <ul>
-    ///   <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///   <li>SegSize --&gt; <seealso cref="DataOutput#writeInt Int32"/></li>
-    ///   <li>SegVersion --&gt; <seealso cref="DataOutput#writeString String"/></li>
-    ///   <li>Files --&gt; <seealso cref="DataOutput#writeStringSet Set&lt;String&gt;"/></li>
-    ///   <li>Diagnostics, Attributes --&gt; <seealso cref="DataOutput#writeStringStringMap Map&lt;String,String&gt;"/></li>
-    ///   <li>IsCompoundFile --&gt; <seealso cref="DataOutput#writeByte Int8"/></li>
-    /// </ul>
-    /// </p>
+    /// <para>
+    /// <list type="bullet">
+    ///   <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///   <item><description>SegSize --&gt; Int32 (<see cref="Store.DataOutput.WriteInt32(int)"/>) </description></item>
+    ///   <item><description>SegVersion --&gt; String (<see cref="Store.DataOutput.WriteString(string)"/>) </description></item>
+    ///   <item><description>Files --&gt; ISet&lt;String&gt; (<see cref="Store.DataOutput.WriteStringSet(System.Collections.Generic.ISet{string})"/>) </description></item>
+    ///   <item><description>Diagnostics, Attributes --&gt; IDictionary&lt;String,String&gt; (<see cref="Store.DataOutput.WriteStringStringMap(System.Collections.Generic.IDictionary{string, string})"/>) </description></item>
+    ///   <item><description>IsCompoundFile --&gt; Int8 (<see cref="Store.DataOutput.WriteByte(byte)"/>) </description></item>
+    /// </list>
+    /// </para>
     /// Field Descriptions:
-    /// <p>
-    /// <ul>
-    ///   <li>SegVersion is the code version that created the segment.</li>
-    ///   <li>SegSize is the number of documents contained in the segment index.</li>
-    ///   <li>IsCompoundFile records whether the segment is written as a compound file or
+    /// <para>
+    /// <list type="bullet">
+    ///   <item><description>SegVersion is the code version that created the segment.</description></item>
+    ///   <item><description>SegSize is the number of documents contained in the segment index.</description></item>
+    ///   <item><description>IsCompoundFile records whether the segment is written as a compound file or
     ///       not. If this is -1, the segment is not a compound file. If it is 1, the segment
-    ///       is a compound file.</li>
-    ///   <li>Checksum contains the CRC32 checksum of all bytes in the segments_N file up
-    ///       until the checksum. this is used to verify integrity of the file on opening the
-    ///       index.</li>
-    ///   <li>The Diagnostics Map is privately written by <seealso cref="IndexWriter"/>, as a debugging aid,
+    ///       is a compound file.</description></item>
+    ///   <item><description>Checksum contains the CRC32 checksum of all bytes in the segments_N file up
+    ///       until the checksum. This is used to verify integrity of the file on opening the
+    ///       index.</description></item>
+    ///   <item><description>The Diagnostics Map is privately written by <see cref="Index.IndexWriter"/>, as a debugging aid,
     ///       for each segment it creates. It includes metadata like the current Lucene
-    ///       version, OS, Java version, why the segment was created (merge, flush,
-    ///       addIndexes), etc.</li>
-    ///   <li>Attributes: a key-value map of codec-private attributes.</li>
-    ///   <li>Files is a list of files referred to by this segment.</li>
-    /// </ul>
-    /// </p>
+    ///       version, OS, .NET/Java version, why the segment was created (merge, flush,
+    ///       addIndexes), etc.</description></item>
+    ///   <item><description>Attributes: a key-value map of codec-private attributes.</description></item>
+    ///   <item><description>Files is a list of files referred to by this segment.</description></item>
+    /// </list>
+    /// </para>
+    /// @lucene.experimental
     /// </summary>
-    /// <seealso cref= SegmentInfos
-    /// @lucene.experimental </seealso>
-    /// @deprecated Only for reading old 4.0-4.5 segments, and supporting IndexWriter.addIndexes
+    /// <seealso cref="Index.SegmentInfos"/>
     [Obsolete("Only for reading old 4.0-4.5 segments, and supporting IndexWriter.AddIndexes()")]
     public class Lucene40SegmentInfoFormat : SegmentInfoFormat
     {
@@ -98,7 +93,7 @@ namespace Lucene.Net.Codecs.Lucene40
         }
 
         /// <summary>
-        /// File extension used to store <seealso cref="SegmentInfo"/>. </summary>
+        /// File extension used to store <see cref="SegmentInfo"/>. </summary>
         public readonly static string SI_EXTENSION = "si";
 
         internal readonly static string CODEC_NAME = "Lucene40SegmentInfo";

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40SegmentInfoReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40SegmentInfoReader.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40SegmentInfoReader.cs
index aec213d..07b728f 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40SegmentInfoReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40SegmentInfoReader.cs
@@ -29,11 +29,11 @@ namespace Lucene.Net.Codecs.Lucene40
     using SegmentInfo = Lucene.Net.Index.SegmentInfo;
 
     /// <summary>
-    /// Lucene 4.0 implementation of <seealso cref="SegmentInfoReader"/>.
+    /// Lucene 4.0 implementation of <see cref="SegmentInfoReader"/>.
+    /// <para/>
+    /// @lucene.experimental 
     /// </summary>
-    /// <seealso cref= Lucene40SegmentInfoFormat
-    /// @lucene.experimental </seealso>
-    /// @deprecated Only for reading old 4.0-4.5 segments
+    /// <seealso cref="Lucene40SegmentInfoFormat"/>
     [Obsolete("Only for reading old 4.0-4.5 segments")]
     public class Lucene40SegmentInfoReader : SegmentInfoReader
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40SegmentInfoWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40SegmentInfoWriter.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40SegmentInfoWriter.cs
index a2d2925..ef8807f 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40SegmentInfoWriter.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40SegmentInfoWriter.cs
@@ -29,10 +29,11 @@ namespace Lucene.Net.Codecs.Lucene40
     using SegmentInfo = Lucene.Net.Index.SegmentInfo;
 
     /// <summary>
-    /// Lucene 4.0 implementation of <seealso cref="SegmentInfoWriter"/>.
+    /// Lucene 4.0 implementation of <see cref="SegmentInfoWriter"/>.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// <seealso cref= Lucene40SegmentInfoFormat
-    /// @lucene.experimental </seealso>
+    /// <seealso cref="Lucene40SegmentInfoFormat"/>
     [Obsolete]
     public class Lucene40SegmentInfoWriter : SegmentInfoWriter
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27cdd048/src/Lucene.Net/Codecs/Lucene40/Lucene40SkipListReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40SkipListReader.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40SkipListReader.cs
index cacafe5..1dcec33 100644
--- a/src/Lucene.Net/Codecs/Lucene40/Lucene40SkipListReader.cs
+++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40SkipListReader.cs
@@ -26,8 +26,7 @@ namespace Lucene.Net.Codecs.Lucene40
     /// Implements the skip list reader for the 4.0 posting list format
     /// that stores positions and payloads.
     /// </summary>
-    /// <seealso cref= Lucene40PostingsFormat </seealso>
-    /// @deprecated Only for reading old 4.0 segments
+    /// <seealso cref="Lucene40PostingsFormat"/>
     [Obsolete("Only for reading old 4.0 segments")]
     public class Lucene40SkipListReader : MultiLevelSkipListReader
     {
@@ -72,7 +71,7 @@ namespace Lucene.Net.Codecs.Lucene40
 
         /// <summary>
         /// Returns the freq pointer of the doc to which the last call of
-        /// <seealso cref="MultiLevelSkipListReader#skipTo(int)"/> has skipped.
+        /// <see cref="MultiLevelSkipListReader.SkipTo(int)"/> has skipped.
         /// </summary>
         public virtual long FreqPointer
         {
@@ -84,7 +83,7 @@ namespace Lucene.Net.Codecs.Lucene40
 
         /// <summary>
         /// Returns the prox pointer of the doc to which the last call of
-        /// <seealso cref="MultiLevelSkipListReader#skipTo(int)"/> has skipped.
+        /// <see cref="MultiLevelSkipListReader.SkipTo(int)"/> has skipped.
         /// </summary>
         public virtual long ProxPointer
         {
@@ -96,7 +95,7 @@ namespace Lucene.Net.Codecs.Lucene40
 
         /// <summary>
         /// Returns the payload length of the payload stored just before
-        /// the doc to which the last call of <seealso cref="MultiLevelSkipListReader#skipTo(int)"/>
+        /// the doc to which the last call of <see cref="MultiLevelSkipListReader.SkipTo(int)"/>
         /// has skipped.
         /// </summary>
         public virtual int PayloadLength
@@ -109,7 +108,7 @@ namespace Lucene.Net.Codecs.Lucene40
 
         /// <summary>
         /// Returns the offset length (endOffset-startOffset) of the position stored just before
-        /// the doc to which the last call of <seealso cref="MultiLevelSkipListReader#skipTo(int)"/>
+        /// the doc to which the last call of <see cref="MultiLevelSkipListReader.SkipTo(int)"/>
         /// has skipped.
         /// </summary>
         public virtual int OffsetLength