Posted to commits@lucenenet.apache.org by ni...@apache.org on 2017/06/06 00:12:19 UTC

[46/48] lucenenet git commit: Lucene.Net.Codecs: Fixed XML documentation warnings

Lucene.Net.Codecs: Fixed XML documentation warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/666de32b
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/666de32b
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/666de32b

Branch: refs/heads/master
Commit: 666de32b0fbe555f5d635c3c867c500f25d0cbdf
Parents: 6f22b5a
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Tue Jun 6 06:40:12 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Tue Jun 6 06:58:43 2017 +0700

----------------------------------------------------------------------
 CONTRIBUTING.md                                 |  21 +---
 .../Appending/AppendingCodec.cs                 |   4 +-
 .../Appending/AppendingPostingsFormat.cs        |   2 +-
 .../Appending/AppendingTermsReader.cs           |   2 +-
 .../BlockTerms/BlockTermsReader.cs              |  12 +-
 .../BlockTerms/BlockTermsWriter.cs              |  15 ++-
 .../BlockTerms/FixedGapTermsIndexReader.cs      |  11 +-
 .../BlockTerms/FixedGapTermsIndexWriter.cs      |  10 +-
 .../BlockTerms/TermsIndexReaderBase.cs          |  34 +++---
 .../BlockTerms/TermsIndexWriterBase.cs          |  10 +-
 .../BlockTerms/VariableGapTermsIndexReader.cs   |   8 +-
 .../BlockTerms/VariableGapTermsIndexWriter.cs   |  30 ++---
 .../Bloom/BloomFilterFactory.cs                 |  32 +++--
 .../Bloom/BloomFilteringPostingsFormat.cs       |  66 +++++-----
 .../Bloom/DefaultBloomFilterFactory.cs          |   4 +-
 src/Lucene.Net.Codecs/Bloom/FuzzySet.cs         |  66 +++++-----
 src/Lucene.Net.Codecs/Bloom/HashFunction.cs     |  10 +-
 src/Lucene.Net.Codecs/Bloom/MurmurHash2.cs      |  18 +--
 .../DiskDV/DiskDocValuesFormat.cs               |   2 +-
 src/Lucene.Net.Codecs/DiskDV/DiskNormsFormat.cs |   2 +-
 .../IntBlock/FixedIntBlockIndexInput.cs         |  19 +--
 .../IntBlock/FixedIntBlockIndexOutput.cs        |  22 ++--
 .../IntBlock/VariableIntBlockIndexInput.cs      |  17 +--
 .../IntBlock/VariableIntBlockIndexOutput.cs     |  30 +++--
 .../Memory/DirectDocValuesConsumer.cs           |   2 +-
 .../Memory/DirectDocValuesFormat.cs             |  36 +++---
 .../Memory/DirectDocValuesProducer.cs           |   2 +-
 .../Memory/DirectPostingsFormat.cs              |  45 ++++---
 .../Memory/FSTOrdPulsing41PostingsFormat.cs     |   3 +-
 .../Memory/FSTOrdTermsReader.cs                 |  28 ++---
 .../Memory/FSTOrdTermsWriter.cs                 | 120 ++++++++++---------
 .../Memory/FSTPulsing41PostingsFormat.cs        |   5 +-
 src/Lucene.Net.Codecs/Memory/FSTTermOutputs.cs  |  17 ++-
 src/Lucene.Net.Codecs/Memory/FSTTermsReader.cs  |  46 +++----
 src/Lucene.Net.Codecs/Memory/FSTTermsWriter.cs  |  85 ++++++-------
 .../Memory/MemoryDocValuesConsumer.cs           |   2 +-
 .../Memory/MemoryDocValuesFormat.cs             |  19 +--
 .../Memory/MemoryDocValuesProducer.cs           |   2 +-
 .../Memory/MemoryPostingsFormat.cs              |  13 +-
 .../Pulsing/Pulsing41PostingsFormat.cs          |   8 +-
 .../Pulsing/PulsingPostingsFormat.cs            |   3 +-
 .../Pulsing/PulsingPostingsReader.cs            |  29 +++--
 .../Pulsing/PulsingPostingsWriter.cs            |  38 +++---
 src/Lucene.Net.Codecs/Sep/IntIndexInput.cs      |  13 +-
 src/Lucene.Net.Codecs/Sep/IntIndexOutput.cs     |  27 ++---
 src/Lucene.Net.Codecs/Sep/IntStreamFactory.cs   |   8 +-
 src/Lucene.Net.Codecs/Sep/SepPostingsReader.cs  |  11 +-
 src/Lucene.Net.Codecs/Sep/SepPostingsWriter.cs  |  16 +--
 src/Lucene.Net.Codecs/Sep/SepSkipListReader.cs  |   4 +-
 src/Lucene.Net.Codecs/Sep/SepSkipListWriter.cs  |  13 +-
 .../SimpleText/SimpleTextCodec.cs               |   6 +-
 .../SimpleText/SimpleTextDocValuesFormat.cs     |  67 ++++++-----
 .../SimpleText/SimpleTextDocValuesWriter.cs     |   2 +-
 .../SimpleText/SimpleTextFieldInfosFormat.cs    |   6 +-
 .../SimpleText/SimpleTextFieldInfosReader.cs    |   6 +-
 .../SimpleText/SimpleTextFieldInfosWriter.cs    |   8 +-
 .../SimpleText/SimpleTextFieldsReader.cs        |   2 +-
 .../SimpleText/SimpleTextLiveDocsFormat.cs      |   8 +-
 .../SimpleText/SimpleTextNormsFormat.cs         |  18 ++-
 .../SimpleText/SimpleTextPostingsFormat.cs      |  13 +-
 .../SimpleText/SimpleTextSegmentInfoFormat.cs   |   6 +-
 .../SimpleText/SimpleTextSegmentInfoReader.cs   |   6 +-
 .../SimpleText/SimpleTextSegmentInfoWriter.cs   |   6 +-
 .../SimpleText/SimpleTextStoredFieldsFormat.cs  |   6 +-
 .../SimpleText/SimpleTextStoredFieldsReader.cs  |  12 +-
 .../SimpleText/SimpleTextStoredFieldsWriter.cs  |   4 +-
 .../SimpleText/SimpleTextTermVectorsFormat.cs   |   6 +-
 .../SimpleText/SimpleTextTermVectorsReader.cs   |   6 +-
 .../SimpleText/SimpleTextTermVectorsWriter.cs   |   4 +-
 69 files changed, 606 insertions(+), 628 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/CONTRIBUTING.md
----------------------------------------------------------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5736674..eda09e5 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -48,23 +48,6 @@ with minimal cleaning up. We are working on tools and code
 helpers to help with that; for examples, see our [Java style methods to avoid many search-replace in porting tests](https://github.com/apache/lucenenet/tree/master/src/Lucene.Net.TestFramework/JavaCompatibility), and a
 [R# plugin that will help making some stuff auto-port when pasting](https://resharper-plugins.jetbrains.com/packages/ReSharper.ExJava/).
 
-### Documentation Comments == up for grabs:
-
-1. Lucene.Net.Codecs (project)
-   1. Appending (namespace)
-   2. BlockTerms (namespace)
-   3. Bloom (namespace)
-   4. DiskDV (namespace)
-   5. IntBlock (namespace)
-   6. Memory (namespace)
-   7. Pulsing (namespace)
-   8. Sep (namespace)
-   9. SimpleText (namespace)
-
-See [Documenting Lucene.Net](https://cwiki.apache.org/confluence/display/LUCENENET/Documenting+Lucene.Net) for instructions. 
-
-> While it is assumed that the documentation comments for the other projects are finished, they could probably all use a review. Also be sure to check the comments against [Lucene 4.8.0](https://github.com/apache/lucene-solr/tree/releases/lucene-solr/4.8.0/lucene) to ensure they are correct and complete!
-
 ### Code that is currently pending being ported from scratch (+ tests) == up for grabs:
 
 * [Lucene.Net.Demo](https://github.com/apache/lucene-solr/tree/releases/lucene-solr/4.8.0/lucene/demo) (might be a good learning experience)
@@ -92,7 +75,7 @@ probably may also want to set a constant seed for working locally. See
 and
 <https://github.com/apache/lucenenet/blob/master/src/Lucene.Net.TestFramework/Util/LuceneTestCase.cs#L610>
 
-* Note that tests should be run both on .NET Framework and .NET Core. Currently, we have 2 different solutions (Lucene.Net.sln for .NET Framework and Lucene.Net.Portable.sln for .NET Core) that only run in Visual Studio 2015 and onwards. We are setup to use NUnit 3.x and you will need the appropriate [test adapter](https://marketplace.visualstudio.com/items?itemName=NUnitDevelopers.NUnit3TestAdapter) for Visual Studio to detect the tests. Tests can also be run from the command line using the [dotnet test]() command
+* Note that tests should be run both on .NET Framework and .NET Core. Currently, we have 2 different solutions (Lucene.Net.sln for .NET Framework and Lucene.Net.Portable.sln for .NET Core) that only run in Visual Studio 2015 and onwards. We are set up to use NUnit 3.x, and you will need the appropriate [test adapter](https://marketplace.visualstudio.com/items?itemName=NUnitDevelopers.NUnit3TestAdapter) for Visual Studio to detect the tests. Tests can also be run from the command line using the [dotnet test](https://docs.microsoft.com/en-us/dotnet/core/tools/dotnet-test) command.
 
 * Run, debug, iterate. When you think you fixed a bug or a test, please
 send a PR as fast as possible. There are multiple people working in this
@@ -105,7 +88,7 @@ you will receive notifications also via this list.
 
 ## Other types of help
 
-We will definitely need more help (like optimizing code, normalizing tabs/spaces, license headers, automating stuff, etc) but we are not there yet!
+We will definitely need more help (like optimizing code, normalizing tabs/spaces, writing tutorials, helping with API documentation, automating stuff, etc) but we are not there yet!
 
 Also, check out the [JIRA issue tracker](https://issues.apache.org/jira/browse/LUCENENET-586?jql=project%20%3D%20LUCENENET%20AND%20status%20%3D%20Open%20AND%20assignee%20in%20(EMPTY)) for any other issues that you might be interested in helping with. You can sign up for a JIRA account [here](https://cwiki.apache.org/confluence/signup.action) (it just takes a minute).
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Appending/AppendingCodec.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Appending/AppendingCodec.cs b/src/Lucene.Net.Codecs/Appending/AppendingCodec.cs
index f074237..96d2f0f 100644
--- a/src/Lucene.Net.Codecs/Appending/AppendingCodec.cs
+++ b/src/Lucene.Net.Codecs/Appending/AppendingCodec.cs
@@ -21,10 +21,10 @@ namespace Lucene.Net.Codecs.Appending
      */
 
     /// <summary>
-    /// This codec uses an index format that is very similar to Lucene40Codec 
+    /// This codec uses an index format that is very similar to <see cref="Lucene40Codec"/> 
     /// but works on append-only outputs, such as plain output streams and 
     /// append-only filesystems.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     [Obsolete(

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Appending/AppendingPostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Appending/AppendingPostingsFormat.cs b/src/Lucene.Net.Codecs/Appending/AppendingPostingsFormat.cs
index 252c6e1..0eb57dd 100644
--- a/src/Lucene.Net.Codecs/Appending/AppendingPostingsFormat.cs
+++ b/src/Lucene.Net.Codecs/Appending/AppendingPostingsFormat.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Codecs.Appending
      */
 
     /// <summary>
-    /// Appending Postigns Implementation
+    /// Appending Postings Implementation
     /// </summary>
     [PostingsFormatName("Appending")] // LUCENENET specific - using PostingsFormatName attribute to ensure the default name passed from subclasses is the same as this class name
     internal class AppendingPostingsFormat : PostingsFormat

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Appending/AppendingTermsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Appending/AppendingTermsReader.cs b/src/Lucene.Net.Codecs/Appending/AppendingTermsReader.cs
index ae95ef3..cdde1a4 100644
--- a/src/Lucene.Net.Codecs/Appending/AppendingTermsReader.cs
+++ b/src/Lucene.Net.Codecs/Appending/AppendingTermsReader.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Codecs.Appending
 
     /// <summary>
     /// Reads append-only terms from AppendingTermsWriter.
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     [Obsolete("Only for reading old Appending segments")]

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/BlockTerms/BlockTermsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/BlockTerms/BlockTermsReader.cs b/src/Lucene.Net.Codecs/BlockTerms/BlockTermsReader.cs
index 631349c..fc848b5 100644
--- a/src/Lucene.Net.Codecs/BlockTerms/BlockTermsReader.cs
+++ b/src/Lucene.Net.Codecs/BlockTerms/BlockTermsReader.cs
@@ -27,16 +27,16 @@ namespace Lucene.Net.Codecs.BlockTerms
 
     /// <summary>
     /// Handles a terms dict, but decouples all details of
-    /// doc/freqs/positions reading to an instance of {@link
-    /// PostingsReaderBase}.  This class is reusable for
+    /// doc/freqs/positions reading to an instance of 
+    /// <see cref="PostingsReaderBase"/>.  This class is reusable for
     /// codecs that use a different format for
     /// docs/freqs/positions (though codecs are also free to
     /// make their own terms dict impl).
-    ///
-    /// This class also interacts with an instance of {@link
-    /// TermsIndexReaderBase}, to abstract away the specific
+    /// <para/>
+    /// This class also interacts with an instance of
+    /// <see cref="TermsIndexReaderBase"/>, to abstract away the specific
     /// implementation of the terms dict index. 
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class BlockTermsReader : FieldsProducer

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/BlockTerms/BlockTermsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/BlockTerms/BlockTermsWriter.cs b/src/Lucene.Net.Codecs/BlockTerms/BlockTermsWriter.cs
index 7c25d01..149a8f7 100644
--- a/src/Lucene.Net.Codecs/BlockTerms/BlockTermsWriter.cs
+++ b/src/Lucene.Net.Codecs/BlockTerms/BlockTermsWriter.cs
@@ -25,16 +25,15 @@ namespace Lucene.Net.Codecs.BlockTerms
      * limitations under the License.
      */
 
+    // TODO: Currently we encode all terms between two indexed terms as a block
+    // But we could decouple the two, ie allow several blocks in between two indexed terms
+
     /// <summary>
     /// Writes terms dict, block-encoding (column stride) each term's metadata 
-    /// for each set of terms between two index terms
-    /// 
-    /// lucene.experimental
+    /// for each set of terms between two index terms.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
-    /// <remarks>
-    /// TODO Currently we encode all terms between two indexed terms as a block
-    /// But we could decouple the two, ie allow several blocks in between two indexed terms
-    /// </remarks>
     public class BlockTermsWriter : FieldsConsumer
     {
         internal readonly static string CODEC_NAME = "BLOCK_TERMS_DICT";
@@ -64,7 +63,7 @@ namespace Lucene.Net.Codecs.BlockTerms
             public long SumDocFreq { get; private set; }
             public int DocCount { get; private set; }
             /// <summary>
-            /// NOTE: This was longsSize (field) in Lucene
+            /// NOTE: This was longsSize (field) in Lucene.
             /// </summary>
             public int Int64sSize { get; private set; }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexReader.cs b/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexReader.cs
index 92f6a95..1265661 100644
--- a/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexReader.cs
+++ b/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexReader.cs
@@ -28,12 +28,11 @@ namespace Lucene.Net.Codecs.BlockTerms
      */
 
     /// <summary>
-    /// TermsIndexReader for simple every Nth terms indexes
-    /// 
-    /// See FixedGapTermsIndexWriter
-    /// 
-    /// lucene.experimental
+    /// <see cref="TermsIndexReaderBase"/> for simple every Nth terms indexes.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
+    /// <seealso cref="FixedGapTermsIndexWriter"/>
     public class FixedGapTermsIndexReader : TermsIndexReaderBase
     {
         // NOTE: long is overkill here, since this number is 128
@@ -438,7 +437,7 @@ namespace Lucene.Net.Codecs.BlockTerms
                     }
                 }
 
-                /// <summary>Returns approximate RAM bytes Used</summary>
+                /// <summary>Returns approximate RAM bytes used.</summary>
                 public long RamBytesUsed()
                 {
                     return ((termOffsets != null) ? termOffsets.RamBytesUsed() : 0) +

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexWriter.cs b/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexWriter.cs
index b27938e..41ed974 100644
--- a/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexWriter.cs
+++ b/src/Lucene.Net.Codecs/BlockTerms/FixedGapTermsIndexWriter.cs
@@ -28,11 +28,11 @@ namespace Lucene.Net.Codecs.BlockTerms
     /// <summary>
     /// Selects every Nth term as an index term, and holds term
     /// bytes (mostly) fully expanded in memory.  This terms index
-    /// supports seeking by ord.  See {@link
-    /// VariableGapTermsIndexWriter} for a more memory efficient
+    /// supports seeking by ord.  See 
+    /// <see cref="VariableGapTermsIndexWriter"/> for a more memory efficient
     /// terms index that does not support seeking by ord.
-    ///
-    /// @lucene.experimental */    
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
     public class FixedGapTermsIndexWriter : TermsIndexWriterBase
     {
@@ -90,7 +90,7 @@ namespace Lucene.Net.Codecs.BlockTerms
         /// <summary>
         /// NOTE: if your codec does not sort in unicode code
         /// point order, you must override this method, to simply
-        /// return indexedTerm.Length.
+        /// return <c>indexedTerm.Length</c>.
         /// </summary>
         protected virtual int IndexedTermPrefixLength(BytesRef priorTerm, BytesRef indexedTerm)
         {
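
For illustration only (not part of this commit): a minimal sketch of applying the
NOTE above in a subclass, for a codec whose terms are not sorted in unicode code
point order. The SegmentWriteState base constructor is assumed from the Java
original; only the IndexedTermPrefixLength signature appears in this hunk.

    using Lucene.Net.Codecs.BlockTerms;
    using Lucene.Net.Index;
    using Lucene.Net.Util;

    internal sealed class NonUnicodeSortTermsIndexWriter : FixedGapTermsIndexWriter
    {
        // Assumed constructor, mirroring the Java FixedGapTermsIndexWriter(SegmentWriteState).
        public NonUnicodeSortTermsIndexWriter(SegmentWriteState state)
            : base(state)
        {
        }

        // When term order is not unicode code point order, no shared prefix can be
        // trusted, so index the full term bytes as the doc comment above advises.
        protected override int IndexedTermPrefixLength(BytesRef priorTerm, BytesRef indexedTerm)
        {
            return indexedTerm.Length;
        }
    }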

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/BlockTerms/TermsIndexReaderBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/BlockTerms/TermsIndexReaderBase.cs b/src/Lucene.Net.Codecs/BlockTerms/TermsIndexReaderBase.cs
index d7b905f..766385b 100644
--- a/src/Lucene.Net.Codecs/BlockTerms/TermsIndexReaderBase.cs
+++ b/src/Lucene.Net.Codecs/BlockTerms/TermsIndexReaderBase.cs
@@ -21,20 +21,20 @@ namespace Lucene.Net.Codecs.BlockTerms
      * limitations under the License.
      */
 
+    // TODO
+    //   - allow for non-regular index intervals?  eg with a
+    //     long string of rare terms, you don't need such
+    //     frequent indexing
+
     /// <summary>
-    /// TODO
-    ///   - allow for non-regular index intervals?  eg with a
-    ///     long string of rare terms, you don't need such
-    ///     frequent indexing
-    /// 
-    /// {@link BlockTermsReader} interacts with an instance of this class
+    /// <see cref="BlockTermsReader"/> interacts with an instance of this class
     /// to manage its terms index.  The writer must accept
-    /// indexed terms (many pairs of BytesRef text + long
+    /// indexed terms (many pairs of <see cref="BytesRef"/> text + long
     /// fileOffset), and then this reader must be able to
     /// retrieve the nearest index term to a provided term
     /// text. 
-    ///
-    ///  @lucene.experimental */
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
     public abstract class TermsIndexReaderBase : IDisposable
     {
@@ -54,7 +54,7 @@ namespace Lucene.Net.Codecs.BlockTerms
         public abstract int Divisor { get; }
 
         /// <summary>
-        /// Similar to TermsEnum, except, the only "metadata" it
+        /// Similar to <see cref="TermsEnum"/>, except, the only "metadata" it
         /// reports for a given indexed term is the long fileOffset
         /// into the main terms dictionary file.
         /// </summary>
@@ -63,29 +63,29 @@ namespace Lucene.Net.Codecs.BlockTerms
             /// <summary> 
             /// Seeks to "largest" indexed term that's less than or equal
             /// to term; returns file pointer index (into the main
-            /// terms index file) for that term 
+            /// terms index file) for that term.
             /// </summary>
             public abstract long Seek(BytesRef term);
 
-            /// <summary>Returns -1 at end</summary>
+            /// <summary>Returns -1 at end.</summary>
             public abstract long Next();
 
             public abstract BytesRef Term { get; }
 
             /// <summary></summary>
-            /// <remarks>Only implemented if {@link TermsIndexReaderBase.supportsOrd()} 
-            /// returns true</remarks>
+            /// <remarks>Only implemented if <see cref="TermsIndexReaderBase.SupportsOrd"/>
+            /// returns <c>true</c></remarks>
             /// <returns></returns>
             public abstract long Seek(long ord);
 
             /// <summary></summary>
-            /// <remarks>Only implemented if {@link TermsIndexReaderBase.supportsOrd()} 
-            /// returns true</remarks>
+            /// <remarks>Only implemented if <see cref="TermsIndexReaderBase.SupportsOrd"/> 
+            /// returns <c>true</c></remarks>
             /// <returns></returns>
             public abstract long Ord { get; }
         }
 
-        /// <summary>Returns approximate RAM bytes used</summary>
+        /// <summary>Returns approximate RAM bytes used.</summary>
         public abstract long RamBytesUsed();
     }
 }
\ No newline at end of file
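
For illustration only (not part of this commit): a minimal sketch of walking a
field's terms index with the enum described above. The GetFieldEnum(FieldInfo)
accessor is assumed from the Java original; only Seek/Next/Term/Ord appear in
this hunk.

    using Lucene.Net.Codecs.BlockTerms;
    using Lucene.Net.Index;

    internal static class TermsIndexExample
    {
        public static void DumpIndexedTerms(TermsIndexReaderBase indexReader, FieldInfo field)
        {
            var indexEnum = indexReader.GetFieldEnum(field); // assumed accessor name

            long fp = indexEnum.Next();      // file pointer of the first indexed term
            while (fp != -1)                 // Next() returns -1 at the end
            {
                System.Console.WriteLine("{0} -> {1}", indexEnum.Term, fp);
                fp = indexEnum.Next();
            }
        }
    }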

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/BlockTerms/TermsIndexWriterBase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/BlockTerms/TermsIndexWriterBase.cs b/src/Lucene.Net.Codecs/BlockTerms/TermsIndexWriterBase.cs
index 5568252..4ac3745 100644
--- a/src/Lucene.Net.Codecs/BlockTerms/TermsIndexWriterBase.cs
+++ b/src/Lucene.Net.Codecs/BlockTerms/TermsIndexWriterBase.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Codecs.BlockTerms
      */
 
     /// <summary>
-    ///  Base class for terms index implementations to plug
-    /// into {@link BlockTermsWriter}.
-    /// 
-    /// @see TermsIndexReaderBase
+    /// Base class for terms index implementations to plug
+    /// into <see cref="BlockTermsWriter"/>.
+    /// <para/>
     /// @lucene.experimental 
     /// </summary>
+    /// <seealso cref="TermsIndexReaderBase"/>
     public abstract class TermsIndexWriterBase : IDisposable
     {
-        /// <summary>Terms index API for a single field</summary>
+        /// <summary>Terms index API for a single field.</summary>
         public abstract class FieldWriter
         {
             public abstract bool CheckIndexTerm(BytesRef text, TermStats stats);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexReader.cs b/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexReader.cs
index ca773c0..a9e1199 100644
--- a/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexReader.cs
+++ b/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexReader.cs
@@ -27,9 +27,9 @@ namespace Lucene.Net.Codecs.BlockTerms
      */
 
     /// <summary>
-    /// See VariableGapTermsIndexWriter
-    /// 
-    /// lucene.experimental
+    /// See <see cref="VariableGapTermsIndexWriter"/>.
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
     public class VariableGapTermsIndexReader : TermsIndexReaderBase
     {
@@ -258,7 +258,7 @@ namespace Lucene.Net.Codecs.BlockTerms
                 }
             }
 
-            /// <summary>Returns approximate RAM bytes used</summary>
+            /// <summary>Returns approximate RAM bytes used.</summary>
             public virtual long RamBytesUsed()
             {
                 return fst == null ? 0 : fst.GetSizeInBytes();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexWriter.cs b/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexWriter.cs
index 9e05531..47a46cc 100644
--- a/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexWriter.cs
+++ b/src/Lucene.Net.Codecs/BlockTerms/VariableGapTermsIndexWriter.cs
@@ -27,18 +27,18 @@ namespace Lucene.Net.Codecs.BlockTerms
 
     /// <summary>
     /// Selects index terms according to provided pluggable
-    /// {@link IndexTermSelector}, and stores them in a prefix trie that's
-    /// loaded entirely in RAM stored as an FST.  This terms
+    /// <see cref="IndexTermSelector"/>, and stores them in a prefix trie that's
+    /// loaded entirely in RAM stored as an <see cref="FST{T}"/>.  This terms
     /// index only supports unsigned byte term sort order
     /// (unicode codepoint order when the bytes are UTF8).
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class VariableGapTermsIndexWriter : TermsIndexWriterBase
     {
         protected IndexOutput m_output;
 
-        /// <summary>Extension of terms index file</summary>
+        /// <summary>Extension of terms index file.</summary>
         internal readonly static string TERMS_INDEX_EXTENSION = "tiv";
 
         internal readonly static string CODEC_NAME = "VARIABLE_GAP_TERMS_INDEX";
@@ -53,27 +53,27 @@ namespace Lucene.Net.Codecs.BlockTerms
         private readonly IndexTermSelector policy;
 
         /// <summary>
-        /// Hook for selecting which terms should be placed in the terms index
-        /// 
-        /// IsIndexTerm for each term in that field
-        /// NewField is called at the start of each new field
-        /// 
+        /// Hook for selecting which terms should be placed in the terms index.
+        /// <para/>
+        /// <see cref="NewField(FieldInfo)"/> is called at the start of each new field, and
+        /// <see cref="IsIndexTerm(BytesRef, TermStats)"/> for each term in that field.
+        /// <para/>
         /// @lucene.experimental
         /// </summary>
         public abstract class IndexTermSelector
         {
             /// <summary>
-            /// Called sequentially on every term being written
-            /// returning true if this term should be indexed
+            /// Called sequentially on every term being written,
+            /// returning <c>true</c> if this term should be indexed.
             /// </summary>
             public abstract bool IsIndexTerm(BytesRef term, TermStats stats);
 
-            /// <summary>Called when a new field is started</summary>
+            /// <summary>Called when a new field is started.</summary>
             public abstract void NewField(FieldInfo fieldInfo);
         }
 
         /// <remarks>
-        /// Same policy as {@link FixedGapTermsIndexWriter}
+        /// Same policy as <see cref="FixedGapTermsIndexWriter"/>
         /// </remarks>
         public sealed class EveryNTermSelector : IndexTermSelector
         {
@@ -216,8 +216,8 @@ namespace Lucene.Net.Codecs.BlockTerms
         }
 
         /// <remarks>
-        /// Note: If your codec does not sort in unicode code point order,
-        /// you must override this method to simplly return IndexedTerm.Length
+        /// NOTE: If your codec does not sort in unicode code point order,
+        /// you must override this method to simply return <c>indexedTerm.Length</c>.
         /// </remarks>
         protected virtual int IndexedTermPrefixLength(BytesRef priorTerm, BytesRef indexedTerm)
         {
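
For illustration only (not part of this commit): a minimal sketch of a custom
IndexTermSelector built on the IsIndexTerm/NewField hooks shown above. The
TermStats.DocFreq property is assumed from the Java original.

    using Lucene.Net.Codecs;
    using Lucene.Net.Codecs.BlockTerms;
    using Lucene.Net.Index;
    using Lucene.Net.Util;

    internal sealed class HighDocFreqTermSelector : VariableGapTermsIndexWriter.IndexTermSelector
    {
        private readonly int minDocFreq;

        public HighDocFreqTermSelector(int minDocFreq)
        {
            this.minDocFreq = minDocFreq;
        }

        // Called sequentially on every term written; index only the frequent ones.
        public override bool IsIndexTerm(BytesRef term, TermStats stats)
        {
            return stats.DocFreq >= minDocFreq; // DocFreq assumed, as in Java's TermStats
        }

        // No per-field state to reset for this simple policy.
        public override void NewField(FieldInfo fieldInfo)
        {
        }
    }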

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Bloom/BloomFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Bloom/BloomFilterFactory.cs b/src/Lucene.Net.Codecs/Bloom/BloomFilterFactory.cs
index 377806c..3bfe3ac 100644
--- a/src/Lucene.Net.Codecs/Bloom/BloomFilterFactory.cs
+++ b/src/Lucene.Net.Codecs/Bloom/BloomFilterFactory.cs
@@ -20,27 +20,25 @@ namespace Lucene.Net.Codecs.Bloom
      */
 
     /// <summary>
-    /// Class used to create index-time {@link FuzzySet} appropriately configured for
+    /// Class used to create index-time <see cref="FuzzySet"/> appropriately configured for
     /// each field. Also called to right-size bitsets for serialization.
-    ///
-    ///  @lucene.experimental
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
     public abstract class BloomFilterFactory
     {
-        /// <summary>
-        /// 
-        /// </summary>
-        /// <param name="state">The content to be indexed</param>
-        /// <param name="info">The field requiring a BloomFilter</param>
-        /// <returns>An appropriately sized set or null if no BloomFiltering required</returns>
+
+        /// <param name="state">The content to be indexed.</param>
+        /// <param name="info">The field requiring a BloomFilter.</param>
+        /// <returns>An appropriately sized set or <c>null</c> if no BloomFiltering required.</returns>
         public abstract FuzzySet GetSetForField(SegmentWriteState state, FieldInfo info);
 
         /// <summary>
-        /// Called when downsizing bitsets for serialization
+        /// Called when downsizing bitsets for serialization.
         /// </summary>
-        /// <param name="fieldInfo">The field with sparse set bits</param>
-        /// <param name="initialSet">The bits accumulated</param>
-        /// <returns> null or a hopefully more densely packed, smaller bitset</returns>
+        /// <param name="fieldInfo">The field with sparse set bits.</param>
+        /// <param name="initialSet">The bits accumulated.</param>
+        /// <returns> <c>null</c> or a hopefully more densely packed, smaller bitset.</returns>
         public virtual FuzzySet Downsize(FieldInfo fieldInfo, FuzzySet initialSet)
         {
             // Aim for a bitset size that would have 10% of bits set (so 90% of searches
@@ -50,11 +48,11 @@ namespace Lucene.Net.Codecs.Bloom
         }
 
         /// <summary>
-        /// Used to determine if the given filter has reached saturation and should be retired i.e. not saved any more
+        /// Used to determine if the given filter has reached saturation and should be retired i.e. not saved any more.
         /// </summary>
-        /// <param name="bloomFilter">The bloomFilter being tested</param>
-        /// <param name="fieldInfo">The field with which this filter is associated</param>
-        /// <returns>true if the set has reached saturation and should be retired</returns>
+        /// <param name="bloomFilter">The bloomFilter being tested.</param>
+        /// <param name="fieldInfo">The field with which this filter is associated.</param>
+        /// <returns>true if the set has reached saturation and should be retired.</returns>
         public abstract bool IsSaturated(FuzzySet bloomFilter, FieldInfo fieldInfo);
     }
 }
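
For illustration only (not part of this commit): a minimal sketch of a custom
factory that builds a filter for a single "id" field and never retires it.
FuzzySet.CreateSetBasedOnQuality(...) is assumed from the Java original; only
the members shown above appear in this hunk.

    using Lucene.Net.Codecs.Bloom;
    using Lucene.Net.Index;

    internal sealed class IdOnlyBloomFilterFactory : BloomFilterFactory
    {
        public override FuzzySet GetSetForField(SegmentWriteState state, FieldInfo info)
        {
            if (info.Name != "id")
            {
                return null; // no Bloom filtering for other fields
            }
            // Roughly one unique key per document, aiming for ~10% bit saturation
            // (CreateSetBasedOnQuality assumed, mirroring the Java factory method).
            return FuzzySet.CreateSetBasedOnQuality(state.SegmentInfo.DocCount, 0.10f);
        }

        public override bool IsSaturated(FuzzySet bloomFilter, FieldInfo fieldInfo)
        {
            return false; // never retire the filter
        }
    }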

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Bloom/BloomFilteringPostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Bloom/BloomFilteringPostingsFormat.cs b/src/Lucene.Net.Codecs/Bloom/BloomFilteringPostingsFormat.cs
index 8ab4952..cf0ca01 100644
--- a/src/Lucene.Net.Codecs/Bloom/BloomFilteringPostingsFormat.cs
+++ b/src/Lucene.Net.Codecs/Bloom/BloomFilteringPostingsFormat.cs
@@ -29,33 +29,33 @@ namespace Lucene.Net.Codecs.Bloom
 
     /// <summary>
     /// 
-    /// A {@link PostingsFormat} useful for low doc-frequency fields such as primary
+    /// A <see cref="PostingsFormat"/> useful for low doc-frequency fields such as primary
     /// keys. Bloom filters are maintained in a ".blm" file which offers "fast-fail"
     /// for reads in segments known to have no record of the key. A choice of
-    /// delegate PostingsFormat is used to record all other Postings data.
-    /// 
-    /// A choice of {@link BloomFilterFactory} can be passed to tailor Bloom Filter
+    /// delegate <see cref="PostingsFormat"/> is used to record all other Postings data.
+    /// <para/>
+    /// A choice of <see cref="BloomFilterFactory"/> can be passed to tailor Bloom Filter
     /// settings on a per-field basis. The default configuration is
-    /// {@link DefaultBloomFilterFactory} which allocates a ~8mb bitset and hashes
-    /// values using {@link MurmurHash2}. This should be suitable for most purposes.
-    ///
+    /// <see cref="DefaultBloomFilterFactory"/> which allocates a ~8mb bitset and hashes
+    /// values using <see cref="MurmurHash2"/>. This should be suitable for most purposes.
+    /// <para/>
     /// The format of the blm file is as follows:
     ///
-    /// <ul>
-    /// <li>BloomFilter (.blm) --&gt; Header, DelegatePostingsFormatName,
-    /// NumFilteredFields, Filter<sup>NumFilteredFields</sup>, Footer</li>
-    /// <li>Filter --&gt; FieldNumber, FuzzySet</li>
-    /// <li>FuzzySet --&gt;See {@link FuzzySet#serialize(DataOutput)}</li>
-    /// <li>Header --&gt; {@link CodecUtil#writeHeader CodecHeader}</li>
-    /// <li>DelegatePostingsFormatName --&gt; {@link DataOutput#writeString(String)
-    /// String} The name of a ServiceProvider registered {@link PostingsFormat}</li>
-    /// <li>NumFilteredFields --&gt; {@link DataOutput#writeInt Uint32}</li>
-    /// <li>FieldNumber --&gt; {@link DataOutput#writeInt Uint32} The number of the
-    /// field in this segment</li>
-    /// <li>Footer --&gt; {@link CodecUtil#writeFooter CodecFooter}</li>
-    /// </ul>
-    ///
-    ///  @lucene.experimental
+    /// <list type="bullet">
+    ///     <item><description>BloomFilter (.blm) --&gt; Header, DelegatePostingsFormatName,
+    ///         NumFilteredFields, Filter<sup>NumFilteredFields</sup>, Footer</description></item>
+    ///     <item><description>Filter --&gt; FieldNumber, FuzzySet</description></item>
+    ///     <item><description>FuzzySet --&gt;See <see cref="FuzzySet.Serialize(DataOutput)"/></description></item>
+    ///     <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(DataOutput, string, int)"/>) </description></item>
+    ///     <item><description>DelegatePostingsFormatName --&gt; String (<see cref="DataOutput.WriteString(string)"/>)
+    ///         The name of a ServiceProvider registered <see cref="PostingsFormat"/></description></item>
+    ///     <item><description>NumFilteredFields --&gt; Uint32 (<see cref="DataOutput.WriteInt32(int)"/>) </description></item>
+    ///     <item><description>FieldNumber --&gt; Uint32 (<see cref="DataOutput.WriteInt32(int)"/>) The number of the
+    ///         field in this segment</description></item>
+    ///     <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(IndexOutput)"/>) </description></item>
+    /// </list>
+    /// <para/>
+    /// @lucene.experimental
     /// </summary>
     [PostingsFormatName("BloomFilter")] // LUCENENET specific - using PostingsFormatName attribute to ensure the default name passed from subclasses is the same as this class name
     public sealed class BloomFilteringPostingsFormat : PostingsFormat
@@ -66,20 +66,20 @@ namespace Lucene.Net.Codecs.Bloom
         public static readonly int VERSION_CHECKSUM = 2;
         public static readonly int VERSION_CURRENT = VERSION_CHECKSUM;
 
-        /// <summary>Extension of Bloom Filters file</summary>
+        /// <summary>Extension of Bloom Filters file.</summary>
         private const string BLOOM_EXTENSION = "blm";
 
         private readonly BloomFilterFactory _bloomFilterFactory = new DefaultBloomFilterFactory();
         private readonly PostingsFormat _delegatePostingsFormat;
-        
+
         /// <summary>
-        ///  Creates Bloom filters for a selection of fields created in the index. This
+        /// Creates Bloom filters for a selection of fields created in the index. This
         /// is recorded as a set of Bitsets held as a segment summary in an additional
-        /// "blm" file. This PostingsFormat delegates to a choice of delegate
-        /// PostingsFormat for encoding all other postings data.
+        /// "blm" file. This <see cref="PostingsFormat"/> delegates to a choice of delegate
+        /// <see cref="PostingsFormat"/> for encoding all other postings data.
         /// </summary>
-        /// <param name="delegatePostingsFormat">The PostingsFormat that records all the non-bloom filter data i.e. postings info.</param>
-        /// <param name="bloomFilterFactory">The {@link BloomFilterFactory} responsible for sizing BloomFilters appropriately</param>
+        /// <param name="delegatePostingsFormat">The <see cref="PostingsFormat"/> that records all the non-bloom filter data i.e. postings info.</param>
+        /// <param name="bloomFilterFactory">The <see cref="BloomFilterFactory"/> responsible for sizing BloomFilters appropriately.</param>
         public BloomFilteringPostingsFormat(PostingsFormat delegatePostingsFormat,
             BloomFilterFactory bloomFilterFactory) : base()
         {
@@ -90,12 +90,12 @@ namespace Lucene.Net.Codecs.Bloom
         /// <summary>
         /// Creates Bloom filters for a selection of fields created in the index. This
         /// is recorded as a set of Bitsets held as a segment summary in an additional
-        /// "blm" file. This PostingsFormat delegates to a choice of delegate
-        /// PostingsFormat for encoding all other postings data. This choice of
-        /// constructor defaults to the {@link DefaultBloomFilterFactory} for
+        /// "blm" file. This <see cref="PostingsFormat"/> delegates to a choice of delegate
+        /// <see cref="PostingsFormat"/> for encoding all other postings data. This choice of
+        /// constructor defaults to the <see cref="DefaultBloomFilterFactory"/> for
         /// configuring per-field BloomFilters.
         /// </summary>
-        /// <param name="delegatePostingsFormat">The PostingsFormat that records all the non-bloom filter data i.e. postings info.</param>
+        /// <param name="delegatePostingsFormat">The <see cref="PostingsFormat"/> that records all the non-bloom filter data i.e. postings info.</param>
         public BloomFilteringPostingsFormat(PostingsFormat delegatePostingsFormat)
             : this(delegatePostingsFormat, new DefaultBloomFilterFactory())
         {
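
For illustration only (not part of this commit): wiring the format to a delegate
as the constructors above describe. Lucene41PostingsFormat is just one possible
delegate; any registered PostingsFormat works.

    using Lucene.Net.Codecs;
    using Lucene.Net.Codecs.Bloom;
    using Lucene.Net.Codecs.Lucene41;

    internal static class BloomFormatExample
    {
        public static PostingsFormat CreateBloomWrappedFormat()
        {
            // The delegate records all non-bloom postings data; low doc-frequency
            // (primary key style) fields gain the ".blm" fast-fail filter.
            return new BloomFilteringPostingsFormat(new Lucene41PostingsFormat());
        }
    }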

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Bloom/DefaultBloomFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Bloom/DefaultBloomFilterFactory.cs b/src/Lucene.Net.Codecs/Bloom/DefaultBloomFilterFactory.cs
index 0b2e1ad..5d51999 100644
--- a/src/Lucene.Net.Codecs/Bloom/DefaultBloomFilterFactory.cs
+++ b/src/Lucene.Net.Codecs/Bloom/DefaultBloomFilterFactory.cs
@@ -21,8 +21,8 @@ namespace Lucene.Net.Codecs.Bloom
 
     /// <summary>
     /// Default policy is to allocate a bitset with 10% saturation given a unique term per document.
-    /// Bits are set via MurmurHash2 hashing function.
-    ///
+    /// Bits are set via <see cref="MurmurHash2"/> hashing function.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class DefaultBloomFilterFactory : BloomFilterFactory

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Bloom/FuzzySet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Bloom/FuzzySet.cs b/src/Lucene.Net.Codecs/Bloom/FuzzySet.cs
index 282c790..36a6f29 100644
--- a/src/Lucene.Net.Codecs/Bloom/FuzzySet.cs
+++ b/src/Lucene.Net.Codecs/Bloom/FuzzySet.cs
@@ -26,20 +26,20 @@ namespace Lucene.Net.Codecs.Bloom
     /// <summary>
     /// A class used to represent a set of many, potentially large, values (e.g. many
     /// long strings such as URLs), using a significantly smaller amount of memory.
-    ///
+    /// <para/>
     /// The set is "lossy" in that it cannot definitively state that it does contain
     /// a value but it <em>can</em> definitively say if a value is <em>not</em> in
     /// the set. It can therefore be used as a Bloom Filter.
-    /// 
+    /// <para/>
     /// Another application of the set is that it can be used to perform fuzzy counting because
     /// it can estimate reasonably accurately how many unique values are contained in the set. 
-    ///
+    /// <para/>
     /// This class is NOT threadsafe.
-    ///
+    /// <para/>
     /// Internally a Bitset is used to record values and once a client has finished recording
-    /// a stream of values the {@link #downsize(float)} method can be used to create a suitably smaller set that
+    /// a stream of values the <see cref="Downsize(float)"/> method can be used to create a suitably smaller set that
     /// is sized appropriately for the number of values recorded and desired saturation levels. 
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class FuzzySet
@@ -62,7 +62,7 @@ namespace Lucene.Net.Codecs.Bloom
         }
 
         /// <remarks>
-        /// Result from {@link FuzzySet#contains(BytesRef)}:
+        /// Result from <see cref="FuzzySet.Contains(BytesRef)"/>:
         /// can never return definitively YES (always MAYBE), 
         /// but can sometimes definitely return NO.
         /// </remarks>
@@ -99,7 +99,7 @@ namespace Lucene.Net.Codecs.Bloom
         }
 
         /// <summary>
-        /// Rounds down required maxNumberOfBits to the nearest number that is made up
+        /// Rounds down required <paramref name="maxNumberOfBits"/> to the nearest number that is made up
         /// of all ones as a binary number.  
         /// Use this method where controlling memory use is paramount.
         /// </summary>
@@ -118,8 +118,8 @@ namespace Lucene.Net.Codecs.Bloom
         /// than deciding how much memory to throw at the problem.
         /// </summary>
         /// <param name="maxNumberOfValuesExpected"></param>
-        /// <param name="desiredSaturation">A number between 0 and 1 expressing the % of bits set once all values have been recorded</param>
-        /// <returns>The size of the set nearest to the required size</returns>
+        /// <param name="desiredSaturation">A number between 0 and 1 expressing the % of bits set once all values have been recorded.</param>
+        /// <returns>The size of the set nearest to the required size.</returns>
         public static int GetNearestSetSize(int maxNumberOfValuesExpected,
             float desiredSaturation)
         {
@@ -156,9 +156,10 @@ namespace Lucene.Net.Codecs.Bloom
 
         /// <summary>
         /// The main method required for a Bloom filter which, given a value determines set membership.
-        /// Unlike a conventional set, the fuzzy set returns NO or MAYBE rather than true or false.
+        /// Unlike a conventional set, the fuzzy set returns <see cref="ContainsResult.NO"/> or 
+        /// <see cref="ContainsResult.MAYBE"/> rather than <c>true</c> or <c>false</c>.
         /// </summary>
-        /// <returns>NO or MAYBE</returns>
+        /// <returns><see cref="ContainsResult.NO"/> or <see cref="ContainsResult.MAYBE"/></returns>
         public virtual ContainsResult Contains(BytesRef value)
         {
             var hash = _hashFunction.Hash(value);
@@ -170,23 +171,23 @@ namespace Lucene.Net.Codecs.Bloom
         }
 
         /// <summary>
-        ///  Serializes the data set to file using the following format:
-        ///  <ul>
-        ///   <li>FuzzySet --&gt;FuzzySetVersion,HashFunctionName,BloomSize,
-        ///  NumBitSetWords,BitSetWord<sup>NumBitSetWords</sup></li> 
-        ///  <li>HashFunctionName --&gt; {@link DataOutput#writeString(String) String} The
-        ///  name of a ServiceProvider registered {@link HashFunction}</li>
-        ///  <li>FuzzySetVersion --&gt; {@link DataOutput#writeInt Uint32} The version number of the {@link FuzzySet} class</li>
-        ///  <li>BloomSize --&gt; {@link DataOutput#writeInt Uint32} The modulo value used
-        ///  to project hashes into the field's Bitset</li>
-        ///  <li>NumBitSetWords --&gt; {@link DataOutput#writeInt Uint32} The number of
-        ///  longs (as returned from {@link FixedBitSet#getBits})</li>
-        ///  <li>BitSetWord --&gt; {@link DataOutput#writeLong Long} A long from the array
-        ///  returned by {@link FixedBitSet#getBits}</li>
-        ///  </ul>
-        ///  @param out Data output stream
-        ///  @ If there is a low-level I/O error
+        /// Serializes the data set to file using the following format:
+        /// <list type="bullet">
+        ///     <item><description>FuzzySet --&gt;FuzzySetVersion,HashFunctionName,BloomSize,
+        ///         NumBitSetWords,BitSetWord<sup>NumBitSetWords</sup></description></item> 
+        ///     <item><description>HashFunctionName --&gt; String (<see cref="DataOutput.WriteString(string)"/>) The
+        ///         name of a ServiceProvider registered <see cref="HashFunction"/></description></item>
+        ///     <item><description>FuzzySetVersion --&gt; Uint32 (<see cref="DataOutput.WriteInt32(int)"/>) The version number of the <see cref="FuzzySet"/> class</description></item>
+        ///     <item><description>BloomSize --&gt; Uint32 (<see cref="DataOutput.WriteInt32(int)"/>) The modulo value used
+        ///         to project hashes into the field's Bitset</description></item>
+        ///     <item><description>NumBitSetWords --&gt; Uint32 (<see cref="DataOutput.WriteInt32(int)"/>) The number of
+        ///         longs (as returned from <see cref="FixedBitSet.GetBits()"/>)</description></item>
+        ///     <item><description>BitSetWord --&gt; Long (<see cref="DataOutput.WriteInt64(long)"/>) A long from the array
+        ///         returned by <see cref="FixedBitSet.GetBits()"/></description></item>
+        /// </list>
         /// </summary>
+        /// <param name="output">Data output stream.</param>
+        /// <exception cref="System.IO.IOException">If there is a low-level I/O error.</exception>
         public virtual void Serialize(DataOutput output)
         {
             output.WriteInt32(VERSION_CURRENT);
@@ -232,7 +233,8 @@ namespace Lucene.Net.Codecs.Bloom
         /// Records a value in the set. The referenced bytes are hashed and then modulo n'd where n is the
         /// chosen size of the internal bitset.
         /// </summary>
-        /// <param name="value">The Key value to be hashed</param>
+        /// <param name="value">The Key value to be hashed.</param>
+        /// <exception cref="System.IO.IOException">If there is a low-level I/O error.</exception>
         public virtual void AddValue(BytesRef value)
         {
             var hash = _hashFunction.Hash(value);
@@ -249,7 +251,7 @@ namespace Lucene.Net.Codecs.Bloom
         /// A number between 0 and 1 describing the % of bits that would ideally be set in the result. 
         /// Lower values have better accuracy but require more space.
         /// </param>
-        /// <return>A smaller FuzzySet or null if the current set is already over-saturated</return>
+        /// <returns>A smaller <see cref="FuzzySet"/> or <c>null</c> if the current set is already over-saturated.</returns>
         public virtual FuzzySet Downsize(float targetMaxSaturation)
         {
             var numBitsSet = _filter.Cardinality();
@@ -295,7 +297,9 @@ namespace Lucene.Net.Codecs.Bloom
             return GetEstimatedNumberUniqueValuesAllowingForCollisions(_bloomSize, _filter.Cardinality());
         }
 
-        // Given a set size and a the number of set bits, produces an estimate of the number of unique values recorded
+        /// <summary>
+        /// Given a <paramref name="setSize"/> and the number of set bits, produces an estimate of the number of unique values recorded.
+        /// </summary>
         public static int GetEstimatedNumberUniqueValuesAllowingForCollisions(
             int setSize, int numRecordedBits)
         {
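
For illustration only (not part of this commit): recording values and probing the
set with the AddValue/Contains/Downsize members documented above. The
CreateSetBasedOnQuality(...) factory method and the BytesRef(string) constructor
are assumed; they do not appear in this hunk.

    using Lucene.Net.Codecs.Bloom;
    using Lucene.Net.Util;

    internal static class FuzzySetExample
    {
        public static void Demo()
        {
            // Sized for ~1000 values at roughly 10% bit saturation (factory method assumed).
            FuzzySet set = FuzzySet.CreateSetBasedOnQuality(1000, 0.10f);
            set.AddValue(new BytesRef("doc-42"));

            // NO is definitive; MAYBE means the real postings must still be consulted.
            var hit = set.Contains(new BytesRef("doc-42"));   // MAYBE
            var miss = set.Contains(new BytesRef("doc-43"));  // usually NO
            System.Console.WriteLine("{0} {1}", hit, miss);

            // Right-size before serializing; Downsize returns null if already over-saturated.
            FuzzySet toSerialize = set.Downsize(0.10f) ?? set;
            // toSerialize.Serialize(output) would then write the format described above.
        }
    }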

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Bloom/HashFunction.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Bloom/HashFunction.cs b/src/Lucene.Net.Codecs/Bloom/HashFunction.cs
index 9be4889..d523d70 100644
--- a/src/Lucene.Net.Codecs/Bloom/HashFunction.cs
+++ b/src/Lucene.Net.Codecs/Bloom/HashFunction.cs
@@ -22,18 +22,18 @@ namespace Lucene.Net.Codecs.Bloom
     /// <summary>
     /// Base class for hashing functions that can be referred to by name.
     /// Subclasses are expected to provide threadsafe implementations of the hash function
-    /// on the range of bytes referenced in the provided {@link BytesRef}
-    /// 
+    /// on the range of bytes referenced in the provided <see cref="BytesRef"/>.
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public abstract class HashFunction
     {
 
         /// <summary>
-        /// Hashes the contents of the referenced bytes
-        /// @param bytes the data to be hashed
-        /// @return the hash of the bytes referenced by bytes.offset and length bytes.length
+        /// Hashes the contents of the referenced bytes.
         /// </summary>
+        /// <param name="bytes">The data to be hashed.</param>
+        /// <returns>The hash of the <c>bytes.Length</c> bytes beginning at <c>bytes.Offset</c>.</returns>
         public abstract int Hash(BytesRef bytes);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Bloom/MurmurHash2.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Bloom/MurmurHash2.cs b/src/Lucene.Net.Codecs/Bloom/MurmurHash2.cs
index 83504fc..5e4254e 100644
--- a/src/Lucene.Net.Codecs/Bloom/MurmurHash2.cs
+++ b/src/Lucene.Net.Codecs/Bloom/MurmurHash2.cs
@@ -22,14 +22,14 @@ namespace Lucene.Net.Codecs.Bloom
     /// <summary>
     /// This is a very fast, non-cryptographic hash suitable for general hash-based
     /// lookup. See http://murmurhash.googlepages.com/ for more details.
-    ///
+    /// <para/>
     /// The C version of MurmurHash 2.0 found at that site was ported to Java by
     /// Andrzej Bialecki (ab at getopt org).
-    ///
-    ///  The code from getopt.org was adapted by Mark Harwood in the form here as one of a pluggable choice of 
-    /// hashing functions as the core function had to be adapted to work with BytesRefs with offsets and lengths
+    /// <para/>
+    /// The code from getopt.org was adapted by Mark Harwood in the form here as one of a pluggable choice of 
+    /// hashing functions as the core function had to be adapted to work with <see cref="BytesRef"/>s with offsets and lengths
     /// rather than raw byte arrays.  
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public sealed class MurmurHash2 : HashFunction
@@ -89,10 +89,10 @@ namespace Lucene.Net.Codecs.Bloom
         /// <summary>
         /// Generates 32 bit hash from byte array with default seed value.
         /// </summary>
-        /// <param name="data">byte array to hash</param>
-        /// <param name="offset">the start position in the array to hash</param>
-        /// <param name="len">length of the array elements to hash</param>
-        /// <returns>32 bit hash of the given array</returns>
+        /// <param name="data">Byte array to hash.</param>
+        /// <param name="offset">The start position in the array to hash.</param>
+        /// <param name="len">Length of the array elements to hash.</param>
+        /// <returns>32 bit hash of the given array.</returns>
         public static int Hash32(byte[] data, int offset, int len)
         {
             return Hash(data, unchecked((int)0x9747b28c), offset, len);
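
For illustration only (not part of this commit): hashing a UTF-8 encoded key with
the static Hash32 overload shown above.

    using System.Text;
    using Lucene.Net.Codecs.Bloom;

    internal static class MurmurExample
    {
        public static int HashKey(string key)
        {
            byte[] bytes = Encoding.UTF8.GetBytes(key);
            // 32-bit non-cryptographic hash over the whole array, using the default seed.
            return MurmurHash2.Hash32(bytes, 0, bytes.Length);
        }
    }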

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/DiskDV/DiskDocValuesFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/DiskDV/DiskDocValuesFormat.cs b/src/Lucene.Net.Codecs/DiskDV/DiskDocValuesFormat.cs
index e121942..4ff48ab 100644
--- a/src/Lucene.Net.Codecs/DiskDV/DiskDocValuesFormat.cs
+++ b/src/Lucene.Net.Codecs/DiskDV/DiskDocValuesFormat.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Codecs.DiskDV
     /// <summary>
     /// DocValues format that keeps most things on disk.
     /// Only things like disk offsets are loaded into ram.
-    ///
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     [DocValuesFormatName("Disk")] // LUCENENET specific - using DocValuesFormatName attribute to ensure the default name passed from subclasses is the same as this class name

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/DiskDV/DiskNormsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/DiskDV/DiskNormsFormat.cs b/src/Lucene.Net.Codecs/DiskDV/DiskNormsFormat.cs
index 4ba4e5e..01a1949 100644
--- a/src/Lucene.Net.Codecs/DiskDV/DiskNormsFormat.cs
+++ b/src/Lucene.Net.Codecs/DiskDV/DiskNormsFormat.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Codecs.DiskDV
      */
 
     /// <summary>
-    /// Norms format that keeps all norms on disk
+    /// Norms format that keeps all norms on disk.
     /// </summary>
     public sealed class DiskNormsFormat : NormsFormat
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexInput.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexInput.cs b/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexInput.cs
index 0d9ec7e..ce02b7e 100644
--- a/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexInput.cs
+++ b/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexInput.cs
@@ -21,21 +21,24 @@ namespace Lucene.Net.Codecs.IntBlock
      * limitations under the License.
      */
 
-    // Naive int block API that writes vInts.  This is
-    // expected to give poor performance; it's really only for
-    // testing the pluggability.  One should typically use pfor instead. 
+
 
     /// <summary>
     /// Abstract base class that reads fixed-size blocks of ints
-    ///  from an IndexInput.  While this is a simple approach, a
-    ///  more performant approach would directly create an impl
-    ///  of IntIndexInput inside Directory.  Wrapping a generic
-    ///  IndexInput will likely cost performance.
+    /// from an <see cref="IndexInput"/>.  While this is a simple approach, a
+    /// more performant approach would directly create an impl
+    /// of <see cref="Int32IndexInput"/> inside <see cref="Directory"/>.  Wrapping a generic
+    /// <see cref="IndexInput"/> will likely cost performance.
     /// <para/>
     /// NOTE: This was FixedIntBlockIndexInput in Lucene
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
+    /// <remarks>
+    /// Naive int block API that writes vInts.  This is
+    /// expected to give poor performance; it's really only for
+    /// testing the pluggability.  One should typically use pfor instead.
+    /// </remarks>
     public abstract class FixedInt32BlockIndexInput : Int32IndexInput
     {
         private readonly IndexInput input;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexOutput.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexOutput.cs b/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexOutput.cs
index a0d7eb7..bc2a0c7 100644
--- a/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexOutput.cs
+++ b/src/Lucene.Net.Codecs/IntBlock/FixedIntBlockIndexOutput.cs
@@ -21,24 +21,22 @@ namespace Lucene.Net.Codecs.IntBlock
      * limitations under the License.
      */
 
-
-    // Naive int block API that writes vInts.  This is
-    //  expected to give poor performance; it's really only for
-    //  testing the pluggability.  One should typically use pfor instead. 
-
-
-
     /// <summary>
     /// Abstract base class that writes fixed-size blocks of ints
-    ///  to an IndexOutput.  While this is a simple approach, a
-    ///  more performant approach would directly create an impl
-    ///  of IntIndexOutput inside Directory.  Wrapping a generic
-    ///  IndexInput will likely cost performance.
+    /// to an <see cref="IndexOutput"/>.  While this is a simple approach, a
+    /// more performant approach would directly create an impl
+    /// of <see cref="Int32IndexOutput"/> inside <see cref="Directory"/>.  Wrapping a generic
+    /// <see cref="IndexOutput"/> will likely cost performance.
     /// <para/>
     /// NOTE: This was FixedIntBlockIndexOutput in Lucene
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
+    /// <remarks>
+    /// Naive int block API that writes vInts.  This is
+    /// expected to give poor performance; it's really only for
+    /// testing the pluggability.  One should typically use pfor instead.
+    /// </remarks>
     public abstract class FixedInt32BlockIndexOutput : Int32IndexOutput
     {
         protected readonly IndexOutput m_output;

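For illustration only: the matching writer side of the naive vInt block format, again a sketch. The protected m_buffer field name and the FlushBlock() override are assumptions mirroring the buffer/flushBlock members of the Lucene base class.

    using Lucene.Net.Codecs.IntBlock;
    using Lucene.Net.Store;

    // Hypothetical subclass: writes each buffered fixed-size block as plain vInts.
    public sealed class VInt32FixedBlockIndexOutput : FixedInt32BlockIndexOutput
    {
        public VInt32FixedBlockIndexOutput(IndexOutput output, int blockSize)
            : base(output, blockSize) // the base constructor is assumed to record the block size
        {
        }

        // Called by the base class once a full block of ints has been buffered.
        protected override void FlushBlock()
        {
            for (int i = 0; i < m_buffer.Length; i++)
            {
                m_output.WriteVInt32(m_buffer[i]);
            }
        }
    }
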
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexInput.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexInput.cs b/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexInput.cs
index 3bae6fd..acb7a1f 100644
--- a/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexInput.cs
+++ b/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexInput.cs
@@ -23,23 +23,24 @@ namespace Lucene.Net.Codecs.IntBlock
      * limitations under the License.
      */
 
-    // Naive int block API that writes vInts.  This is
-    // expected to give poor performance; it's really only for
-    // testing the pluggability.  One should typically use pfor instead. 
-
     // TODO: much of this can be shared code w/ the fixed case
 
     /// <summary>
     /// Abstract base class that reads variable-size blocks of ints
-    /// from an IndexInput.  While this is a simple approach, a
+    /// from an <see cref="IndexInput"/>.  While this is a simple approach, a
     /// more performant approach would directly create an impl
-    /// of IntIndexInput inside Directory.  Wrapping a generic
-    /// IndexInput will likely cost performance.
+    /// of <see cref="Int32IndexInput"/> inside <see cref="Directory"/>.  Wrapping a generic
+    /// <see cref="IndexInput"/> will likely cost performance.
     /// <para/>
     /// NOTE: This was VariableIntBlockIndexInput in Lucene
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
+    /// <remarks>
+    /// Naive int block API that writes vInts.  This is
+    /// expected to give poor performance; it's really only for
+    /// testing the pluggability.  One should typically use pfor instead. 
+    /// </remarks>
     public abstract class VariableInt32BlockIndexInput : Int32IndexInput
     {
         private readonly IndexInput input;

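For illustration only: a sketch of the variable-size counterpart, where ReadBlock() reports how many ints it decoded. A simple length-prefixed layout is assumed here (the first vInt of each block is its length); the nested IBlockReader interface and the GetBlockReader signature are likewise assumptions based on the Lucene original.

    using Lucene.Net.Codecs.IntBlock;
    using Lucene.Net.Store;

    // Hypothetical subclass: blocks are stored as <count, value * count>, all vInts.
    public sealed class LengthPrefixedBlockIndexInput : VariableInt32BlockIndexInput
    {
        public LengthPrefixedBlockIndexInput(IndexInput input)
            : base(input)
        {
        }

        protected override IBlockReader GetBlockReader(IndexInput input, int[] buffer)
        {
            return new Reader(input, buffer);
        }

        private sealed class Reader : IBlockReader
        {
            private readonly IndexInput input;
            private readonly int[] buffer;

            public Reader(IndexInput input, int[] buffer)
            {
                this.input = input;
                this.buffer = buffer;
            }

            public int ReadBlock()
            {
                // First vInt is the number of values in this block.
                int count = input.ReadVInt32();
                for (int i = 0; i < count; i++)
                {
                    buffer[i] = input.ReadVInt32();
                }
                return count; // tell the base class how many ints are now valid
            }

            public void Seek(long pos)
            {
                // No per-block decoder state in this sketch.
            }
        }
    }
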
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexOutput.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexOutput.cs b/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexOutput.cs
index f26a3bc..0b4eb5f 100644
--- a/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexOutput.cs
+++ b/src/Lucene.Net.Codecs/IntBlock/VariableIntBlockIndexOutput.cs
@@ -21,26 +21,24 @@ namespace Lucene.Net.Codecs.IntBlock
      * limitations under the License.
      */
 
-
-    // Naive int block API that writes vInts.  This is
-    // expected to give poor performance; it's really only for
-    // testing the pluggability.  One should typically use pfor instead. 
-
-
-
     // TODO: much of this can be shared code w/ the fixed case
 
     /// <summary>
     /// Abstract base class that writes variable-size blocks of ints
-    ///  to an IndexOutput.  While this is a simple approach, a
-    ///  more performant approach would directly create an impl
-    ///  of IntIndexOutput inside Directory.  Wrapping a generic
-    ///  IndexInput will likely cost performance.
+    /// to an <see cref="IndexOutput"/>.  While this is a simple approach, a
+    /// more performant approach would directly create an impl
+    /// of <see cref="Int32IndexOutput"/> inside <see cref="Directory"/>.  Wrapping a generic
+    /// <see cref="IndexOutput"/> will likely cost performance.
     /// <para/>
     /// NOTE: This was VariableIntBlockIndexOutput in Lucene
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
+    /// <remarks>
+    /// Naive int block API that writes vInts.  This is
+    /// expected to give poor performance; it's really only for
+    /// testing the pluggability.  One should typically use pfor instead. 
+    /// </remarks>
     public abstract class VariableInt32BlockIndexOutput : Int32IndexOutput
     {
         protected readonly IndexOutput m_output;
@@ -52,10 +50,10 @@ namespace Lucene.Net.Codecs.IntBlock
         // if its less than 128 we should set that as max and use byte?
 
         /// <summary>
-        /// NOTE: maxBlockSize must be the maximum block size 
-        ///  plus the max non-causal lookahead of your codec.  EG Simple9
-        ///  requires lookahead=1 because on seeing the Nth value
-        ///  it knows it must now encode the N-1 values before it. 
+        /// NOTE: <paramref name="maxBlockSize"/> must be the maximum block size 
+        /// plus the max non-causal lookahead of your codec.  EG Simple9
+        /// requires lookahead=1 because on seeing the Nth value
+        /// it knows it must now encode the N-1 values before it. 
         /// </summary>
         protected VariableInt32BlockIndexOutput(IndexOutput output, int maxBlockSize)
         {

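For illustration only: a sketch showing why the constructor note above adds the lookahead to maxBlockSize. This encoder only commits a block after seeing one value beyond it (lookahead = 1), so it passes blockSize + 1 to the base constructor. The protected Add(int) override returning the number of values flushed is an assumption mirroring the Lucene base class; the length-prefixed layout matches the reader sketch earlier in this commit.

    using Lucene.Net.Codecs.IntBlock;
    using Lucene.Net.Store;

    // Hypothetical subclass: non-causal encoder with a lookahead of one value.
    public sealed class LookaheadBlockIndexOutput : VariableInt32BlockIndexOutput
    {
        private readonly int blockSize;
        private readonly int[] pending;
        private int pendingCount;

        public LookaheadBlockIndexOutput(IndexOutput output, int blockSize)
            : base(output, blockSize + 1) // + 1 = the single value of non-causal lookahead
        {
            this.blockSize = blockSize;
            this.pending = new int[blockSize + 1];
        }

        protected override int Add(int value)
        {
            pending[pendingCount++] = value;
            if (pendingCount == blockSize + 1)
            {
                // Only now do we know the previous blockSize values form a block:
                // write them as <count, value * count> and carry the extra value over.
                m_output.WriteVInt32(blockSize);
                for (int i = 0; i < blockSize; i++)
                {
                    m_output.WriteVInt32(pending[i]);
                }
                pending[0] = pending[blockSize];
                pendingCount = 1;
                return blockSize; // number of values just flushed
            }
            return 0; // nothing flushed yet
        }
    }
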
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/DirectDocValuesConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/DirectDocValuesConsumer.cs b/src/Lucene.Net.Codecs/Memory/DirectDocValuesConsumer.cs
index 0b8f258..ca2c405 100644
--- a/src/Lucene.Net.Codecs/Memory/DirectDocValuesConsumer.cs
+++ b/src/Lucene.Net.Codecs/Memory/DirectDocValuesConsumer.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Codecs.Memory
     using SegmentWriteState = Index.SegmentWriteState;
 
     /// <summary>
-    /// Writer for <seealso cref="DirectDocValuesFormat"/>
+    /// Writer for <see cref="DirectDocValuesFormat"/>.
     /// </summary>
     internal class DirectDocValuesConsumer : DocValuesConsumer
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/DirectDocValuesFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/DirectDocValuesFormat.cs b/src/Lucene.Net.Codecs/Memory/DirectDocValuesFormat.cs
index e5a9312..959906c 100644
--- a/src/Lucene.Net.Codecs/Memory/DirectDocValuesFormat.cs
+++ b/src/Lucene.Net.Codecs/Memory/DirectDocValuesFormat.cs
@@ -23,27 +23,27 @@
 
     /// <summary>
     /// In-memory docvalues format that does no (or very little)
-    ///  compression.  Indexed values are stored on disk, but
-    ///  then at search time all values are loaded into memory as
-    ///  simple java arrays.  For numeric values, it uses
-    ///  byte[], short[], int[], long[] as necessary to fit the
-    ///  range of the values.  For binary values, there is an int
-    ///  (4 bytes) overhead per value.
+    /// compression.  Indexed values are stored on disk, but
+    /// then at search time all values are loaded into memory as
+    /// simple .NET arrays.  For numeric values, it uses
+    /// byte[], short[], int[], long[] as necessary to fit the
+    /// range of the values.  For binary values, there is an <see cref="int"/>
+    /// (4 bytes) overhead per value.
     /// 
-    ///  <para>Limitations:
-    ///  <ul>
-    ///    <li>For binary and sorted fields the total space
+    /// <para>Limitations:
+    /// <list type="bullet">
+    ///    <item><description>For binary and sorted fields the total space
     ///        required for all binary values cannot exceed about
-    ///        2.1 GB (see #MAX_TOTAL_BYTES_LENGTH).</li>
+    ///        2.1 GB (see <see cref="MAX_TOTAL_BYTES_LENGTH"/>).</description></item>
     /// 
-    ///    <li>For sorted set fields, the sum of the size of each
+    ///    <item><description>For sorted set fields, the sum of the size of each
     ///        document's set of values cannot exceed about 2.1 B
-    ///        values (see #MAX_SORTED_SET_ORDS).  For example,
+    ///        values (see <see cref="MAX_SORTED_SET_ORDS"/>).  For example,
     ///        if every document has 10 values (10 instances of
-    ///        <seealso cref="SortedSetDocValuesField"/>) added, then no
+    ///        <see cref="Documents.SortedSetDocValuesField"/>) added, then no
     ///        more than ~210 M documents can be added to one
-    ///        segment. </li>
-    ///  </ul> 
+    ///        segment. </description></item>
+    /// </list> 
     /// </para>
     /// </summary>
     [DocValuesFormatName("Direct")] // LUCENENET specific - using DocValuesFormatName attribute to ensure the default name passed from subclasses is the same as this class name
@@ -51,14 +51,14 @@
     {
         /// <summary>
         /// The sum of all byte lengths for binary field, or for
-        ///  the unique values in sorted or sorted set fields, cannot
-        ///  exceed this. 
+        /// the unique values in sorted or sorted set fields, cannot
+        /// exceed this. 
         /// </summary>
         public static readonly int MAX_TOTAL_BYTES_LENGTH = ArrayUtil.MAX_ARRAY_LENGTH;
 
         /// <summary>
         /// The sum of the number of values across all documents
-        ///  in a sorted set field cannot exceed this. 
+        /// in a sorted set field cannot exceed this. 
         /// </summary>
         public static readonly int MAX_SORTED_SET_ORDS = ArrayUtil.MAX_ARRAY_LENGTH;
 

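For illustration only: given the RAM cost and the 2.1 GB / 2.1 B limits above, this format is normally opted into per field rather than globally. The sketch below assumes the per-field GetDocValuesFormatForField hook on Lucene46Codec and the field name "popularity", and it omits codec registration details; it is a usage sketch, not a recipe from this commit.

    using Lucene.Net.Codecs;
    using Lucene.Net.Codecs.Lucene46;
    using Lucene.Net.Codecs.Memory;

    // Hypothetical per-field codec: keep one hot field's doc values in RAM,
    // leave every other field on the codec default.
    public class DirectDocValuesCodec : Lucene46Codec
    {
        private readonly DocValuesFormat direct = new DirectDocValuesFormat();

        public override DocValuesFormat GetDocValuesFormatForField(string field)
        {
            return field == "popularity" ? direct : base.GetDocValuesFormatForField(field);
        }
    }

The custom codec would then typically be assigned to the IndexWriterConfig before indexing.
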
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/DirectDocValuesProducer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/DirectDocValuesProducer.cs b/src/Lucene.Net.Codecs/Memory/DirectDocValuesProducer.cs
index a216109..fe4c95a 100644
--- a/src/Lucene.Net.Codecs/Memory/DirectDocValuesProducer.cs
+++ b/src/Lucene.Net.Codecs/Memory/DirectDocValuesProducer.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Codecs.Memory
 	 */
 
     /// <summary>
-    /// TextReader for <seealso cref="DirectDocValuesFormat"/>
+    /// TextReader for <see cref="DirectDocValuesFormat"/>.
     /// </summary>
     internal class DirectDocValuesProducer : DocValuesProducer
     {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/DirectPostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/DirectPostingsFormat.cs b/src/Lucene.Net.Codecs/Memory/DirectPostingsFormat.cs
index 95211d2..084c733 100644
--- a/src/Lucene.Net.Codecs/Memory/DirectPostingsFormat.cs
+++ b/src/Lucene.Net.Codecs/Memory/DirectPostingsFormat.cs
@@ -51,27 +51,26 @@ namespace Lucene.Net.Codecs.Memory
     //   - or: longer dense skip lists than just next byte?
 
     /// <summary>
-    /// Wraps <seealso cref="Lucene41PostingsFormat"/> format for on-disk
-    ///  storage, but then at read time loads and stores all
-    ///  terms & postings directly in RAM as byte[], int[].
+    /// Wraps <see cref="Lucene41.Lucene41PostingsFormat"/> format for on-disk
+    /// storage, but then at read time loads and stores all
+    /// terms &amp; postings directly in RAM as byte[], int[].
     /// 
-    ///  <para><b>WARNING</b>: This is
-    ///  exceptionally RAM intensive: it makes no effort to
-    ///  compress the postings data, storing terms as separate
-    ///  byte[] and postings as separate int[], but as a result it 
-    ///  gives substantial increase in search performance.
+    /// <para><b>WARNING</b>: This is
+    /// exceptionally RAM intensive: it makes no effort to
+    /// compress the postings data, storing terms as separate
+    /// byte[] and postings as separate int[], but as a result it 
+    /// gives substantial increase in search performance.
     /// 
     /// </para>
-    ///  <para>This postings format supports <seealso cref="TermsEnum#ord"/>
-    ///  and <seealso cref="TermsEnum#seekExact(long)"/>.
+    /// <para>This postings format supports <see cref="TermsEnum.Ord"/>
+    /// and <see cref="TermsEnum.SeekExact(long)"/>.
     /// 
     /// </para>
-    ///  <para>Because this holds all term bytes as a single
-    ///  byte[], you cannot have more than 2.1GB worth of term
-    ///  bytes in a single segment.
-    /// 
-    /// @lucene.experimental 
+    /// <para>Because this holds all term bytes as a single
+    /// byte[], you cannot have more than 2.1GB worth of term
+    /// bytes in a single segment.
     /// </para>
+    /// @lucene.experimental 
     /// </summary>
     [PostingsFormatName("Direct")] // LUCENENET specific - using PostingsFormatName attribute to ensure the default name passed from subclasses is the same as this class name
     public sealed class DirectPostingsFormat : PostingsFormat
@@ -90,11 +89,11 @@ namespace Lucene.Net.Codecs.Memory
         }
 
         /// <summary>
-        /// minSkipCount is how many terms in a row must have the
-        ///  same prefix before we put a skip pointer down.  Terms
-        ///  with docFreq less than or equal lowFreqCutoff will use a single int[]
-        ///  to hold all docs, freqs, position and offsets; terms
-        ///  with higher docFreq will use separate arrays. 
+        /// <paramref name="minSkipCount"/> is how many terms in a row must have the
+        /// same prefix before we put a skip pointer down.  Terms
+        /// with docFreq less than or equal to <paramref name="lowFreqCutoff"/> will use a single int[]
+        /// to hold all docs, freqs, position and offsets; terms
+        /// with higher docFreq will use separate arrays. 
         /// </summary>
         public DirectPostingsFormat(int minSkipCount, int lowFreqCutoff) 
             : base()
@@ -207,7 +206,7 @@ namespace Lucene.Net.Codecs.Memory
                 private int[] skips;
 
                 /// <summary>
-                /// Returns the approximate number of RAM bytes used </summary>
+                /// Returns the approximate number of RAM bytes used. </summary>
                 public abstract long RamBytesUsed();
             }
 
@@ -362,7 +361,7 @@ namespace Lucene.Net.Codecs.Memory
             private readonly int minSkipCount;
 
             /// <summary>
-            /// NOTE: This was IntArrayWriter in Lucene
+            /// NOTE: This was IntArrayWriter in Lucene.
             /// </summary>
             private sealed class Int32ArrayWriter
             {
@@ -647,7 +646,7 @@ namespace Lucene.Net.Codecs.Memory
                 Debug.Assert(skipOffset == skipCount);
             }
 
-            /// <summary>Returns approximate RAM bytes used </summary>
+            /// <summary>Returns approximate RAM bytes used. </summary>
             public long RamBytesUsed()
             {
                 long sizeInBytes = 0;

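For illustration only: a usage sketch for the two-argument constructor documented above, again routed through a per-field codec. The Lucene46Codec hook (GetPostingsFormatForField), the field name "title", and the example values 8/32 are assumptions, not settings taken from this commit.

    using Lucene.Net.Codecs;
    using Lucene.Net.Codecs.Lucene46;
    using Lucene.Net.Codecs.Memory;

    // Hypothetical per-field codec: hold postings for the heavily searched "title"
    // field entirely in RAM; all other fields use the default postings format.
    public class DirectTitleCodec : Lucene46Codec
    {
        // minSkipCount: identical-prefix run length before a skip pointer is written.
        // lowFreqCutoff: docFreq at or below which a term packs docs/freqs/positions/offsets
        // into a single int[] (both described in the constructor summary above).
        private readonly PostingsFormat direct = new DirectPostingsFormat(minSkipCount: 8, lowFreqCutoff: 32);

        public override PostingsFormat GetPostingsFormatForField(string field)
        {
            return field == "title" ? direct : base.GetPostingsFormatForField(field);
        }
    }
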
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/FSTOrdPulsing41PostingsFormat.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/FSTOrdPulsing41PostingsFormat.cs b/src/Lucene.Net.Codecs/Memory/FSTOrdPulsing41PostingsFormat.cs
index 9cc8776..01826bb 100644
--- a/src/Lucene.Net.Codecs/Memory/FSTOrdPulsing41PostingsFormat.cs
+++ b/src/Lucene.Net.Codecs/Memory/FSTOrdPulsing41PostingsFormat.cs
@@ -26,7 +26,8 @@
 
     /// <summary>
     /// FSTOrd + Pulsing41
-    ///  @lucene.experimental 
+    /// <para/>
+    /// @lucene.experimental 
     /// </summary>
     [PostingsFormatName("FSTOrdPulsing41")] // LUCENENET specific - using PostingsFormatName attribute to ensure the default name passed from subclasses is the same as this class name
     public class FSTOrdPulsing41PostingsFormat : PostingsFormat

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/FSTOrdTermsReader.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/FSTOrdTermsReader.cs b/src/Lucene.Net.Codecs/Memory/FSTOrdTermsReader.cs
index 9d7e778..84ad53c 100644
--- a/src/Lucene.Net.Codecs/Memory/FSTOrdTermsReader.cs
+++ b/src/Lucene.Net.Codecs/Memory/FSTOrdTermsReader.cs
@@ -30,11 +30,11 @@ namespace Lucene.Net.Codecs.Memory
 
     /// <summary>
     /// FST-based terms dictionary reader.
-    /// 
+    /// <para/>
     /// The FST index maps each term and its ord, and during seek 
     /// the ord is used to fetch metadata from a single block.
     /// The term dictionary is fully memory resident.
-    /// 
+    /// <para/>
     /// @lucene.experimental
     /// </summary>
     public class FSTOrdTermsReader : FieldsProducer
@@ -377,7 +377,7 @@ namespace Lucene.Net.Codecs.Memory
                 }
 
                 /// <summary>
-                /// Decodes stats data into term state </summary>
+                /// Decodes stats data into term state. </summary>
                 internal virtual void DecodeStats()
                 {
                     int upto = (int)ord % INTERVAL;
@@ -392,7 +392,7 @@ namespace Lucene.Net.Codecs.Memory
                 }
 
                 /// <summary>
-                /// Let PBF decode metadata </summary>
+                /// Let PBF decode metadata. </summary>
                 internal virtual void DecodeMetaData()
                 {
                     int upto = (int)ord % INTERVAL;
@@ -407,7 +407,7 @@ namespace Lucene.Net.Codecs.Memory
                 }
 
                 /// <summary>
-                /// Load current stats shard </summary>
+                /// Load current stats shard. </summary>
                 internal void RefillStats()
                 {
                     var offset = statsBlockOrd * outerInstance.numSkipInfo;
@@ -436,7 +436,7 @@ namespace Lucene.Net.Codecs.Memory
                 }
 
                 /// <summary>
-                /// Load current metadata shard </summary>
+                /// Load current metadata shard. </summary>
                 internal void RefillMetadata()
                 {
                     var offset = metaBlockOrd * outerInstance.numSkipInfo;
@@ -604,10 +604,10 @@ namespace Lucene.Net.Codecs.Memory
             {
                 private readonly FSTOrdTermsReader.TermsReader outerInstance;
 
-                /// <summary>True when current term's metadata is decoded</summary>
+                /// <summary>True when current term's metadata is decoded.</summary>
                 private bool decoded;
 
-                /// <summary>True when there is pending term when calling Next()</summary>
+                /// <summary>True when there is a pending term when calling <see cref="Next()"/>.</summary>
                 private bool pending;
 
                 /// <summary>
@@ -624,7 +624,7 @@ namespace Lucene.Net.Codecs.Memory
                 private readonly FST.BytesReader fstReader;
                 private readonly Outputs<long?> fstOutputs;
 
-                /// <summary>query automaton to intersect with</summary>
+                /// <summary>Query automaton to intersect with.</summary>
                 private readonly ByteRunAutomaton fsa;
 
                 private sealed class Frame
@@ -792,7 +792,7 @@ namespace Lucene.Net.Codecs.Memory
                 }
 
                 /// <summary>
-                /// Virtual frame, never pop </summary>
+                /// Virtual frame, never pop. </summary>
                 private Frame LoadVirtualFrame(Frame frame)
                 {
                     frame.arc.Output = fstOutputs.NoOutput;
@@ -802,7 +802,7 @@ namespace Lucene.Net.Codecs.Memory
                 }
 
                 /// <summary>
-                /// Load frame for start arc(node) on fst </summary>
+                /// Load frame for start arc(node) on fst. </summary>
                 private Frame LoadFirstFrame(Frame frame)
                 {
                     frame.arc = fst.GetFirstArc(frame.arc);
@@ -811,7 +811,7 @@ namespace Lucene.Net.Codecs.Memory
                 }
 
                 /// <summary>
-                /// Load frame for target arc(node) on fst </summary>
+                /// Load frame for target arc(node) on fst. </summary>
                 private Frame LoadExpandFrame(Frame top, Frame frame)
                 {
                     if (!CanGrow(top))
@@ -829,7 +829,7 @@ namespace Lucene.Net.Codecs.Memory
                 }
 
                 /// <summary>
-                /// Load frame for sibling arc(node) on fst </summary>
+                /// Load frame for sibling arc(node) on fst. </summary>
                 private Frame LoadNextFrame(Frame top, Frame frame)
                 {
                     if (!CanRewind(frame))
@@ -855,7 +855,7 @@ namespace Lucene.Net.Codecs.Memory
 
                 /// <summary>
                 /// Load frame for target arc(node) on fst, so that 
-                ///  arc.label >= label and !fsa.reject(arc.label) 
+                /// arc.label >= label and !fsa.reject(arc.label) 
                 /// </summary>
                 private Frame LoadCeilFrame(int label, Frame top, Frame frame)
                 {

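For illustration only: the seek path above boils down to simple arithmetic on the term's ord, as in DecodeStats/RefillStats in this diff. The helper below is hypothetical (interval stands for the reader's skip interval, INTERVAL in the code); it only shows how an ord selects a skip block and a slot inside it.

    // Hypothetical helper: map a term ord onto (skip block, slot within block).
    internal static class OrdMath
    {
        public static void LocateTerm(long ord, int interval, out long block, out int slot)
        {
            block = ord / interval;        // which stats/metadata shard to refill
            slot = (int)(ord % interval);  // entry inside that shard (the "upto" in DecodeStats)
        }
    }
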
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/666de32b/src/Lucene.Net.Codecs/Memory/FSTOrdTermsWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Codecs/Memory/FSTOrdTermsWriter.cs b/src/Lucene.Net.Codecs/Memory/FSTOrdTermsWriter.cs
index ecd90db..888cb12 100644
--- a/src/Lucene.Net.Codecs/Memory/FSTOrdTermsWriter.cs
+++ b/src/Lucene.Net.Codecs/Memory/FSTOrdTermsWriter.cs
@@ -38,22 +38,24 @@ namespace Lucene.Net.Codecs.Memory
 
     /// <summary>
     /// FST-based term dict, using ord as FST output.
-    /// 
+    /// <para/>
     /// The FST holds the mapping between &lt;term, ord&gt;, and 
     /// term's metadata is delta encoded into a single byte block.
-    /// 
+    /// <para/>
     /// Typically the byte block consists of four parts:
-    /// 1. term statistics: docFreq, totalTermFreq;
-    /// 2. monotonic long[], e.g. the pointer to the postings list for that term;
-    /// 3. generic byte[], e.g. other information customized by postings base.
-    /// 4. single-level skip list to speed up metadata decoding by ord.
-    /// 
+    /// <list type="number">
+    ///     <item><description>term statistics: docFreq, totalTermFreq;</description></item>
+    ///     <item><description>monotonic long[], e.g. the pointer to the postings list for that term;</description></item>
+    ///     <item><description>generic byte[], e.g. other information customized by postings base.</description></item>
+    ///     <item><description>single-level skip list to speed up metadata decoding by ord.</description></item>
+    /// </list>
+    /// <para/>
     /// <para>
     /// Files:
-    /// <ul>
-    ///  <li><tt>.tix</tt>: <a href="#Termindex">Term Index</a></li>
-    ///  <li><tt>.tbk</tt>: <a href="#Termblock">Term Block</a></li>
-    /// </ul>
+    /// <list type="bullet">
+    ///  <item><description><c>.tix</c>: <a href="#Termindex">Term Index</a></description></item>
+    ///  <item><description><c>.tbk</c>: <a href="#Termblock">Term Block</a></description></item>
+    /// </list>
     /// </para>
     /// 
     /// <a name="Termindex" id="Termindex"></a>
@@ -63,76 +65,76 @@ namespace Lucene.Net.Codecs.Memory
     ///  The FST maps a term to its corresponding order in current field.
     /// </para>
     /// 
-    /// <ul>
-    ///  <li>TermIndex(.tix) --&gt; Header, TermFST<sup>NumFields</sup>, Footer</li>
-    ///  <li>TermFST --&gt; <seealso cref="FST"/></li>
-    ///  <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///  <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    /// </ul>
+    /// <list type="bullet">
+    ///  <item><description>TermIndex(.tix) --&gt; Header, TermFST<sup>NumFields</sup>, Footer</description></item>
+    ///  <item><description>TermFST --&gt; <see cref="FST{T}"/></description></item>
+    ///  <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///  <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(IndexOutput)"/>) </description></item>
+    /// </list>
     /// 
     /// <para>Notes:</para>
-    /// <ul>
-    ///  <li>
+    /// <list type="bullet">
+    ///  <item><description>
     ///  Since terms are already sorted before writing to <a href="#Termblock">Term Block</a>, 
     ///  their ords can be used directly to seek term metadata from the term block.
-    ///  </li>
-    /// </ul>
+    ///  </description></item>
+    /// </list>
     /// 
     /// <a name="Termblock" id="Termblock"></a>
     /// <h3>Term Block</h3>
     /// <para>
-    ///  The .tbk contains all the statistics and metadata for terms, along with field summary (e.g. 
-    ///  per-field data like number of documents in current field). For each field, there are four blocks:
-    ///  <ul>
-    ///   <li>statistics bytes block: contains term statistics; </li>
-    ///   <li>metadata longs block: delta-encodes monotonic part of metadata; </li>
-    ///   <li>metadata bytes block: encodes other parts of metadata; </li>
-    ///   <li>skip block: contains skip data, to speed up metadata seeking and decoding</li>
-    ///  </ul>
+    /// The .tbk contains all the statistics and metadata for terms, along with field summary (e.g. 
+    /// per-field data like number of documents in current field). For each field, there are four blocks:
+    /// <list type="bullet">
+    ///   <item><description>statistics bytes block: contains term statistics; </description></item>
+    ///   <item><description>metadata longs block: delta-encodes monotonic part of metadata; </description></item>
+    ///   <item><description>metadata bytes block: encodes other parts of metadata; </description></item>
+    ///   <item><description>skip block: contains skip data, to speed up metadata seeking and decoding</description></item>
+    /// </list>
     /// </para>
     /// 
     /// <para>File Format:</para>
-    /// <ul>
-    ///  <li>TermBlock(.tbk) --&gt; Header, <i>PostingsHeader</i>, FieldSummary, DirOffset</li>
-    ///  <li>FieldSummary --&gt; NumFields, &lt;FieldNumber, NumTerms, SumTotalTermFreq?, SumDocFreq,
-    ///                                         DocCount, LongsSize, DataBlock &gt; <sup>NumFields</sup>, Footer</li>
+    /// <list type="bullet">
+    ///  <item><description>TermBlock(.tbk) --&gt; Header, <i>PostingsHeader</i>, FieldSummary, DirOffset</description></item>
+    ///  <item><description>FieldSummary --&gt; NumFields, &lt;FieldNumber, NumTerms, SumTotalTermFreq?, SumDocFreq,
+    ///                                         DocCount, LongsSize, DataBlock &gt; <sup>NumFields</sup>, Footer</description></item>
     /// 
-    ///  <li>DataBlock --&gt; StatsBlockLength, MetaLongsBlockLength, MetaBytesBlockLength, 
-    ///                       SkipBlock, StatsBlock, MetaLongsBlock, MetaBytesBlock </li>
-    ///  <li>SkipBlock --&gt; &lt; StatsFPDelta, MetaLongsSkipFPDelta, MetaBytesSkipFPDelta, 
-    ///                            MetaLongsSkipDelta<sup>LongsSize</sup> &gt;<sup>NumTerms</sup></li>
-    ///  <li>StatsBlock --&gt; &lt; DocFreq[Same?], (TotalTermFreq-DocFreq) ? &gt; <sup>NumTerms</sup></li>
-    ///  <li>MetaLongsBlock --&gt; &lt; LongDelta<sup>LongsSize</sup>, BytesSize &gt; <sup>NumTerms</sup></li>
-    ///  <li>MetaBytesBlock --&gt; Byte <sup>MetaBytesBlockLength</sup></li>
-    ///  <li>Header --&gt; <seealso cref="CodecUtil#writeHeader CodecHeader"/></li>
-    ///  <li>DirOffset --&gt; <seealso cref="DataOutput#writeLong Uint64"/></li>
-    ///  <li>NumFields, FieldNumber, DocCount, DocFreq, LongsSize, 
-    ///        FieldNumber, DocCount --&gt; <seealso cref="DataOutput#writeVInt VInt"/></li>
-    ///  <li>NumTerms, SumTotalTermFreq, SumDocFreq, StatsBlockLength, MetaLongsBlockLength, MetaBytesBlockLength,
+    ///  <item><description>DataBlock --&gt; StatsBlockLength, MetaLongsBlockLength, MetaBytesBlockLength, 
+    ///                       SkipBlock, StatsBlock, MetaLongsBlock, MetaBytesBlock </description></item>
+    ///  <item><description>SkipBlock --&gt; &lt; StatsFPDelta, MetaLongsSkipFPDelta, MetaBytesSkipFPDelta, 
+    ///                            MetaLongsSkipDelta<sup>LongsSize</sup> &gt;<sup>NumTerms</sup></description></item>
+    ///  <item><description>StatsBlock --&gt; &lt; DocFreq[Same?], (TotalTermFreq-DocFreq) ? &gt; <sup>NumTerms</sup></description></item>
+    ///  <item><description>MetaLongsBlock --&gt; &lt; LongDelta<sup>LongsSize</sup>, BytesSize &gt; <sup>NumTerms</sup></description></item>
+    ///  <item><description>MetaBytesBlock --&gt; Byte <sup>MetaBytesBlockLength</sup></description></item>
+    ///  <item><description>Header --&gt; CodecHeader (<see cref="CodecUtil.WriteHeader(Store.DataOutput, string, int)"/>) </description></item>
+    ///  <item><description>DirOffset --&gt; Uint64 (<see cref="Store.DataOutput.WriteInt64(long)"/>) </description></item>
+    ///  <item><description>NumFields, FieldNumber, DocCount, DocFreq, LongsSize, 
+    ///        FieldNumber, DocCount --&gt; VInt (<see cref="Store.DataOutput.WriteVInt32(int)"/>) </description></item>
+    ///  <item><description>NumTerms, SumTotalTermFreq, SumDocFreq, StatsBlockLength, MetaLongsBlockLength, MetaBytesBlockLength,
     ///        StatsFPDelta, MetaLongsSkipFPDelta, MetaBytesSkipFPDelta, MetaLongsSkipStart, TotalTermFreq, 
-    ///        LongDelta,--&gt; <seealso cref="DataOutput#writeVLong VLong"/></li>
-    ///  <li>Footer --&gt; <seealso cref="CodecUtil#writeFooter CodecFooter"/></li>
-    /// </ul>
+    ///        LongDelta,--&gt; VLong (<see cref="Store.DataOutput.WriteVInt64(long)"/>) </description></item>
+    ///  <item><description>Footer --&gt; CodecFooter (<see cref="CodecUtil.WriteFooter(IndexOutput)"/>) </description></item>
+    /// </list>
     /// <para>Notes: </para>
-    /// <ul>
-    ///  <li>
+    /// <list type="bullet">
+    ///  <item><description>
     ///   The format of PostingsHeader and MetaBytes are customized by the specific postings implementation:
     ///   they contain arbitrary per-file data (such as parameters or versioning information), and per-term data 
     ///   (non-monotonic ones like pulsed postings data).
-    ///  </li>
-    ///  <li>
+    ///  </description></item>
+    ///  <item><description>
     ///   During initialization the reader will load all the blocks into memory. SkipBlock will be decoded, so that during seek
     ///   term dict can lookup file pointers directly. StatsFPDelta, MetaLongsSkipFPDelta, etc. are file offset
     ///   for every SkipInterval's term. MetaLongsSkipDelta is the difference from previous one, which indicates
     ///   the value of preceding metadata longs for every SkipInterval's term.
-    ///  </li>
-    ///  <li>
+    ///  </description></item>
+    ///  <item><description>
     ///   DocFreq is the count of documents which contain the term. TotalTermFreq is the total number of occurrences of the term. 
     ///   Usually these two values are the same for long tail terms, therefore one bit is stolen from DocFreq to flag this case,
     ///   so that encoding of TotalTermFreq may be omitted.
-    ///  </li>
-    /// </ul>
-    /// 
+    ///  </description></item>
+    /// </list>
+    /// <para/>
     /// @lucene.experimental 
     /// </summary>
     public class FSTOrdTermsWriter : FieldsConsumer
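
For illustration only: a sketch of the "one bit is stolen from DocFreq" note in the StatsBlock description above. This is not the shipped writer; it only shows how DocFreq[Same?] plus an optional (TotalTermFreq - DocFreq) delta can be encoded with the DataOutput calls this format documentation already references.

    using Lucene.Net.Store;

    internal static class StatsBlockSketch
    {
        // DocFreq is shifted left one bit; the low bit records whether
        // TotalTermFreq == DocFreq so the delta can be omitted for long-tail terms.
        public static void WriteTermStats(DataOutput statsOut, int docFreq, long totalTermFreq)
        {
            if (totalTermFreq == docFreq)
            {
                statsOut.WriteVInt32((docFreq << 1) | 1); // bit set: stats identical, no delta
            }
            else
            {
                statsOut.WriteVInt32(docFreq << 1);            // bit clear: a delta follows
                statsOut.WriteVInt64(totalTermFreq - docFreq); // always >= 1 when the freqs differ
            }
        }
    }
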
@@ -253,7 +255,7 @@ namespace Lucene.Net.Codecs.Memory
             public long SumDocFreq { get; set; }
             public int DocCount { get; set; }
             /// <summary>
-            /// NOTE: This was longsSize (field) in Lucene
+            /// NOTE: This was longsSize (field) in Lucene.
             /// </summary>
             public int Int64sSize { get; set; }
             public FST<long?> Dict { get; set; }
@@ -266,7 +268,7 @@ namespace Lucene.Net.Codecs.Memory
             public RAMOutputStream StatsOut { get; set; }
             // vint encode monotonic long[] and length for corresponding byte[]
             /// <summary>
-            /// NOTE: This was metaLongsOut (field) in Lucene
+            /// NOTE: This was metaLongsOut (field) in Lucene.
             /// </summary>
             public RAMOutputStream MetaInt64sOut { get; set; }
             // generic byte[]