Posted to commits@lucenenet.apache.org by ni...@apache.org on 2017/06/06 00:11:47 UTC

[14/48] lucenenet git commit: Lucene.Net.Analysis.Common: Fixed XML documentation warnings

Lucene.Net.Analysis.Common: Fixed XML documentation warnings


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/93eef424
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/93eef424
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/93eef424

Branch: refs/heads/master
Commit: 93eef424080dd74533632459d41fe846ca841a47
Parents: d7cb70c
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Sun Jun 4 03:18:09 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Sun Jun 4 03:22:58 2017 +0700

----------------------------------------------------------------------
 src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs   | 4 ++--
 .../Analysis/Bg/BulgarianAnalyzer.cs                           | 4 ++--
 .../Analysis/Br/BrazilianAnalyzer.cs                           | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs   | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs    | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Da/DanishAnalyzer.cs   | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs   | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs    | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs   | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs   | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs    | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs    | 4 ++--
 .../Analysis/Hu/HungarianAnalyzer.cs                           | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs | 4 ++--
 .../Analysis/Id/IndonesianAnalyzer.cs                          | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs  | 4 ++--
 .../Analysis/NGram/NGramTokenizer.cs                           | 6 +++---
 .../Analysis/No/NorwegianAnalyzer.cs                           | 4 ++--
 .../Analysis/No/NorwegianLightStemFilter.cs                    | 2 +-
 .../Analysis/No/NorwegianMinimalStemFilter.cs                  | 6 +++---
 .../Analysis/Pt/PortugueseAnalyzer.cs                          | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs  | 4 ++--
 src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs  | 4 ++--
 .../Analysis/Util/BufferedCharFilter.cs                        | 2 +-
 36 files changed, 72 insertions(+), 72 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs
index 095d92f..6076d11 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ar/ArabicAnalyzer.cs
@@ -127,10 +127,10 @@ namespace Lucene.Net.Analysis.Ar
         }
 
         /// <summary>
-        /// Creates <see cref="Analyzer.TokenStreamComponents"/>
+        /// Creates <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="ArabicNormalizationFilter"/>, <see cref="SetKeywordMarkerFilter"/>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Bg/BulgarianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Bg/BulgarianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Bg/BulgarianAnalyzer.cs
index cac88af..adeb9cc 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Bg/BulgarianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Bg/BulgarianAnalyzer.cs
@@ -111,11 +111,11 @@ namespace Lucene.Net.Analysis.Bg
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>, 
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs
index 1b5edf7..4252701 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Br/BrazilianAnalyzer.cs
@@ -120,10 +120,10 @@ namespace Lucene.Net.Analysis.Br
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="LowerCaseFilter"/>, <see cref="StandardFilter"/>, <see cref="StopFilter"/>,
         ///         and <see cref="BrazilianStemFilter"/>. </returns>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
index ba84523..5266f30 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ca/CatalanAnalyzer.cs
@@ -119,11 +119,11 @@ namespace Lucene.Net.Analysis.Ca
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="ElisionFilter"/>, <see cref="LowerCaseFilter"/>, 
         ///         <see cref="StopFilter"/>, <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs
index 64648e9..647645d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs
@@ -116,7 +116,7 @@ namespace Lucene.Net.Analysis.Cjk
         private bool exhausted;
 
         /// <summary>
-        /// Calls <see cref="CJKBigramFilter.CJKBigramFilter(TokenStream, int)">
+        /// Calls <see cref="CJKBigramFilter.CJKBigramFilter(TokenStream, CJKScript)">
         ///       CJKBigramFilter(@in, CJKScript.HAN | CJKScript.HIRAGANA | CJKScript.KATAKANA | CJKScript.HANGUL)</see>
         /// </summary>
         /// <param name="in">
@@ -127,7 +127,7 @@ namespace Lucene.Net.Analysis.Cjk
         }
 
         /// <summary>
-        /// Calls <see cref="CJKBigramFilter.CJKBigramFilter(TokenStream, int, bool)">
+        /// Calls <see cref="CJKBigramFilter.CJKBigramFilter(TokenStream, CJKScript, bool)">
         ///       CJKBigramFilter(in, flags, false)</see>
         /// </summary>
         /// <param name="in">

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniAnalyzer.cs
index e99d70f..2e177a7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ckb/SoraniAnalyzer.cs
@@ -109,11 +109,11 @@ namespace Lucene.Net.Analysis.Ckb
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="SoraniNormalizationFilter"/>, 
         ///         <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseAnalyzer.cs
index de0b5e7..6105ec8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cn/ChineseAnalyzer.cs
@@ -31,10 +31,10 @@ namespace Lucene.Net.Analysis.Cn
     {
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="ChineseTokenizer"/> filtered with
         ///         <see cref="ChineseFilter"/> </returns>
         protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs
index 0a4d34c..991a12f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/StopAnalyzer.cs
@@ -95,10 +95,10 @@ namespace Lucene.Net.Analysis.Core
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="LowerCaseTokenizer"/> filtered with
         ///         <see cref="StopFilter"/> </returns>
         protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs
index cffbe49..7138e72 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cz/CzechAnalyzer.cs
@@ -124,10 +124,10 @@ namespace Lucene.Net.Analysis.Cz
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         and <see cref="CzechStemFilter"/> (only if version is >= LUCENE_31). If

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Da/DanishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Da/DanishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Da/DanishAnalyzer.cs
index 453e9c6..af436da 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Da/DanishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Da/DanishAnalyzer.cs
@@ -111,11 +111,11 @@ namespace Lucene.Net.Analysis.Da
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
index 1a6a350..7af943b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
@@ -165,10 +165,10 @@ namespace Lucene.Net.Analysis.De
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
index 061ed9e..d496322 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
@@ -105,10 +105,10 @@ namespace Lucene.Net.Analysis.El
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="GreekLowerCaseFilter"/>, <see cref="StandardFilter"/>,
         ///         <see cref="StopFilter"/>, and <see cref="GreekStemFilter"/> </returns>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
index 3c6e0ff..4c4d16c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
@@ -87,11 +87,11 @@ namespace Lucene.Net.Analysis.En
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="EnglishPossessiveFilter"/>, 
         ///         <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
index b537856..655c653 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
@@ -119,11 +119,11 @@ namespace Lucene.Net.Analysis.Es
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
index 9e4a7e9..099ffdd 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
@@ -106,11 +106,11 @@ namespace Lucene.Net.Analysis.Eu
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
index 7c4fe02..df88f5a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
@@ -108,10 +108,10 @@ namespace Lucene.Net.Analysis.Fa
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="LowerCaseFilter"/>, <see cref="ArabicNormalizationFilter"/>,
         ///         <see cref="PersianNormalizationFilter"/> and Persian Stop words </returns>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs
index 18f7df4..89d67ec 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs
@@ -111,11 +111,11 @@ namespace Lucene.Net.Analysis.Fi
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
index 1d117a8..495d081 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
@@ -176,10 +176,10 @@ namespace Lucene.Net.Analysis.Fr
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="ElisionFilter"/>,
         ///         <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs
index 1d5d0ce..b88988a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ga/IrishAnalyzer.cs
@@ -124,11 +124,11 @@ namespace Lucene.Net.Analysis.Ga
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="IrishLowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs
index 7130348..fb06c84 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Gl/GalicianAnalyzer.cs
@@ -109,11 +109,11 @@ namespace Lucene.Net.Analysis.Gl
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
index 28198f2..2e465d6 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hi/HindiAnalyzer.cs
@@ -115,10 +115,10 @@ namespace Lucene.Net.Analysis.Hi
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="LowerCaseFilter"/>, <see cref="IndicNormalizationFilter"/>,
         ///         <see cref="HindiNormalizationFilter"/>, <see cref="SetKeywordMarkerFilter"/>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
index 6e7fe57..1ae25bf 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hu/HungarianAnalyzer.cs
@@ -112,11 +112,11 @@ namespace Lucene.Net.Analysis.Hu
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs
index 6611208..7242b60 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Hy/ArmenianAnalyzer.cs
@@ -107,11 +107,11 @@ namespace Lucene.Net.Analysis.Hy
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs
index eb74551..22e19ea 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Id/IndonesianAnalyzer.cs
@@ -109,10 +109,10 @@ namespace Lucene.Net.Analysis.Id
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>,
         ///         <see cref="StopFilter"/>, <see cref="SetKeywordMarkerFilter"/>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
index d428e63..058e560 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/It/ItalianAnalyzer.cs
@@ -128,11 +128,11 @@ namespace Lucene.Net.Analysis.It
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="ElisionFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
index fb643f8..2e60a58 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Lv/LatvianAnalyzer.cs
@@ -110,11 +110,11 @@ namespace Lucene.Net.Analysis.Lv
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs
index bd62835..83ad83a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/NGram/NGramTokenizer.cs
@@ -86,11 +86,11 @@ namespace Lucene.Net.Analysis.NGram
     /// <para>This tokenizer changed a lot in Lucene 4.4 in order to:
     /// <list type="bullet">
     ///     <item><description>tokenize in a streaming fashion to support streams which are larger
-    ///         than 1024 chars (limit of the previous version),</item>
+    ///         than 1024 chars (limit of the previous version),</description></item>
     ///     <item><description>count grams based on unicode code points instead of java chars (and
-    ///         never split in the middle of surrogate pairs),</item>
+    ///         never split in the middle of surrogate pairs),</description></item>
     ///     <item><description>give the ability to pre-tokenize the stream (<see cref="IsTokenChar(int)"/>)
-    ///         before computing n-grams.</item>
+    ///         before computing n-grams.</description></item>
     /// </list>
     /// </para>
     /// <para>Additionally, this class doesn't trim trailing whitespaces and emits
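
The three edits above are pure XML well-formedness fixes: every <item> in a doc-comment <list> must close its <description> element, otherwise the compiler reports a badly-formed-XML warning (CS1570). The corrected pattern, in isolation:

    /// <list type="bullet">
    ///     <item><description>a complete, closed description</description></item>
    /// </list>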

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs
index 206e45d..d22eec3 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs
@@ -111,11 +111,11 @@ namespace Lucene.Net.Analysis.No
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
index ca36da0..5df5074 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
@@ -35,7 +35,7 @@ namespace Lucene.Net.Analysis.No
         private readonly IKeywordAttribute keywordAttr;
 
         /// <summary>
-        /// Calls <see cref="NorwegianLightStemFilter.NorwegianLightStemFilter(TokenStream, int)"/>
+        /// Calls <see cref="NorwegianLightStemFilter.NorwegianLightStemFilter(TokenStream, NorwegianStandard)"/>
         /// - NorwegianLightStemFilter(input, BOKMAAL)
         /// </summary>
         /// <param name="input"> the source <see cref="TokenStream"/> to filter </param>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
index 996a8a9..7e17ed9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
@@ -35,7 +35,7 @@ namespace Lucene.Net.Analysis.No
         private readonly IKeywordAttribute keywordAttr;
 
         /// <summary>
-        /// Calls <see cref="NorwegianLightStemFilter.NorwegianLightStemFilter(TokenStream, int)"/> -
+        /// Calls <see cref="NorwegianLightStemFilter.NorwegianLightStemFilter(TokenStream, NorwegianStandard)"/> -
         /// NorwegianMinimalStemFilter(input, BOKMAAL)
         /// </summary>
         public NorwegianMinimalStemFilter(TokenStream input)
@@ -46,8 +46,8 @@ namespace Lucene.Net.Analysis.No
         /// <summary>
         /// Creates a new <see cref="NorwegianLightStemFilter"/> </summary>
         /// <param name="input"> the source <see cref="TokenStream"/> to filter </param>
-        /// <param name="flags"> set to <see cref="NorwegianLightStemmer.BOKMAAL"/>, 
-        ///                     <see cref="NorwegianLightStemmer.NYNORSK"/>, or both. </param>
+        /// <param name="flags"> set to <see cref="NorwegianStandard.BOKMAAL"/>, 
+        ///                     <see cref="NorwegianStandard.NYNORSK"/>, or both. </param>
         public NorwegianMinimalStemFilter(TokenStream input, NorwegianStandard flags)
             : base(input)
         {
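
As with the CJKBigramFilter change above, NorwegianStandard is a flags enum, so both written standards can be requested at once. A usage sketch, assuming input is an existing TokenStream:

    using Lucene.Net.Analysis.No;

    // Stem both Bokmål and Nynorsk forms in a single pass.
    var stemmed = new NorwegianMinimalStemFilter(input,
        NorwegianStandard.BOKMAAL | NorwegianStandard.NYNORSK);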

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
index 5f09576..410c58f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Pt/PortugueseAnalyzer.cs
@@ -116,11 +116,11 @@ namespace Lucene.Net.Analysis.Pt
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
         ///         , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
index 83f9b53..5212e7f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ro/RomanianAnalyzer.cs
@@ -112,11 +112,11 @@ namespace Lucene.Net.Analysis.Ro
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
index bb086a7..139a710 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ru/RussianAnalyzer.cs
@@ -142,10 +142,10 @@ namespace Lucene.Net.Analysis.Ru
 
         /// <summary>
         /// Creates
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// used to tokenize all the text in the provided <see cref="TextReader"/>.
         /// </summary>
-        /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+        /// <returns> <see cref="TokenStreamComponents"/>
         ///         built from a <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
         ///         , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs
index a54ec89..2ca5af7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Sv/SwedishAnalyzer.cs
@@ -112,11 +112,11 @@ namespace Lucene.Net.Analysis.Sv
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
         ///         <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs
index c7212a0..9d550a8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Tr/TurkishAnalyzer.cs
@@ -113,11 +113,11 @@ namespace Lucene.Net.Analysis.Tr
 
         /// <summary>
         /// Creates a
-        /// <see cref="Analyzer.TokenStreamComponents"/>
+        /// <see cref="TokenStreamComponents"/>
         /// which tokenizes all the text in the provided <see cref="TextReader"/>.
         /// </summary>
         /// <returns> A
-        ///         <see cref="Analyzer.TokenStreamComponents"/>
+        ///         <see cref="TokenStreamComponents"/>
         ///         built from an <see cref="StandardTokenizer"/> filtered with
         ///         <see cref="StandardFilter"/>, <see cref="TurkishLowerCaseFilter"/>,
         ///         <see cref="StopFilter"/>, <see cref="SetKeywordMarkerFilter"/> if a stem

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/93eef424/src/Lucene.Net.Analysis.Common/Analysis/Util/BufferedCharFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/BufferedCharFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/BufferedCharFilter.cs
index b26993a..c9c4426 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/BufferedCharFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/BufferedCharFilter.cs
@@ -489,7 +489,7 @@ namespace Lucene.Net.Analysis.Util
         /// Indicates whether this reader is ready to be read without blocking.
         /// </summary>
         /// <returns>
-        /// <c>true</c> if this reader will not block when <see cref="Read"/> is
+        /// <c>true</c> if this reader will not block when <see cref="Read()"/> is
         /// called, <c>false</c> if unknown or blocking will occur.
         /// </returns>
         public override bool IsReady
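
The added parentheses matter here: BufferedCharFilter, like the TextReader it wraps, exposes more than one Read overload, so a bare cref="Read" is ambiguous and draws a warning (CS0419). A sketch, assuming the usual TextReader overload set:

    // Ambiguous: several Read overloads exist, so this warns (CS0419).
    /// <see cref="Read"/>

    // Unambiguous: the empty parameter list names the parameterless overload.
    /// <see cref="Read()"/>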