You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucenenet.apache.org by ni...@apache.org on 2017/02/03 04:41:55 UTC
[08/13] lucenenet git commit: Lucene.Net.Analysis.Common: find and
replace for document comments - <pre class="prettyprint"> > <code>, </pre> > </code>,
<seealso cref=" > <see cref=", org.apache.lucene.analysis.Analyzer.TokenStreamComponents > Analyzer.TokenStreamComponents, <see cref="Version"/> > <see cref="LuceneVersion"/>
Lucene.Net.Analysis.Common: find and replace for document comments - <pre class="prettyprint"> > <code>, </pre> > </code>, <seealso cref=" > <see cref=", org.apache.lucene.analysis.Analyzer.TokenStreamComponents > Analyzer.TokenStreamComponents, <see cref="Version"/> > <see cref="LuceneVersion"/>
Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/31d8cbde
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/31d8cbde
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/31d8cbde
Branch: refs/heads/api-work
Commit: 31d8cbde992061bdd70a62349f2c7fcfac7733e5
Parents: 829f8ee
Author: Shad Storhaug <sh...@shadstorhaug.com>
Authored: Fri Feb 3 10:38:45 2017 +0700
Committer: Shad Storhaug <sh...@shadstorhaug.com>
Committed: Fri Feb 3 10:38:45 2017 +0700
----------------------------------------------------------------------
.../Analysis/De/GermanAnalyzer.cs | 26 ++++-----
.../Analysis/De/GermanLightStemFilter.cs | 6 +-
.../Analysis/De/GermanLightStemFilterFactory.cs | 6 +-
.../Analysis/De/GermanMinimalStemFilter.cs | 6 +-
.../De/GermanMinimalStemFilterFactory.cs | 6 +-
.../De/GermanNormalizationFilterFactory.cs | 6 +-
.../Analysis/De/GermanStemFilter.cs | 14 ++---
.../Analysis/De/GermanStemFilterFactory.cs | 6 +-
.../Analysis/El/GreekAnalyzer.cs | 22 ++++----
.../Analysis/El/GreekLowerCaseFilter.cs | 2 +-
.../Analysis/El/GreekLowerCaseFilterFactory.cs | 6 +-
.../Analysis/El/GreekStemFilter.cs | 8 +--
.../Analysis/El/GreekStemFilterFactory.cs | 6 +-
.../Analysis/El/GreekStemmer.cs | 4 +-
.../Analysis/En/EnglishAnalyzer.cs | 22 ++++----
.../Analysis/En/EnglishMinimalStemFilter.cs | 6 +-
.../En/EnglishMinimalStemFilterFactory.cs | 6 +-
.../Analysis/En/EnglishPossessiveFilter.cs | 6 +-
.../En/EnglishPossessiveFilterFactory.cs | 6 +-
.../Analysis/En/KStemFilter.cs | 8 +--
.../Analysis/En/KStemFilterFactory.cs | 6 +-
.../Analysis/En/PorterStemFilter.cs | 12 ++--
.../Analysis/En/PorterStemFilterFactory.cs | 6 +-
.../Analysis/Es/SpanishAnalyzer.cs | 22 ++++----
.../Analysis/Es/SpanishLightStemFilter.cs | 6 +-
.../Es/SpanishLightStemFilterFactory.cs | 6 +-
.../Analysis/Eu/BasqueAnalyzer.cs | 20 +++----
.../Analysis/Fa/PersianAnalyzer.cs | 20 +++----
.../Analysis/Fa/PersianCharFilterFactory.cs | 6 +-
.../Analysis/Fa/PersianNormalizationFilter.cs | 2 +-
.../Fa/PersianNormalizationFilterFactory.cs | 6 +-
.../Analysis/Fi/FinnishAnalyzer.cs | 20 +++----
.../Analysis/Fi/FinnishLightStemFilter.cs | 6 +-
.../Fi/FinnishLightStemFilterFactory.cs | 6 +-
.../Analysis/Fr/FrenchAnalyzer.cs | 26 ++++-----
.../Analysis/Fr/FrenchLightStemFilter.cs | 6 +-
.../Analysis/Fr/FrenchLightStemFilterFactory.cs | 6 +-
.../Analysis/Fr/FrenchMinimalStemFilter.cs | 6 +-
.../Fr/FrenchMinimalStemFilterFactory.cs | 6 +-
.../Analysis/Fr/FrenchStemFilter.cs | 14 ++---
.../Analysis/Fr/FrenchStemmer.cs | 2 +-
.../Analysis/Ga/IrishAnalyzer.cs | 20 +++----
.../Analysis/Ga/IrishLowerCaseFilterFactory.cs | 6 +-
.../Analysis/Gl/GalicianAnalyzer.cs | 20 +++----
.../Analysis/Gl/GalicianMinimalStemFilter.cs | 6 +-
.../Gl/GalicianMinimalStemFilterFactory.cs | 6 +-
.../Analysis/Gl/GalicianStemFilter.cs | 6 +-
.../Analysis/Gl/GalicianStemFilterFactory.cs | 6 +-
.../Analysis/Hi/HindiAnalyzer.cs | 18 +++---
.../Analysis/Hi/HindiNormalizationFilter.cs | 6 +-
.../Hi/HindiNormalizationFilterFactory.cs | 6 +-
.../Analysis/Hi/HindiStemFilter.cs | 2 +-
.../Analysis/Hi/HindiStemFilterFactory.cs | 6 +-
.../Analysis/Hu/HungarianAnalyzer.cs | 20 +++----
.../Analysis/Hu/HungarianLightStemFilter.cs | 6 +-
.../Hu/HungarianLightStemFilterFactory.cs | 6 +-
.../Analysis/Hunspell/Dictionary.cs | 10 ++--
.../Analysis/Hunspell/HunspellStemFilter.cs | 12 ++--
.../Hunspell/HunspellStemFilterFactory.cs | 6 +-
.../Analysis/Hy/ArmenianAnalyzer.cs | 20 +++----
.../Analysis/Id/IndonesianAnalyzer.cs | 20 +++----
.../Analysis/Id/IndonesianStemFilter.cs | 4 +-
.../Analysis/Id/IndonesianStemFilterFactory.cs | 6 +-
.../Analysis/In/IndicNormalizationFilter.cs | 2 +-
.../In/IndicNormalizationFilterFactory.cs | 6 +-
.../Analysis/In/IndicTokenizer.cs | 2 +-
.../Analysis/It/ItalianAnalyzer.cs | 22 ++++----
.../Analysis/It/ItalianLightStemFilter.cs | 6 +-
.../It/ItalianLightStemFilterFactory.cs | 6 +-
.../Analysis/Lv/LatvianAnalyzer.cs | 20 +++----
.../Analysis/Lv/LatvianStemFilter.cs | 6 +-
.../Analysis/Lv/LatvianStemFilterFactory.cs | 6 +-
.../Miscellaneous/ASCIIFoldingFilter.cs | 2 +-
.../Miscellaneous/ASCIIFoldingFilterFactory.cs | 6 +-
.../CapitalizationFilterFactory.cs | 6 +-
.../Miscellaneous/CodepointCountFilter.cs | 8 +--
.../CodepointCountFilterFactory.cs | 6 +-
.../Miscellaneous/HyphenatedWordsFilter.cs | 4 +-
.../HyphenatedWordsFilterFactory.cs | 6 +-
.../Analysis/Miscellaneous/KeepWordFilter.cs | 4 +-
.../Miscellaneous/KeepWordFilterFactory.cs | 6 +-
.../Miscellaneous/KeywordMarkerFilter.cs | 4 +-
.../Miscellaneous/KeywordMarkerFilterFactory.cs | 6 +-
.../Miscellaneous/KeywordRepeatFilter.cs | 4 +-
.../Miscellaneous/KeywordRepeatFilterFactory.cs | 6 +-
.../Analysis/Miscellaneous/LengthFilter.cs | 8 +--
.../Miscellaneous/LengthFilterFactory.cs | 6 +-
.../Miscellaneous/LimitTokenCountAnalyzer.cs | 2 +-
.../Miscellaneous/LimitTokenCountFilter.cs | 4 +-
.../LimitTokenCountFilterFactory.cs | 8 +--
.../Miscellaneous/LimitTokenPositionFilter.cs | 2 +-
.../LimitTokenPositionFilterFactory.cs | 8 +--
.../Lucene47WordDelimiterFilter.cs | 12 ++--
.../Analysis/Miscellaneous/PatternAnalyzer.cs | 22 ++++----
.../Miscellaneous/PatternKeywordMarkerFilter.cs | 8 +--
.../Miscellaneous/PerFieldAnalyzerWrapper.cs | 6 +-
.../PrefixAndSuffixAwareTokenFilter.cs | 2 +-
.../RemoveDuplicatesTokenFilterFactory.cs | 6 +-
.../Miscellaneous/ScandinavianFoldingFilter.cs | 2 +-
.../ScandinavianFoldingFilterFactory.cs | 6 +-
.../ScandinavianNormalizationFilter.cs | 2 +-
.../ScandinavianNormalizationFilterFactory.cs | 6 +-
.../Miscellaneous/SetKeywordMarkerFilter.cs | 6 +-
.../Miscellaneous/SingleTokenTokenStream.cs | 2 +-
.../Miscellaneous/StemmerOverrideFilter.cs | 22 ++++----
.../StemmerOverrideFilterFactory.cs | 6 +-
.../Analysis/Miscellaneous/TrimFilter.cs | 4 +-
.../Analysis/Miscellaneous/TrimFilterFactory.cs | 6 +-
.../Miscellaneous/TruncateTokenFilterFactory.cs | 6 +-
.../Miscellaneous/WordDelimiterFilter.cs | 16 +++---
.../Miscellaneous/WordDelimiterFilterFactory.cs | 6 +-
.../Miscellaneous/WordDelimiterIterator.cs | 2 +-
.../Analysis/Ngram/EdgeNGramFilterFactory.cs | 6 +-
.../Analysis/Ngram/EdgeNGramTokenFilter.cs | 14 ++---
.../Analysis/Ngram/EdgeNGramTokenizer.cs | 12 ++--
.../Analysis/Ngram/EdgeNGramTokenizerFactory.cs | 6 +-
.../Ngram/Lucene43EdgeNGramTokenizer.cs | 28 +++++-----
.../Analysis/Ngram/Lucene43NGramTokenizer.cs | 10 ++--
.../Analysis/Ngram/NGramFilterFactory.cs | 6 +-
.../Analysis/Ngram/NGramTokenFilter.cs | 18 +++---
.../Analysis/Ngram/NGramTokenizer.cs | 14 ++---
.../Analysis/Ngram/NGramTokenizerFactory.cs | 8 +--
.../Analysis/Nl/DutchAnalyzer.cs | 26 ++++-----
.../Analysis/Nl/DutchStemFilter.cs | 14 ++---
.../Analysis/Nl/DutchStemmer.cs | 2 +-
.../Analysis/No/NorwegianAnalyzer.cs | 20 +++----
.../Analysis/No/NorwegianLightStemFilter.cs | 10 ++--
.../No/NorwegianLightStemFilterFactory.cs | 6 +-
.../Analysis/No/NorwegianLightStemmer.cs | 2 +-
.../Analysis/No/NorwegianMinimalStemFilter.cs | 10 ++--
.../No/NorwegianMinimalStemFilterFactory.cs | 6 +-
.../Analysis/No/NorwegianMinimalStemmer.cs | 4 +-
.../Analysis/Path/PathHierarchyTokenizer.cs | 4 +-
.../Path/PathHierarchyTokenizerFactory.cs | 10 ++--
.../Path/ReversePathHierarchyTokenizer.cs | 4 +-
.../Pattern/PatternCaptureGroupFilterFactory.cs | 6 +-
.../Pattern/PatternCaptureGroupTokenFilter.cs | 6 +-
.../Pattern/PatternReplaceCharFilterFactory.cs | 6 +-
.../Analysis/Pattern/PatternReplaceFilter.cs | 2 +-
.../Pattern/PatternReplaceFilterFactory.cs | 6 +-
.../Analysis/Pattern/PatternTokenizer.cs | 4 +-
.../Analysis/Pattern/PatternTokenizerFactory.cs | 6 +-
.../Payloads/DelimitedPayloadTokenFilter.cs | 2 +-
.../DelimitedPayloadTokenFilterFactory.cs | 6 +-
.../Analysis/Payloads/FloatEncoder.cs | 2 +-
.../Analysis/Payloads/IntegerEncoder.cs | 4 +-
.../Payloads/NumericPayloadTokenFilter.cs | 2 +-
.../NumericPayloadTokenFilterFactory.cs | 6 +-
.../Analysis/Payloads/PayloadEncoder.cs | 6 +-
.../Analysis/Payloads/PayloadHelper.cs | 2 +-
.../Payloads/TokenOffsetPayloadTokenFilter.cs | 4 +-
.../TokenOffsetPayloadTokenFilterFactory.cs | 6 +-
.../Payloads/TypeAsPayloadTokenFilter.cs | 4 +-
.../Payloads/TypeAsPayloadTokenFilterFactory.cs | 6 +-
.../Analysis/Position/PositionFilter.cs | 2 +-
.../Analysis/Position/PositionFilterFactory.cs | 6 +-
.../Analysis/Pt/PortugueseAnalyzer.cs | 22 ++++----
.../Analysis/Pt/PortugueseLightStemFilter.cs | 6 +-
.../Pt/PortugueseLightStemFilterFactory.cs | 6 +-
.../Analysis/Pt/PortugueseMinimalStemFilter.cs | 6 +-
.../Pt/PortugueseMinimalStemFilterFactory.cs | 6 +-
.../Analysis/Pt/PortugueseStemFilter.cs | 6 +-
.../Analysis/Pt/PortugueseStemFilterFactory.cs | 6 +-
.../Analysis/Pt/RSLPStemmerBase.cs | 2 +-
.../Analysis/Query/QueryAutoStopWordAnalyzer.cs | 14 ++---
.../Analysis/Reverse/ReverseStringFilter.cs | 10 ++--
.../Reverse/ReverseStringFilterFactory.cs | 6 +-
.../Analysis/Ro/RomanianAnalyzer.cs | 20 +++----
.../Analysis/Ru/RussianAnalyzer.cs | 18 +++---
.../Analysis/Ru/RussianLetterTokenizer.cs | 24 ++++----
.../Ru/RussianLetterTokenizerFactory.cs | 2 +-
.../Analysis/Ru/RussianLightStemFilter.cs | 6 +-
.../Ru/RussianLightStemFilterFactory.cs | 6 +-
.../Analysis/Shingle/ShingleAnalyzerWrapper.cs | 6 +-
.../Analysis/Shingle/ShingleFilter.cs | 28 +++++-----
.../Analysis/Shingle/ShingleFilterFactory.cs | 6 +-
.../Analysis/Sinks/DateRecognizerSinkFilter.cs | 38 ++++++-------
.../Analysis/Sinks/TeeSinkTokenFilter.cs | 22 ++++----
.../Analysis/Snowball/SnowballAnalyzer.cs | 18 +++---
.../Analysis/Snowball/SnowballFilter.cs | 18 +++---
.../Snowball/SnowballPorterFilterFactory.cs | 6 +-
.../Analysis/Standard/ClassicAnalyzer.cs | 8 +--
.../Analysis/Standard/ClassicFilter.cs | 2 +-
.../Analysis/Standard/ClassicFilterFactory.cs | 6 +-
.../Analysis/Standard/ClassicTokenizer.cs | 6 +-
.../Standard/ClassicTokenizerFactory.cs | 6 +-
.../Analysis/Standard/StandardAnalyzer.cs | 8 +--
.../Analysis/Standard/StandardFilter.cs | 2 +-
.../Analysis/Standard/StandardFilterFactory.cs | 6 +-
.../Analysis/Standard/StandardTokenizer.cs | 8 +--
.../Standard/StandardTokenizerFactory.cs | 6 +-
.../Standard/StandardTokenizerInterface.cs | 2 +-
.../Analysis/Standard/UAX29URLEmailAnalyzer.cs | 10 ++--
.../Analysis/Standard/UAX29URLEmailTokenizer.cs | 4 +-
.../Standard/UAX29URLEmailTokenizerFactory.cs | 6 +-
.../Analysis/Sv/SwedishAnalyzer.cs | 20 +++----
.../Analysis/Sv/SwedishLightStemFilter.cs | 6 +-
.../Sv/SwedishLightStemFilterFactory.cs | 6 +-
.../Analysis/Synonym/FSTSynonymFilterFactory.cs | 4 +-
.../Analysis/Synonym/SlowSynonymFilter.cs | 2 +-
.../Synonym/SlowSynonymFilterFactory.cs | 6 +-
.../Analysis/Synonym/SlowSynonymMap.cs | 4 +-
.../Analysis/Synonym/SynonymFilter.cs | 6 +-
.../Analysis/Synonym/SynonymFilterFactory.cs | 12 ++--
.../Analysis/Synonym/SynonymMap.cs | 6 +-
.../Analysis/Th/ThaiAnalyzer.cs | 16 +++---
.../Analysis/Th/ThaiTokenizer.cs | 2 +-
.../Analysis/Th/ThaiTokenizerFactory.cs | 6 +-
.../Analysis/Th/ThaiWordFilter.cs | 6 +-
.../Analysis/Th/ThaiWordFilterFactory.cs | 8 +--
.../Analysis/Tr/ApostropheFilterFactory.cs | 6 +-
.../Analysis/Tr/TurkishAnalyzer.cs | 20 +++----
.../Tr/TurkishLowerCaseFilterFactory.cs | 6 +-
.../Analysis/Util/AbstractAnalysisFactory.cs | 16 +++---
.../Analysis/Util/AnalysisSPILoader.cs | 4 +-
.../Analysis/Util/CharArrayIterator.cs | 6 +-
.../Analysis/Util/CharArrayMap.cs | 2 +-
.../Analysis/Util/CharFilterFactory.cs | 6 +-
.../Analysis/Util/CharTokenizer.cs | 34 ++++++------
.../Analysis/Util/CharacterUtils.cs | 58 ++++++++++----------
.../Analysis/Util/ClasspathResourceLoader.cs | 4 +-
.../Analysis/Util/ElisionFilter.cs | 6 +-
.../Analysis/Util/ElisionFilterFactory.cs | 6 +-
.../Analysis/Util/FilesystemResourceLoader.cs | 10 ++--
.../Analysis/Util/FilteringTokenFilter.cs | 14 ++---
.../Analysis/Util/ResourceLoaderAware.cs | 2 +-
.../Analysis/Util/RollingCharBuffer.cs | 2 +-
.../Analysis/Util/StopwordAnalyzerBase.cs | 8 +--
.../Analysis/Util/TokenFilterFactory.cs | 6 +-
.../Analysis/Util/TokenizerFactory.cs | 6 +-
.../Analysis/Util/WordlistLoader.cs | 26 ++++-----
.../Analysis/Wikipedia/WikipediaTokenizer.cs | 14 ++---
.../Wikipedia/WikipediaTokenizerFactory.cs | 6 +-
.../Collation/CollationAttributeFactory.cs | 10 ++--
.../Collation/CollationKeyAnalyzer.cs | 16 +++---
.../Collation/CollationKeyFilter.cs | 12 ++--
.../Collation/CollationKeyFilterFactory.cs | 12 ++--
.../CollatedTermAttributeImpl.cs | 2 +-
238 files changed, 1044 insertions(+), 1044 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
index 255fa54..eae217f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanAnalyzer.cs
@@ -33,7 +33,7 @@ namespace Lucene.Net.Analysis.De
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for German language.
+ /// <see cref="Analyzer"/> for German language.
/// <para>
/// Supports an external list of stopwords (words that
/// will not be indexed at all) and an external list of exclusions (word that will
@@ -43,7 +43,7 @@ namespace Lucene.Net.Analysis.De
/// </para>
///
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating GermanAnalyzer:
/// <ul>
/// <li> As of 3.6, GermanLightStemFilter is used for less aggressive stemming.
@@ -54,8 +54,8 @@ namespace Lucene.Net.Analysis.De
/// </ul>
///
/// </para>
- /// <para><b>NOTE</b>: This class uses the same <seealso cref="Version"/>
- /// dependent settings as <seealso cref="StandardAnalyzer"/>.</para>
+ /// <para><b>NOTE</b>: This class uses the same <see cref="LuceneVersion"/>
+ /// dependent settings as <see cref="StandardAnalyzer"/>.</para>
/// </summary>
public sealed class GermanAnalyzer : StopwordAnalyzerBase
{
@@ -106,7 +106,7 @@ namespace Lucene.Net.Analysis.De
}
/// <summary>
- /// Contains the stopwords used with the <seealso cref="StopFilter"/>.
+ /// Contains the stopwords used with the <see cref="StopFilter"/>.
/// </summary>
/// <summary>
@@ -116,7 +116,7 @@ namespace Lucene.Net.Analysis.De
/// <summary>
/// Builds an analyzer with the default stop words:
- /// <seealso cref="#getDefaultStopSet()"/>.
+ /// <see cref="#getDefaultStopSet()"/>.
/// </summary>
public GermanAnalyzer(LuceneVersion matchVersion)
#pragma warning disable 612, 618
@@ -155,14 +155,14 @@ namespace Lucene.Net.Analysis.De
/// <summary>
/// Creates
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// used to tokenize all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// used to tokenize all the text in the provided <see cref="Reader"/>.
/// </summary>
- /// <returns> <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from a <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided, <seealso cref="GermanNormalizationFilter"/> and <seealso cref="GermanLightStemFilter"/> </returns>
+ /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from a <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided, <see cref="GermanNormalizationFilter"/> and <see cref="GermanLightStemFilter"/> </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilter.cs
index 480c2cf..a58138c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.De
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="GermanLightStemmer"/> to stem German
+ /// A <see cref="TokenFilter"/> that applies <see cref="GermanLightStemmer"/> to stem German
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class GermanLightStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilterFactory.cs
index dc08d57..f16956c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanLightStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.De
*/
/// <summary>
- /// Factory for <seealso cref="GermanLightStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="GermanLightStemFilter"/>.
+ /// <code>
/// <fieldType name="text_delgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.GermanLightStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class GermanLightStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilter.cs
index 424057d..84f1f4b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.De
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="GermanMinimalStemmer"/> to stem German
+ /// A <see cref="TokenFilter"/> that applies <see cref="GermanMinimalStemmer"/> to stem German
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class GermanMinimalStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilterFactory.cs
index d9a07f1..25c6cb1 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanMinimalStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.De
*/
/// <summary>
- /// Factory for <seealso cref="GermanMinimalStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="GermanMinimalStemFilter"/>.
+ /// <code>
/// <fieldType name="text_deminstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.GermanMinimalStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class GermanMinimalStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilterFactory.cs
index d44c274..85cd62d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanNormalizationFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.De
*/
/// <summary>
- /// Factory for <seealso cref="GermanNormalizationFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="GermanNormalizationFilter"/>.
+ /// <code>
/// <fieldType name="text_denorm" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.GermanNormalizationFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class GermanNormalizationFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilter.cs
index e4ea2b6..542c6a7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilter.cs
@@ -20,16 +20,16 @@ namespace Lucene.Net.Analysis.De
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that stems German words.
+ /// A <see cref="TokenFilter"/> that stems German words.
/// <para>
/// It supports a table of words that should
/// not be stemmed at all. The stemmer used can be changed at runtime after the
- /// filter object is created (as long as it is a <seealso cref="GermanStemmer"/>).
+ /// filter object is created (as long as it is a <see cref="GermanStemmer"/>).
/// </para>
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para> </summary>
/// <seealso cref= SetKeywordMarkerFilter </seealso>
public sealed class GermanStemFilter : TokenFilter
@@ -43,8 +43,8 @@ namespace Lucene.Net.Analysis.De
private readonly IKeywordAttribute keywordAttr;
/// <summary>
- /// Creates a <seealso cref="GermanStemFilter"/> instance </summary>
- /// <param name="in"> the source <seealso cref="TokenStream"/> </param>
+ /// Creates a <see cref="GermanStemFilter"/> instance </summary>
+ /// <param name="in"> the source <see cref="TokenStream"/> </param>
public GermanStemFilter(TokenStream @in)
: base(@in)
{
@@ -77,7 +77,7 @@ namespace Lucene.Net.Analysis.De
}
/// <summary>
- /// Set a alternative/custom <seealso cref="GermanStemmer"/> for this filter.
+ /// Set a alternative/custom <see cref="GermanStemmer"/> for this filter.
/// </summary>
public GermanStemmer Stemmer
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilterFactory.cs
index 5e1ccae..d182b4a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/De/GermanStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.De
*/
/// <summary>
- /// Factory for <seealso cref="GermanStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="GermanStemFilter"/>.
+ /// <code>
/// <fieldType name="text_destem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.GermanStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class GermanStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
index 0d46eba..56024bd 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekAnalyzer.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Analysis.El
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for the Greek language.
+ /// <see cref="Analyzer"/> for the Greek language.
/// <para>
/// Supports an external list of stopwords (words
/// that will not be indexed at all).
@@ -33,7 +33,7 @@ namespace Lucene.Net.Analysis.El
/// </para>
///
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating GreekAnalyzer:
/// <ul>
/// <li> As of 3.1, StandardFilter and GreekStemmer are used by default.
@@ -42,8 +42,8 @@ namespace Lucene.Net.Analysis.El
/// </ul>
///
/// </para>
- /// <para><b>NOTE</b>: This class uses the same <seealso cref="Version"/>
- /// dependent settings as <seealso cref="StandardAnalyzer"/>.</para>
+ /// <para><b>NOTE</b>: This class uses the same <see cref="LuceneVersion"/>
+ /// dependent settings as <see cref="StandardAnalyzer"/>.</para>
/// </summary>
public sealed class GreekAnalyzer : StopwordAnalyzerBase
{
@@ -95,7 +95,7 @@ namespace Lucene.Net.Analysis.El
/// Builds an analyzer with the given stop words.
/// <para>
/// <b>NOTE:</b> The stopwords set should be pre-processed with the logic of
- /// <seealso cref="GreekLowerCaseFilter"/> for best results.
+ /// <see cref="GreekLowerCaseFilter"/> for best results.
///
/// </para>
/// </summary>
@@ -109,13 +109,13 @@ namespace Lucene.Net.Analysis.El
/// <summary>
/// Creates
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// used to tokenize all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// used to tokenize all the text in the provided <see cref="Reader"/>.
/// </summary>
- /// <returns> <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from a <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="GreekLowerCaseFilter"/>, <seealso cref="StandardFilter"/>,
- /// <seealso cref="StopFilter"/>, and <seealso cref="GreekStemFilter"/> </returns>
+ /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from a <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="GreekLowerCaseFilter"/>, <see cref="StandardFilter"/>,
+ /// <see cref="StopFilter"/>, and <see cref="GreekStemFilter"/> </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
index b6d1271..559e15e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilter.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Analysis.El
/// Normalizes token text to lower case, removes some Greek diacritics,
/// and standardizes final sigma to sigma.
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating GreekLowerCaseFilter:
/// <ul>
/// <li> As of 3.1, supplementary characters are properly lowercased.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilterFactory.cs
index 2dd8d8d..2e2daee 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekLowerCaseFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.El
*/
/// <summary>
- /// Factory for <seealso cref="GreekLowerCaseFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="GreekLowerCaseFilter"/>.
+ /// <code>
/// <fieldType name="text_glc" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.GreekLowerCaseFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class GreekLowerCaseFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilter.cs
index 9fb06cc..39b77f9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilter.cs
@@ -20,17 +20,17 @@ namespace Lucene.Net.Analysis.El
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="GreekStemmer"/> to stem Greek
+ /// A <see cref="TokenFilter"/> that applies <see cref="GreekStemmer"/> to stem Greek
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// <para>
/// NOTE: Input is expected to be casefolded for Greek (including folding of final
/// sigma to sigma), and with diacritics removed. This can be achieved by using
- /// either <seealso cref="GreekLowerCaseFilter"/> or ICUFoldingFilter before GreekStemFilter.
+ /// either <see cref="GreekLowerCaseFilter"/> or ICUFoldingFilter before GreekStemFilter.
/// @lucene.experimental
/// </para>
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilterFactory.cs
index b7015c1..c09df42 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.El
*/
/// <summary>
- /// Factory for <seealso cref="GreekStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="GreekStemFilter"/>.
+ /// <code>
/// <fieldType name="text_gstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.GreekLowerCaseFilterFactory"/>
/// <filter class="solr.GreekStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class GreekStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
index dbf3289..4e6dda3 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/El/GreekStemmer.cs
@@ -27,7 +27,7 @@ namespace Lucene.Net.Analysis.El
/// <para>
/// NOTE: Input is expected to be casefolded for Greek (including folding of final
/// sigma to sigma), and with diacritics removed. This can be achieved with
- /// either <seealso cref="GreekLowerCaseFilter"/> or ICUFoldingFilter.
+ /// either <see cref="GreekLowerCaseFilter"/> or ICUFoldingFilter.
/// @lucene.experimental
/// </para>
/// </summary>
@@ -1001,7 +1001,7 @@ namespace Lucene.Net.Analysis.El
/// </summary>
/// <param name="s"> A char[] array that represents a word. </param>
/// <param name="len"> The length of the char[] array. </param>
- /// <param name="suffix"> A <seealso cref="String"/> object to check if the word given ends with these characters. </param>
+ /// <param name="suffix"> A <see cref="String"/> object to check if the word given ends with these characters. </param>
/// <returns> True if the word ends with the suffix given , false otherwise. </returns>
private bool EndsWith(char[] s, int len, string suffix)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
index f198e0c..629744b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishAnalyzer.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Analysis.En
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for English.
+ /// <see cref="Analyzer"/> for English.
/// </summary>
public sealed class EnglishAnalyzer : StopwordAnalyzerBase
{
@@ -53,7 +53,7 @@ namespace Lucene.Net.Analysis.En
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#getDefaultStopSet"/>.
+ /// Builds an analyzer with the default stop words: <see cref="#getDefaultStopSet"/>.
/// </summary>
public EnglishAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -72,7 +72,7 @@ namespace Lucene.Net.Analysis.En
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -86,16 +86,16 @@ namespace Lucene.Net.Analysis.En
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="EnglishPossessiveFilter"/>,
- /// <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="PorterStemFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="EnglishPossessiveFilter"/>,
+ /// <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="PorterStemFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilter.cs
index 752769a..5c95a9c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilter.cs
@@ -24,12 +24,12 @@ namespace Lucene.Net.Analysis.En
//using KeywordAttribute = org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="EnglishMinimalStemmer"/> to stem
+ /// A <see cref="TokenFilter"/> that applies <see cref="EnglishMinimalStemmer"/> to stem
/// English words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class EnglishMinimalStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilterFactory.cs
index 1812b4c..48fec3c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishMinimalStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.En
*/
/// <summary>
- /// Factory for <seealso cref="EnglishMinimalStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="EnglishMinimalStemFilter"/>.
+ /// <code>
/// <fieldType name="text_enminstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.EnglishMinimalStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class EnglishMinimalStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
index 1aeb6bb..c6ca5ab 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilter.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.En
/// <summary>
/// TokenFilter that removes possessives (trailing 's) from words.
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating EnglishPossessiveFilter:
/// <ul>
/// <li> As of 3.6, U+2019 RIGHT SINGLE QUOTATION MARK and
@@ -38,8 +38,8 @@ namespace Lucene.Net.Analysis.En
private readonly ICharTermAttribute termAtt;
private LuceneVersion matchVersion;
- /// @deprecated Use <seealso cref="#EnglishPossessiveFilter(Version, TokenStream)"/> instead.
- [Obsolete(@"Use <seealso cref=""#EnglishPossessiveFilter(org.apache.lucene.util.Version, org.apache.lucene.analysis.TokenStream)""/> instead.")]
+ /// @deprecated Use <see cref="#EnglishPossessiveFilter(Version, TokenStream)"/> instead.
+ [Obsolete(@"Use <see cref=""#EnglishPossessiveFilter(org.apache.lucene.util.Version, org.apache.lucene.analysis.TokenStream)""/> instead.")]
public EnglishPossessiveFilter(TokenStream input) : this(LuceneVersion.LUCENE_35, input)
{
}
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilterFactory.cs
index db82287..5718f8c 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/EnglishPossessiveFilterFactory.cs
@@ -23,15 +23,15 @@ namespace Lucene.Net.Analysis.En
//using TokenFilterFactory = org.apache.lucene.analysis.util.TokenFilterFactory;
/// <summary>
- /// Factory for <seealso cref="EnglishPossessiveFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="EnglishPossessiveFilter"/>.
+ /// <code>
/// <fieldType name="text_enpossessive" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.EnglishPossessiveFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class EnglishPossessiveFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilter.cs
index 4e7af70..4de595b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilter.cs
@@ -30,13 +30,13 @@ namespace Lucene.Net.Analysis.En
/// All terms must already be lowercased for this filter to work correctly.
///
/// <para>
- /// Note: This filter is aware of the <seealso cref="KeywordAttribute"/>. To prevent
+ /// Note: This filter is aware of the <see cref="KeywordAttribute"/>. To prevent
/// certain terms from being passed to the stemmer
- /// <seealso cref="KeywordAttribute#isKeyword()"/> should be set to <code>true</code>
- /// in a previous <seealso cref="TokenStream"/>.
+ /// <see cref="KeywordAttribute#isKeyword()"/> should be set to <code>true</code>
+ /// in a previous <see cref="TokenStream"/>.
///
/// Note: For including the original term as well as the stemmed version, see
- /// <seealso cref="org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilterFactory"/>
+ /// <see cref="org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilterFactory"/>
/// </para>
///
///
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilterFactory.cs
index 20c71ce..17374f0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/KStemFilterFactory.cs
@@ -24,15 +24,15 @@ namespace Lucene.Net.Analysis.En
//using TokenFilterFactory = org.apache.lucene.analysis.util.TokenFilterFactory;
/// <summary>
- /// Factory for <seealso cref="KStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="KStemFilter"/>.
+ /// <code>
/// <fieldType name="text_kstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.KStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class KStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilter.cs
index 822895f..af42187 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilter.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis.En
/// To use this with LowerCaseTokenizer, for example, you'd write an
/// analyzer like this:
/// <P>
- /// <PRE class="prettyprint">
+ /// <code>
/// class MyAnalyzer extends Analyzer {
/// {@literal @Override}
/// protected virtual TokenStreamComponents CreateComponents(string fieldName, TextReader reader) {
@@ -38,15 +38,15 @@ namespace Lucene.Net.Analysis.En
/// return new TokenStreamComponents(source, new PorterStemFilter(source));
/// }
/// }
- /// </PRE>
+ /// </code>
/// <para>
- /// Note: This filter is aware of the <seealso cref="KeywordAttribute"/>. To prevent
+ /// Note: This filter is aware of the <see cref="KeywordAttribute"/>. To prevent
/// certain terms from being passed to the stemmer
- /// <seealso cref="KeywordAttribute#isKeyword()"/> should be set to <code>true</code>
- /// in a previous <seealso cref="TokenStream"/>.
+ /// <see cref="KeywordAttribute#isKeyword()"/> should be set to <code>true</code>
+ /// in a previous <see cref="TokenStream"/>.
///
/// Note: For including the original term as well as the stemmed version, see
- /// <seealso cref="org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilterFactory"/>
+ /// <see cref="org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilterFactory"/>
/// </para>
/// </summary>
public sealed class PorterStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilterFactory.cs
index 27217e1..2cc4831 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/En/PorterStemFilterFactory.cs
@@ -23,15 +23,15 @@ namespace Lucene.Net.Analysis.En
//using TokenFilterFactory = org.apache.lucene.analysis.util.TokenFilterFactory;
/// <summary>
- /// Factory for <seealso cref="PorterStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="PorterStemFilter"/>.
+ /// <code>
/// <fieldType name="text_porterstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.PorterStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class PorterStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
index 387ae1e..edcde59 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishAnalyzer.cs
@@ -29,11 +29,11 @@ namespace Lucene.Net.Analysis.Es
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Spanish.
+ /// <see cref="Analyzer"/> for Spanish.
/// <para>
/// <a name="version"/>
/// </para>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating SpanishAnalyzer:
/// <ul>
/// <li> As of 3.6, SpanishLightStemFilter is used for less aggressive stemming.
@@ -88,7 +88,7 @@ namespace Lucene.Net.Analysis.Es
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public SpanishAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -107,7 +107,7 @@ namespace Lucene.Net.Analysis.Es
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -121,15 +121,15 @@ namespace Lucene.Net.Analysis.Es
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="SpanishLightStemFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="SpanishLightStemFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilter.cs
index 8587fbd..b7810fc 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.Es
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="SpanishLightStemmer"/> to stem Spanish
+ /// A <see cref="TokenFilter"/> that applies <see cref="SpanishLightStemmer"/> to stem Spanish
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class SpanishLightStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilterFactory.cs
index 5088e0a..31ef33b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Es/SpanishLightStemFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Es
*/
/// <summary>
- /// Factory for <seealso cref="SpanishLightStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="SpanishLightStemFilter"/>.
+ /// <code>
/// <fieldType name="text_eslgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.SpanishLightStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class SpanishLightStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
index d575922..b6c20dc 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Eu/BasqueAnalyzer.cs
@@ -28,7 +28,7 @@ namespace Lucene.Net.Analysis.Eu
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Basque.
+ /// <see cref="Analyzer"/> for Basque.
/// </summary>
public sealed class BasqueAnalyzer : StopwordAnalyzerBase
{
@@ -74,7 +74,7 @@ namespace Lucene.Net.Analysis.Eu
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public BasqueAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -93,7 +93,7 @@ namespace Lucene.Net.Analysis.Eu
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -107,15 +107,15 @@ namespace Lucene.Net.Analysis.Eu
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="SnowballFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="SnowballFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
index 853d202..97943be 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianAnalyzer.cs
@@ -26,9 +26,9 @@ namespace Lucene.Net.Analysis.Fa
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Persian.
+ /// <see cref="Analyzer"/> for Persian.
/// <para>
- /// This Analyzer uses <seealso cref="PersianCharFilter"/> which implies tokenizing around
+ /// This Analyzer uses <see cref="PersianCharFilter"/> which implies tokenizing around
/// zero-width non-joiner in addition to whitespace. Some persian-specific variant forms (such as farsi
/// yeh and keheh) are standardized. "Stemming" is accomplished via stopwords.
/// </para>
@@ -89,7 +89,7 @@ namespace Lucene.Net.Analysis.Fa
/// <summary>
/// Builds an analyzer with the default stop words:
- /// <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// <see cref="#DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public PersianAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -110,13 +110,13 @@ namespace Lucene.Net.Analysis.Fa
/// <summary>
/// Creates
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// used to tokenize all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// used to tokenize all the text in the provided <see cref="Reader"/>.
/// </summary>
- /// <returns> <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from a <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="LowerCaseFilter"/>, <seealso cref="ArabicNormalizationFilter"/>,
- /// <seealso cref="PersianNormalizationFilter"/> and Persian Stop words </returns>
+ /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from a <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="LowerCaseFilter"/>, <see cref="ArabicNormalizationFilter"/>,
+ /// <see cref="PersianNormalizationFilter"/> and Persian Stop words </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source;
@@ -144,7 +144,7 @@ namespace Lucene.Net.Analysis.Fa
}
/// <summary>
- /// Wraps the TextReader with <seealso cref="PersianCharFilter"/>
+ /// Wraps the TextReader with <see cref="PersianCharFilter"/>
/// </summary>
protected override TextReader InitReader(string fieldName, TextReader reader)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilterFactory.cs
index 5083bf9..260b530 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianCharFilterFactory.cs
@@ -22,14 +22,14 @@ namespace Lucene.Net.Analysis.Fa
*/
/// <summary>
- /// Factory for <seealso cref="PersianCharFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="PersianCharFilter"/>.
+ /// <code>
/// <fieldType name="text_fa" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <charFilter class="solr.PersianCharFilterFactory"/>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class PersianCharFilterFactory : CharFilterFactory, IMultiTermAwareComponent
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilter.cs
index f1ff394..f3338ab 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilter.cs
@@ -20,7 +20,7 @@ namespace Lucene.Net.Analysis.Fa
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="PersianNormalizer"/> to normalize the
+ /// A <see cref="TokenFilter"/> that applies <see cref="PersianNormalizer"/> to normalize the
/// orthography.
///
/// </summary>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilterFactory.cs
index e18ef09..be205af 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fa/PersianNormalizationFilterFactory.cs
@@ -21,15 +21,15 @@ namespace Lucene.Net.Analysis.Fa
*/
/// <summary>
- /// Factory for <seealso cref="PersianNormalizationFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="PersianNormalizationFilter"/>.
+ /// <code>
/// <fieldType name="text_fanormal" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <charFilter class="solr.PersianCharFilterFactory"/>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.PersianNormalizationFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class PersianNormalizationFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs
index 6b3e850..28af6f5 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishAnalyzer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Fi
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Finnish.
+ /// <see cref="Analyzer"/> for Finnish.
/// </summary>
public sealed class FinnishAnalyzer : StopwordAnalyzerBase
{
@@ -79,7 +79,7 @@ namespace Lucene.Net.Analysis.Fi
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public FinnishAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -98,7 +98,7 @@ namespace Lucene.Net.Analysis.Fi
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -112,15 +112,15 @@ namespace Lucene.Net.Analysis.Fi
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="TextReader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="SnowballFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="SnowballFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilter.cs
index d1fa00a..2082eb0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilter.cs
@@ -21,12 +21,12 @@ namespace Lucene.Net.Analysis.Fi
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="FinnishLightStemmer"/> to stem Finnish
+ /// A <see cref="TokenFilter"/> that applies <see cref="FinnishLightStemmer"/> to stem Finnish
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class FinnishLightStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilterFactory.cs
index 411809c..ce982c5 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fi/FinnishLightStemFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.Fi
*/
/// <summary>
- /// Factory for <seealso cref="FinnishLightStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="FinnishLightStemFilter"/>.
+ /// <code>
/// <fieldType name="text_filgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.FinnishLightStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class FinnishLightStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
index 1f82309..b9c01d2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchAnalyzer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Fr
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for French language.
+ /// <see cref="Analyzer"/> for French language.
/// <para>
/// Supports an external list of stopwords (words that
/// will not be indexed at all) and an external list of exclusions (word that will
@@ -39,7 +39,7 @@ namespace Lucene.Net.Analysis.Fr
/// </para>
///
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating FrenchAnalyzer:
/// <ul>
/// <li> As of 3.6, FrenchLightStemFilter is used for less aggressive stemming.
@@ -51,8 +51,8 @@ namespace Lucene.Net.Analysis.Fr
/// </ul>
///
/// </para>
- /// <para><b>NOTE</b>: This class uses the same <seealso cref="Version"/>
- /// dependent settings as <seealso cref="StandardAnalyzer"/>.</para>
+ /// <para><b>NOTE</b>: This class uses the same <see cref="LuceneVersion"/>
+ /// dependent settings as <see cref="StandardAnalyzer"/>.</para>
/// </summary>
public sealed class FrenchAnalyzer : StopwordAnalyzerBase
{
@@ -118,7 +118,7 @@ namespace Lucene.Net.Analysis.Fr
}
/// <summary>
- /// Builds an analyzer with the default stop words (<seealso cref="#getDefaultStopSet"/>).
+ /// Builds an analyzer with the default stop words (<see cref="DefaultStopSet"/>).
/// </summary>
public FrenchAnalyzer(LuceneVersion matchVersion)
#pragma warning disable 612, 618
@@ -157,15 +157,15 @@ namespace Lucene.Net.Analysis.Fr
/// <summary>
/// Creates
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// used to tokenize all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// used to tokenize all the text in the provided <see cref="TextReader"/>.
/// </summary>
- /// <returns> <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from a <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="ElisionFilter"/>,
- /// <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>,
- /// <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided, and <seealso cref="FrenchLightStemFilter"/> </returns>
+ /// <returns> <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from a <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="ElisionFilter"/>,
+ /// <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>,
+ /// <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided, and <see cref="FrenchLightStemFilter"/> </returns>
///
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilter.cs
index e82433e..cd97757 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.Fr
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="FrenchLightStemmer"/> to stem French
+ /// A <see cref="TokenFilter"/> that applies <see cref="FrenchLightStemmer"/> to stem French
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class FrenchLightStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilterFactory.cs
index 9bc0dd8..548489b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchLightStemFilterFactory.cs
@@ -21,8 +21,8 @@ namespace Lucene.Net.Analysis.Fr
*/
/// <summary>
- /// Factory for <seealso cref="FrenchLightStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="FrenchLightStemFilter"/>.
+ /// <code>
/// <fieldType name="text_frlgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis.Fr
/// <filter class="solr.ElisionFilterFactory"/>
/// <filter class="solr.FrenchLightStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class FrenchLightStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilter.cs
index f18fe1f..2cdf579 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilter.cs
@@ -20,12 +20,12 @@ namespace Lucene.Net.Analysis.Fr
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="FrenchMinimalStemmer"/> to stem French
+ /// A <see cref="TokenFilter"/> that applies <see cref="FrenchMinimalStemmer"/> to stem French
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class FrenchMinimalStemFilter : TokenFilter
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilterFactory.cs
index d9cc419..ef587d9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchMinimalStemFilterFactory.cs
@@ -20,8 +20,8 @@ namespace Lucene.Net.Analysis.Fr
*/
/// <summary>
- /// Factory for <seealso cref="FrenchMinimalStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="FrenchMinimalStemFilter"/>.
+ /// <code>
/// <fieldType name="text_frminstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Fr
/// <filter class="solr.ElisionFilterFactory"/>
/// <filter class="solr.FrenchMinimalStemFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class FrenchMinimalStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemFilter.cs
index 151e82b..f74b10a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Fr/FrenchStemFilter.cs
@@ -21,19 +21,19 @@ namespace Lucene.Net.Analysis.Fr
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that stems french words.
+ /// A <see cref="TokenFilter"/> that stems french words.
/// <para>
/// The used stemmer can be changed at runtime after the
- /// filter object is created (as long as it is a <seealso cref="FrenchStemmer"/>).
+ /// filter object is created (as long as it is a <see cref="FrenchStemmer"/>).
/// </para>
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="KeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="KeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para> </summary>
/// <seealso cref="KeywordMarkerFilter"/>
- /// @deprecated (3.1) Use <seealso cref="SnowballFilter"/> with
- /// <seealso cref="org.tartarus.snowball.ext.FrenchStemmer"/> instead, which has the
+ /// @deprecated (3.1) Use <see cref="SnowballFilter"/> with
+ /// <see cref="Tartarus.Snowball.Ext.FrenchStemmer"/> instead, which has the
/// same functionality. This filter will be removed in Lucene 5.0
[Obsolete("(3.1) Use SnowballFilter with FrenchStemmer instead, which has the same functionality. This filter will be removed in Lucene 5.0")]
public sealed class FrenchStemFilter : TokenFilter
@@ -79,7 +79,7 @@ namespace Lucene.Net.Analysis.Fr
}
}
/// <summary>
- /// Set a alternative/custom <seealso cref="FrenchStemmer"/> for this filter.
+ /// Set an alternative/custom <see cref="FrenchStemmer"/> for this filter.
/// </summary>
public FrenchStemmer Stemmer
{