You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucenenet.apache.org by ni...@apache.org on 2017/02/03 04:41:53 UTC
[06/13] lucenenet git commit: Lucene.Net.Analysis.Common: find and
replace for document comments - <seealso> -> <see>,
<pre> -> <code>, Analyzer.T
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/Lucene47WordDelimiterFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/Lucene47WordDelimiterFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/Lucene47WordDelimiterFilter.cs
index d46b6c5..15bf32a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/Lucene47WordDelimiterFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/Lucene47WordDelimiterFilter.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Old Broken version of <seealso cref="WordDelimiterFilter"/>
+ /// Old Broken version of <see cref="WordDelimiterFilter"/>
/// </summary>
[Obsolete]
public sealed class Lucene47WordDelimiterFilter : TokenFilter
@@ -170,7 +170,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Creates a new WordDelimiterFilter using <seealso cref="WordDelimiterIterator#DEFAULT_WORD_DELIM_TABLE"/>
+ /// Creates a new WordDelimiterFilter using <see cref="WordDelimiterIterator#DEFAULT_WORD_DELIM_TABLE"/>
/// as its charTypeTable
/// </summary>
/// <param name="in"> TokenStream to be filtered </param>
@@ -460,7 +460,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Checks if the given word type includes <seealso cref="#ALPHA"/>
+ /// Checks if the given word type includes <see cref="#ALPHA"/>
/// </summary>
/// <param name="type"> Word type to check </param>
/// <returns> {@code true} if the type contains ALPHA, {@code false} otherwise </returns>
@@ -470,7 +470,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Checks if the given word type includes <seealso cref="#DIGIT"/>
+ /// Checks if the given word type includes <see cref="#DIGIT"/>
/// </summary>
/// <param name="type"> Word type to check </param>
/// <returns> {@code true} if the type contains DIGIT, {@code false} otherwise </returns>
@@ -480,7 +480,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Checks if the given word type includes <seealso cref="#SUBWORD_DELIM"/>
+ /// Checks if the given word type includes <see cref="#SUBWORD_DELIM"/>
/// </summary>
/// <param name="type"> Word type to check </param>
/// <returns> {@code true} if the type contains SUBWORD_DELIM, {@code false} otherwise </returns>
@@ -490,7 +490,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Checks if the given word type includes <seealso cref="#UPPER"/>
+ /// Checks if the given word type includes <see cref="#UPPER"/>
/// </summary>
/// <param name="type"> Word type to check </param>
/// <returns> {@code true} if the type contains UPPER, {@code false} otherwise </returns>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
index a3e6a9e..e33b446 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternAnalyzer.cs
@@ -28,32 +28,32 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// <summary>
/// Efficient Lucene analyzer/tokenizer that preferably operates on a String rather than a
- /// <seealso cref="TextReader"/>, that can flexibly separate text into terms via a regular expression <seealso cref="Pattern"/>
- /// (with behaviour identical to <seealso cref="String#split(String)"/>),
+ /// <see cref="TextReader"/>, that can flexibly separate text into terms via a regular expression <see cref="Pattern"/>
+ /// (with behaviour identical to <see cref="String#split(String)"/>),
/// and that combines the functionality of
- /// <seealso cref="LetterTokenizer"/>,
- /// <seealso cref="LowerCaseTokenizer"/>,
- /// <seealso cref="WhitespaceTokenizer"/>,
- /// <seealso cref="StopFilter"/> into a single efficient
+ /// <see cref="LetterTokenizer"/>,
+ /// <see cref="LowerCaseTokenizer"/>,
+ /// <see cref="WhitespaceTokenizer"/>,
+ /// <see cref="StopFilter"/> into a single efficient
/// multi-purpose class.
/// <para>
/// If you are unsure how exactly a regular expression should look like, consider
/// prototyping by simply trying various expressions on some test texts via
- /// <seealso cref="String#split(String)"/>. Once you are satisfied, give that regex to
+ /// <see cref="String#split(String)"/>. Once you are satisfied, give that regex to
/// PatternAnalyzer. Also see <a target="_blank"
/// href="http://java.sun.com/docs/books/tutorial/extra/regex/">Java Regular Expression Tutorial</a>.
/// </para>
/// <para>
/// This class can be considerably faster than the "normal" Lucene tokenizers.
/// It can also serve as a building block in a compound Lucene
- /// <seealso cref="TokenFilter"/> chain. For example as in this
+ /// <see cref="TokenFilter"/> chain. For example as in this
/// stemming example:
/// <pre>
/// PatternAnalyzer pat = ...
/// TokenStream tokenStream = new SnowballFilter(
/// pat.tokenStream("content", "James is running round in the woods"),
/// "English"));
- /// </pre>
+ /// </code>
/// </para>
/// </summary>
/// @deprecated (4.0) use the pattern-based analysis in the analysis/pattern package instead.
@@ -152,8 +152,8 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// if non-null, ignores all tokens that are contained in the
/// given stop set (after previously having applied toLowerCase()
/// if applicable). For example, created via
- /// <seealso cref="StopFilter#makeStopSet(Version, String[])"/>and/or
- /// <seealso cref="WordlistLoader"/>as in
+ /// <see cref="StopFilter#makeStopSet(Version, String[])"/>and/or
+ /// <see cref="WordlistLoader"/>as in
/// <code>WordlistLoader.getWordSet(new File("samples/fulltext/stopwords.txt")</code>
/// or <a href="http://www.unine.ch/info/clef/">other stop words
/// lists </a>. </param>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternKeywordMarkerFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternKeywordMarkerFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternKeywordMarkerFilter.cs
index 5180127..200a934 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternKeywordMarkerFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PatternKeywordMarkerFilter.cs
@@ -21,9 +21,9 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Marks terms as keywords via the <seealso cref="KeywordAttribute"/>. Each token
+ /// Marks terms as keywords via the <see cref="KeywordAttribute"/>. Each token
/// that matches the provided pattern is marked as a keyword by setting
- /// <seealso cref="KeywordAttribute#setKeyword(boolean)"/> to <code>true</code>.
+ /// <see cref="KeywordAttribute#setKeyword(boolean)"/> to <code>true</code>.
/// </summary>
public sealed class PatternKeywordMarkerFilter : KeywordMarkerFilter
{
@@ -32,9 +32,9 @@ namespace Lucene.Net.Analysis.Miscellaneous
private readonly Regex pattern;
/// <summary>
- /// Create a new <seealso cref="PatternKeywordMarkerFilter"/>, that marks the current
+ /// Create a new <see cref="PatternKeywordMarkerFilter"/>, that marks the current
/// token as a keyword if the tokens term buffer matches the provided
- /// <seealso cref="Pattern"/> via the <seealso cref="KeywordAttribute"/>.
+ /// <see cref="Pattern"/> via the <see cref="KeywordAttribute"/>.
/// </summary>
/// <param name="in">
/// TokenStream to filter </param>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PerFieldAnalyzerWrapper.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PerFieldAnalyzerWrapper.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PerFieldAnalyzerWrapper.cs
index 8a73498..32e9fa0 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PerFieldAnalyzerWrapper.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PerFieldAnalyzerWrapper.cs
@@ -25,12 +25,12 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// <summary>
/// This analyzer is used to facilitate scenarios where different
/// fields Require different analysis techniques. Use the Map
- /// argument in <seealso cref="#PerFieldAnalyzerWrapper(Analyzer, java.util.Map)"/>
+ /// argument in <see cref="#PerFieldAnalyzerWrapper(Analyzer, java.util.Map)"/>
/// to add non-default analyzers for fields.
///
/// <para>Example usage:
///
- /// <pre class="prettyprint">
+ /// <code>
/// {@code
/// Map<String,Analyzer> analyzerPerField = new HashMap<>();
/// analyzerPerField.put("firstname", new KeywordAnalyzer());
@@ -39,7 +39,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// PerFieldAnalyzerWrapper aWrapper =
/// new PerFieldAnalyzerWrapper(new StandardAnalyzer(version), analyzerPerField);
/// }
- /// </pre>
+ /// </code>
///
/// </para>
/// <para>In this example, StandardAnalyzer will be used for all fields except "firstname"
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs
index 826e05b..f968659 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/PrefixAndSuffixAwareTokenFilter.cs
@@ -18,7 +18,7 @@
*/
/// <summary>
- /// Links two <seealso cref="PrefixAwareTokenFilter"/>.
+ /// Links two <see cref="PrefixAwareTokenFilter"/>.
/// <p/>
/// <b>NOTE:</b> This filter might not behave correctly if used with custom Attributes, i.e. Attributes other than
/// the ones located in org.apache.lucene.analysis.tokenattributes.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/RemoveDuplicatesTokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/RemoveDuplicatesTokenFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/RemoveDuplicatesTokenFilterFactory.cs
index 83b3ca7..1554866 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/RemoveDuplicatesTokenFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/RemoveDuplicatesTokenFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="RemoveDuplicatesTokenFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="RemoveDuplicatesTokenFilter"/>.
+ /// <code>
/// <fieldType name="text_rmdup" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class RemoveDuplicatesTokenFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilter.cs
index 833e751..51b115a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilter.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// This filter folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o.
/// It also discriminate against use of double vowels aa, ae, ao, oe and oo, leaving just the first one.
/// <p/>
- /// It's is a semantically more destructive solution than <seealso cref="ScandinavianNormalizationFilter"/> but
+ /// It's is a semantically more destructive solution than <see cref="ScandinavianNormalizationFilter"/> but
/// can in addition help with matching raksmorgas as räksmörgås.
/// <p/>
/// blåbærsyltetøj == blåbärsyltetöj == blaabaarsyltetoej == blabarsyltetoj
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilterFactory.cs
index ed182a3..c6930b2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianFoldingFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="ScandinavianFoldingFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="ScandinavianFoldingFilter"/>.
+ /// <code>
/// <fieldType name="text_scandfold" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.ScandinavianFoldingFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class ScandinavianFoldingFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilter.cs
index 502eaef..5ad937b 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilter.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// This filter normalize use of the interchangeable Scandinavian characters æÆäÄöÖøØ
/// and folded variants (aa, ao, ae, oe and oo) by transforming them to åÅæÆøØ.
/// <p/>
- /// It's a semantically less destructive solution than <seealso cref="ScandinavianFoldingFilter"/>,
+ /// It's a semantically less destructive solution than <see cref="ScandinavianFoldingFilter"/>,
/// most useful when a person with a Norwegian or Danish keyboard queries a Swedish index
/// and vice versa. This filter does <b>not</b> the common Swedish folds of å and ä to a nor ö to o.
/// <p/>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilterFactory.cs
index 1068d08..e5a5832 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/ScandinavianNormalizationFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="org.apache.lucene.analysis.miscellaneous.ScandinavianNormalizationFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="org.apache.lucene.analysis.miscellaneous.ScandinavianNormalizationFilter"/>.
+ /// <code>
/// <fieldType name="text_scandnorm" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.ScandinavianNormalizationFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class ScandinavianNormalizationFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SetKeywordMarkerFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SetKeywordMarkerFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SetKeywordMarkerFilter.cs
index fdddf81..f4adbfe 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SetKeywordMarkerFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SetKeywordMarkerFilter.cs
@@ -21,9 +21,9 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Marks terms as keywords via the <seealso cref="KeywordAttribute"/>. Each token
+ /// Marks terms as keywords via the <see cref="KeywordAttribute"/>. Each token
/// contained in the provided set is marked as a keyword by setting
- /// <seealso cref="KeywordAttribute#setKeyword(boolean)"/> to <code>true</code>.
+ /// <see cref="KeywordAttribute#setKeyword(boolean)"/> to <code>true</code>.
/// </summary>
public sealed class SetKeywordMarkerFilter : KeywordMarkerFilter
{
@@ -33,7 +33,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// <summary>
/// Create a new KeywordSetMarkerFilter, that marks the current token as a
/// keyword if the tokens term buffer is contained in the given set via the
- /// <seealso cref="KeywordAttribute"/>.
+ /// <see cref="KeywordAttribute"/>.
/// </summary>
/// <param name="in">
/// TokenStream to filter </param>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SingleTokenTokenStream.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SingleTokenTokenStream.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SingleTokenTokenStream.cs
index 15d2c5e..f2c00ce 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SingleTokenTokenStream.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/SingleTokenTokenStream.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// A <seealso cref="TokenStream"/> containing a single token.
+ /// A <see cref="TokenStream"/> containing a single token.
/// </summary>
public sealed class SingleTokenTokenStream : TokenStream
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilter.cs
index a3b5bea..0e09209 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilter.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Provides the ability to override any <seealso cref="KeywordAttribute"/> aware stemmer
+ /// Provides the ability to override any <see cref="KeywordAttribute"/> aware stemmer
/// with custom dictionary-based stemming.
/// </summary>
public sealed class StemmerOverrideFilter : TokenFilter
@@ -42,7 +42,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// Create a new StemmerOverrideFilter, performing dictionary-based stemming
/// with the provided <code>dictionary</code>.
/// <para>
- /// Any dictionary-stemmed terms will be marked with <seealso cref="KeywordAttribute"/>
+ /// Any dictionary-stemmed terms will be marked with <see cref="KeywordAttribute"/>
/// so that they will not be stemmed with stemmers down the chain.
/// </para>
/// </summary>
@@ -89,7 +89,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// <summary>
/// A read-only 4-byte FST backed map that allows fast case-insensitive key
- /// value lookups for <seealso cref="StemmerOverrideFilter"/>
+ /// value lookups for <see cref="StemmerOverrideFilter"/>
/// </summary>
// TODO maybe we can generalize this and reuse this map somehow?
public sealed class StemmerOverrideMap
@@ -98,7 +98,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
internal readonly bool ignoreCase;
/// <summary>
- /// Creates a new <seealso cref="StemmerOverrideMap"/> </summary>
+ /// Creates a new <see cref="StemmerOverrideMap"/> </summary>
/// <param name="fst"> the fst to lookup the overrides </param>
/// <param name="ignoreCase"> if the keys case should be ingored </param>
public StemmerOverrideMap(FST<BytesRef> fst, bool ignoreCase)
@@ -108,7 +108,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Returns a <seealso cref="BytesReader"/> to pass to the <seealso cref="#get(char[], int, FST.Arc, FST.BytesReader)"/> method.
+ /// Returns a <see cref="BytesReader"/> to pass to the <see cref="#get(char[], int, FST.Arc, FST.BytesReader)"/> method.
/// </summary>
public FST.BytesReader BytesReader
{
@@ -153,7 +153,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// This builder builds an <seealso cref="FST"/> for the <seealso cref="StemmerOverrideFilter"/>
+ /// This builder builds an <see cref="FST"/> for the <see cref="StemmerOverrideFilter"/>
/// </summary>
public class Builder
{
@@ -164,7 +164,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
internal readonly CharsRef charsSpare = new CharsRef();
/// <summary>
- /// Creates a new <seealso cref="Builder"/> with ignoreCase set to <code>false</code>
+ /// Creates a new <see cref="Builder"/> with ignoreCase set to <code>false</code>
/// </summary>
public Builder()
: this(false)
@@ -172,7 +172,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Creates a new <seealso cref="Builder"/> </summary>
+ /// Creates a new <see cref="Builder"/> </summary>
/// <param name="ignoreCase"> if the input case should be ignored. </param>
public Builder(bool ignoreCase)
{
@@ -212,9 +212,9 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Returns an <seealso cref="StemmerOverrideMap"/> to be used with the <seealso cref="StemmerOverrideFilter"/> </summary>
- /// <returns> an <seealso cref="StemmerOverrideMap"/> to be used with the <seealso cref="StemmerOverrideFilter"/> </returns>
- /// <exception cref="IOException"> if an <seealso cref="IOException"/> occurs; </exception>
+ /// Returns an <see cref="StemmerOverrideMap"/> to be used with the <see cref="StemmerOverrideFilter"/> </summary>
+ /// <returns> an <see cref="StemmerOverrideMap"/> to be used with the <see cref="StemmerOverrideFilter"/> </returns>
+ /// <exception cref="IOException"> if an <see cref="IOException"/> occurs; </exception>
public virtual StemmerOverrideMap Build()
{
ByteSequenceOutputs outputs = ByteSequenceOutputs.Singleton;
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilterFactory.cs
index 6934c91..e0c9323 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/StemmerOverrideFilterFactory.cs
@@ -23,14 +23,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="StemmerOverrideFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="StemmerOverrideFilter"/>.
+ /// <code>
/// <fieldType name="text_dicstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.StemmerOverrideFilterFactory" dictionary="dictionary.txt" ignoreCase="false"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class StemmerOverrideFilterFactory : TokenFilterFactory, IResourceLoaderAware
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs
index aec9f50..98539c7 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs
@@ -34,7 +34,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
private readonly IOffsetAttribute offsetAtt;
/// <summary>
- /// Create a new <seealso cref="TrimFilter"/>. </summary>
+ /// Create a new <see cref="TrimFilter"/>. </summary>
/// <param name="version"> the Lucene match version </param>
/// <param name="in"> the stream to consume </param>
/// <param name="updateOffsets"> whether to update offsets </param>
@@ -53,7 +53,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Create a new <seealso cref="TrimFilter"/> on top of <code>in</code>. </summary>
+ /// Create a new <see cref="TrimFilter"/> on top of <code>in</code>. </summary>
public TrimFilter(LuceneVersion version, TokenStream @in)
#pragma warning disable 612, 618
: this(version, @in, false)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilterFactory.cs
index 1b47ea7..d091842 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="TrimFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="TrimFilter"/>.
+ /// <code>
/// <fieldType name="text_trm" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.NGramTokenizerFactory"/>
/// <filter class="solr.TrimFilterFactory" />
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
/// <seealso cref= TrimFilter </seealso>
public class TrimFilterFactory : TokenFilterFactory
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TruncateTokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TruncateTokenFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TruncateTokenFilterFactory.cs
index a3577af..2b738ec 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TruncateTokenFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TruncateTokenFilterFactory.cs
@@ -21,8 +21,8 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter"/>. The following type is recommended for "<i>diacritics-insensitive search</i>" for Turkish.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter"/>. The following type is recommended for "<i>diacritics-insensitive search</i>" for Turkish.
+ /// <code>
/// <fieldType name="text_tr_ascii_f5" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
@@ -33,7 +33,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// <filter class="solr.TruncateTokenFilterFactory" prefixLength="5"/>
/// <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class TruncateTokenFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
index 77f643e..3c639d2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
@@ -64,14 +64,14 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// </ul>
/// </li>
/// </ul>
- /// One use for <seealso cref="WordDelimiterFilter"/> is to help match words with different
+ /// One use for <see cref="WordDelimiterFilter"/> is to help match words with different
/// subword delimiters. For example, if the source text contained "wi-fi" one may
/// want "wifi" "WiFi" "wi-fi" "wi+fi" queries to all match. One way of doing so
/// is to specify combinations="1" in the analyzer used for indexing, and
/// combinations="0" (the default) in the analyzer used for querying. Given that
- /// the current <seealso cref="StandardTokenizer"/> immediately removes many intra-word
+ /// the current <see cref="StandardTokenizer"/> immediately removes many intra-word
/// delimiters, it is recommended that this filter be used after a tokenizer that
- /// does not do this (such as <seealso cref="WhitespaceTokenizer"/>).
+ /// does not do this (such as <see cref="WhitespaceTokenizer"/>).
/// </summary>
public sealed class WordDelimiterFilter : TokenFilter
{
@@ -225,7 +225,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Creates a new WordDelimiterFilter using <seealso cref="WordDelimiterIterator#DEFAULT_WORD_DELIM_TABLE"/>
+ /// Creates a new WordDelimiterFilter using <see cref="WordDelimiterIterator#DEFAULT_WORD_DELIM_TABLE"/>
/// as its charTypeTable
/// </summary>
/// <param name="in"> TokenStream to be filtered </param>
@@ -605,7 +605,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Checks if the given word type includes <seealso cref="#ALPHA"/>
+ /// Checks if the given word type includes <see cref="#ALPHA"/>
/// </summary>
/// <param name="type"> Word type to check </param>
/// <returns> {@code true} if the type contains ALPHA, {@code false} otherwise </returns>
@@ -615,7 +615,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Checks if the given word type includes <seealso cref="#DIGIT"/>
+ /// Checks if the given word type includes <see cref="#DIGIT"/>
/// </summary>
/// <param name="type"> Word type to check </param>
/// <returns> {@code true} if the type contains DIGIT, {@code false} otherwise </returns>
@@ -625,7 +625,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Checks if the given word type includes <seealso cref="#SUBWORD_DELIM"/>
+ /// Checks if the given word type includes <see cref="#SUBWORD_DELIM"/>
/// </summary>
/// <param name="type"> Word type to check </param>
/// <returns> {@code true} if the type contains SUBWORD_DELIM, {@code false} otherwise </returns>
@@ -635,7 +635,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
}
/// <summary>
- /// Checks if the given word type includes <seealso cref="#UPPER"/>
+ /// Checks if the given word type includes <see cref="#UPPER"/>
/// </summary>
/// <param name="type"> Word type to check </param>
/// <returns> {@code true} if the type contains UPPER, {@code false} otherwise </returns>
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs
index ce4959c..a0cc42d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs
@@ -27,8 +27,8 @@ namespace Lucene.Net.Analysis.Miscellaneous
*/
/// <summary>
- /// Factory for <seealso cref="WordDelimiterFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="WordDelimiterFilter"/>.
+ /// <code>
/// <fieldType name="text_wd" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -38,7 +38,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// generateWordParts="1" generateNumberParts="1" stemEnglishPossessive="1"
/// types="wdfftypes.txt" />
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class WordDelimiterFilterFactory : TokenFilterFactory, IResourceLoaderAware
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterIterator.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterIterator.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterIterator.cs
index 3fe61b6..f507cf2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterIterator.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterIterator.cs
@@ -124,7 +124,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// <summary>
/// Advance to the next subword in the string.
/// </summary>
- /// <returns> index of the next subword, or <seealso cref="#DONE"/> if all subwords have been returned </returns>
+ /// <returns> index of the next subword, or <see cref="#DONE"/> if all subwords have been returned </returns>
internal int Next()
{
current = end;
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramFilterFactory.cs
index a740241..2efb5fc 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Ngram
*/
/// <summary>
- /// Creates new instances of <seealso cref="EdgeNGramTokenFilter"/>.
- /// <pre class="prettyprint">
+ /// Creates new instances of <see cref="EdgeNGramTokenFilter"/>.
+ /// <code>
/// <fieldType name="text_edgngrm" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.EdgeNGramFilterFactory" minGramSize="1" maxGramSize="1"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class EdgeNGramFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenFilter.cs
index 6224080..01677cf 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenFilter.cs
@@ -25,10 +25,10 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Tokenizes the given token into n-grams of given size(s).
/// <para>
- /// This <seealso cref="TokenFilter"/> create n-grams from the beginning edge or ending edge of a input token.
+ /// This <see cref="TokenFilter"/> creates n-grams from the beginning edge or ending edge of an input token.
/// </para>
/// <para><a name="version"/>As of Lucene 4.4, this filter does not support
- /// <seealso cref="Side#BACK"/> (you can use <seealso cref="ReverseStringFilter"/> up-front and
+ /// <see cref="Side#BACK"/> (you can use <see cref="ReverseStringFilter"/> up-front and
/// afterward to get the same behavior), handles supplementary characters
/// correctly and does not update offsets anymore.
/// </para>
@@ -89,8 +89,8 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenFilter that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="input"> <seealso cref="TokenStream"/> holding the input to be tokenized </param>
- /// <param name="side"> the <seealso cref="Side"/> from which to chop off an n-gram </param>
+ /// <param name="input"> <see cref="TokenStream"/> holding the input to be tokenized </param>
+ /// <param name="side"> the <see cref="Side"/> from which to chop off an n-gram </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
[Obsolete]
@@ -139,8 +139,8 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenFilter that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="input"> <seealso cref="TokenStream"/> holding the input to be tokenized </param>
- /// <param name="sideLabel"> the name of the <seealso cref="Side"/> from which to chop off an n-gram </param>
+ /// <param name="input"> <see cref="TokenStream"/> holding the input to be tokenized </param>
+ /// <param name="sideLabel"> the name of the <see cref="Side"/> from which to chop off an n-gram </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
[Obsolete]
@@ -153,7 +153,7 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenFilter that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="input"> <seealso cref="TokenStream"/> holding the input to be tokenized </param>
+ /// <param name="input"> <see cref="TokenStream"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public EdgeNGramTokenFilter(LuceneVersion version, TokenStream input, int minGram, int maxGram)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizer.cs
index a4fc18f..09ad7f8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizer.cs
@@ -23,19 +23,19 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Tokenizes the input from an edge into n-grams of given size(s).
/// <para>
- /// This <seealso cref="Tokenizer"/> create n-grams from the beginning edge or ending edge of a input token.
+ /// This <see cref="Tokenizer"/> creates n-grams from the beginning edge or ending edge of an input token.
/// </para>
/// <para><a name="version" /> As of Lucene 4.4, this tokenizer<ul>
/// <li>can handle <code>maxGram</code> larger than 1024 chars, but beware that this will result in increased memory usage
/// <li>doesn't trim the input,
/// <li>sets position increments equal to 1 instead of 1 for the first token and 0 for all other ones
/// <li>doesn't support backward n-grams anymore.
- /// <li>supports <seealso cref="#isTokenChar(int) pre-tokenization"/>,
+ /// <li>supports <see cref="#isTokenChar(int) pre-tokenization"/>,
/// <li>correctly handles supplementary characters.
/// </ul>
/// </para>
/// <para>Although <b style="color:red">highly</b> discouraged, it is still possible
- /// to use the old behavior through <seealso cref="Lucene43EdgeNGramTokenizer"/>.
+ /// to use the old behavior through <see cref="Lucene43EdgeNGramTokenizer"/>.
/// </para>
/// </summary>
public class EdgeNGramTokenizer : NGramTokenizer
@@ -47,7 +47,7 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
+ /// <param name="input"> <see cref="Reader"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public EdgeNGramTokenizer(LuceneVersion version, TextReader input, int minGram, int maxGram)
@@ -59,8 +59,8 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="factory"> <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/> to use </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
+ /// <param name="factory"> <see cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/> to use </param>
+ /// <param name="input"> <see cref="Reader"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public EdgeNGramTokenizer(LuceneVersion version, AttributeSource.AttributeFactory factory, TextReader input, int minGram, int maxGram)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizerFactory.cs
index aebf551..5273ae4 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/EdgeNGramTokenizerFactory.cs
@@ -24,13 +24,13 @@ namespace Lucene.Net.Analysis.Ngram
*/
/// <summary>
- /// Creates new instances of <seealso cref="EdgeNGramTokenizer"/>.
- /// <pre class="prettyprint">
+ /// Creates new instances of <see cref="EdgeNGramTokenizer"/>.
+ /// <code>
/// <fieldType name="text_edgngrm" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.EdgeNGramTokenizerFactory" minGramSize="1" maxGramSize="1"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class EdgeNGramTokenizerFactory : TokenizerFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43EdgeNGramTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43EdgeNGramTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43EdgeNGramTokenizer.cs
index d6f29c2..3ed7187 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43EdgeNGramTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43EdgeNGramTokenizer.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Analysis.Ngram
*/
/// <summary>
- /// Old version of <seealso cref="EdgeNGramTokenizer"/> which doesn't handle correctly
+ /// Old version of <see cref="EdgeNGramTokenizer"/> which doesn't handle correctly
/// supplementary characters.
/// </summary>
[Obsolete]
@@ -76,8 +76,8 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
- /// <param name="side"> the <seealso cref="Side"/> from which to chop off an n-gram </param>
+ /// <param name="input"> <see cref="Reader"/> holding the input to be tokenized </param>
+ /// <param name="side"> the <see cref="Side"/> from which to chop off an n-gram </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
[Obsolete]
@@ -91,9 +91,9 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="factory"> <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/> to use </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
- /// <param name="side"> the <seealso cref="Side"/> from which to chop off an n-gram </param>
+ /// <param name="factory"> <see cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/> to use </param>
+ /// <param name="input"> <see cref="Reader"/> holding the input to be tokenized </param>
+ /// <param name="side"> the <see cref="Side"/> from which to chop off an n-gram </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
[Obsolete]
@@ -107,8 +107,8 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
- /// <param name="sideLabel"> the name of the <seealso cref="Side"/> from which to chop off an n-gram </param>
+ /// <param name="input"> <see cref="Reader"/> holding the input to be tokenized </param>
+ /// <param name="sideLabel"> the name of the <see cref="Side"/> from which to chop off an n-gram </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
[Obsolete]
@@ -121,9 +121,9 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="factory"> <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/> to use </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
- /// <param name="sideLabel"> the name of the <seealso cref="Side"/> from which to chop off an n-gram </param>
+ /// <param name="factory"> <see cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/> to use </param>
+ /// <param name="input"> <see cref="Reader"/> holding the input to be tokenized </param>
+ /// <param name="sideLabel"> the name of the <see cref="Side"/> from which to chop off an n-gram </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
[Obsolete]
@@ -136,7 +136,7 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
+ /// <param name="input"> <see cref="Reader"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public Lucene43EdgeNGramTokenizer(LuceneVersion version, TextReader input, int minGram, int maxGram)
@@ -148,8 +148,8 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
/// </summary>
/// <param name="version"> the <a href="#version">Lucene match version</a> </param>
- /// <param name="factory"> <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/> to use </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
+ /// <param name="factory"> <see cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/> to use </param>
+ /// <param name="input"> <see cref="Reader"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public Lucene43EdgeNGramTokenizer(LuceneVersion version, AttributeFactory factory, TextReader input, int minGram, int maxGram)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43NGramTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43NGramTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43NGramTokenizer.cs
index 5d8d410..a0f210a 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43NGramTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/Lucene43NGramTokenizer.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Analysis.Ngram
*/
/// <summary>
- /// Old broken version of <seealso cref="NGramTokenizer"/>.
+ /// Old broken version of <see cref="NGramTokenizer"/>.
/// </summary>
[Obsolete]
public sealed class Lucene43NGramTokenizer : Tokenizer
@@ -43,7 +43,7 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Creates NGramTokenizer with given min and max n-grams. </summary>
- /// <param name="input"> <seealso cref="TextReader"/> holding the input to be tokenized </param>
+ /// <param name="input"> <see cref="TextReader"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public Lucene43NGramTokenizer(TextReader input, int minGram, int maxGram)
@@ -54,8 +54,8 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Creates NGramTokenizer with given min and max n-grams. </summary>
- /// <param name="factory"> <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/> to use </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
+ /// <param name="factory"> <see cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/> to use </param>
+ /// <param name="input"> <see cref="Reader"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public Lucene43NGramTokenizer(AttributeFactory factory, TextReader input, int minGram, int maxGram)
@@ -66,7 +66,7 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Creates NGramTokenizer with default min and max n-grams. </summary>
- /// <param name="input"> <seealso cref="TextReader"/> holding the input to be tokenized </param>
+ /// <param name="input"> <see cref="TextReader"/> holding the input to be tokenized </param>
public Lucene43NGramTokenizer(TextReader input)
: this(input, DEFAULT_MIN_NGRAM_SIZE, DEFAULT_MAX_NGRAM_SIZE)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramFilterFactory.cs
index 3de3466..3c9f738 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramFilterFactory.cs
@@ -21,14 +21,14 @@ namespace Lucene.Net.Analysis.Ngram
*/
/// <summary>
- /// Factory for <seealso cref="NGramTokenFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="NGramTokenFilter"/>.
+ /// <code>
/// <fieldType name="text_ngrm" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.WhitespaceTokenizerFactory"/>
/// <filter class="solr.NGramFilterFactory" minGramSize="1" maxGramSize="2"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class NGramFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenFilter.cs
index 10cd39c..561e575 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenFilter.cs
@@ -25,8 +25,8 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Tokenizes the input into n-grams of the given size(s).
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/> compatibility when
- /// creating a <seealso cref="NGramTokenFilter"/>. As of Lucene 4.4, this token filters:<ul>
+ /// <para>You must specify the required <see cref="LuceneVersion"/> compatibility when
+ /// creating a <see cref="NGramTokenFilter"/>. As of Lucene 4.4, this token filters:<ul>
/// <li>handles supplementary characters correctly,</li>
/// <li>emits all n-grams for the same token at the same position,</li>
/// <li>does not modify offsets,</li>
@@ -35,14 +35,14 @@ namespace Lucene.Net.Analysis.Ngram
/// "c").</li></ul>
/// </para>
/// <para>You can make this filter use the old behavior by providing a version <
- /// <seealso cref="Version#LUCENE_44"/> in the constructor but this is not recommended as
- /// it will lead to broken <seealso cref="TokenStream"/>s that will cause highlighting
+ /// <see cref="Version#LUCENE_44"/> in the constructor but this is not recommended as
+ /// it will lead to broken <see cref="TokenStream"/>s that will cause highlighting
/// bugs.
/// </para>
- /// <para>If you were using this <seealso cref="TokenFilter"/> to perform partial highlighting,
+ /// <para>If you were using this <see cref="TokenFilter"/> to perform partial highlighting,
/// this won't work anymore since this filter doesn't update offsets. You should
- /// modify your analysis chain to use <seealso cref="NGramTokenizer"/>, and potentially
- /// override <seealso cref="NGramTokenizer#isTokenChar(int)"/> to perform pre-tokenization.
+ /// modify your analysis chain to use <see cref="NGramTokenizer"/>, and potentially
+ /// override <see cref="NGramTokenizer#isTokenChar(int)"/> to perform pre-tokenization.
/// </para>
/// </summary>
public sealed class NGramTokenFilter : TokenFilter
@@ -73,7 +73,7 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates NGramTokenFilter with given min and max n-grams. </summary>
/// <param name="version"> Lucene version to enable correct position increments.
/// See <a href="#version">above</a> for details. </param>
- /// <param name="input"> <seealso cref="TokenStream"/> holding the input to be tokenized </param>
+ /// <param name="input"> <see cref="TokenStream"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public NGramTokenFilter(LuceneVersion version, TokenStream input, int minGram, int maxGram)
@@ -157,7 +157,7 @@ namespace Lucene.Net.Analysis.Ngram
/// Creates NGramTokenFilter with default min and max n-grams. </summary>
/// <param name="version"> Lucene version to enable correct position increments.
/// See <a href="#version">above</a> for details. </param>
- /// <param name="input"> <seealso cref="TokenStream"/> holding the input to be tokenized </param>
+ /// <param name="input"> <see cref="TokenStream"/> holding the input to be tokenized </param>
public NGramTokenFilter(LuceneVersion version, TokenStream input)
: this(version, input, DEFAULT_MIN_NGRAM_SIZE, DEFAULT_MAX_NGRAM_SIZE)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizer.cs
index bae9f38..acc42c3 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizer.cs
@@ -27,7 +27,7 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Tokenizes the input into n-grams of the given size(s).
- /// <para>On the contrary to <seealso cref="NGramTokenFilter"/>, this class sets offsets so
+ /// <para>On the contrary to <see cref="NGramTokenFilter"/>, this class sets offsets so
/// that characters between startOffset and endOffset in the original stream are
/// the same as the term chars.
/// </para>
@@ -45,7 +45,7 @@ namespace Lucene.Net.Analysis.Ngram
/// than 1024 chars (limit of the previous version),
/// <li>count grams based on unicode code points instead of java chars (and
/// never split in the middle of surrogate pairs),
- /// <li>give the ability to <seealso cref="#isTokenChar(int) pre-tokenize"/> the stream
+ /// <li>give the ability to <see cref="#isTokenChar(int) pre-tokenize"/> the stream
/// before computing n-grams.</ul>
/// </para>
/// <para>Additionally, this class doesn't trim trailing whitespaces and emits
@@ -54,7 +54,7 @@ namespace Lucene.Net.Analysis.Ngram
/// from supporting large input streams).
/// </para>
/// <para>Although <b style="color:red">highly</b> discouraged, it is still possible
- /// to use the old behavior through <seealso cref="Lucene43NGramTokenizer"/>.
+ /// to use the old behavior through <see cref="Lucene43NGramTokenizer"/>.
/// </para>
/// </summary>
// non-final to allow for overriding isTokenChar, but all other methods should be final
@@ -89,7 +89,7 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Creates NGramTokenizer with given min and max n-grams. </summary>
/// <param name="version"> the lucene compatibility <a href="#version">version</a> </param>
- /// <param name="input"> <seealso cref="TextReader"/> holding the input to be tokenized </param>
+ /// <param name="input"> <see cref="TextReader"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public NGramTokenizer(LuceneVersion version, TextReader input, int minGram, int maxGram)
@@ -106,8 +106,8 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Creates NGramTokenizer with given min and max n-grams. </summary>
/// <param name="version"> the lucene compatibility <a href="#version">version</a> </param>
- /// <param name="factory"> <seealso cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/> to use </param>
- /// <param name="input"> <seealso cref="Reader"/> holding the input to be tokenized </param>
+ /// <param name="factory"> <see cref="org.apache.lucene.util.AttributeSource.AttributeFactory"/> to use </param>
+ /// <param name="input"> <see cref="Reader"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public NGramTokenizer(LuceneVersion version, AttributeFactory factory, TextReader input, int minGram, int maxGram)
@@ -118,7 +118,7 @@ namespace Lucene.Net.Analysis.Ngram
/// <summary>
/// Creates NGramTokenizer with default min and max n-grams. </summary>
/// <param name="version"> the lucene compatibility <a href="#version">version</a> </param>
- /// <param name="input"> <seealso cref="TextReader"/> holding the input to be tokenized </param>
+ /// <param name="input"> <see cref="TextReader"/> holding the input to be tokenized </param>
public NGramTokenizer(LuceneVersion version, TextReader input)
: this(version, input, DEFAULT_MIN_NGRAM_SIZE, DEFAULT_MAX_NGRAM_SIZE)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizerFactory.cs
index 6aaab8b..73865fb 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Ngram/NGramTokenizerFactory.cs
@@ -23,13 +23,13 @@ namespace Lucene.Net.Analysis.Ngram
*/
/// <summary>
- /// Factory for <seealso cref="NGramTokenizer"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="NGramTokenizer"/>.
+ /// <code>
/// <fieldType name="text_ngrm" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.NGramTokenizerFactory" minGramSize="1" maxGramSize="2"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class NGramTokenizerFactory : TokenizerFactory
{
@@ -50,7 +50,7 @@ namespace Lucene.Net.Analysis.Ngram
}
/// <summary>
- /// Creates the <seealso cref="TokenStream"/> of n-grams from the given <seealso cref="TextReader"/> and <seealso cref="AttributeSource.AttributeFactory"/>. </summary>
+ /// Creates the <see cref="TokenStream"/> of n-grams from the given <see cref="TextReader"/> and <see cref="AttributeSource.AttributeFactory"/>. </summary>
public override Tokenizer Create(AttributeSource.AttributeFactory factory, TextReader input)
{
#pragma warning disable 612, 618
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
index a3430a9..d38d922 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchAnalyzer.cs
@@ -28,7 +28,7 @@ namespace Lucene.Net.Analysis.Nl
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Dutch language.
+ /// <see cref="Analyzer"/> for Dutch language.
/// <para>
/// Supports an external list of stopwords (words that
/// will not be indexed at all), an external list of exclusions (word that will
@@ -39,11 +39,11 @@ namespace Lucene.Net.Analysis.Nl
/// </para>
///
/// <a name="version"/>
- /// <para>You must specify the required <seealso cref="Version"/>
+ /// <para>You must specify the required <see cref="LuceneVersion"/>
/// compatibility when creating DutchAnalyzer:
/// <ul>
- /// <li> As of 3.6, <seealso cref="#DutchAnalyzer(Version, CharArraySet)"/> and
- /// <seealso cref="#DutchAnalyzer(Version, CharArraySet, CharArraySet)"/> also populate
+ /// <li> As of 3.6, <see cref="#DutchAnalyzer(Version, CharArraySet)"/> and
+ /// <see cref="#DutchAnalyzer(Version, CharArraySet, CharArraySet)"/> also populate
/// the default entries for the stem override dictionary
/// <li> As of 3.1, Snowball stemming is done with SnowballFilter,
/// LowerCaseFilter is used prior to StopFilter, and Snowball
@@ -53,8 +53,8 @@ namespace Lucene.Net.Analysis.Nl
/// </ul>
///
/// </para>
- /// <para><b>NOTE</b>: This class uses the same <seealso cref="Version"/>
- /// dependent settings as <seealso cref="StandardAnalyzer"/>.</para>
+ /// <para><b>NOTE</b>: This class uses the same <see cref="LuceneVersion"/>
+ /// dependent settings as <see cref="StandardAnalyzer"/>.</para>
/// </summary>
public sealed class DutchAnalyzer : Analyzer
{
@@ -123,7 +123,7 @@ namespace Lucene.Net.Analysis.Nl
private readonly LuceneVersion matchVersion;
/// <summary>
- /// Builds an analyzer with the default stop words (<seealso cref="#getDefaultStopSet()"/>)
+ /// Builds an analyzer with the default stop words (<see cref="#getDefaultStopSet()"/>)
/// and a few default entries for the stem exclusion table.
///
/// </summary>
@@ -192,13 +192,13 @@ namespace Lucene.Net.Analysis.Nl
}
/// <summary>
- /// Returns a (possibly reused) <seealso cref="TokenStream"/> which tokenizes all the
- /// text in the provided <seealso cref="Reader"/>.
+ /// Returns a (possibly reused) <see cref="TokenStream"/> which tokenizes all the
+ /// text in the provided <see cref="Reader"/>.
/// </summary>
- /// <returns> A <seealso cref="TokenStream"/> built from a <seealso cref="StandardTokenizer"/>
- /// filtered with <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>,
- /// <seealso cref="StopFilter"/>, <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is provided,
- /// <seealso cref="StemmerOverrideFilter"/>, and <seealso cref="SnowballFilter"/> </returns>
+ /// <returns> A <see cref="TokenStream"/> built from a <see cref="StandardTokenizer"/>
+ /// filtered with <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>,
+ /// <see cref="StopFilter"/>, <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is provided,
+ /// <see cref="StemmerOverrideFilter"/>, and <see cref="SnowballFilter"/> </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader aReader)
{
#pragma warning disable 612, 618
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemFilter.cs
index 53eb7ac..ba3d181 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemFilter.cs
@@ -23,20 +23,20 @@ namespace Lucene.Net.Analysis.Nl
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that stems Dutch words.
+ /// A <see cref="TokenFilter"/> that stems Dutch words.
/// <para>
/// It supports a table of words that should
/// not be stemmed at all. The stemmer used can be changed at runtime after the
- /// filter object is created (as long as it is a <seealso cref="DutchStemmer"/>).
+ /// filter object is created (as long as it is a <see cref="DutchStemmer"/>).
/// </para>
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="KeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="KeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para> </summary>
/// <seealso cref= KeywordMarkerFilter </seealso>
- /// @deprecated (3.1) Use <seealso cref="SnowballFilter"/> with
- /// <seealso cref="org.tartarus.snowball.ext.DutchStemmer"/> instead, which has the
+ /// @deprecated (3.1) Use <see cref="SnowballFilter"/> with
+ /// <see cref="org.tartarus.snowball.ext.DutchStemmer"/> instead, which has the
/// same functionality. This filter will be removed in Lucene 5.0
[Obsolete("(3.1) Use SnowballFilter with DutchStemmer instead, which has the same functionality. This filter will be removed in Lucene 5.0")]
public sealed class DutchStemFilter : TokenFilter
@@ -90,7 +90,7 @@ namespace Lucene.Net.Analysis.Nl
}
/// <summary>
- /// Set a alternative/custom <seealso cref="DutchStemmer"/> for this filter.
+ /// Set a alternative/custom <see cref="DutchStemmer"/> for this filter.
/// </summary>
public DutchStemmer Stemmer
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemmer.cs
index 6b7f003..036b761 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Nl/DutchStemmer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Nl
/// the <a href="http://snowball.tartarus.org/algorithms/dutch/stemmer.html">dutch stemming</a>
/// algorithm in Martin Porter's snowball project.
/// </para> </summary>
- /// @deprecated (3.1) Use <seealso cref="org.tartarus.snowball.ext.DutchStemmer"/> instead,
+ /// @deprecated (3.1) Use <see cref="org.tartarus.snowball.ext.DutchStemmer"/> instead,
/// which has the same functionality. This filter will be removed in Lucene 5.0
[Obsolete("(3.1) Use Tartarus.Snowball.Ext.DutchStemmer instead, which has the same functionality. This filter will be removed in Lucene 5.0")]
public class DutchStemmer
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs
index d1edb2d..6d3b1dd 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianAnalyzer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.No
*/
/// <summary>
- /// <seealso cref="Analyzer"/> for Norwegian.
+ /// <see cref="Analyzer"/> for Norwegian.
/// </summary>
public sealed class NorwegianAnalyzer : StopwordAnalyzerBase
{
@@ -79,7 +79,7 @@ namespace Lucene.Net.Analysis.No
}
/// <summary>
- /// Builds an analyzer with the default stop words: <seealso cref="#DEFAULT_STOPWORD_FILE"/>.
+ /// Builds an analyzer with the default stop words: <see cref="#DEFAULT_STOPWORD_FILE"/>.
/// </summary>
public NorwegianAnalyzer(LuceneVersion matchVersion)
: this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET)
@@ -98,7 +98,7 @@ namespace Lucene.Net.Analysis.No
/// <summary>
/// Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
- /// provided this analyzer will add a <seealso cref="SetKeywordMarkerFilter"/> before
+ /// provided this analyzer will add a <see cref="SetKeywordMarkerFilter"/> before
/// stemming.
/// </summary>
/// <param name="matchVersion"> lucene compatibility version </param>
@@ -112,15 +112,15 @@ namespace Lucene.Net.Analysis.No
/// <summary>
/// Creates a
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// which tokenizes all the text in the provided <seealso cref="Reader"/>.
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// which tokenizes all the text in the provided <see cref="Reader"/>.
/// </summary>
/// <returns> A
- /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
- /// built from an <seealso cref="StandardTokenizer"/> filtered with
- /// <seealso cref="StandardFilter"/>, <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>
- /// , <seealso cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
- /// provided and <seealso cref="SnowballFilter"/>. </returns>
+ /// <see cref="Analyzer.TokenStreamComponents"/>
+ /// built from an <see cref="StandardTokenizer"/> filtered with
+ /// <see cref="StandardFilter"/>, <see cref="LowerCaseFilter"/>, <see cref="StopFilter"/>
+ /// , <see cref="SetKeywordMarkerFilter"/> if a stem exclusion set is
+ /// provided and <see cref="SnowballFilter"/>. </returns>
protected override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
{
Tokenizer source = new StandardTokenizer(m_matchVersion, reader);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
index d967988..6d0e0c1 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilter.cs
@@ -21,12 +21,12 @@ namespace Lucene.Net.Analysis.No
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="NorwegianLightStemmer"/> to stem Norwegian
+ /// A <see cref="TokenFilter"/> that applies <see cref="NorwegianLightStemmer"/> to stem Norwegian
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class NorwegianLightStemFilter : TokenFilter
@@ -46,8 +46,8 @@ namespace Lucene.Net.Analysis.No
/// <summary>
/// Creates a new NorwegianLightStemFilter </summary>
- /// <param name="flags"> set to <seealso cref="NorwegianLightStemmer#BOKMAAL"/>,
- /// <seealso cref="NorwegianLightStemmer#NYNORSK"/>, or both. </param>
+ /// <param name="flags"> set to <see cref="NorwegianLightStemmer#BOKMAAL"/>,
+ /// <see cref="NorwegianLightStemmer#NYNORSK"/>, or both. </param>
public NorwegianLightStemFilter(TokenStream input, int flags) : base(input)
{
stemmer = new NorwegianLightStemmer(flags);
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilterFactory.cs
index ec3499f..cc28b03 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.No
*/
/// <summary>
- /// Factory for <seealso cref="NorwegianLightStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="NorwegianLightStemFilter"/>.
+ /// <code>
/// <fieldType name="text_svlgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.NorwegianLightStemFilterFactory" variant="nb"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class NorwegianLightStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemmer.cs
index ece0410..3a8a66e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianLightStemmer.cs
@@ -77,7 +77,7 @@ namespace Lucene.Net.Analysis.No
/// <summary>
/// Creates a new NorwegianLightStemmer </summary>
- /// <param name="flags"> set to <seealso cref="#BOKMAAL"/>, <seealso cref="#NYNORSK"/>, or both. </param>
+ /// <param name="flags"> set to <see cref="#BOKMAAL"/>, <see cref="#NYNORSK"/>, or both. </param>
public NorwegianLightStemmer(int flags)
{
if (flags <= 0 || flags > BOKMAAL + NYNORSK)
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
index 3e4605b..877fb59 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilter.cs
@@ -21,12 +21,12 @@ namespace Lucene.Net.Analysis.No
*/
/// <summary>
- /// A <seealso cref="TokenFilter"/> that applies <seealso cref="NorwegianMinimalStemmer"/> to stem Norwegian
+ /// A <see cref="TokenFilter"/> that applies <see cref="NorwegianMinimalStemmer"/> to stem Norwegian
/// words.
/// <para>
/// To prevent terms from being stemmed use an instance of
- /// <seealso cref="SetKeywordMarkerFilter"/> or a custom <seealso cref="TokenFilter"/> that sets
- /// the <seealso cref="KeywordAttribute"/> before this <seealso cref="TokenStream"/>.
+ /// <see cref="SetKeywordMarkerFilter"/> or a custom <see cref="TokenFilter"/> that sets
+ /// the <see cref="KeywordAttribute"/> before this <see cref="TokenStream"/>.
/// </para>
/// </summary>
public sealed class NorwegianMinimalStemFilter : TokenFilter
@@ -46,8 +46,8 @@ namespace Lucene.Net.Analysis.No
/// <summary>
/// Creates a new NorwegianLightStemFilter </summary>
- /// <param name="flags"> set to <seealso cref="NorwegianLightStemmer#BOKMAAL"/>,
- /// <seealso cref="NorwegianLightStemmer#NYNORSK"/>, or both. </param>
+ /// <param name="flags"> set to <see cref="NorwegianLightStemmer#BOKMAAL"/>,
+ /// <see cref="NorwegianLightStemmer#NYNORSK"/>, or both. </param>
public NorwegianMinimalStemFilter(TokenStream input, int flags)
: base(input)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilterFactory.cs
index 04e9f32..ee99e3d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemFilterFactory.cs
@@ -22,15 +22,15 @@ namespace Lucene.Net.Analysis.No
*/
/// <summary>
- /// Factory for <seealso cref="NorwegianMinimalStemFilter"/>.
- /// <pre class="prettyprint">
+ /// Factory for <see cref="NorwegianMinimalStemFilter"/>.
+ /// <code>
/// <fieldType name="text_svlgtstem" class="solr.TextField" positionIncrementGap="100">
/// <analyzer>
/// <tokenizer class="solr.StandardTokenizerFactory"/>
/// <filter class="solr.LowerCaseFilterFactory"/>
/// <filter class="solr.NorwegianMinimalStemFilterFactory" variant="nb"/>
/// </analyzer>
- /// </fieldType></pre>
+ /// </fieldType></code>
/// </summary>
public class NorwegianMinimalStemFilterFactory : TokenFilterFactory
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/31d8cbde/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemmer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemmer.cs b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemmer.cs
index 09afa6c..5724ef2 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/No/NorwegianMinimalStemmer.cs
@@ -68,8 +68,8 @@ namespace Lucene.Net.Analysis.No
/// <summary>
/// Creates a new NorwegianMinimalStemmer </summary>
- /// <param name="flags"> set to <seealso cref="NorwegianLightStemmer#BOKMAAL"/>,
- /// <seealso cref="NorwegianLightStemmer#NYNORSK"/>, or both. </param>
+ /// <param name="flags"> set to <see cref="NorwegianLightStemmer#BOKMAAL"/>,
+ /// <see cref="NorwegianLightStemmer#NYNORSK"/>, or both. </param>
public NorwegianMinimalStemmer(int flags)
{
if (flags <= 0 || flags > NorwegianLightStemmer.BOKMAAL + NorwegianLightStemmer.NYNORSK)